| column | dtype / stats |
|---|---|
| hexsha | stringlengths 40..40 |
| size | int64 24..287k |
| ext | stringclasses 2 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 7..126 |
| max_stars_repo_name | stringlengths 8..97 |
| max_stars_repo_head_hexsha | stringlengths 40..40 |
| max_stars_repo_licenses | sequence |
| max_stars_count | float64 1..15.9k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24..24 ⌀ |
| max_issues_repo_path | stringlengths 7..126 |
| max_issues_repo_name | stringlengths 8..97 |
| max_issues_repo_head_hexsha | stringlengths 40..40 |
| max_issues_repo_licenses | sequence |
| max_issues_count | float64 1..14.6k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24..24 ⌀ |
| max_forks_repo_path | stringlengths 7..126 |
| max_forks_repo_name | stringlengths 8..97 |
| max_forks_repo_head_hexsha | stringlengths 40..40 |
| max_forks_repo_licenses | sequence |
| max_forks_count | float64 1..8.43k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24..24 ⌀ |
| content | stringlengths 24..287k |
| avg_line_length | float64 12.3..530 |
| max_line_length | int64 24..10.2k |
| alphanum_fraction | float64 0.41..0.88 |
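The column statistics above describe one row per source file; rows can be filtered on these fields before further processing. A minimal sketch, assuming the dump has been exported to a local Parquet file (the file name, thresholds, and selection criteria below are illustrative, not taken from this dump):

```python
import pandas as pd

# Hypothetical local export of the rows shown below; the actual source file is not named here.
df = pd.read_parquet("code_rows.parquet")

# Keep modestly sized, text-like Python files using the documented columns.
mask = (
    (df["size"] < 100_000)
    & (df["alphanum_fraction"] > 0.5)
    & (df["max_line_length"] < 1_000)
)
for row in df.loc[mask].itertuples():
    print(row.max_stars_repo_name, row.max_stars_repo_path, len(row.content))
```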
hexsha: 4f5fb4a5bc17dcc886611a1ff2057ba73a35a0b9 | size: 323 | ext: py | lang: Python
max_stars_repo: tests/urls.py | nypublicradio/legacy-publisher-django-socialregistration | 181f75d152f553f77fa899dac895c4276108204f | ["MIT"] | stars: 63 | 2015-01-27T16:52:03.000Z .. 2021-08-29T04:23:51.000Z
max_issues_repo: tests/urls.py | nypublicradio/legacy-publisher-django-socialregistration | 181f75d152f553f77fa899dac895c4276108204f | ["MIT"] | issues: 3 | 2016-05-26T07:46:53.000Z .. 2022-02-16T15:25:16.000Z
max_forks_repo: tests/urls.py | nypublicradio/legacy-publisher-django-socialregistration | 181f75d152f553f77fa899dac895c4276108204f | ["MIT"] | forks: 23 | 2015-02-02T13:33:46.000Z .. 2020-10-25T20:02:53.000Z
content:

from socialregistration.compat.urls import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^social/', include('socialregistration.urls', namespace='socialregistration')),
url(r'^$', 'tests.app.views.index', name='index'),
)
avg_line_length: 26.916667 | max_line_length: 89 | alphanum_fraction: 0.705882
hexsha: 4f603724b3f981a265170e04e41f8f1e7b1cc2df | size: 7,780 | ext: py | lang: Python
max_stars_repo: tests/ifcc-test.py | Voltariuss/C-Compiler | f9b985b232dba0b5867cf3efea59221b9c4e37bc | ["MIT"] | stars: null | null .. null
max_issues_repo: tests/ifcc-test.py | Voltariuss/C-Compiler | f9b985b232dba0b5867cf3efea59221b9c4e37bc | ["MIT"] | issues: null | null .. null
max_forks_repo: tests/ifcc-test.py | Voltariuss/C-Compiler | f9b985b232dba0b5867cf3efea59221b9c4e37bc | ["MIT"] | forks: null | null .. null
content:

#!/usr/bin/env python3
# This script runs GCC as well as IFCC on each test-case provided and compares the results.
#
# input: the test-cases are specified either as individual
# command-line arguments, or as part of a directory tree
#
# output:
#
# The script is divided in three distinct steps:
# - in the ARGPARSE step, we understand the command-line arguments
# - in the PREPARE step, we copy all our test-cases into a single directory tree
# - in the TEST step, we actually run GCC and IFCC on each test-case
#
#
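# Example invocations (the paths below are illustrative only):
#   python3 ifcc-test.py tests/                     # use every *.c file found under tests/
#   python3 ifcc-test.py -v foo.c bar.c             # run two individual test-cases, verbosely
#   python3 ifcc-test.py -w ./my-wrapper.sh tests/  # invoke ifcc through a custom wrapper script
#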
import argparse
import glob
import os
import shutil
import sys
import subprocess
def command(string, logfile=None):
    """Execute `string` as a shell command, optionally logging stdout+stderr to a file. Return the exit status."""
if args.verbose:
print("ifcc-test.py: "+string)
try:
output=subprocess.check_output(string,stderr=subprocess.STDOUT,shell=True)
ret= 0
except subprocess.CalledProcessError as e:
ret=e.returncode
output = e.output
if logfile:
f=open(logfile,'w')
print(output.decode(sys.stdout.encoding)+'\n'+'return code: '+str(ret),file=f)
f.close()
return ret
def dumpfile(name):
print(open(name).read(),end='')
######################################################################################
## ARGPARSE step: make sense of our command-line arguments
argparser = argparse.ArgumentParser(
description = "Compile multiple programs with both GCC and IFCC, run them, and compare the results.",
epilog = ""
)
argparser.add_argument('input',metavar='PATH',nargs='+',help='For each path given:'
+' if it\'s a file, use this file;'
+' if it\'s a directory, use all *.c files in this subtree')
argparser.add_argument('-d','--debug',action="count",default=0,
help='Increase quantity of debugging messages (only useful to debug the test script itself)')
argparser.add_argument('-v','--verbose',action="count",default=0,
help='Increase verbosity level. You can use this option multiple times.')
argparser.add_argument('-w','--wrapper',metavar='PATH',
help='Invoke your compiler through the shell script at PATH. (default: `ifcc-wrapper.sh`)')
args=argparser.parse_args()
if args.debug >=2:
print('debug: command-line arguments '+str(args))
orig_cwd=os.getcwd()
if "ifcc-test-output" in orig_cwd:
print('error: cannot run from within the output directory')
exit(1)
if os.path.isdir('ifcc-test-output'):
# cleanup previous output directory
command('rm -rf ifcc-test-output')
os.mkdir('ifcc-test-output')
## Then we process the inputs arguments i.e. filenames or subtrees
inputfilenames=[]
for path in args.input:
path=os.path.normpath(path) # collapse redundant slashes etc.
if os.path.isfile(path):
if path[-2:] == '.c':
inputfilenames.append(path)
else:
print("error: incorrect filename suffix (should be '.c'): "+path)
exit(1)
elif os.path.isdir(path):
for dirpath,dirnames,filenames in os.walk(path):
inputfilenames+=[dirpath+'/'+name for name in filenames if name[-2:]=='.c']
else:
print("error: cannot read input path `"+path+"'")
sys.exit(1)
## debug: after treewalk
if args.debug:
print("debug: list of files after tree walk:"," ".join(inputfilenames))
## sanity check
if len(inputfilenames) == 0:
print("error: found no test-case in: "+" ".join(args.input))
sys.exit(1)
## Here we check that we can actually read the files. Our goal is to
## fail as early as possible when the CLI arguments are wrong.
for inputfilename in inputfilenames:
try:
f=open(inputfilename,"r")
f.close()
except Exception as e:
print("error: "+e.args[1]+": "+inputfilename)
sys.exit(1)
## Last but not least: we now locate the "wrapper script" that we will
## use to invoke ifcc
if args.wrapper:
wrapper=os.path.realpath(os.getcwd()+"/"+ args.wrapper)
else:
wrapper=os.path.dirname(os.path.realpath(__file__))+"/ifcc-wrapper.sh"
if not os.path.isfile(wrapper):
print("error: cannot find "+os.path.basename(wrapper)+" in directory: "+os.path.dirname(wrapper))
exit(1)
if args.debug:
print("debug: wrapper path: "+wrapper)
######################################################################################
## PREPARE step: copy all test-cases under ifcc-test-output
jobs=[]
for inputfilename in inputfilenames:
if args.debug>=2:
print("debug: PREPARING "+inputfilename)
if 'ifcc-test-output' in os.path.realpath(inputfilename):
print('error: input filename is within output directory: '+inputfilename)
exit(1)
## each test-case gets copied and processed in its own subdirectory:
## ../somedir/subdir/file.c becomes ./ifcc-test-output/somedir-subdir-file/input.c
subdir='ifcc-test-output/'+inputfilename.strip("./")[:-2].replace('/','-')
os.mkdir(subdir)
shutil.copyfile(inputfilename, subdir+'/input.c')
jobs.append(subdir)
## eliminate duplicate paths from the 'jobs' list
unique_jobs=[]
for j in jobs:
for d in unique_jobs:
if os.path.samefile(j,d):
break # and skip the 'else' branch
else:
unique_jobs.append(j)
jobs=sorted(unique_jobs)
# debug: after deduplication
if args.debug:
print("debug: list of test-cases after deduplication:"," ".join(jobs))
######################################################################################
## TEST step: actually compile all test-cases with both compilers
for jobname in jobs:
os.chdir(orig_cwd)
print('TEST-CASE: '+jobname)
os.chdir(jobname)
## Reference compiler = GCC
gccstatus=command("gcc -S -o asm-gcc.s input.c", "gcc-compile.txt")
if gccstatus == 0:
# test-case is a valid program. we should run it
gccstatus=command("gcc -o exe-gcc asm-gcc.s", "gcc-link.txt")
if gccstatus == 0: # then both compile and link stage went well
exegccstatus=command("./exe-gcc", "gcc-execute.txt")
if args.verbose >=2:
dumpfile("gcc-execute.txt")
    print("IFCC")
## IFCC compiler
ifccstatus=command(wrapper+" asm-ifcc.s input.c", "ifcc-compile.txt")
if gccstatus != 0 and ifccstatus != 0:
## ifcc correctly rejects invalid program -> test-case ok
print("TEST OK")
continue
elif gccstatus != 0 and ifccstatus == 0:
## ifcc wrongly accepts invalid program -> error
print("TEST FAIL (your compiler accepts an invalid program)")
continue
elif gccstatus == 0 and ifccstatus != 0:
## ifcc wrongly rejects valid program -> error
print("TEST FAIL (your compiler rejects a valid program)")
if args.verbose:
dumpfile("ifcc-compile.txt")
continue
else:
## ifcc accepts to compile valid program -> let's link it
ldstatus=command("gcc -o exe-ifcc asm-ifcc.s", "ifcc-link.txt")
if ldstatus:
print("TEST FAIL (your compiler produces incorrect assembly)")
if args.verbose:
dumpfile("ifcc-link.txt")
continue
## both compilers did produce an executable, so now we run both
## these executables and compare the results.
command("./exe-ifcc","ifcc-execute.txt")
if open("gcc-execute.txt").read() != open("ifcc-execute.txt").read() :
print("TEST FAIL (different results at execution)")
if args.verbose:
print("GCC:")
dumpfile("gcc-execute.txt")
print("you:")
dumpfile("ifcc-execute.txt")
continue
## last but not least
print("TEST OK")
avg_line_length: 34.887892 | max_line_length: 116 | alphanum_fraction: 0.626221
hexsha: 4f5c74833347b4043dcc3c81d48cb93cf86f3128 | size: 23,645 | ext: py | lang: Python
max_stars_repo: test/drivers/test_keysight_33500b.py | John2202W/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | ["MIT"] | stars: 15 | 2018-10-05T04:55:18.000Z .. 2022-03-10T08:08:20.000Z
max_issues_repo: test/drivers/test_keysight_33500b.py | John2202W/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | ["MIT"] | issues: 86 | 2018-09-26T02:33:11.000Z .. 2022-01-10T06:12:17.000Z
max_forks_repo: test/drivers/test_keysight_33500b.py | John2202W/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | ["MIT"] | forks: 12 | 2018-10-09T01:32:11.000Z .. 2022-03-22T01:19:09.000Z
content:

import unittest
from fixate.drivers.funcgen.keysight_33500b import Keysight33500B
from fixate.core.exceptions import *
import fixate.drivers.funcgen
def get_funcgen():
return fixate.drivers.funcgen.open()
class BaseSetup:
def setUp(self):
import visa
rm = visa.ResourceManager()
resource = rm.open_resource("USB0::2391::9991::MY52303676::0::INSTR")
self.testcls = Keysight33500B(instrument=resource)
self.testcls.reset()
def tearDown(self):
self.testcls.reset()
@unittest.skip("Requires instrument connected to run")
class Waveforms(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_sin(self):
self.testcls.channel1.waveform.square()
self.testcls.channel1.waveform.sin()
self.assertIn("SIN", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_square(self):
self.testcls.channel1.waveform.square()
self.assertIn("SQU", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_ramp(self):
self.testcls.channel1.waveform.ramp()
self.assertIn("RAMP", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_pulse(self):
self.testcls.channel1.waveform.pulse()
self.assertIn("PULS", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_arb(self):
self.testcls.channel1.waveform.arb()
self.assertIn("ARB", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_triangle(self):
self.testcls.channel1.waveform.triangle()
self.assertIn("TRI", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_noise(self):
self.testcls.channel1.waveform.noise()
self.assertIn("NOIS", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_dc(self):
self.testcls.channel1.waveform.dc()
self.assertIn("DC", self.testcls.instrument.query("SOUR1:FUNC?"))
def test_prbs(self):
self.testcls.channel1.waveform.prbs()
self.assertIn("PRBS", self.testcls.instrument.query("SOUR1:FUNC?"))
@unittest.skip("Requires instrument connected to run")
class ChannelConfig(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_vrms(self):
self.testcls.channel1.load(50)
self.testcls.channel1.vrms(2)
# Units
self.assertIn("VRMS", self.testcls.instrument.query("SOUR1:VOLT:UNIT?"))
# Nominal Level
self.assertAlmostEqual(2.0, float(self.testcls.instrument.query("SOUR1:VOLT?")))
# Upper Limits
self.testcls.channel1.vrms(3.5)
self.assertAlmostEqual(3.5, float(self.testcls.instrument.query("SOUR1:VOLT?")))
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.vrms,
3.6,
)
# Lower Limits
self.testcls.channel1.vrms(354e-6)
self.assertAlmostEqual(
354e-6, float(self.testcls.instrument.query("SOUR1:VOLT?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.vrms,
353e-6,
)
def test_vpp(self):
# Units
self.assertIn("VPP", self.testcls.instrument.query("SOUR1:VOLT:UNIT?"))
self.testcls.channel1.load(50)
# Nominal Level
self.testcls.channel1.vpp(2.1)
self.assertAlmostEqual(2.1, float(self.testcls.instrument.query("SOUR1:VOLT?")))
# Upper Limits
self.testcls.channel1.vpp(10)
self.assertAlmostEqual(10, float(self.testcls.instrument.query("SOUR1:VOLT?")))
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.vpp,
11,
)
# Lower Limits
self.testcls.channel1.vpp(0.001)
self.assertAlmostEqual(
0.001, float(self.testcls.instrument.query("SOUR1:VOLT?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.vpp,
0.0001,
)
def test_dbm(self):
self.testcls.channel1.waveform.sin()
self.assertIn("SIN", self.testcls.instrument.query("SOUR1:FUNC?"))
self.testcls.channel1.load(50)
# Nominal Level
self.testcls.channel1.dbm(2)
# Units
self.assertIn("DBM", self.testcls.instrument.query("SOUR1:VOLT:UNIT?"))
self.assertAlmostEqual(2.0, float(self.testcls.instrument.query("SOUR1:VOLT?")))
# Upper Limits
self.testcls.channel1.dbm(23.97)
self.assertAlmostEqual(
23.97, float(self.testcls.instrument.query("SOUR1:VOLT?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.dbm,
23.98,
)
# Lower Limits
self.testcls.channel1.dbm(-56)
self.assertAlmostEqual(-56, float(self.testcls.instrument.query("SOUR1:VOLT?")))
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.dbm,
-60,
)
def test_frequency(self):
self.testcls.channel1.frequency(5000)
# Nominal Level
self.assertAlmostEqual(
5000, float(self.testcls.instrument.query("SOUR1:FREQ?"))
)
# Upper Limits
self.testcls.channel1.frequency(20e6)
self.assertAlmostEqual(
20e6, float(self.testcls.instrument.query("SOUR1:FREQ?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.frequency,
30e6,
)
# Lower Limits
self.testcls.channel1.frequency(0.000001)
self.assertAlmostEqual(
0.000001, float(self.testcls.instrument.query("SOUR1:FREQ?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.frequency,
1e-7,
)
def test_phase(self):
self.testcls.channel1.phase(30)
# Nominal Level
self.assertAlmostEqual(30, float(self.testcls.instrument.query("SOUR1:PHAS?")))
# Upper Limits
self.testcls.channel1.phase(360)
self.assertAlmostEqual(360, float(self.testcls.instrument.query("SOUR1:PHAS?")))
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.phase,
362,
)
# Lower Limits
self.testcls.channel1.phase(-360)
self.assertAlmostEqual(
-360, float(self.testcls.instrument.query("SOUR1:PHAS?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.phase,
-363,
)
def test_offset(self):
self.testcls.channel1.waveform.dc()
self.testcls.channel1.load(50)
self.assertIn("DC", self.testcls.instrument.query("SOUR1:FUNC?"))
# self.testcls.channel1.waveform.square()
# self.assertIn("SQU", self.testcls.instrument.query("SOUR1:FUNC?")) #For Rigol
# self.testcls.channel1.vpp(1e-3)
# self.assertIn("VPP", self.testcls.instrument.query("SOUR1:VOLT:UNIT?"))
# self.assertAlmostEqual(1e-3, float(self.testcls.instrument.query("SOUR1:VOLT?")))
self.testcls.channel1.offset(100e-3)
self.assertAlmostEqual(
100e-3, float(self.testcls.instrument.query("SOUR1:VOLT:OFFS?"))
)
# Upper Limits
self.testcls.channel1.offset(5)
self.assertAlmostEqual(
5, float(self.testcls.instrument.query("SOUR1:VOLT:OFFS?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.offset,
5.01,
)
# Lower Limits
self.testcls.channel1.offset(-5)
self.assertAlmostEqual(
-5, float(self.testcls.instrument.query("SOUR1:VOLT:OFFS?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.offset,
-5.01,
)
def test_duty(self):
# Check Ordering
self.testcls.channel1.waveform.square()
self.testcls.channel1.duty(40)
self.assertAlmostEqual(
40, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.assertIn("SQU", self.testcls.instrument.query("SOUR1:FUNC?"))
self.testcls.channel1.waveform.pulse()
self.assertAlmostEqual(
40, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.assertIn("PULS", self.testcls.instrument.query("SOUR1:FUNC?"))
self.testcls.channel1.duty(60)
self.assertAlmostEqual(
60, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.assertIn("PULS", self.testcls.instrument.query("SOUR1:FUNC?"))
self.testcls.channel1.waveform.square()
self.assertAlmostEqual(
60, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.assertIn("SQU", self.testcls.instrument.query("SOUR1:FUNC?"))
# Upper Limits
self.testcls.channel1.waveform.square()
self.testcls.channel1.duty(99)
self.assertAlmostEqual(
99, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.duty,
100,
)
self.testcls.channel1.duty(50)
self.testcls.channel1.waveform.pulse()
self.testcls.channel1.duty(99)
self.assertAlmostEqual(
99, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to upper limit",
self.testcls.channel1.duty,
100,
)
# Lower Limits
self.testcls.channel1.duty(50)
self.testcls.channel1.waveform.square()
self.testcls.channel1.duty(1)
self.assertAlmostEqual(
1, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.duty,
-1,
)
self.testcls.channel1.duty(50)
self.testcls.channel1.waveform.pulse()
self.testcls.channel1.duty(1)
self.assertAlmostEqual(
1, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.duty,
-1,
)
self.testcls.channel1.duty(1)
self.assertAlmostEqual(
1, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.testcls.channel1.waveform.square()
self.assertAlmostEqual(
1, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.duty,
-1,
)
self.testcls.channel1.duty(0.01)
self.assertAlmostEqual(
0.01, float(self.testcls.instrument.query("SOUR1:FUNC:SQU:DCYC?"))
)
self.testcls.channel1.waveform.pulse()
self.assertAlmostEqual(
0.01, float(self.testcls.instrument.query("SOUR1:FUNC:PULS:DCYC?"))
)
self.assertRaisesRegex(
InstrumentError,
"value clipped to lower limit",
self.testcls.channel1.duty,
-1,
)
@unittest.skip("Requires instrument connected to run")
class Burst(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_burst_state(self):
self.testcls.channel1.burst("1")
self.assertIn("1", self.testcls.instrument.query("SOUR1:BURS:STAT?"))
def test_gated(self):
self.testcls.channel1.burst.gated()
self.assertIn("GAT", self.testcls.instrument.query("SOUR1:BURS:MODE?"))
def test_ncycle(self):
self.testcls.channel1.burst.ncycle()
self.assertIn("TRIG", self.testcls.instrument.query("SOUR1:BURS:MODE?"))
def test_ncycle_cycles(self):
self.testcls.channel1.burst.ncycle.cycles(3)
self.assertAlmostEqual(
3, float(self.testcls.instrument.query("SOUR1:BURS:NCYC?"))
)
def test_cycles_infinite(self):
self.testcls.channel1.burst.ncycle.cycles.infinite()
self.assertAlmostEqual(
9.9e37, float(self.testcls.instrument.query("SOUR1:BURS:NCYC?"))
)
def test_period(self):
self.testcls.channel1.burst.ncycle.burst_period(100)
self.assertAlmostEqual(
100, float(self.testcls.instrument.query("SOUR1:BURS:INT:PER?"))
)
def test_gated_positive(self):
self.testcls.channel1.burst.gated.positive()
self.assertIn("NORM", self.testcls.instrument.query("SOUR1:BURS:GATE:POL?"))
def test_gated_negative(self):
self.testcls.channel1.burst.gated.negative()
self.assertIn("INV", self.testcls.instrument.query("SOUR1:BURS:GATE:POL?"))
def test_phase(self):
self.testcls.channel1.burst.phase(30)
self.assertAlmostEqual(
30, float(self.testcls.instrument.query("SOUR1:BURS:PHAS?"))
)
@unittest.skip("Requires instrument connected to run")
class Modulate_Options(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_am_depth(self):
self.testcls.channel1.modulate.am.depth(100)
self.assertAlmostEqual(
100, float(self.testcls.instrument.query("SOUR1:AM:DEPT?"))
)
def test_am_dssc(self):
self.testcls.channel1.modulate.am.dssc()
self.assertIn("1", self.testcls.instrument.query("SOUR1:AM:DSSC?"))
def test_fm_freq_dev(self):
self.testcls.channel1.modulate.fm.freq_dev(100e3)
self.assertAlmostEqual(
100e3, float(self.testcls.instrument.query("SOUR1:FM:DEV?"))
)
def test_pm_phase_dev(self):
self.testcls.channel1.modulate.pm.phase_dev(100)
self.assertAlmostEqual(
100, float(self.testcls.instrument.query("SOUR1:PM:DEV?"))
)
def test_fsk_hop_freq(self):
self.testcls.channel1.modulate.fsk.hop_freq(100)
self.assertAlmostEqual(
100, float(self.testcls.instrument.query("SOUR1:FSK:FREQ?"))
)
def test_rate(self):
self.testcls.channel1.modulate.fsk.rate(100)
self.assertAlmostEqual(
100, float(self.testcls.instrument.query("SOUR1:FSK:INT:RATE?"))
)
def test_modulate_percent(self):
self.testcls.channel1.modulate.sum()
self.assertIn("0", self.testcls.instrument.query("SOUR1:SUM:STAT?"))
self.testcls.channel1.modulate.sum.modulate_percent(50)
self.assertAlmostEqual(
50, float(self.testcls.instrument.query("SOUR1:SUM:AMPL?"))
)
@unittest.skip("Requires instrument connected to run")
class Modulate(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_am(self):
self.testcls.channel1.modulate.am()
self.assertIn("0", self.testcls.instrument.query("SOUR1:AM:STAT?"))
def test_fm(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
def test_pm(self):
self.testcls.channel1.modulate.pm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:PM:STAT?"))
def test_fsk(self):
self.testcls.channel1.modulate.fsk()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FSK:STAT?"))
def test_bpsk(self):
self.testcls.channel1.modulate.bpsk()
self.assertIn("0", self.testcls.instrument.query("SOUR1:BPSK:STAT?"))
def test_sum(self):
self.testcls.channel1.modulate.sum()
self.assertIn("0", self.testcls.instrument.query("SOUR1:SUM:STAT?"))
@unittest.skip("Requires instrument connected to run")
class Trigger(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_immediate(self):
self.testcls.trigger.immediate()
self.assertIn("IMM", self.testcls.instrument.query("TRIG1:SOUR?"))
def test_external(self):
self.testcls.trigger.external()
self.assertIn("EXT", self.testcls.instrument.query("TRIG1:SOUR?"))
def test_external_rising(self):
self.testcls.trigger.external()
self.assertIn("EXT", self.testcls.instrument.query("TRIG1:SOUR?"))
self.testcls.trigger.external.rising()
self.assertIn("POS", self.testcls.instrument.query("TRIG1:SLOP?"))
def test_external_falling(self):
self.testcls.trigger.external()
self.assertIn("EXT", self.testcls.instrument.query("TRIG1:SOUR?"))
self.testcls.trigger.external.falling()
self.assertIn("NEG", self.testcls.instrument.query("TRIG1:SLOP?"))
def test_manual(self):
self.testcls.trigger.manual()
self.assertIn("BUS", self.testcls.instrument.query("TRIG1:SOUR?"))
def test_initiate(self):
self.testcls.trigger.manual.initiate()
self.assertIn("BUS", self.testcls.instrument.query("TRIG1:SOUR?"))
def test_timer(self):
self.testcls.trigger.timer(10)
self.assertIn("TIM", self.testcls.instrument.query("TRIG1:SOUR?"))
self.assertAlmostEqual(10, float(self.testcls.instrument.query("TRIG1:TIM?")))
def test_delay(self):
self.testcls.trigger.delay(10)
self.assertAlmostEqual(10, float(self.testcls.instrument.query("TRIG1:DEL?")))
def test_out_off(self):
self.testcls.trigger.out.off()
self.assertIn("0", self.testcls.instrument.query("OUTP:TRIG?"))
def test_out_rising(self):
self.testcls.trigger.out.rising()
self.assertIn("POS", self.testcls.instrument.query("OUTP:TRIG:SLOP?"))
def test_out_falling(self):
self.testcls.trigger.out.falling()
self.assertIn("NEG", self.testcls.instrument.query("OUTP:TRIG:SLOP?"))
@unittest.skip("Requires instrument connected to run")
class Modulate_Source(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_internal(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal()
self.assertIn("INT", self.testcls.instrument.query("SOUR1:FM:SOUR?"))
def test_external(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.external()
self.assertIn("EXT", self.testcls.instrument.query("SOUR1:FM:SOUR?"))
@unittest.skip("Requires instrument connected to run")
class Channel_Activation(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_channel1(self):
self.testcls.channel1(True)
self.assertIn("1", self.testcls.instrument.query("OUTP1?"))
@unittest.skip("Requires instrument connected to run")
class Modulate_Shape(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_mod_sin(self):
self.testcls.channel1.modulate.fm()
self.testcls.channel1.modulate(True)
self.assertIn("1", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.sin()
self.assertIn("SIN", self.testcls.instrument.query("FM:INT:FUNC?"))
def test_mod_square(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.square()
self.assertIn("SQU", self.testcls.instrument.query("FM:INT:FUNC?"))
def test_mod_triangle(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.triangle()
self.assertIn("TRI", self.testcls.instrument.query("FM:INT:FUNC?"))
def test_mod_up_ramp(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.up_ramp()
self.assertIn("RAMP", self.testcls.instrument.query("FM:INT:FUNC?"))
def test_mod_down_ramp(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.down_ramp()
self.assertIn("NRAM", self.testcls.instrument.query("FM:INT:FUNC?"))
def test_mod_noise(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.noise()
self.assertIn("NOIS", self.testcls.instrument.query("FM:INT:FUNC?"))
@unittest.skip("Requires instrument connected to run")
class Modulate_Activation(unittest.TestCase):
def setUp(self):
import visa
rm = visa.ResourceManager()
resource = rm.open_resource("USB0::2391::9991::MY52303676::0::INSTR")
self.testcls = Keysight33500B(instrument=resource)
self.testcls.reset()
def test_mod_activation(self):
self.testcls.channel1.modulate.fm()
self.assertIn("0", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal()
self.assertIn("INT", self.testcls.instrument.query("SOUR1:FM:SOUR?"))
self.testcls.channel1.modulate(True)
self.assertIn("1", self.testcls.instrument.query("SOUR1:FM:STAT?"))
@unittest.skip("Requires instrument connected to run")
class Modulate_Frequency(unittest.TestCase):
def setUp(self):
self.testcls = get_funcgen()
self.testcls.reset()
def test_mod_freq(self):
self.testcls.channel1.modulate.fm()
self.testcls.channel1.modulate(True)
self.assertIn("1", self.testcls.instrument.query("SOUR1:FM:STAT?"))
self.testcls.channel1.modulate.source.internal.shape.sin()
self.assertIn("SIN", self.testcls.instrument.query("SOUR1:FM:INT:FUNC?"))
# self.testcls.channel1.modulate.source.internal.shape.square()
self.testcls.channel1.modulate.source.internal.frequency(100e3)
self.assertAlmostEqual(
100e3, float(self.testcls.instrument.query("SOUR1:FM:INT:FREQ?"))
)
avg_line_length: 35.23845 | max_line_length: 96 | alphanum_fraction: 0.632607
hexsha: 4f60b36d9000db668df6a0f55880a3d80a5a32ee | size: 301 | ext: py | lang: Python
max_stars_repo: third_party/lowtran-2.4.1/setup.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | ["Apache-2.0"] | stars: 1 | 2022-01-14T17:22:05.000Z .. 2022-01-14T17:22:05.000Z
max_issues_repo: third_party/lowtran-2.4.1/setup.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | ["Apache-2.0"] | issues: 4 | 2021-06-20T13:45:54.000Z .. 2022-02-05T05:55:43.000Z
max_forks_repo: third_party/lowtran-2.4.1/setup.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | ["Apache-2.0"] | forks: 1 | 2022-03-08T19:06:38.000Z .. 2022-03-08T19:06:38.000Z
content:

#!/usr/bin/env python
import setuptools # noqa: F401
from numpy.distutils.core import setup, Extension
ext = [Extension(name='lowtran7',
sources=['src/lowtran7.f'],
f2py_options=['--quiet'],
extra_f77_compile_args=['-w'])]
setup(ext_modules=ext)
avg_line_length: 25.083333 | max_line_length: 49 | alphanum_fraction: 0.607973
hexsha: 4f5b854e6f6825659048f46c922e24134bc73613 | size: 4,191 | ext: py | lang: Python
max_stars_repo: tensorflow/python/kernel_tests/draw_bounding_box_op_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | stars: 522 | 2016-06-08T02:15:50.000Z .. 2022-03-02T05:30:36.000Z
max_issues_repo: tensorflow/python/kernel_tests/draw_bounding_box_op_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | issues: 133 | 2017-04-26T16:49:49.000Z .. 2019-10-15T11:39:26.000Z
max_forks_repo: tensorflow/python/kernel_tests/draw_bounding_box_op_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | forks: 108 | 2016-06-16T15:34:05.000Z .. 2022-03-12T13:23:11.000Z
content:

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for draw_bounding_box_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DrawBoundingBoxOpTest(test.TestCase):
def _fillBorder(self, image, color):
"""Fill the border of the image.
Args:
image: Numpy array of shape [height, width, depth].
      color: Numpy color of shape [depth] with either RGB or RGBA contents.
Returns:
image of original shape with border filled with "color".
Raises:
      ValueError: Depths of image and color don't match.
"""
height, width, depth = image.shape
if depth != color.shape[0]:
raise ValueError("Image (%d) and color (%d) depths must match." %
(depth, color.shape[0]))
image[0:height, 0, 0:depth] = color
image[0:height, width - 1, 0:depth] = color
image[0, 0:width, 0:depth] = color
image[height - 1, 0:width, 0:depth] = color
return image
def _testDrawBoundingBoxColorCycling(self, img):
"""Tests if cycling works appropriately.
Args:
img: 3-D numpy image on which to draw.
"""
# THIS TABLE MUST MATCH draw_bounding_box_op.cc
color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1],
[0, 1, 0, 1], [0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1],
[0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1],
[1, 0, 1, 1]])
assert len(img.shape) == 3
depth = img.shape[2]
assert depth <= color_table.shape[1]
assert depth == 1 or depth == 3 or depth == 4
## Set red channel to 1 if image is GRY.
if depth == 1:
color_table[:, 0] = 1
num_colors = color_table.shape[0]
for num_boxes in range(1, num_colors + 2):
# Generate draw_bounding_box_op drawn image
image = np.copy(img)
color = color_table[(num_boxes - 1) % num_colors, 0:depth]
test_drawn_image = self._fillBorder(image, color)
bboxes = np.asarray([0, 0, 1, 1])
bboxes = np.vstack([bboxes for _ in range(num_boxes)])
bboxes = math_ops.to_float(bboxes)
bboxes = array_ops.expand_dims(bboxes, 0)
image = ops.convert_to_tensor(image)
image = image_ops_impl.convert_image_dtype(image, dtypes.float32)
image = array_ops.expand_dims(image, 0)
image = image_ops.draw_bounding_boxes(image, bboxes)
with self.test_session(use_gpu=False) as sess:
op_drawn_image = np.squeeze(sess.run(image), 0)
self.assertAllEqual(test_drawn_image, op_drawn_image)
def testDrawBoundingBoxRGBColorCycling(self):
"""Test if RGB color cycling works correctly."""
image = np.zeros([10, 10, 3], "float32")
self._testDrawBoundingBoxColorCycling(image)
def testDrawBoundingBoxRGBAColorCycling(self):
"""Test if RGBA color cycling works correctly."""
image = np.zeros([10, 10, 4], "float32")
self._testDrawBoundingBoxColorCycling(image)
def testDrawBoundingBoxGRY(self):
"""Test if drawing bounding box on a GRY image works."""
image = np.zeros([4, 4, 1], "float32")
self._testDrawBoundingBoxColorCycling(image)
if __name__ == "__main__":
test.main()
avg_line_length: 37.756757 | max_line_length: 80 | alphanum_fraction: 0.660224
hexsha: 4f5e1081f7465470a1ddf7566491370f3e3d8063 | size: 4,322 | ext: py | lang: Python
max_stars_repo: src/clients/SerialEcho.py | rgaensler/gcode | c6a6b617a04490dedefb2bae7b596a2e12ab4ab1 | ["MIT"] | stars: null | null .. null
max_issues_repo: src/clients/SerialEcho.py | rgaensler/gcode | c6a6b617a04490dedefb2bae7b596a2e12ab4ab1 | ["MIT"] | issues: 314 | 2020-02-26T12:37:17.000Z .. 2021-08-02T00:32:32.000Z
max_forks_repo: src/clients/SerialEcho.py | rgaensler/gcode | c6a6b617a04490dedefb2bae7b596a2e12ab4ab1 | ["MIT"] | forks: 2 | 2020-11-12T16:07:48.000Z .. 2020-11-16T09:14:48.000Z
content:

from threading import RLock
from time import sleep
from typing import Optional
from serial import SerialException
from src.clients.ComClient import ComClient
class SerialEcho(ComClient):
"""
Test class to fake responses from a serial device
"""
def __init__(self, port: str):
"""
Create an echoing COM-client.
:param port: String of the port name to connect to
"""
super().__init__(port=port)
def hook_handle_msg(self, msg: str) -> str:
pass
def hook_post_successful_connect(self) -> None:
"""
Override this method to be blank.
:return:
"""
return
def hook_pre_send(self, msg: str) -> str:
"""
Override this method to be blank.
:param msg:
:return:
"""
return msg
def hook_thread_name(self) -> Optional[str]:
return f'Serial Echo ({self._ser.port})'
def mainloop(self) -> None:
"""
Run the echo client.
:return: None
"""
buffer = bytes()
term = b'\n'
while self.alive.isSet():
if self._ser.in_waiting:
try:
# Attempt to read new bits
buffer += self._ser.read_all()
except SerialException as e:
print(e)
else:
# Search for newline terminator
idx = buffer.find(term)
if idx > -1:
incoming_msg = buffer[:idx + 1 + len(term)].decode(encoding=self.read_encoding)
buffer = buffer[idx + 2:]
outgoing_msg = self.resolve_msg(incoming_msg)
self.serial_send(outgoing_msg)
while self.send_q.unfinished_tasks > 0:
self.send_q.task_done()
@staticmethod
def resolve_msg(msg: str) -> str:
"""
        Map each incoming message to an outgoing message. Here they are identical but this can be overridden.
:param msg: Incoming message string
:return: Outgoing message string (identical)
"""
return msg
class ConfigurableEcho(SerialEcho):
"""
Responding COM-Client with configurable response.
"""
def __init__(self, port: str):
"""
        Create a COM-client with configurable echo.
:param port: String of the port name to connect to
"""
super().__init__(port)
self._prefix = None
self._postfix = None
self._replace_msg = None
self._delay = 0
self.lock = RLock()
def reconfigure(self, pre: Optional[str] = None, post: Optional[str] = None, msg: Optional[str] = None,
dly: float = 0) -> None:
"""
Adjust the calculation of the server response.
:param pre: String to be inserted before each actual message
        :param post: String to be appended after each actual message
:param msg: String to replace the actual message with
:param dly: Float representing the time to wait before responding in seconds
:return: None
"""
# Ensure that the parameters are not read while setting new values
with self.lock:
# Set the new parameters
self._prefix = pre
self._postfix = post
self._replace_msg = msg
self._delay = dly
def resolve_msg(self, msg: str) -> str:
"""
Map each incoming message to an outgoing message. Manipulators defined by reconfigure are applied one by one.
:param msg: Incoming message string
:return: Outgoing message string
"""
# Ensure that the parameters are not changed in the mean time
with self.lock:
# Do the manipulations
if self._replace_msg is not None:
msg = self._replace_msg
if self._prefix is not None:
msg = self._prefix + msg
if self._postfix is not None:
msg = msg + self._postfix
# Delay the return of the message to simulate processing time at the responding end
sleep(self._delay)
return msg
if __name__ == '__main__':
with ConfigurableEcho(port='COM5'):
pass
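    # Minimal usage sketch (illustrative only): it assumes a peer device sits on the other end
    # of 'COM5' and that the ComClient base class handles opening and closing the port, as above.
    #     with ConfigurableEcho(port='COM5') as echo:
    #         echo.reconfigure(pre='ACK ', dly=0.1)  # prefix every reply with "ACK ", respond after 100 ms
    #         ...                                    # exercise the device under test against the echo here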
avg_line_length: 31.318841 | max_line_length: 117 | alphanum_fraction: 0.56571
hexsha: 4f61c3a88eeba3c91a2e2488f8926a72882b2cc9 | size: 24,584 | ext: py | lang: Python
max_stars_repo: cirq/protocols/json_serialization_test.py | Mu-L/Cirq | 680ae219fc53b4ecfdd1c948f5bb5658949ecead | ["Apache-2.0"] | stars: null | null .. null
max_issues_repo: cirq/protocols/json_serialization_test.py | Mu-L/Cirq | 680ae219fc53b4ecfdd1c948f5bb5658949ecead | ["Apache-2.0"] | issues: null | null .. null
max_forks_repo: cirq/protocols/json_serialization_test.py | Mu-L/Cirq | 680ae219fc53b4ecfdd1c948f5bb5658949ecead | ["Apache-2.0"] | forks: null | null .. null
content:

# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import inspect
import io
import json
import os
import pathlib
import textwrap
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Type
import pytest
import numpy as np
import pandas as pd
import sympy
import cirq
from cirq._compat import proper_repr, proper_eq
from cirq.protocols import json_serialization
from cirq.testing import assert_json_roundtrip_works
TEST_DATA_PATH = pathlib.Path(__file__).parent / 'json_test_data'
TEST_DATA_REL = 'cirq/protocols/json_test_data'
def test_line_qubit_roundtrip():
q1 = cirq.LineQubit(12)
assert_json_roundtrip_works(
q1,
text_should_be="""{
"cirq_type": "LineQubit",
"x": 12
}""",
)
def test_gridqubit_roundtrip():
q = cirq.GridQubit(15, 18)
assert_json_roundtrip_works(
q,
text_should_be="""{
"cirq_type": "GridQubit",
"row": 15,
"col": 18
}""",
)
def test_op_roundtrip():
q = cirq.LineQubit(5)
op1 = cirq.rx(0.123).on(q)
assert_json_roundtrip_works(
op1,
text_should_be="""{
"cirq_type": "GateOperation",
"gate": {
"cirq_type": "XPowGate",
"exponent": 0.03915211600060625,
"global_shift": -0.5
},
"qubits": [
{
"cirq_type": "LineQubit",
"x": 5
}
]
}""",
)
def test_op_roundtrip_filename(tmpdir):
filename = f'{tmpdir}/op.json'
q = cirq.LineQubit(5)
op1 = cirq.rx(0.123).on(q)
cirq.to_json(op1, filename)
assert os.path.exists(filename)
op2 = cirq.read_json(filename)
assert op1 == op2
def test_fail_to_resolve():
buffer = io.StringIO()
buffer.write(
"""
{
"cirq_type": "MyCustomClass",
"data": [1, 2, 3]
}
"""
)
buffer.seek(0)
with pytest.raises(ValueError) as e:
cirq.read_json(buffer)
assert e.match("Could not resolve type 'MyCustomClass' during deserialization")
QUBITS = cirq.LineQubit.range(5)
Q0, Q1, Q2, Q3, Q4 = QUBITS
# TODO: Include cirq.rx in the Circuit test case file.
# Github issue: https://github.com/quantumlib/Cirq/issues/2014
# Note that even the following doesn't work because theta gets
# multiplied by 1/pi:
# cirq.Circuit(cirq.rx(sympy.Symbol('theta')).on(Q0)),
SHOULDNT_BE_SERIALIZED = [
# Intermediate states with work buffers and unknown external prng guts.
'ActOnCliffordTableauArgs',
'ActOnStabilizerCHFormArgs',
'ActOnStateVectorArgs',
'ApplyChannelArgs',
'ApplyMixtureArgs',
'ApplyUnitaryArgs',
# Circuit optimizers are function-like. Only attributes
# are ignore_failures, tolerance, and other feature flags
'AlignLeft',
'AlignRight',
'ConvertToCzAndSingleGates',
'ConvertToIonGates',
'ConvertToNeutralAtomGates',
'ConvertToSqrtIswapGates',
'ConvertToSycamoreGates',
'ConvertToXmonGates',
'DropEmptyMoments',
'DropNegligible',
'EjectPhasedPaulis',
'EjectZ',
'ExpandComposite',
'MergeInteractions',
'MergeSingleQubitGates',
'PointOptimizer',
'SynchronizeTerminalMeasurements',
# global objects
'CONTROL_TAG',
'PAULI_BASIS',
'PAULI_STATES',
# abstract, but not inspect.isabstract():
'Device',
'InterchangeableQubitsGate',
'Pauli',
'SingleQubitGate',
'ThreeQubitGate',
'TwoQubitGate',
'ABCMetaImplementAnyOneOf',
# protocols:
'SupportsActOn',
'SupportsApplyChannel',
'SupportsApplyMixture',
'SupportsApproximateEquality',
'SupportsChannel',
'SupportsCircuitDiagramInfo',
'SupportsCommutes',
'SupportsConsistentApplyUnitary',
'SupportsDecompose',
'SupportsDecomposeWithQubits',
'SupportsEqualUpToGlobalPhase',
'SupportsExplicitHasUnitary',
'SupportsExplicitNumQubits',
'SupportsExplicitQidShape',
'SupportsJSON',
'SupportsMeasurementKey',
'SupportsMixture',
'SupportsParameterization',
'SupportsPauliExpansion',
'SupportsPhase',
'SupportsQasm',
'SupportsQasmWithArgs',
'SupportsQasmWithArgsAndQubits',
'SupportsTraceDistanceBound',
'SupportsUnitary',
# mypy types:
'CIRCUIT_LIKE',
'DURATION_LIKE',
'JsonResolver',
'NOISE_MODEL_LIKE',
'OP_TREE',
'PAULI_GATE_LIKE',
'PAULI_STRING_LIKE',
'ParamResolverOrSimilarType',
'PauliSumLike',
'QubitOrderOrList',
'RANDOM_STATE_OR_SEED_LIKE',
'STATE_VECTOR_LIKE',
'Sweepable',
'TParamKey',
'TParamVal',
'ParamDictType',
# utility:
'AnnealSequenceSearchStrategy',
'CliffordSimulator',
'DeserializingArg',
'GateOpDeserializer',
'GateOpSerializer',
'GreedySequenceSearchStrategy',
'SerializingArg',
'Simulator',
'StabilizerSampler',
'Unique',
'DEFAULT_RESOLVERS',
# Quantum Engine
'Engine',
'EngineJob',
'EngineProcessor',
'EngineProgram',
'EngineTimeSlot',
'QuantumEngineSampler',
'NAMED_GATESETS',
# enums
'ProtoVersion',
]
def _get_all_public_classes(module) -> Iterator[Tuple[str, Type]]:
for name, obj in inspect.getmembers(module):
if inspect.isfunction(obj) or inspect.ismodule(obj):
continue
if name in SHOULDNT_BE_SERIALIZED:
continue
if not inspect.isclass(obj):
# singletons, for instance
obj = obj.__class__
if name.startswith('_'):
continue
if inspect.isclass(obj) and inspect.isabstract(obj):
continue
# assert name != 'XPowGate'
yield name, obj
def _get_all_names() -> Iterator[str]:
def not_module_or_function(x):
return not (inspect.ismodule(x) or inspect.isfunction(x))
for name, _ in inspect.getmembers(cirq, not_module_or_function):
yield name
for name, _ in inspect.getmembers(cirq.google, not_module_or_function):
yield name
def test_shouldnt_be_serialized_no_superfluous():
# everything in the list should be ignored for a reason
names = set(_get_all_names())
for name in SHOULDNT_BE_SERIALIZED:
assert name in names
def test_not_yet_serializable_no_superfluous():
# everything in the list should be ignored for a reason
names = set(_get_all_names())
for name in NOT_YET_SERIALIZABLE:
assert name in names
def test_mutually_exclusive_blacklist():
assert len(set(SHOULDNT_BE_SERIALIZED) & set(NOT_YET_SERIALIZABLE)) == 0
NOT_YET_SERIALIZABLE = [
'AsymmetricDepolarizingChannel',
'AxisAngleDecomposition',
'CircuitDag',
'CircuitDiagramInfo',
'CircuitDiagramInfoArgs',
'CircuitSampleJob',
'CliffordSimulatorStepResult',
'CliffordState',
'CliffordTrialResult',
'ConstantQubitNoiseModel',
'DensityMatrixSimulator',
'DensityMatrixSimulatorState',
'DensityMatrixStepResult',
'DensityMatrixTrialResult',
'ExpressionMap',
'FSIM_GATESET',
'Heatmap',
'InsertStrategy',
'IonDevice',
'KakDecomposition',
'LinearCombinationOfGates',
'LinearCombinationOfOperations',
'Linspace',
'ListSweep',
'MPSSimulator',
'MPSSimulatorStepResult',
'MPSState',
'MPSTrialResult',
'NeutralAtomDevice',
'PauliInteractionGate',
'PauliStringPhasor',
'PauliSum',
'PauliSumCollector',
'PauliTransform',
'PeriodicValue',
'PointOptimizationSummary',
'Points',
'Product',
'QasmArgs',
'QasmOutput',
'QubitOrder',
'QubitPermutationGate',
'QuilFormatter',
'QuilOutput',
'SerializableDevice',
'SerializableGateSet',
'SimulationTrialResult',
'SingleQubitCliffordGate',
'SparseSimulatorStep',
'SQRT_ISWAP_GATESET',
'StateVectorMixin',
'SYC_GATESET',
'Sycamore',
'Sycamore23',
'TextDiagramDrawer',
'ThreeQubitDiagonalGate',
'Timestamp',
'TwoQubitDiagonalGate',
'UnitSweep',
'StateVectorSimulatorState',
'StateVectorTrialResult',
'WaveFunctionSimulatorState',
'WaveFunctionTrialResult',
'XmonDevice',
'XMON',
'ZerosSampler',
'Zip',
]
def _find_classes_that_should_serialize() -> Set[Tuple[str, Optional[type]]]:
result: Set[Tuple[str, Optional[type]]] = set()
result.update(_get_all_public_classes(cirq))
result.update(_get_all_public_classes(cirq.google))
result.update(_get_all_public_classes(cirq.work))
for k, v in json_serialization._cirq_class_resolver_dictionary().items():
t = v if isinstance(v, type) else None
result.add((k, t))
return result
def test_builtins():
assert_json_roundtrip_works(True)
assert_json_roundtrip_works(1)
assert_json_roundtrip_works(1 + 2j)
assert_json_roundtrip_works(
{
'test': [123, 5.5],
'key2': 'asdf',
'3': None,
'0.0': [],
}
)
def test_numpy():
x = np.ones(1)[0]
assert_json_roundtrip_works(x.astype(np.bool))
assert_json_roundtrip_works(x.astype(np.int8))
assert_json_roundtrip_works(x.astype(np.int16))
assert_json_roundtrip_works(x.astype(np.int32))
assert_json_roundtrip_works(x.astype(np.int64))
assert_json_roundtrip_works(x.astype(np.uint8))
assert_json_roundtrip_works(x.astype(np.uint16))
assert_json_roundtrip_works(x.astype(np.uint32))
assert_json_roundtrip_works(x.astype(np.uint64))
assert_json_roundtrip_works(x.astype(np.float32))
assert_json_roundtrip_works(x.astype(np.float64))
assert_json_roundtrip_works(x.astype(np.complex64))
assert_json_roundtrip_works(x.astype(np.complex128))
assert_json_roundtrip_works(np.ones((11, 5)))
assert_json_roundtrip_works(np.arange(3))
def test_pandas():
assert_json_roundtrip_works(
pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=['x', 'y', 'z'], index=[2, 5])
)
assert_json_roundtrip_works(pd.Index([1, 2, 3], name='test'))
assert_json_roundtrip_works(
pd.MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6)], names=['alice', 'bob'])
)
assert_json_roundtrip_works(
pd.DataFrame(
index=pd.Index([1, 2, 3], name='test'),
data=[[11, 21.0], [12, 22.0], [13, 23.0]],
columns=['a', 'b'],
)
)
assert_json_roundtrip_works(
pd.DataFrame(
index=pd.MultiIndex.from_tuples([(1, 2), (2, 3), (3, 4)], names=['x', 'y']),
data=[[11, 21.0], [12, 22.0], [13, 23.0]],
columns=pd.Index(['a', 'b'], name='c'),
)
)
def test_sympy():
# Raw values.
assert_json_roundtrip_works(sympy.Symbol('theta'))
assert_json_roundtrip_works(sympy.Integer(5))
assert_json_roundtrip_works(sympy.Rational(2, 3))
assert_json_roundtrip_works(sympy.Float(1.1))
# Basic operations.
s = sympy.Symbol('s')
t = sympy.Symbol('t')
assert_json_roundtrip_works(t + s)
assert_json_roundtrip_works(t * s)
assert_json_roundtrip_works(t / s)
assert_json_roundtrip_works(t - s)
assert_json_roundtrip_works(t ** s)
# Linear combinations.
assert_json_roundtrip_works(t * 2)
assert_json_roundtrip_works(4 * t + 3 * s + 2)
class SBKImpl:
"""A test implementation of SerializableByKey."""
def __init__(
self,
name: str,
data_list: Optional[List] = None,
data_tuple: Optional[Tuple] = None,
data_dict: Optional[Dict] = None,
):
self.name = name
self.data_list = data_list or []
self.data_tuple = data_tuple or ()
self.data_dict = data_dict or {}
def __eq__(self, other):
if not isinstance(other, SBKImpl):
return False
return (
self.name == other.name
and self.data_list == other.data_list
and self.data_tuple == other.data_tuple
and self.data_dict == other.data_dict
)
def _json_dict_(self):
return {
"cirq_type": "SBKImpl",
"name": self.name,
"data_list": self.data_list,
"data_tuple": self.data_tuple,
"data_dict": self.data_dict,
}
def _serialization_key_(self):
return self.name
@classmethod
def _from_json_dict_(cls, name, data_list, data_tuple, data_dict, **kwargs):
return cls(name, data_list, tuple(data_tuple), data_dict)
def test_context_serialization():
def custom_resolver(name):
if name == 'SBKImpl':
return SBKImpl
test_resolvers = [custom_resolver] + cirq.DEFAULT_RESOLVERS
sbki_empty = SBKImpl('sbki_empty')
assert_json_roundtrip_works(sbki_empty, resolvers=test_resolvers)
sbki_list = SBKImpl('sbki_list', data_list=[sbki_empty, sbki_empty])
assert_json_roundtrip_works(sbki_list, resolvers=test_resolvers)
sbki_tuple = SBKImpl('sbki_tuple', data_tuple=(sbki_list, sbki_list))
assert_json_roundtrip_works(sbki_tuple, resolvers=test_resolvers)
sbki_dict = SBKImpl('sbki_dict', data_dict={'a': sbki_tuple, 'b': sbki_tuple})
assert_json_roundtrip_works(sbki_dict, resolvers=test_resolvers)
sbki_json = str(cirq.to_json(sbki_dict))
# There should be exactly one context item for each previous SBKImpl.
assert sbki_json.count('"cirq_type": "_SerializedContext"') == 4
# There should be exactly two key items for each of sbki_(empty|list|tuple),
# plus one for the top-level sbki_dict.
assert sbki_json.count('"cirq_type": "_SerializedKey"') == 7
# The final object should be a _SerializedKey for sbki_dict.
final_obj_idx = sbki_json.rfind('{')
final_obj = sbki_json[final_obj_idx : sbki_json.find('}', final_obj_idx) + 1]
assert (
final_obj
== """{
"cirq_type": "_SerializedKey",
"key": "sbki_dict"
}"""
)
list_sbki = [sbki_dict]
assert_json_roundtrip_works(list_sbki, resolvers=test_resolvers)
dict_sbki = {'a': sbki_dict}
assert_json_roundtrip_works(dict_sbki, resolvers=test_resolvers)
assert sbki_list != json_serialization._SerializedKey(sbki_list)
sbki_other_list = SBKImpl('sbki_list', data_list=[sbki_list])
with pytest.raises(ValueError, match='different objects with the same serialization key'):
_ = cirq.to_json(sbki_other_list)
def test_internal_serializer_types():
sbki = SBKImpl('test_key')
test_key = json_serialization._SerializedKey(sbki)
test_context = json_serialization._SerializedContext(sbki)
test_serialization = json_serialization._ContextualSerialization(sbki)
key_json = test_key._json_dict_()
with pytest.raises(TypeError, match='_from_json_dict_'):
_ = json_serialization._SerializedKey._from_json_dict_(**key_json)
context_json = test_context._json_dict_()
with pytest.raises(TypeError, match='_from_json_dict_'):
_ = json_serialization._SerializedContext._from_json_dict_(**context_json)
serialization_json = test_serialization._json_dict_()
with pytest.raises(TypeError, match='_from_json_dict_'):
_ = json_serialization._ContextualSerialization._from_json_dict_(**serialization_json)
def _write_test_data(key: str, *test_instances: Any):
"""Helper method for creating initial test data."""
# coverage: ignore
cirq.to_json(test_instances, TEST_DATA_PATH / f'{key}.json')
with open(TEST_DATA_PATH / f'{key}.repr', 'w') as f:
f.write('[\n')
for e in test_instances:
f.write(proper_repr(e))
f.write(',\n')
f.write(']')
@pytest.mark.parametrize('cirq_obj_name,cls', _find_classes_that_should_serialize())
def test_json_test_data_coverage(cirq_obj_name: str, cls: Optional[type]):
if cirq_obj_name in NOT_YET_SERIALIZABLE:
return pytest.xfail(reason="Not serializable (yet)")
json_path = TEST_DATA_PATH / f'{cirq_obj_name}.json'
json_path2 = TEST_DATA_PATH / f'{cirq_obj_name}.json_inward'
if not json_path.exists() and not json_path2.exists():
# coverage: ignore
raise NotImplementedError(
textwrap.fill(
f"Hello intrepid developer. There is a new public or "
f"serializable object named '{cirq_obj_name}' that does not "
f"have associated test data.\n"
f"\n"
f"You must create the file\n"
f" cirq/protocols/json_test_data/{cirq_obj_name}.json\n"
f"and the file\n"
f" cirq/protocols/json_test_data/{cirq_obj_name}.repr\n"
f"in order to guarantee this public object is, and will "
f"remain, serializable.\n"
f"\n"
f"The content of the .repr file should be the string returned "
f"by `repr(obj)` where `obj` is a test {cirq_obj_name} value "
f"or list of such values. To get this to work you may need to "
f"implement a __repr__ method for {cirq_obj_name}. The repr "
f"must be a parsable python expression that evaluates to "
f"something equal to `obj`."
f"\n"
f"The content of the .json file should be the string returned "
f"by `cirq.to_json(obj)` where `obj` is the same object or "
f"list of test objects.\n"
f"To get this to work you likely need "
f"to add {cirq_obj_name} to the "
f"`cirq_class_resolver_dictionary` method in "
f"the cirq/protocols/json_serialization.py source file. "
f"You may also need to add a _json_dict_ method to "
f"{cirq_obj_name}. In some cases you will also need to add a "
f"_from_json_dict_ method to {cirq_obj_name}."
f"\n"
f"For more information on JSON serialization, please read the "
f"docstring for protocols.SupportsJSON. If this object or "
f"class is not appropriate for serialization, add its name to "
f"the SHOULDNT_BE_SERIALIZED list in the "
f"cirq/protocols/json_serialization_test.py source file."
)
)
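    # Illustrative sketch of the two files the message above asks for, using a made-up class
    # named Foo (the values are hypothetical, not taken from the real test data):
    #   cirq/protocols/json_test_data/Foo.repr  ->  Foo(bar=2)                        # repr() text that evals to an equal object
    #   cirq/protocols/json_test_data/Foo.json  ->  {"cirq_type": "Foo", "bar": 2}    # output of cirq.to_json(obj)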
repr_file = TEST_DATA_PATH / f'{cirq_obj_name}.repr'
if repr_file.exists() and cls is not None:
objs = _eval_repr_data_file(repr_file)
if not isinstance(objs, list):
objs = [objs]
for obj in objs:
assert type(obj) == cls, (
f"Value in {TEST_DATA_REL}/{cirq_obj_name}.repr must be of "
f"exact type {cls}, or a list of instances of that type. But "
f"the value (or one of the list entries) had type "
f"{type(obj)}.\n"
f"\n"
f"If using a value of the wrong type is intended, move the "
f"value to {TEST_DATA_REL}/{cirq_obj_name}.repr_inward\n"
f"\n"
f"Value with wrong type:\n{obj!r}."
)
def test_to_from_strings():
x_json_text = """{
"cirq_type": "_PauliX",
"exponent": 1.0,
"global_shift": 0.0
}"""
assert cirq.to_json(cirq.X) == x_json_text
assert cirq.read_json(json_text=x_json_text) == cirq.X
with pytest.raises(ValueError, match='specify ONE'):
cirq.read_json(io.StringIO(), json_text=x_json_text)
def _eval_repr_data_file(path: pathlib.Path):
return eval(
path.read_text(),
{
'cirq': cirq,
'datetime': datetime,
'pd': pd,
'sympy': sympy,
'np': np,
},
{},
)
def assert_repr_and_json_test_data_agree(
repr_path: pathlib.Path, json_path: pathlib.Path, inward_only: bool
):
if not repr_path.exists() and not json_path.exists():
return
rel_repr_path = f'{TEST_DATA_REL}/{repr_path.name}'
rel_json_path = f'{TEST_DATA_REL}/{json_path.name}'
try:
json_from_file = json_path.read_text()
json_obj = cirq.read_json(json_text=json_from_file)
except Exception as ex: # coverage: ignore
# coverage: ignore
raise IOError(f'Failed to parse test json data from {rel_json_path}.') from ex
try:
repr_obj = _eval_repr_data_file(repr_path)
except Exception as ex: # coverage: ignore
# coverage: ignore
raise IOError(f'Failed to parse test repr data from {rel_repr_path}.') from ex
assert proper_eq(json_obj, repr_obj), (
f'The json data from {rel_json_path} did not parse '
f'into an object equivalent to the repr data from {rel_repr_path}.\n'
f'\n'
f'json object: {json_obj!r}\n'
f'repr object: {repr_obj!r}\n'
)
if not inward_only:
json_from_cirq = cirq.to_json(repr_obj)
json_from_cirq_obj = json.loads(json_from_cirq)
json_from_file_obj = json.loads(json_from_file)
assert json_from_cirq_obj == json_from_file_obj, (
f'The json produced by cirq no longer agrees with the json in the '
f'{rel_json_path} test data file.\n'
f'\n'
f'You must either fix the cirq code to continue to produce the '
f'same output, or you must move the old test data to '
f'{rel_json_path}_inward and create a fresh {rel_json_path} file.\n'
f'\n'
f'test data json:\n'
f'{json_from_file}\n'
f'\n'
f'cirq produced json:\n'
f'{json_from_cirq}\n'
)
def all_test_data_keys() -> List[str]:
seen = set()
for file in TEST_DATA_PATH.iterdir():
name = file.name
if name.endswith('.json') or name.endswith('.repr'):
seen.add(file.name[: -len('.json')])
elif name.endswith('.json_inward') or name.endswith('.repr_inward'):
seen.add(file.name[: -len('.json_inward')])
return sorted(seen)
@pytest.mark.parametrize('key', all_test_data_keys())
def test_json_and_repr_data(key: str):
assert_repr_and_json_test_data_agree(
repr_path=TEST_DATA_PATH / f'{key}.repr',
json_path=TEST_DATA_PATH / f'{key}.json',
inward_only=False,
)
assert_repr_and_json_test_data_agree(
repr_path=TEST_DATA_PATH / f'{key}.repr_inward',
json_path=TEST_DATA_PATH / f'{key}.json_inward',
inward_only=True,
)
def test_pathlib_paths(tmpdir):
path = pathlib.Path(tmpdir) / 'op.json'
cirq.to_json(cirq.X, path)
assert cirq.read_json(path) == cirq.X
def test_json_serializable_dataclass():
@cirq.json_serializable_dataclass
class MyDC:
q: cirq.LineQubit
desc: str
my_dc = MyDC(cirq.LineQubit(4), 'hi mom')
def custom_resolver(name):
if name == 'MyDC':
return MyDC
assert_json_roundtrip_works(
my_dc,
text_should_be="\n".join(
[
'{',
' "cirq_type": "MyDC",',
' "q": {',
' "cirq_type": "LineQubit",',
' "x": 4',
' },',
' "desc": "hi mom"',
'}',
]
),
resolvers=[custom_resolver] + cirq.DEFAULT_RESOLVERS,
)
def test_json_serializable_dataclass_parenthesis():
@cirq.json_serializable_dataclass()
class MyDC:
q: cirq.LineQubit
desc: str
def custom_resolver(name):
if name == 'MyDC':
return MyDC
my_dc = MyDC(cirq.LineQubit(4), 'hi mom')
assert_json_roundtrip_works(my_dc, resolvers=[custom_resolver] + cirq.DEFAULT_RESOLVERS)
def test_json_serializable_dataclass_namespace():
@cirq.json_serializable_dataclass(namespace='cirq.experiments')
class QuantumVolumeParams:
width: int
depth: int
circuit_i: int
qvp = QuantumVolumeParams(width=5, depth=5, circuit_i=0)
def custom_resolver(name):
if name == 'cirq.experiments.QuantumVolumeParams':
return QuantumVolumeParams
assert_json_roundtrip_works(qvp, resolvers=[custom_resolver] + cirq.DEFAULT_RESOLVERS)
| 30.691635 | 94 | 0.645135 |
4f6190f2394d1a4c9a5b414a568f3f3ceb1b6977 | 1,052 | py | Python | bot/commands/pyramidreply.py | NMisko/monkalot | 965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b | [
"MIT"
] | 20 | 2017-09-08T21:13:38.000Z | 2022-01-29T03:24:13.000Z | bot/commands/pyramidreply.py | NMisko/monkalot | 965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b | [
"MIT"
] | 32 | 2017-08-20T17:46:14.000Z | 2021-11-18T22:54:59.000Z | bot/commands/pyramidreply.py | NMisko/monkalot | 965a16ca6a4921c8a9e6e996e9a0e3a9beb8444b | [
"MIT"
] | 10 | 2017-08-19T01:13:41.000Z | 2021-08-07T08:45:30.000Z | """Commands: "!pjsalt"."""
from bot.commands.abstract.command import Command
from bot.utilities.permission import Permission
class PyramidReply(Command):
"""Simple meta-command to output a reply with a pyramid given a specific command.
Basic key to value mapping.
"""
perm = Permission.User
replies = {
"!pjsalt": "PJSalt",
}
def match(self, bot, user, msg, tag_info):
"""Match if message is a possible command."""
cmd = msg.lower().strip()
for key in self.replies:
if cmd == key:
return True
return False
def run(self, bot, user, msg, tag_info):
"""Print out a pyramid of emotes."""
cmd = msg.lower().strip()
for key, reply in self.replies.items():
if cmd == key:
bot.write(reply)
bot.write(reply + " " + reply)
bot.write(reply + " " + reply + " " + reply)
bot.write(reply + " " + reply)
bot.write(reply)
break
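# Rough illustration (not part of the command): for the "!pjsalt" -> "PJSalt" mapping
# above, run() writes five lines - "PJSalt", "PJSalt PJSalt", "PJSalt PJSalt PJSalt",
# "PJSalt PJSalt", "PJSalt" - i.e. a small pyramid of the mapped reply.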
| 27.684211 | 85 | 0.538973 |
4f5aefdd0f985692adfc136c4320a2b269c1d4e5 | 2,233 | py | Python | var/spack/repos/builtin/packages/xlf/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/xlf/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17 | 2019-03-21T15:54:00.000Z | 2022-03-29T19:34:28.000Z | var/spack/repos/builtin/packages/xlf/package.py | mvanwaveren/spack | c192e72188c7882361b1f3dd681a3a4b2145538d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import collections
import re
import spack.compiler
import llnl.util.tty as tty
class Xlf(Package):
"""IBM XL Fortran is an advanced, high-performance compiler that can be
used for developing complex, computationally intensive programs, including
interlanguage calls with C and Fortran programs.
"""
homepage = "https://www.ibm.com/support/knowledgecenter/SSXVZZ_16.1.1/com.ibm.compilers.linux.doc/welcome.html"
variant('r', default=True, description='The _r version of compilers')
def install(self, spec, prefix):
raise InstallError(
'XL compilers are not installable yet, but can be '
'detected on a system where they are supplied by vendor'
)
executables = [r'xlf']
@classmethod
def determine_version(cls, exe):
version_regex = re.compile(r'([0-9]?[0-9]\.[0-9])')
try:
output = spack.compiler.get_compiler_version_output(
exe, '-qversion'
)
match = version_regex.search(output)
if match:
return match.group(1)
except spack.util.executable.ProcessError:
pass
except Exception as e:
tty.debug(e)
@classmethod
def determine_variants(cls, exes, version_str):
variants = collections.defaultdict(dict)
for exe in exes:
if os.path.basename(exe) == 'xlf':
variants['~r']['fortran'] = exe
continue
if os.path.basename(exe) == 'xlf_r':
variants['+r']['fortran'] = exe
continue
results = []
for variant_str, compilers in variants.items():
results.append((variant_str, {'compilers': compilers}))
return results
@property
def fortran(self):
if self.spec.external:
return self.spec.extra_attributes['compilers']['fortran']
msg = "cannot retrieve C compiler [spec is not concrete]"
assert self.spec.concrete, msg
| 31.9 | 115 | 0.622033 |
4f5e6623e15120594da821be336a6b197407430e | 49,063 | py | Python | pandas/core/indexing.py | ssalonen/pandas | 1929563fdb5358a41420d103a388aa2bd494d543 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexing.py | ssalonen/pandas | 1929563fdb5358a41420d103a388aa2bd494d543 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexing.py | ssalonen/pandas | 1929563fdb5358a41420d103a388aa2bd494d543 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # pylint: disable=W0223
from datetime import datetime
from pandas.core.common import _asarray_tuplesafe, is_list_like
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.compat import range, zip
import pandas.compat as compat
import pandas.core.common as com
from pandas.core.common import (_is_bool_indexer,
ABCSeries, ABCDataFrame, ABCPanel)
import pandas.lib as lib
import numpy as np
# the supported indexers
def get_indexers_list():
return [
('ix' ,_NDFrameIndexer),
('iloc',_iLocIndexer ),
('loc' ,_LocIndexer ),
('at' ,_AtIndexer ),
('iat' ,_iAtIndexer ),
]
# "null slice"
_NS = slice(None, None)
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_exception = KeyError
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
try:
return self.obj.get_value(*key)
except Exception:
pass
return self._getitem_tuple(key)
else:
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
# ueber-hack
if (isinstance(label, tuple) and
isinstance(label[axis], slice)):
raise IndexingError('no slices here')
try:
return self.obj._xs(label, axis=axis, copy=False)
except Exception:
return self.obj._xs(label, axis=axis, copy=True)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, raise_on_error=False):
return self.obj._slice(obj, axis=axis, raise_on_error=raise_on_error)
def __setitem__(self, key, value):
# kludgetastic
ax = self.obj._get_axis(0)
if isinstance(ax, MultiIndex):
try:
indexer = ax.get_loc(key)
self._setitem_with_indexer(indexer, value)
return
except Exception:
pass
if isinstance(key, tuple):
if len(key) > self.ndim:
raise IndexingError('only tuples of length <= %d supported' %
self.ndim)
indexer = self._convert_tuple(key, is_setter=True)
else:
indexer = self._convert_to_indexer(key, is_setter=True)
self._setitem_with_indexer(indexer, value)
def _has_valid_tuple(self, key):
pass
def _convert_tuple(self, key, is_setter=False):
keyidx = []
for i, k in enumerate(key):
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally """
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object".format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes,indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like(i):
# should check the elements?
pass
elif com.is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object".format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object".format(self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
from pandas import Panel, DataFrame, Series
# maybe partial set
take_split_path = self.obj._is_mixed_type
if isinstance(indexer,tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key,_ = _convert_missing_indexer(idx)
# if this is the items axes, then take the main missing path
# first; this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed to possibly
# be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
new_indexer = _convert_from_missing_indexer_tuple(indexer)
self.obj[key] = np.nan
self.obj.loc[new_indexer] = value
return self.obj
# reindex the axis
index = self.obj._get_axis(i)
labels = _safe_append_to_index(index, key)
self.obj._data = self.obj.reindex_axis(labels,i)._data
if isinstance(labels,MultiIndex):
self.obj.sortlevel(inplace=True)
labels = self.obj._get_axis(i)
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = _convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
if len(index) == 0:
new_index = Index([indexer])
else:
new_index = _safe_append_to_index(index, indexer)
new_values = np.concatenate([self.obj.values, [value]])
self.obj._data = self.obj._constructor(new_values, index=new_index, name=self.obj.name)
return self.obj
elif self.ndim == 2:
index = self.obj._get_axis(0)
labels = _safe_append_to_index(index, indexer)
self.obj._data = self.obj.reindex_axis(labels,0)._data
return getattr(self.obj,self.name).__setitem__(indexer,value)
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer,value)
# set
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
# if we have a complicated setup, take the split path
if isinstance(indexer, tuple) and any([ isinstance(ax,MultiIndex) for ax in self.obj.axes ]):
take_split_path = True
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if com.is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane indexer here
if len(labels) == 1 and isinstance(self.obj[labels[0]].index,MultiIndex):
index = self.obj[labels[0]].index
idx = indexer[:info_axis][0]
try:
if idx in index:
idx = index.get_loc(idx)
except:
pass
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = _length_of_indexer(plane_indexer[0],index)
if is_list_like(value) and lplane_indexer != len(value):
raise ValueError("cannot set using a multi-index selection indexer with a different length than the value")
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = _length_of_indexer(plane_indexer[0],plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# set the item, possibly having a dtype change
s = s.copy()
s._data = s._data.setitem(pi,v)
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1:
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
if _is_list_like(value):
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
for item in labels:
# align to
if item in value:
v = value[item]
v = v.reindex(self.obj[item].index & v.index)
setter(item, v.values)
else:
setter(item, np.nan)
# we have an equal len ndarray to our labels
elif isinstance(value, np.ndarray) and value.ndim == 2:
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value when'
' setting with an ndarray')
for i, item in enumerate(labels):
setter(item, value[:,i])
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = _maybe_convert_ix(*indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
self.obj._data = self.obj._data.setitem(indexer,value)
def _align_series(self, indexer, ser):
# indexer to assign Series can be tuple or scalar
if isinstance(indexer, tuple):
aligners = [ not _is_null_slice(idx) for idx in indexer ]
single_aligner = sum(aligners) == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
            # rather than find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = single_aligner and (aligners[1] or aligners[2])
obj = self.obj
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if com._is_sequence(idx) or isinstance(idx, slice):
if single_aligner and _is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like(new_ix):
new_ix = Index([new_ix])
if ser.index.equals(new_ix):
return ser.values.copy()
return ser.reindex(new_ix).values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax):
return ser.values.copy()
return ser.reindex(ax).values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n,len(labels)))
# broadcast along other dims
ser = ser.values.copy()
for (axis,l) in broadcast:
shape = [ -1 ] * (len(broadcast)+1)
shape[axis] = l
ser = np.tile(ser,l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif np.isscalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser.values.copy()
return ser.reindex(ax).values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if com._is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
if len(sindexers) == 1 and idx is None and cols is None:
if sindexers[0] == 0:
df = df.T
return self.obj.conform(df,axis=sindexers[0])
df = df.T
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy().values
else:
val = df.reindex(idx, columns=cols).values
return val
elif ((isinstance(indexer, slice) or com.is_list_like(indexer))
and is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy().values
else:
val = df.reindex(ax).values
return val
elif np.isscalar(indexer) and not is_frame:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
if is_panel:
df = df.T
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy().values
# a passed in dataframe which is actually a transpose
# of what is needed
elif idx.equals(df.columns) and cols.equals(df.index):
return df.T.copy().values
return df.reindex(idx, columns=cols).values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
raise NotImplementedError("cannot set using an indexer with a Panel yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if _is_null_slice(key):
continue
retval = getattr(retval,self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(_is_list_like(x) for x in tup):
return False
# just too complicated
for ax in self.obj._data.axes:
if isinstance(ax, MultiIndex):
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we can't create the indexer """
try:
o = self.obj
d = dict([ (a,self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS) ])
return o.reindex(**d)
except:
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if com._is_bool_indexer(key):
key = _check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
# want Index objects to pass through untouched
keyarr = key
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if _is_integer_dtype(keyarr) and not _is_integer_index(labels):
keyarr = com._ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _getitem_lowerdim(self, tup):
ax0 = self.obj._get_axis(0)
# a bit kludgy
if isinstance(ax0, MultiIndex):
try:
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError
# raise the error if we are not sorted
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
try:
loc = ax0.get_loc(tup[0])
except KeyError:
raise e1
if len(tup) > self.obj.ndim:
raise IndexingError
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
        # df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if _is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not _is_list_like(section):
return section
# might have been a MultiIndex
elif section.ndim == self.ndim:
new_key = tup[:i] + (_NS,) + tup[i + 1:]
# new_key = tup[:i] + tup[i+1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0
and len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
return getattr(section,self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif _is_list_like(key) and not (isinstance(key, tuple) and
isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
if com.is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if _is_integer_index(self.obj.index.levels[0]):
raise
if not _is_integer_index(labels):
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
labels = self.obj._get_axis(axis)
def _reindex(keys, level=None):
try:
return self.obj.reindex_axis(keys, axis=axis, level=level)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keys, level=level)
if com._is_bool_indexer(key):
key = _check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
if isinstance(key, Index):
# want Index objects to pass through untouched
keyarr = key
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if _is_integer_dtype(keyarr):
if labels.inferred_type != 'integer':
keyarr = np.where(keyarr < 0,
len(labels) + keyarr, keyarr)
if labels.inferred_type == 'mixed-integer':
indexer = labels.get_indexer(keyarr)
if (indexer >= 0).all():
                        return self.obj.take(indexer, axis=axis, convert=True)
else:
return self.obj.take(keyarr, axis=axis)
elif not labels.inferred_type == 'integer':
return self.obj.take(keyarr, axis=axis)
# this is not the most robust, but...
if (isinstance(labels, MultiIndex) and
not isinstance(keyarr[0], tuple)):
level = 0
else:
level = None
keyarr_is_unique = Index(keyarr).is_unique
# existing labels are unique and indexer is unique
if labels.is_unique and keyarr_is_unique:
return _reindex(keyarr, level=level)
else:
indexer, missing = labels.get_indexer_non_unique(keyarr)
check = indexer != -1
result = self.obj.take(indexer[check], axis=axis, convert=False)
# need to merge the result labels and the missing labels
if len(missing):
l = np.arange(len(indexer))
missing = com._ensure_platform_int(missing)
missing_labels = keyarr.take(missing)
missing_indexer = com._ensure_int64(l[~check])
cur_labels = result._get_axis(axis).values
cur_indexer = com._ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]),dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# reindex with the specified axis
ndim = self.obj.ndim
if axis+1 > ndim:
raise AssertionError("invalid indexing error with non-unique index")
# a unique indexer
if keyarr_is_unique:
new_indexer = (Index(cur_indexer) + Index(missing_indexer)).values
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original indexer here
else:
# need to retake to have the same size as the indexer
rindexer = indexer.values
rindexer[~check] = 0
result = self.obj.take(rindexer, axis=axis, convert=False)
# reset the new indexer to account for the new size
new_indexer = np.arange(len(result))
new_indexer[~check] = -1
result = result._reindex_with_indexers({ axis : [ new_labels, new_indexer ] }, copy=True, allow_dups=True)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
is_int_index = _is_integer_index(labels)
if com.is_integer(obj) and not is_int_index:
return obj
try:
return labels.get_loc(obj)
except (KeyError, TypeError):
pass
if isinstance(obj, slice):
ltype = labels.inferred_type
# in case of providing all floats, use label-based indexing
float_slice = (labels.inferred_type == 'floating'
and _is_float_slice(obj))
# floats that are within tolerance of int used as positions
int_slice = _is_index_slice(obj)
null_slice = obj.start is None and obj.stop is None
# could have integers in the first level of the MultiIndex,
# in which case we wouldn't want to do position-based slicing
position_slice = (int_slice
and not ltype == 'integer'
and not isinstance(labels, MultiIndex)
and not float_slice)
start, stop = obj.start, obj.stop
# last ditch effort: if we are mixed and have integers
try:
if position_slice and 'mixed' in ltype:
if start is not None:
i = labels.get_loc(start)
if stop is not None:
j = labels.get_loc(stop)
position_slice = False
except KeyError:
if ltype == 'mixed-integer-float':
raise
if null_slice or position_slice:
indexer = obj
else:
try:
indexer = labels.slice_indexer(start, stop, obj.step)
except Exception:
if _is_index_slice(obj):
if ltype == 'integer':
raise
indexer = obj
else:
raise
return indexer
elif _is_list_like(obj):
if com._is_bool_indexer(obj):
obj = _check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
if isinstance(obj, Index):
objarr = obj.values
else:
objarr = _asarray_tuplesafe(obj)
# If have integer labels, defer to label-based indexing
if _is_integer_dtype(objarr) and not is_int_index:
if labels.inferred_type != 'integer':
objarr = np.where(objarr < 0,
len(labels) + objarr, objarr)
return objarr
# this is not the most robust, but...
if (isinstance(labels, MultiIndex) and
not isinstance(objarr[0], tuple)):
level = 0
_, indexer = labels.reindex(objarr, level=level)
check = labels.levels[0].get_indexer(objarr)
else:
level = None
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
indexer, missing = labels.get_indexer_non_unique(objarr)
check = indexer
mask = check == -1
if mask.any():
# mi here
if isinstance(obj, tuple) and is_setter:
return { 'key' : obj }
raise KeyError('%s not in index' % objarr[mask])
return indexer
else:
try:
return labels.get_loc(obj)
except (KeyError):
# allow a not found key only if we are a setter
if not is_list_like(obj) and is_setter:
return { 'key' : obj }
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not _need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
ltype = labels.inferred_type
# in case of providing all floats, use label-based indexing
float_slice = (labels.inferred_type == 'floating'
and _is_float_slice(slice_obj))
# floats that are within tolerance of int used as positions
int_slice = _is_index_slice(slice_obj)
null_slice = slice_obj.start is None and slice_obj.stop is None
# could have integers in the first level of the MultiIndex,
# in which case we wouldn't want to do position-based slicing
position_slice = (int_slice
and not ltype == 'integer'
and not isinstance(labels, MultiIndex)
and not float_slice)
start, stop = slice_obj.start, slice_obj.stop
# last ditch effort: if we are mixed and have integers
try:
if position_slice and 'mixed' in ltype:
if start is not None:
i = labels.get_loc(start)
if stop is not None:
j = labels.get_loc(stop)
position_slice = False
except KeyError:
if ltype == 'mixed-integer-float':
raise
if null_slice or position_slice:
indexer = slice_obj
else:
try:
indexer = labels.slice_indexer(start, stop, slice_obj.step)
except Exception:
if _is_index_slice(slice_obj):
if ltype == 'integer':
raise
indexer = slice_obj
else:
raise
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis)
else:
return self.obj.take(indexer, axis=axis)
class _LocationIndexer(_NDFrameIndexer):
_valid_types = None
_exception = Exception
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise ValueError('Too many indexers')
if not self._has_valid_type(k,i):
raise ValueError("Location based indexing can only have [%s] types" % self._valid_types)
def __getitem__(self, key):
if type(key) is tuple:
return self._getitem_tuple(key)
else:
return self._getitem_axis(key, axis=0)
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = _check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except (Exception) as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not _need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis)
else:
return self.obj.take(indexer, axis=axis)
class _LocIndexer(_LocationIndexer):
""" purely label based location based indexing """
_valid_types = "labels (MUST BE IN THE INDEX), slices of labels (BOTH endpoints included! Can be slices of integers if the index is integers), listlike of labels, boolean"
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
        # slice of labels (where start-end in labels)
        # slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
if key.start is not None:
if key.start not in ax:
raise KeyError("start bound [%s] is not the [%s]" % (key.start,self.obj._get_axis_name(axis)))
if key.stop is not None:
if key.stop not in ax:
raise KeyError("stop bound [%s] is not in the [%s]" % (key.stop,self.obj._get_axis_name(axis)))
elif com._is_bool_indexer(key):
return True
elif _is_list_like(key):
# require all elements in the index
idx = _ensure_index(key)
if not idx.isin(ax).all():
raise KeyError("[%s] are not in ALL in the [%s]" % (key,self.obj._get_axis_name(axis)))
return True
else:
# if its empty we want a KeyError here
if not len(ax):
raise KeyError("The [%s] axis is empty" % self.obj._get_axis_name(axis))
try:
if not key in ax:
raise KeyError("the label [%s] is not in the [%s]" % (key,self.obj._get_axis_name(axis)))
except (TypeError):
# if we have a weird type of key/ax
raise KeyError("the label [%s] is not in the [%s]" % (key,self.obj._get_axis_name(axis)))
return True
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif _is_list_like(key) and not (isinstance(key, tuple) and
isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
""" purely integer based location based indexing """
_valid_types = "integer, integer slice (START point is INCLUDED, END point is EXCLUDED), listlike of integers, boolean array"
_exception = IndexError
def _has_valid_type(self, key, axis):
if com._is_bool_indexer(key):
if hasattr(key,'index') and isinstance(key.index,Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean indexing on an integer type is not available")
raise ValueError("iLocation based boolean indexing cannot use an indexable as a mask")
return True
return isinstance(key, slice) or com.is_integer(key) or _is_list_like(key)
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if _is_null_slice(key):
continue
retval = getattr(retval,self.name)._getitem_axis(key, axis=i)
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not _need_slice(slice_obj):
return obj
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, raise_on_error=True)
else:
return self.obj.take(slice_obj, axis=axis)
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
self._has_valid_type(key,axis)
return self._getbool_axis(key, axis=axis)
# a single integer or a list of integers
else:
if not (com.is_integer(key) or _is_list_like(key)):
raise ValueError("Cannot index by location index with a non-integer key")
return self._get_loc(key,axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
if self._has_valid_type(obj,axis):
return obj
raise ValueError("Can only index by location with a [%s]" % self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not _is_list_like(key):
key = tuple([ key ])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key)
def __setitem__(self, key, value):
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access (setting)!')
key = self._convert_key(key)
key.append(value)
self.obj.set_value(*key)
class _AtIndexer(_ScalarAccessIndexer):
""" label based scalar accessor """
pass
class _iAtIndexer(_ScalarAccessIndexer):
""" integer based scalar accessor """
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key):
""" require integer args (and convert to label arguments) """
ckey = []
for a, i in zip(self.obj.axes,key):
if not com.is_integer(i):
raise ValueError("iAt based indexing can only have integer indexers")
ckey.append(a[i])
return ckey
# 32-bit floating point machine epsilon
_eps = np.finfo('f4').eps
def _length_of_indexer(indexer,target=None):
""" return the length of a single non-tuple indexer which could be a slice """
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = abs(step)
return (stop-start) / step
elif isinstance(indexer, (ABCSeries, np.ndarray, list)):
return len(indexer)
elif not is_list_like(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
def _convert_to_index_sliceable(obj, key):
""" if we are index sliceable, then return my slicer, otherwise return None """
idx = obj.index
if isinstance(key, slice):
idx_type = idx.inferred_type
if idx_type == 'floating':
indexer = obj.ix._convert_to_indexer(key, axis=0)
elif idx_type == 'integer' or _is_index_slice(key):
indexer = key
else:
indexer = obj.ix._convert_to_indexer(key, axis=0)
return indexer
elif isinstance(key, compat.string_types):
# we are an actual column
if key in obj._data.items:
return None
# we need a timelike key here
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except:
return None
return None
def _is_index_slice(obj):
def _is_valid_index(x):
return (com.is_integer(x) or com.is_float(x)
and np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
def _is_int_slice(obj):
def _is_valid_index(x):
return com.is_integer(x)
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
def _is_float_slice(obj):
def _is_valid_index(x):
return com.is_float(x)
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
class _SeriesIndexer(_NDFrameIndexer):
"""
Class to support fancy indexing, potentially using labels
Notes
-----
Indexing based on labels is INCLUSIVE
Slicing uses PYTHON SEMANTICS (endpoint is excluded)
If Index contains int labels, these will be used rather than the locations,
so be very careful (ambiguous).
Examples
--------
>>> ts.ix[5:10] # equivalent to ts[5:10]
>>> ts.ix[[date1, date2, date3]]
>>> ts.ix[date1:date2] = 0
"""
def _get_label(self, key, axis=0):
return self.obj[key]
def _get_loc(self, key, axis=0):
return self.obj.values[key]
def _slice(self, indexer, axis=0):
return self.obj._get_values(indexer)
def _setitem_with_indexer(self, indexer, value):
# need to delegate to the super setter
if isinstance(indexer, dict):
return super(_SeriesIndexer, self)._setitem_with_indexer(indexer, value)
# fast access
self.obj._set_values(indexer, value)
def _check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that com._is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = com.isnull(result.values)
if mask.any():
raise IndexingError('Unalignable boolean Series key provided')
result = result.astype(bool).values
else:
# com._is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
def _convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted """
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def _convert_from_missing_indexer_tuple(indexer):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_idx):
return _idx['key'] if isinstance(_idx,dict) else _idx
return tuple([ get_indexer(_idx) for _i, _idx in enumerate(indexer) ])
def _safe_append_to_index(index, key):
""" a safe append to an index, if incorrect type, then catch and recreate """
try:
return index.insert(len(index), key)
except:
return Index(np.concatenate([index.asobject.values,np.array([key])]))
def _maybe_convert_indices(indices, n):
""" if we have negative indicies, translate to postive here
if have indicies that are out-of-bounds, raise an IndexError """
if isinstance(indices, list):
indices = np.array(indices)
mask = indices<0
if mask.any():
indices[mask] += n
mask = (indices>=n) | (indices<0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
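# Worked example (illustrative only): with n=5, [-1, 2] is translated to [4, 2],
# while [5, 0] raises IndexError because 5 is out of bounds for a length-5 axis.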
def _maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
def _is_null_slice(obj):
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def _is_integer_dtype(arr):
return (issubclass(arr.dtype.type, np.integer) and
not arr.dtype.type == np.datetime64)
def _is_integer_index(index):
return index.inferred_type == 'integer'
def _is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not _is_list_like(key)
def _is_list_like(obj):
# Consider namedtuples to be not list like as they are useful as indices
return (np.iterable(obj)
and not isinstance(obj, compat.string_types)
and not (isinstance(obj, tuple) and type(obj) is not tuple))
def _need_slice(obj):
return (obj.start is not None or
obj.stop is not None or
(obj.step is not None and obj.step != 1))
def _check_slice_bounds(slobj, values):
l = len(values)
start = slobj.start
if start is not None:
if start < -l or start > l-1:
raise IndexError("out-of-bounds on slice (start)")
stop = slobj.stop
if stop is not None:
if stop < -l-1 or stop > l:
raise IndexError("out-of-bounds on slice (end)")
def _maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
index = index.droplevel(0)
return index
| 34.357843 | 175 | 0.543709 |
4f59a26c8c9e312a9392365a7294d78d99e24c58 | 2,327 | py | Python | gearman/util.py | znanja/python-gearman | a16f2bc108bf21ad47ac34046ae1ed730c0f2f75 | [
"Apache-2.0"
] | null | null | null | gearman/util.py | znanja/python-gearman | a16f2bc108bf21ad47ac34046ae1ed730c0f2f75 | [
"Apache-2.0"
] | null | null | null | gearman/util.py | znanja/python-gearman | a16f2bc108bf21ad47ac34046ae1ed730c0f2f75 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Gearman Client Utils
"""
import errno
import select as select_lib
import time
from gearman.constants import DEFAULT_GEARMAN_PORT
class Stopwatch(object):
"""Timer class that keeps track of time remaining"""
def __init__(self, time_remaining):
if time_remaining is not None:
self.stop_time = time.time() + time_remaining
else:
self.stop_time = None
def get_time_remaining(self):
if self.stop_time is None:
return None
current_time = time.time()
if not self.has_time_remaining(current_time):
return 0.0
time_remaining = self.stop_time - current_time
return time_remaining
def has_time_remaining(self, time_comparison=None):
time_comparison = time_comparison or self.get_time_remaining()
if self.stop_time is None:
return True
return bool(time_comparison < self.stop_time)
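# Illustrative usage sketch, not part of the library API: bound a polling loop by a
# wall-clock budget. `poll_once` and the 0.5 second budget are made-up placeholders.
def _example_poll_with_budget(poll_once, budget=0.5):
    stopwatch = Stopwatch(budget)
    while stopwatch.has_time_remaining(time.time()):
        if poll_once(timeout=stopwatch.get_time_remaining()):
            return True
    return False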
def disambiguate_server_parameter(hostport_tuple):
"""Takes either a tuple of (address, port) or a string of 'address:port' and disambiguates them for us"""
if type(hostport_tuple) is tuple:
gearman_host, gearman_port = hostport_tuple
elif ':' in hostport_tuple:
gearman_host, gearman_possible_port = hostport_tuple.split(':')
gearman_port = int(gearman_possible_port)
else:
gearman_host = hostport_tuple
gearman_port = DEFAULT_GEARMAN_PORT
return gearman_host, gearman_port
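# For illustration: ('localhost', 4730) and 'localhost:4730' both come back as
# ('localhost', 4730); a bare 'localhost' falls back to DEFAULT_GEARMAN_PORT.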
def select(rlist, wlist, xlist, timeout=None):
"""Behave similar to select.select, except ignoring certain types of exceptions"""
rd_list = []
wr_list = []
ex_list = []
select_args = [rlist, wlist, xlist]
if timeout is not None:
select_args.append(timeout)
try:
rd_list, wr_list, ex_list = select_lib.select(*select_args)
except select_lib.error as exc:
# Ignore interrupted system call, reraise anything else
if exc.errno != errno.EINTR:
raise
return rd_list, wr_list, ex_list
def unlist(given_list):
"""Convert the (possibly) single item list into a single item"""
list_size = len(given_list)
if list_size == 0:
return None
elif list_size == 1:
return given_list[0]
else:
raise ValueError(list_size)
| 29.833333 | 109 | 0.67168 |
4f60bb5559ab6e2d345db3dd5bdd04e8aa982dae | 4,375 | py | Python | alipay/aop/api/domain/MorphoAppItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MorphoAppItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MorphoAppItem.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MorphoMiniMeta import MorphoMiniMeta
from alipay.aop.api.domain.MorphoUser import MorphoUser
class MorphoAppItem(object):
def __init__(self):
self._gmt_modified = None
self._id = None
self._mini_meta = None
self._online_state = None
self._owner = None
self._status = None
self._title = None
self._type = None
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def mini_meta(self):
return self._mini_meta
@mini_meta.setter
def mini_meta(self, value):
if isinstance(value, MorphoMiniMeta):
self._mini_meta = value
else:
self._mini_meta = MorphoMiniMeta.from_alipay_dict(value)
@property
def online_state(self):
return self._online_state
@online_state.setter
def online_state(self, value):
self._online_state = value
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
if isinstance(value, MorphoUser):
self._owner = value
else:
self._owner = MorphoUser.from_alipay_dict(value)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.mini_meta:
if hasattr(self.mini_meta, 'to_alipay_dict'):
params['mini_meta'] = self.mini_meta.to_alipay_dict()
else:
params['mini_meta'] = self.mini_meta
if self.online_state:
if hasattr(self.online_state, 'to_alipay_dict'):
params['online_state'] = self.online_state.to_alipay_dict()
else:
params['online_state'] = self.online_state
if self.owner:
if hasattr(self.owner, 'to_alipay_dict'):
params['owner'] = self.owner.to_alipay_dict()
else:
params['owner'] = self.owner
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MorphoAppItem()
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'id' in d:
o.id = d['id']
if 'mini_meta' in d:
o.mini_meta = d['mini_meta']
if 'online_state' in d:
o.online_state = d['online_state']
if 'owner' in d:
o.owner = d['owner']
if 'status' in d:
o.status = d['status']
if 'title' in d:
o.title = d['title']
if 'type' in d:
o.type = d['type']
return o
| 28.409091 | 75 | 0.5584 |
4f61a7b5ac6e841f6258552a1a97f2f9ed93e4af | 2,786 | py | Python | python/cugraph/tests/test_subgraph_extraction.py | ogreen/cugraph | d94ab29f14e6212a0c8bb5ec5fbe9e300cd57594 | [
"Apache-2.0"
] | null | null | null | python/cugraph/tests/test_subgraph_extraction.py | ogreen/cugraph | d94ab29f14e6212a0c8bb5ec5fbe9e300cd57594 | [
"Apache-2.0"
] | null | null | null | python/cugraph/tests/test_subgraph_extraction.py | ogreen/cugraph | d94ab29f14e6212a0c8bb5ec5fbe9e300cd57594 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from itertools import product
import numpy as np
import pytest
import cudf
import cugraph
from cugraph.tests import utils
import rmm
# Temporarily suppress warnings till networkX fixes deprecation warnings
# (Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working) for
# python 3.7. Also, this import networkx needs to be relocated in the
# third-party group once this gets fixed.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import networkx as nx
def compare_edges(cg, nxg, verts):
edgelist_df = cg.view_edge_list()
assert cg.edgelist.weights is False
assert len(edgelist_df) == nxg.size()
for i in range(len(edgelist_df)):
assert nxg.has_edge(verts[edgelist_df['src'][i]],
verts[edgelist_df['dst'][i]])
return True
def cugraph_call(M, verts):
G = cugraph.DiGraph()
cu_M = cudf.DataFrame()
cu_M['src'] = cudf.Series(M.row)
cu_M['dst'] = cudf.Series(M.col)
G.from_cudf_edgelist(cu_M, source='src', destination='dst')
cu_verts = cudf.Series(verts)
return cugraph.subgraph(G, cu_verts)
def nx_call(M, verts):
G = nx.DiGraph(M)
return nx.subgraph(G, verts)
DATASETS = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.parametrize('managed, pool',
list(product([False, True], [False, True])))
@pytest.mark.parametrize('graph_file', DATASETS)
def test_subgraph_extraction(managed, pool, graph_file):
gc.collect()
rmm.reinitialize(
managed_memory=managed,
pool_allocator=pool,
initial_pool_size=2 << 27
)
assert(rmm.is_initialized())
M = utils.read_csv_for_nx(graph_file)
verts = np.zeros(3, dtype=np.int32)
verts[0] = 0
verts[1] = 1
verts[2] = 17
cu_sg = cugraph_call(M, verts)
nx_sg = nx_call(M, verts)
assert compare_edges(cu_sg, nx_sg, verts)
| 30.955556 | 75 | 0.693826 |
4f5db6be2cd75e866393d9240d30fb4de8343384 | 4,837 | py | Python | runs/Re200_St0.6_AR1.27_psi60/scripts/get_propulsive_efficiency.py | VanillaBrooks/petibm-rollingpitching | fbcd7459c75ee44fcaf812700ad134b829bafd21 | [
"BSD-3-Clause"
] | 2 | 2021-09-06T03:37:06.000Z | 2021-12-01T02:39:13.000Z | runs/Re200_St0.6_AR1.27_psi60/scripts/get_propulsive_efficiency.py | VanillaBrooks/petibm-rollingpitching | fbcd7459c75ee44fcaf812700ad134b829bafd21 | [
"BSD-3-Clause"
] | 3 | 2020-03-30T21:52:01.000Z | 2021-07-11T13:11:35.000Z | runs/Re200_St0.6_AR1.27_psi60/scripts/get_propulsive_efficiency.py | VanillaBrooks/petibm-rollingpitching | fbcd7459c75ee44fcaf812700ad134b829bafd21 | [
"BSD-3-Clause"
] | 4 | 2021-02-22T21:54:16.000Z | 2022-01-18T18:39:34.000Z | """Compute the hydrodynamic power and propulsive efficiency."""
import h5py
import numpy
import pathlib
from scipy.interpolate import RegularGridInterpolator
import petibmpy
import rodney
def load_probe_solution(filepath, t, field):
return petibmpy.ProbeVolume(field, field).read_hdf5(filepath, t)
def load_time_values_hdf5(filepath, name='p'):
with h5py.File(filepath, 'r') as infile:
times = [float(t_str) for t_str in list(infile[name].keys())]
return times
def compute_hydrodynamic_power(p, n, u, ds):
return numpy.sum(p * numpy.sum(numpy.multiply(n, u), axis=1) * ds)
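# compute_hydrodynamic_power evaluates a discrete surface integral over the Lagrangian
# markers: P_hydro ~= sum_i p_i * (n_i . u_i) * dS_i, i.e. pressure times the normal
# component of the body velocity, weighted by the surface area tied to each marker.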
def wing_get_normal(wing, time):
phi = wing.rolling(time)
theta = wing.pitching(time)
points = numpy.array([[0.0, 0.0, 0.0],
[0.5, 0.0, 0.5],
[-0.5, 0.0, 0.5]]).T
x, y, z = rodney.vrotation(*points,
roll=phi, pitch=theta,
center=wing.hook)
p1, p2, p3 = numpy.array([x, y, z]).T
v1 = (p1 - p2) / numpy.linalg.norm(p1 - p2)
v2 = (p3 - p1) / numpy.linalg.norm(p3 - p1)
v3 = numpy.cross(v1, v2)
return v3 / numpy.linalg.norm(v3)
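# wing_get_normal rotates three reference points of the flat plate by the current
# roll/pitch angles about the hook, builds two in-plane edge vectors from them, and
# returns their normalized cross product as the instantaneous unit normal.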
# Set simulation directory and data directory.
simudir = pathlib.Path(__file__).absolute().parents[1]
datadir = simudir / 'output'
# Create the wing kinematics.
wing = rodney.WingKinematics(Re=200.0, St=0.6, psi=60.0, nt_period=2000)
# Compute the cycle-averaged thrust.
filepath = datadir / 'forces-0.txt'
t, fx, _, _ = petibmpy.read_forces(filepath)
thrust = -fx # switch from drag to thrust
time_limits = (4 * wing.T, 5 * wing.T) # interval to consider for average
thrust_avg, = petibmpy.get_time_averaged_values(t, thrust, limits=time_limits)
# Compute the cycle-averaged thrust coefficient.
rho, U_inf, A_plan = (getattr(wing, name)
for name in ('rho', 'U_inf', 'A_plan'))
scale = 1 / (0.5 * rho * U_inf**2 * A_plan)
ct, = petibmpy.get_force_coefficients(thrust, coeff=scale)
ct_avg, = petibmpy.get_time_averaged_values(t, ct, limits=time_limits)
# Load original boundary coordinates from file.
filepath = simudir / 'wing.body'
wing.load_body(filepath, skiprows=1)
# Compute surface area associated with each Lagrangian marker.
ds = wing.A_plan / wing.size
# Create virtual boundary around flat plate.
# The flat surface is extended by d grid cells on lower and upper surfaces.
d = 0.03 * wing.c # normal distance from original markers (3% chord length)
x0, y0, z0 = wing.get_coordinates()
xv0 = numpy.tile(x0, 2)
yv0 = numpy.concatenate((y0 - d, y0 + d))
zv0 = numpy.tile(z0, 2)
# Replace flat surface with virtual body.
wing.set_coordinates(xv0, yv0, zv0, org=True)
# Read time values of probe recordings.
filepath = datadir / 'probe_vicinity-p.h5'
times = load_time_values_hdf5(filepath)
# Create regular-grid interpolator.
grid, p = load_probe_solution(filepath, times[0], 'p')
interpolator = RegularGridInterpolator(grid, p.T,
bounds_error=False,
fill_value=None)
# Initialize array to contain hydrodynamic power for each time recording.
P_hydro = numpy.empty_like(times)
# Compute the hydrodynamic power over the time records.
for i, time in enumerate(times):
print(f'[t/T = {time / wing.T:.6f}] Computing hydrodynamic power ...')
# Compute the unit normal vector.
n = wing_get_normal(wing, time)
# Update the position and velocity of the body.
wing.update_position(time)
# Update pressure data on regular grid.
_, p = load_probe_solution(filepath, time, 'p')
interpolator.values = p.T
# Interpolate the pressure on the virtual boundary.
xi, yi, zi = wing.get_coordinates()
p = interpolator(numpy.array([xi, yi, zi]).T)
# Get the normal for each marker on the virtual boundary.
n = numpy.concatenate((numpy.tile(-n, (wing.size // 2, 1)),
numpy.tile(+n, (wing.size // 2, 1))))
# Compute the body velocity and gather components.
wing.update_velocity(time)
u = numpy.vstack(wing.get_velocity()).T
# Compute the hydrodynamic power.
P_hydro[i] = compute_hydrodynamic_power(p, n, u, ds)
# Save hydrodynamic power over cycle to file.
filepath = datadir / 'P_hydro.dat'
with open(filepath, 'w') as outfile:
numpy.savetxt(outfile, numpy.c_[times, P_hydro])
# Compute the cycle-averaged hydrdynamic power.
# As in Li & Dong (2016), only positive values are considered.
mask = numpy.where(P_hydro > 0.0)
P_hydro_avg = numpy.mean(P_hydro[mask])
# Compute the propulsive efficiency.
eta = thrust_avg * U_inf / P_hydro_avg
# Print data.
print('Cycle-averaged thrust:', thrust_avg)
print('Cycle-averaged thrust coefficient:', ct_avg)
print('Cycle-averaged hydrodynamic power:', P_hydro_avg)
print('Propulsive efficiency:', eta)
| 34.06338 | 78 | 0.677279 |
4f5f474095298b0f909f9bbd746a4853ba950b03 | 14,634 | py | Python | sa_check.py | luh0907/nn_breaking_detection | 6e810a5296bea3c6ef975b4e62caa2d94e992b81 | [
"BSD-2-Clause"
] | null | null | null | sa_check.py | luh0907/nn_breaking_detection | 6e810a5296bea3c6ef975b4e62caa2d94e992b81 | [
"BSD-2-Clause"
] | null | null | null | sa_check.py | luh0907/nn_breaking_detection | 6e810a5296bea3c6ef975b4e62caa2d94e992b81 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import time
import os
from multiprocessing import Pool
from tqdm import tqdm
from keras.models import load_model, Model
from keras import backend as K
#from scipy.stats import gaussian_kde
from kde import gaussian_kde
import scipy
from utils_sa import *
import tensorflow as tf
def _aggr_output(x):
return [np.mean(x[..., j]) for j in range(x.shape[-1])]
def _get_saved_path(base_path, dataset, dtype, layer_names):
"""Determine saved path of ats and pred
Args:
base_path (str): Base save path.
dataset (str): Name of dataset.
dtype (str): Name of dataset type (e.g., train, test, fgsm, ...).
layer_names (list): List of layer names.
Returns:
ats_path: File path of ats.
pred_path: File path of pred (independent of layers)
"""
joined_layer_names = "_".join(layer_names)
return (
os.path.join(
base_path,
dataset + "_" + dtype + "_" + joined_layer_names + "_ats" + ".npy",
),
os.path.join(base_path, dataset + "_" + dtype + "_pred" + ".npy"),
)
def get_ats(
model,
dataset,
name,
layer_names,
save_path=None,
batch_size=128,
is_classification=True,
num_classes=10,
num_proc=10,
):
"""Extract activation traces of dataset from model.
Args:
model (keras model): Subject model.
dataset (list): Set of inputs fed into the model.
name (str): Name of input set.
layer_names (list): List of selected layer names.
save_path (tuple): Paths of being saved ats and pred.
batch_size (int): Size of batch when serving.
is_classification (bool): Task type, True if classification task or False.
num_classes (int): The number of classes (labels) in the dataset.
num_proc (int): The number of processes for multiprocessing.
Returns:
ats (list): List of (layers, inputs, neuron outputs).
pred (list): List of predicted classes.
"""
temp_model = Model(
inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names],
)
prefix = info("[" + name + "] ")
if is_classification:
p = Pool(num_proc)
print(prefix + "Model serving")
pred = model.predict_classes(dataset, batch_size=batch_size, verbose=1)
if len(layer_names) == 1:
layer_outputs = [
temp_model.predict(dataset, batch_size=batch_size, verbose=1)
]
else:
layer_outputs = temp_model.predict(
dataset, batch_size=batch_size, verbose=1
)
print(prefix + "Processing ATs")
ats = None
for layer_name, layer_output in zip(layer_names, layer_outputs):
print("Layer: " + layer_name)
if layer_output[0].ndim == 3:
# For convolutional layers
print(layer_output[0].shape)
layer_matrix = np.array(
p.map(_aggr_output, [layer_output[i] for i in range(len(dataset))])
)
else:
layer_matrix = np.array(layer_output)
if ats is None:
ats = layer_matrix
else:
ats = np.append(ats, layer_matrix, axis=1)
layer_matrix = None
if save_path is not None:
np.save(save_path[0], ats)
np.save(save_path[1], pred)
return ats, pred
def find_closest_at(at, train_ats):
"""The closest distance between subject AT and training ATs.
Args:
at (list): List of activation traces of an input.
train_ats (list): List of activation traces in training set (filtered)
Returns:
        dist (float): The closest distance.
at (list): Training activation trace that has the closest distance.
"""
dist = np.linalg.norm(at - train_ats, axis=1)
return (min(dist), train_ats[np.argmin(dist)])
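# Illustrative example (hypothetical values, not from the pipeline):
#   find_closest_at(np.array([1.0, 1.0]), np.array([[0.0, 0.0], [3.0, 4.0]]))
# returns (sqrt(2) ~= 1.414, array([0., 0.])), i.e. the L2 distance to the
# nearest training activation trace together with that trace itself.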
def _get_train_target_ats(model, x_train, x_target, target_name, layer_names, args):
"""Extract ats of train and target inputs. If there are saved files, then skip it.
Args:
model (keras model): Subject model.
x_train (list): Set of training inputs.
x_target (list): Set of target (test or adversarial) inputs.
target_name (str): Name of target set.
layer_names (list): List of selected layer names.
        args: Parsed command-line arguments (argparse namespace).
Returns:
train_ats (list): ats of train set.
train_pred (list): pred of train set.
target_ats (list): ats of target set.
target_pred (list): pred of target set.
"""
saved_train_path = _get_saved_path(args.save_path, args.d, "train", layer_names)
if os.path.exists(saved_train_path[0]):
print(infog("Found saved {} ATs, skip serving".format("train")))
# In case train_ats is stored in a disk
train_ats = np.load(saved_train_path[0])
train_pred = np.load(saved_train_path[1])
else:
train_ats, train_pred = get_ats(
model,
x_train,
"train",
layer_names,
num_classes=args.num_classes,
is_classification=args.is_classification,
save_path=saved_train_path,
)
print(infog("train ATs is saved at " + saved_train_path[0]))
saved_target_path = _get_saved_path(
args.save_path, args.d, target_name, layer_names
)
if os.path.exists(saved_target_path[0]):
print(infog("Found saved {} ATs, skip serving").format(target_name))
# In case target_ats is stored in a disk
target_ats = np.load(saved_target_path[0])
target_pred = np.load(saved_target_path[1])
else:
target_ats, target_pred = get_ats(
model,
x_target,
target_name,
layer_names,
num_classes=args.num_classes,
is_classification=args.is_classification,
save_path=saved_target_path,
)
print(infog(target_name + " ATs is saved at " + saved_target_path[0]))
return train_ats, train_pred, target_ats, target_pred
def fetch_dsa(model, x_train, x_target, target_name, layer_names, args):
"""Distance-based SA
Args:
model (keras model): Subject model.
x_train (list): Set of training inputs.
x_target (list): Set of target (test or adversarial) inputs.
target_name (str): Name of target set.
layer_names (list): List of selected layer names.
        args: Parsed command-line arguments (argparse namespace).
Returns:
dsa (list): List of dsa for each target input.
"""
assert args.is_classification == True
prefix = info("[" + target_name + "] ")
train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(
model, x_train, x_target, target_name, layer_names, args
)
class_matrix = {}
all_idx = []
for i, label in enumerate(train_pred):
if label not in class_matrix:
class_matrix[label] = []
class_matrix[label].append(i)
all_idx.append(i)
dsa = []
print(prefix + "Fetching DSA")
for i, at in enumerate(tqdm(target_ats)):
label = target_pred[i]
a_dist, a_dot = find_closest_at(at, train_ats[class_matrix[label]])
b_dist, _ = find_closest_at(
a_dot, train_ats[list(set(all_idx) - set(class_matrix[label]))]
)
dsa.append(a_dist / b_dist)
return dsa
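# Illustrative sketch (hypothetical values; not part of the original
# pipeline): DSA is the distance to the nearest same-class training AT
# divided by the distance from that AT to the nearest other-class AT.
def _dsa_toy_example():
    train_ats = np.array([[0.0, 0.0], [0.1, 0.0], [1.0, 1.0]])
    train_pred = np.array([0, 0, 1])
    target_at, target_label = np.array([0.2, 0.1]), 0
    same_class = train_ats[train_pred == target_label]
    other_class = train_ats[train_pred != target_label]
    a_dist, a_dot = find_closest_at(target_at, same_class)
    b_dist, _ = find_closest_at(a_dot, other_class)
    return a_dist / b_dist  # ~0.11: the target is unsurprising for class 0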
# from https://stackoverflow.com/questions/47709854/how-to-get-covariance-matrix-in-tensorflow?rq=1
def tf_cov(x):
mean_x = tf.reduce_mean(x, axis=0, keep_dims=True)
mx = tf.matmul(tf.transpose(mean_x), mean_x)
vx = tf.matmul(tf.transpose(x), x)/tf.cast(tf.shape(x)[0], tf.float32)
cov_xx = vx - mx
return cov_xx
class DensityEstimate:
def __init__(self, sess, refined_ats, image_size=28, num_channels=1, sigma=20):
self.sess = sess
self.num_images = refined_ats.shape[0]
self.refined_ats = tf.cast(tf.constant(refined_ats), tf.float64)
#self.refined_ats = tf.transpose(self.refined_ats) # T
weights = np.ones(self.num_images)/self.num_images
ats_cov = np.cov(np.transpose(refined_ats), rowvar=1, bias=False, aweights=weights)
#print(ats_cov)
#inv_cov = scipy.linalg.inv(ats_cov)
inv_cov = np.linalg.pinv(ats_cov)
ats_cov *= sigma**2
inv_cov /= sigma**2
norm_factor = np.sqrt(scipy.linalg.det(2*np.pi*ats_cov))
#print(norm_factor)
#print(inv_cov)
#print(scipy.linalg.cholesky(inv_cov))
#print(np.linalg.cholesky(inv_cov))
whitening = tf.cast(tf.constant(scipy.linalg.cholesky(inv_cov)), tf.float64)
#ats_cov = tf_cov(self.refined_ats)
#self.ats_cov = ats_cov
#norm_factor = tf.sqrt(tf.matrix_determinant(1e2*ats_cov))
#self.norm_factor = norm_factor
#inv_cov = tf.matrix_inverse(ats_cov)
#whitening = tf.cholesky(inv_cov)
self.scaled_refined_ats = tf.matmul(whitening, tf.transpose(self.refined_ats))
#print(whitening.shape, self.refined_ats.shape, self.scaled_refined_ats.shape)
self.sigma = sigma
self.X = tf.placeholder(tf.float64, (refined_ats.shape[1]))
#self.X = tf.transpose(self.X[tf.newaxis,:]) # T
scaled_X = tf.matmul(whitening, tf.transpose(self.X[tf.newaxis,:]))
self.scaled_X = scaled_X
#self.dist = tf.reduce_sum(tf.reshape(tf.square(self.refined_ats - self.X), (self.num_images, 1, -1)), axis=2)
self.dist = tf.reduce_sum(tf.square(tf.transpose(self.scaled_refined_ats - scaled_X)), axis=1)
self.Y = tf.reduce_mean(tf.exp(-self.dist), axis=0)
def predict(self, xs):
res = self.sess.run(self.Y, {self.X: xs})
return res
def _get_kdes(train_ats, train_pred, class_matrix, args):
"""Kernel density estimation
Args:
train_ats (list): List of activation traces in training set.
train_pred (list): List of prediction of train set.
class_matrix (list): List of index of classes.
args: Keyboard args.
Returns:
kdes (list): List of kdes per label if classification task.
removed_cols (list): List of removed columns by variance threshold.
"""
sess = K.get_session()
K.set_learning_phase(False)
removed_cols = []
if args.is_classification:
for label in range(args.num_classes):
col_vectors = np.transpose(train_ats[class_matrix[label]])
for i in range(col_vectors.shape[0]):
if (
np.var(col_vectors[i]) < args.var_threshold
and i not in removed_cols
):
removed_cols.append(i)
kdes = {}
for label in tqdm(range(args.num_classes), desc="kde"):
refined_ats = np.transpose(train_ats[class_matrix[label]])
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
if refined_ats.shape[0] == 0:
print(
warn("ats were removed by threshold {}".format(args.var_threshold))
)
break
kdes[label] = gaussian_kde(refined_ats)
#kdes[label] = DensityEstimate(sess, np.transpose(refined_ats), sigma=0.864)
print(refined_ats.shape)
#print(kdes[label].factor)
else:
col_vectors = np.transpose(train_ats)
for i in range(col_vectors.shape[0]):
if np.var(col_vectors[i]) < args.var_threshold:
removed_cols.append(i)
refined_ats = np.transpose(train_ats)
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
if refined_ats.shape[0] == 0:
print(warn("ats were removed by threshold {}".format(args.var_threshold)))
kdes = [gaussian_kde(refined_ats)]
print(infog("The number of removed columns: {}".format(len(removed_cols))))
return kdes, removed_cols
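# Note: neuron columns whose variance over the training ATs falls below
# args.var_threshold are dropped before fitting, and one Gaussian KDE is fit
# per predicted class; _get_lsa below then scores a target trace by its
# negative log-density under the KDE of its predicted class.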
def _get_lsa(kde, at, removed_cols):
refined_at = np.delete(at, removed_cols, axis=0)
#print(refined_at)
#print(-kde.logpdf(np.transpose(refined_at)))
#print(kde.pdf(np.transpose(refined_at)))
return np.asscalar(-kde.logpdf(np.transpose(refined_at)))
#return np.asscalar(-np.log(kde.pdf(np.transpose(refined_at))))
#return np.asscalar(-np.log(kde.predict(refined_at)))
def fetch_lsa(model, x_train, x_target, target_name, layer_names, args):
"""Likelihood-based SA
Args:
model (keras model): Subject model.
x_train (list): Set of training inputs.
        x_target (list): Set of target (test or adversarial) inputs.
target_name (str): Name of target set.
layer_names (list): List of selected layer names.
        args: Parsed command-line arguments (argparse namespace).
Returns:
lsa (list): List of lsa for each target input.
"""
prefix = info("[" + target_name + "] ")
train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(
model, x_train, x_target, target_name, layer_names, args
)
class_matrix = {}
if args.is_classification:
for i, label in enumerate(train_pred):
if label not in class_matrix:
class_matrix[label] = []
class_matrix[label].append(i)
kdes, removed_cols = _get_kdes(train_ats, train_pred, class_matrix, args)
lsa = []
print(prefix + "Fetching LSA")
if args.is_classification:
for i, at in enumerate(tqdm(target_ats)):
label = target_pred[i]
kde = kdes[label]
lsa.append(_get_lsa(kde, at, removed_cols))
else:
kde = kdes[0]
for at in tqdm(target_ats):
lsa.append(_get_lsa(kde, at, removed_cols))
return lsa
def get_sc(lower, upper, k, sa):
"""Surprise Coverage
Args:
lower (int): Lower bound.
upper (int): Upper bound.
k (int): The number of buckets.
sa (list): List of lsa or dsa.
Returns:
cov (int): Surprise coverage.
"""
buckets = np.digitize(sa, np.linspace(lower, upper, k))
return len(list(set(buckets))) / float(k) * 100
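# Illustrative usage (hypothetical numbers, not part of the original module):
# with lower=0, upper=2 and k=10, the values [0.1, 0.15, 0.9, 1.7, 1.75] land
# in 3 distinct buckets, so coverage is 3 / 10 * 100 = 30.0.
def _sc_toy_example():
    return get_sc(0, 2, 10, [0.1, 0.15, 0.9, 1.7, 1.75])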
| 34.842857 | 119 | 0.601886 |
4f5f96e2b477249853b9f7d8c0e7b67c811a94a3 | 22,068 | py | Python | tensorflow/python/training/rmsprop_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | [
"Apache-2.0"
] | 4 | 2021-06-15T17:26:07.000Z | 2021-11-17T10:58:08.000Z | tensorflow/python/training/rmsprop_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | [
"Apache-2.0"
] | 1 | 2018-09-17T19:30:27.000Z | 2018-09-17T19:30:27.000Z | tensorflow/python/training/rmsprop_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | [
"Apache-2.0"
] | 6 | 2018-12-20T01:35:20.000Z | 2020-07-10T17:29:57.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import math
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
_DATA_TYPES = [dtypes.half, dtypes.float32]
_TEST_PARAM_VALUES = [
# learning_rate, decay, momentum, epsilon, centered, use_resource
[0.5, 0.9, 0.0, 1e-3, True, False],
[0.5, 0.9, 0.0, 1e-3, False, False],
[0.5, 0.9, 0.0, 1e-3, True, True],
[0.5, 0.9, 0.0, 1e-3, False, True],
[0.1, 0.9, 0.0, 1e-3, True, False],
[0.5, 0.95, 0.0, 1e-3, False, False],
[0.5, 0.95, 0.0, 1e-5, True, False],
[0.5, 0.95, 0.9, 1e-5, True, False],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSPropOptimizerTest(test.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum,
epsilon, centered):
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
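  # Note: the reference update above implements the RMSProp recurrences
  #   rms_t = decay * rms + (1 - decay) * g**2
  #   mom_t = momentum * mom + lr * g / sqrt(denom_t)
  #   var_t = var - mom_t
  # with denom_t = rms_t + epsilon (minus mg_t**2 in the centered variant).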
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, decay, momentum, epsilon, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * decay + (1 - decay) * gvalue * gvalue
denom_t = rms_t[gindex] + epsilon
if centered:
mg_t[gindex] = mg_t[gindex] * decay + (1 - decay) * gvalue
denom_t -= mg_t[gindex] * mg_t[gindex]
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
var_t[gindex] = var[gindex] - mom_t[gindex]
return var_t, mg_t, rms_t, mom_t
def testDense(self):
# TODO(yori): Use ParameterizedTest when available
for (dtype, learning_rate, decay, momentum,
epsilon, centered, use_resource) in _TESTPARAMS:
with self.test_session(use_gpu=True):
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=decay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 4 steps of RMSProp
for _ in range(1, 5):
update.run()
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
decay, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
decay, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=0.0,
centered=False).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0., 1.]], var0.eval(), atol=0.01)
def testMinimizeSparseResourceVariableCentered(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=1.0,
centered=True).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval(), atol=0.01)
def testSparse(self):
# TODO(yori): Use ParameterizedTest when available
for (dtype, learning_rate, decay,
momentum, epsilon, centered, _) in _TESTPARAMS:
with self.test_session(use_gpu=True):
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([1]))
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=decay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 4 steps of RMSProp
for _ in range(1, 5):
update.run()
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, decay, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, decay, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testWithoutMomentum(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
opt = rmsprop.RMSPropOptimizer(
learning_rate=2.0, decay=0.9, momentum=0.0, epsilon=1.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
update.run()
# Check the root mean square accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901, 0.901]), rms0.eval())
self.assertAllCloseAccordingToType(
np.array([0.90001, 0.90001]), rms1.eval())
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
]), var1.eval())
# Step 2: the root mean square accumulators contain the previous update.
update.run()
# Check the rms accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
self.assertAllCloseAccordingToType(
np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
]), var1.eval())
def testWithMomentum(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
opt = rmsprop.RMSPropOptimizer(
learning_rate=2.0, decay=0.9, momentum=0.5, epsilon=1e-5)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: rms = 1, mom = 0. So we should see a normal
# update: v -= grad * learning_rate
update.run()
# Check the root mean square accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901, 0.901]), rms0.eval())
self.assertAllCloseAccordingToType(
np.array([0.90001, 0.90001]), rms1.eval())
# Check the momentum accumulators
self.assertAllCloseAccordingToType(
np.array([(0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
(0.1 * 2.0 / math.sqrt(0.901 + 1e-5))]), mom0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))]), mom1.eval())
        # Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))
]), var1.eval())
# Step 2: the root mean square accumulators contain the previous update.
update.run()
# Check the rms accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
self.assertAllCloseAccordingToType(
np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
self.assertAllCloseAccordingToType(
np.array([
0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)),
0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))
]), mom0.eval())
self.assertAllCloseAccordingToType(
np.array([
0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)),
0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))
]), mom1.eval())
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
(0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
(0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)))
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
(0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
(0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)))
]), var1.eval())
def testCallableParams(self):
with context.eager_mode():
for dtype in [dtypes.half, dtypes.float32]:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
decay = lambda: 0.9
momentum = lambda: 0.0
epsilon = lambda: 1.0
opt = rmsprop.RMSPropOptimizer(learning_rate, decay, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
| 44.136 | 80 | 0.585055 |
4f626af3dd4f0888c1dee8d4899c19d7f8115514 | 3,409 | py | Python | tests/providers/microsoft/azure/sensors/test_azure_data_factory.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | tests/providers/microsoft/azure/sensors/test_azure_data_factory.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | tests/providers/microsoft/azure/sensors/test_azure_data_factory.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
import pytest
from parameterized import parameterized
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
)
from airflow.providers.microsoft.azure.sensors.data_factory import AzureDataFactoryPipelineRunStatusSensor
class TestPipelineRunStatusSensor(unittest.TestCase):
def setUp(self):
self.config = {
"azure_data_factory_conn_id": "azure_data_factory_test",
"run_id": "run_id",
"resource_group_name": "resource-group-name",
"factory_name": "factory-name",
"timeout": 100,
"poke_interval": 15,
}
self.sensor = AzureDataFactoryPipelineRunStatusSensor(task_id="pipeline_run_sensor", **self.config)
def test_init(self):
assert self.sensor.azure_data_factory_conn_id == self.config["azure_data_factory_conn_id"]
assert self.sensor.run_id == self.config["run_id"]
assert self.sensor.resource_group_name == self.config["resource_group_name"]
assert self.sensor.factory_name == self.config["factory_name"]
assert self.sensor.timeout == self.config["timeout"]
assert self.sensor.poke_interval == self.config["poke_interval"]
@parameterized.expand(
[
(AzureDataFactoryPipelineRunStatus.SUCCEEDED, True),
(AzureDataFactoryPipelineRunStatus.FAILED, "exception"),
(AzureDataFactoryPipelineRunStatus.CANCELLED, "exception"),
(AzureDataFactoryPipelineRunStatus.CANCELING, False),
(AzureDataFactoryPipelineRunStatus.QUEUED, False),
(AzureDataFactoryPipelineRunStatus.IN_PROGRESS, False),
]
)
@patch.object(AzureDataFactoryHook, "get_pipeline_run")
def test_poke(self, pipeline_run_status, expected_status, mock_pipeline_run):
mock_pipeline_run.return_value.status = pipeline_run_status
if expected_status != "exception":
assert self.sensor.poke({}) == expected_status
else:
# The sensor should fail if the pipeline run status is "Failed" or "Cancelled".
if pipeline_run_status == AzureDataFactoryPipelineRunStatus.FAILED:
error_message = f"Pipeline run {self.config['run_id']} has failed."
else:
error_message = f"Pipeline run {self.config['run_id']} has been cancelled."
with pytest.raises(AzureDataFactoryPipelineRunException, match=error_message):
self.sensor.poke({})
| 44.272727 | 107 | 0.712819 |
4f62269f89f730432339c067f5b3e5a8748ffb83 | 11,050 | py | Python | QESalgorithms.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | 3 | 2021-05-10T01:49:59.000Z | 2021-06-13T19:03:40.000Z | QESalgorithms.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | null | null | null | QESalgorithms.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | null | null | null | from BasicFunctions import save_pr, load_pr, print_dict
from DMRGalgorithms import dmrg_infinite_size
from QESclass import QES_1D
from EDspinClass import EDbasic
from Parameters import parameter_qes_gs_by_ed, parameter_qes_ft_by_ed
from HamiltonianModule import hamiltonian_heisenberg
from TensorBasicModule import entanglement_entropy
from scipy.sparse.linalg import LinearOperator as LinearOp
from scipy.sparse.linalg import eigsh as eigs
import os.path as opath
import numpy as np
def prepare_bath_hamilts(para, inputs=None):
# inputs = (bath, ob0, hamilt)
print('Starting iDMRG for the entanglement bath')
bath_data = opath.join(para['bath_path'], para['bath_exp'])
if inputs is None:
if para['if_load_bath'] and opath.isfile(bath_data):
print('Bath data found. Load the bath.')
bath, ob0, hamilt = load_pr(bath_data, ['A', 'ob0', 'hamilt'])
else:
print('Bath data not found. Calculate bath by iDMRG.')
hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'], para['jz'],
para['hx'] / 2, para['hz'] / 2)
bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt)[:2]
save_pr(para['bath_path'], para['bath_exp'], [bath, ob0, hamilt], ['A', 'ob0', 'hamilt'])
else:
bath, ob0, hamilt = inputs
if (bath.is_symme_env is True) and (bath.dmrg_type is 'mpo'):
bath.env[1] = bath.env[0]
print('Preparing the physical-bath Hamiltonians')
qes = QES_1D(para['d'], para['chi'], para['d'] * para['d'],
para['l_phys'], para['tau'], spin=para['spin'])
if bath.dmrg_type is 'mpo':
qes.obtain_physical_gate_tensors(hamilt)
qes.obtain_bath_h(bath.env, 'both')
else:
qes.obtain_bath_h_by_effective_ops_1d(
bath.bath_op_onsite, bath.effective_ops, bath.hamilt_index)
hamilts = [hamilt] + qes.hamilt_bath
return hamilts, bath, ob0
def find_degenerate_ground_state(para, it_time, tol=1e-2):
# if not para['is_symme_env']:
# para['is_symme_env'] = True
# print('In \'find_degenerate_bath\', set para[\'is_symme_env\'] = True')
dege_states = list()
for t in range(it_time):
# Randomly initialize env
env = list()
env.append(np.random.randn(para['chi'], para['d']**para['n_site'], para['chi']))
env[0] = env[0] + env[0].transpose(2, 1, 0)
env[0] /= np.linalg.norm(env[0])
env.append(env[0].copy())
hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'], para['jz'],
para['hx'] / 2, para['hz'] / 2)
bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt, env=env)[:2]
gs = bath.mps[1]
if len(dege_states) > 0:
delta = list()
add_new = True
for n in range(len(dege_states)):
delta.append(np.sqrt(np.abs(2-2*np.abs(
dege_states[n].reshape(1, -1).dot(gs.reshape(-1, 1))[0, 0]))))
add_new = add_new and (delta[-1] > tol)
print('Differences = ' + str(delta))
if add_new:
dege_states.append(gs)
print(str(len(dege_states)) + ' envs have been found.')
else:
dege_states.append(gs)
print('After ' + str(it_time) + ' iterations, ' + str(len(dege_states)) + ' have been found.')
def find_degenerate_rho(para, it_time, tol=1e-2):
dege_rho = list()
for t in range(it_time):
# Randomly initialize env
env = list()
env.append(np.random.randn(para['chi'], para['d']**para['n_site'], para['chi']))
env[0] = env[0] + env[0].transpose(2, 1, 0)
env[0] /= np.linalg.norm(env[0])
env.append(env[0].copy())
hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'], para['jz'],
para['hx'] / 2, para['hz'] / 2)
bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt, env=env)[:2]
bath.rho_from_central_tensor()
rho = bath.rho
if len(dege_rho) > 0:
delta = list()
for n in range(len(dege_rho)):
delta.append(np.sqrt(np.abs(2-2*np.abs(
dege_rho[n].reshape(1, -1).dot(rho.reshape(-1, 1))[0, 0]))))
# delta.append(np.abs(np.trace(dege_rho[n].dot(rho))))
print('Differences = ' + str(delta))
if np.min(delta) > tol:
dege_rho.append(rho)
print(str(len(dege_rho)) + ' have been found.')
else:
dege_rho.append(rho)
print('After ' + str(it_time) + ' iterations, ' + str(len(dege_rho)) + ' have been found.')
def find_degenerate_hbaths(para, it_time, tol=1e-2):
hbaths = list()
for t in range(it_time):
# Randomly initialize env
env = list()
env.append(np.random.randn(para['chi'], para['d'] ** para['n_site'], para['chi']))
env[0] = env[0] + env[0].transpose(2, 1, 0)
env[0] /= np.linalg.norm(env[0])
env.append(env[0].copy())
hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'], para['jz'],
para['hx'] / 2, para['hz'] / 2)
bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt, env=env)[:2]
para_qes = parameter_qes_gs_by_ed(para)
qes_hamilt = prepare_bath_hamilts(para_qes, (bath, ob0, hamilt))[0]
qes_hamilt = qes_hamilt[1]
# print(np.trace(qes_hamilt), qes_hamilt.shape)
# find degenerate hbaths
if len(hbaths) > 0:
delta = list()
add_new = True
for n in range(len(hbaths)):
delta1 = (hbaths[n]/np.linalg.norm(hbaths[n])).reshape(1, -1).dot(
(qes_hamilt/np.linalg.norm(qes_hamilt)).reshape(-1, 1))[0, 0]
delta.append(np.sqrt(np.abs(2 - 2*delta1)))
add_new = add_new and (delta[-1] > tol)
print('Differences = ' + str(delta))
if add_new:
hbaths.append(qes_hamilt)
print(str(len(hbaths)) + ' envs have been found.')
else:
hbaths.append(qes_hamilt)
print('After ' + str(it_time) + ' iterations, ' + str(len(hbaths)) + ' have been found.')
def find_degenerate_rings(para, it_time, tol=1e-2):
# if not para['is_symme_env']:
# para['is_symme_env'] = True
# print('In \'find_degenerate_bath\', set para[\'is_symme_env\'] = True')
rings = list()
for t in range(it_time):
# Randomly initialize env
env = list()
env.append(np.random.randn(para['chi'], para['d']**para['n_site'], para['chi']))
env[0] = env[0] + env[0].transpose(2, 1, 0)
env[0] /= np.linalg.norm(env[0])
env.append(env[0].copy())
hamilt = hamiltonian_heisenberg(para['spin'], para['jxy'], para['jxy'], para['jz'],
para['hx'] / 2, para['hz'] / 2)
bath, ob0 = dmrg_infinite_size(para, hamilt=hamilt, env=env)[:2]
bath.env[1] = bath.env[0]
# find degenerate ring tensors
rt = bath.obtain_ring_tensor()
rt = np.real(rt)
rt /= np.linalg.norm(rt)
if len(rings) > 0:
delta = list()
add_ring = True
for n in range(len(rings)):
delta.append(np.sqrt(np.abs(2-2*np.abs(
rings[n].reshape(1, -1).dot(rt.reshape(-1, 1))[0, 0]))))
add_ring = add_ring and (delta[-1] > tol)
print('Differences = ' + str(delta))
if add_ring:
rings.append(rt)
print(str(len(rings)) + ' envs have been found.')
else:
rings.append(rt)
print('After ' + str(it_time) + ' iterations, ' + str(len(rings)) + ' have been found.')
def qes_gs_1d_ed(para=None):
if para is None:
para = parameter_qes_ft_by_ed()
hamilts, bath, ob0 = prepare_bath_hamilts(para)
print('Starting ED for the entanglement bath')
dims = [para['d'] for _ in range(para['l_phys'])]
dims = [para['chi']] + dims + [para['chi']]
ob = dict()
solver = EDbasic(dims, spin=para['spin'])
heff = LinearOp((solver.dim_tot, solver.dim_tot),
lambda x: solver.project_all_hamilt(
x, hamilts, para['tau'], para['couplings']))
ob['e_eig'], solver.v = eigs(heff, k=1, which='LM', v0=solver.v.reshape(-1, ).copy())
solver.is_vec = True
ob['e_eig'] = (1 - ob['e_eig']) / para['tau']
ob['mx'], ob['mz'] = solver.observe_magnetizations(para['phys_sites'])
ob['eb'] = solver.observe_bond_energies(hamilts[0], para['positions_h2'][1:para['num_h2']-1, :])
ob['lm'] = solver.calculate_entanglement()
ob['ent'] = entanglement_entropy(ob['lm'])
ob['e_site'] = sum(ob['eb']) / (para['l_phys'] - 1)
ob['corr_xx'] = solver.observe_correlations(para['pos4corr'], para['op'][1])
ob['corr_zz'] = solver.observe_correlations(para['pos4corr'], para['op'][3])
for n in range(para['pos4corr'].shape[0]):
p1 = para['pos4corr'][n, 0] - 1
p2 = para['pos4corr'][n, 1] - 1
ob['corr_xx'][n] -= ob['mx'][p1] * ob['mx'][p2]
ob['corr_zz'][n] -= ob['mz'][p1] * ob['mz'][p2]
return bath, solver, ob0, ob
def qes_ft_1d_ltrg(para=None):
if para is None:
para = parameter_qes_gs_by_ed()
hamilts, bath, ob0 = prepare_bath_hamilts(para)
print('Starting ED for the entanglement bath')
dims = [para['d'] for _ in range(para['l_phys'])]
dims = [para['chi']] + dims + [para['chi']]
ob = dict()
solver = EDbasic(dims)
heff = LinearOp((solver.dim_tot, solver.dim_tot),
lambda x: solver.project_all_hamilt(
x, hamilts, para['tau'], para['couplings']))
ob['e_eig'], solver.v = eigs(heff, k=1, which='LM', v0=solver.v.reshape(-1, ).copy())
solver.is_vec = True
ob['e_eig'] = (1 - ob['e_eig']) / para['tau']
ob['mx'], ob['mz'] = solver.observe_magnetizations(para['phys_sites'])
ob['eb'] = solver.observe_bond_energies(hamilts[0], para['positions_h2'][1:para['num_h2']-1, :])
ob['lm'] = solver.calculate_entanglement()
ob['ent'] = entanglement_entropy(ob['lm'])
ob['e_site'] = sum(ob['eb']) / (para['l_phys'] - 1)
ob['corr_xx'] = solver.observe_correlations(para['pos4corr'], para['op'][1])
ob['corr_zz'] = solver.observe_correlations(para['pos4corr'], para['op'][3])
for n in range(para['pos4corr'].shape[0]):
p1 = para['pos4corr'][n, 0] - 1
p2 = para['pos4corr'][n, 1] - 1
ob['corr_xx'][n] -= ob['mx'][p1] * ob['mx'][p2]
ob['corr_zz'][n] -= ob['mz'][p1] * ob['mz'][p2]
return bath, solver, ob0, ob | 46.23431 | 102 | 0.551674 |
4f62d13ffdfb64d410ae1946e7c2994d6398fee6 | 2,133 | py | Python | tests/acceptance/steps/min_isr.py | yemyatthein/kafka-utils | 243cab3031f8af41ab4a8cfdce391f5377eba1c4 | [
"Apache-2.0"
] | 302 | 2016-05-18T02:05:04.000Z | 2022-03-28T21:36:28.000Z | tests/acceptance/steps/min_isr.py | yemyatthein/kafka-utils | 243cab3031f8af41ab4a8cfdce391f5377eba1c4 | [
"Apache-2.0"
] | 135 | 2016-05-17T23:15:16.000Z | 2021-11-04T13:35:51.000Z | tests/acceptance/steps/min_isr.py | yemyatthein/kafka-utils | 243cab3031f8af41ab4a8cfdce391f5377eba1c4 | [
"Apache-2.0"
] | 133 | 2016-05-18T10:23:05.000Z | 2022-01-29T17:24:17.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from behave import then
from behave import when
from steps.util import call_cmd
from steps.util import get_cluster_config
from kafka_utils.util.zookeeper import ZK
ISR_CONF_NAME = 'min.insync.replicas'
CONF_PATH = '/config/topics/'
def call_min_isr():
cmd = ['kafka-check',
'--cluster-type', 'test',
'--cluster-name', 'test_cluster',
'--discovery-base-path', 'tests/acceptance/config',
'min_isr']
return call_cmd(cmd)
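# Note: the argument list above is equivalent to running
#   kafka-check --cluster-type test --cluster-name test_cluster \
#       --discovery-base-path tests/acceptance/config min_isr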
def set_min_isr(topic, min_isr):
cluster_config = get_cluster_config()
with ZK(cluster_config) as zk:
config = zk.get_topic_config(topic)
config['config'] = {ISR_CONF_NAME: str(min_isr)}
zk.set_topic_config(topic, config)
@when(u'we call the min_isr command')
def step_impl2(context):
context.min_isr_out = call_min_isr()
@when(u'we change min.isr settings for a topic to 1')
def step_impl3(context):
set_min_isr(context.topic, 1)
@when(u'we change min.isr settings for a topic to 2')
def step_impl4(context):
set_min_isr(context.topic, 2)
@then(u'OK min_isr will be printed')
def step_impl5(context):
assert context.min_isr_out == 'OK: All replicas in sync.\n', context.min_isr_out
@then(u'CRITICAL min_isr will be printed')
def step_impl6(context):
error_msg = ("CRITICAL: 1 partition(s) have the number of "
"replicas in sync that is lower than the specified min ISR.\n")
assert context.min_isr_out == error_msg, context.min_isr_out
| 30.042254 | 84 | 0.714018 |
4f625082b058e486a9d178b41e9e9158a4eab414 | 210 | py | Python | tests/integration/roots/test-widgets1/conf.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | 2 | 2017-11-08T19:55:57.000Z | 2018-12-21T12:41:41.000Z | tests/integration/roots/test-references1/conf.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | null | null | null | tests/integration/roots/test-references1/conf.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | 1 | 2018-10-13T08:59:29.000Z | 2018-10-13T08:59:29.000Z | import kaybee
extensions = [kaybee.__title__]
master_doc = 'index'
html_title = ''
exclude_patterns = ['_build']
kaybee_settings = kaybee.KaybeeSettings(
debugdumper=dict(
use_debug=True
)
)
| 15 | 40 | 0.7 |
4f62010d3a8750a170b593872c2b854a0a84fdc2 | 4,522 | py | Python | tests/trinity/core/p2p-proto/test_peer_receipts_validator_api.py | jin10086/py-evm | da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8 | [
"MIT"
] | 137 | 2017-03-17T11:37:51.000Z | 2022-03-07T07:51:28.000Z | tests/trinity/core/p2p-proto/test_peer_receipts_request_and_response_api.py | retzger/py-evm | 5a52ce035d77483577395a18f782b42ca78de77b | [
"MIT"
] | 102 | 2017-04-07T10:43:03.000Z | 2018-11-11T18:01:56.000Z | tests/trinity/core/p2p-proto/test_peer_receipts_request_and_response_api.py | retzger/py-evm | 5a52ce035d77483577395a18f782b42ca78de77b | [
"MIT"
] | 39 | 2017-03-17T11:38:52.000Z | 2021-02-18T23:05:17.000Z | import asyncio
import os
import time
import pytest
from eth_utils import to_tuple
from eth.db.trie import make_trie_root_and_nodes
from eth.rlp.headers import BlockHeader
from eth.rlp.receipts import Receipt
from trinity.protocol.eth.peer import ETHPeer
from tests.trinity.core.peer_helpers import (
get_directly_linked_peers,
)
@pytest.fixture
async def eth_peer_and_remote(request, event_loop):
peer, remote = await get_directly_linked_peers(
request,
event_loop,
peer1_class=ETHPeer,
peer2_class=ETHPeer,
)
return peer, remote
@to_tuple
def mk_receipts(num_receipts):
for _ in range(num_receipts):
yield Receipt(
state_root=os.urandom(32),
gas_used=21000,
bloom=0,
logs=[],
)
def mk_header_and_receipts(block_number, num_receipts):
receipts = mk_receipts(num_receipts)
root_hash, trie_root_and_data = make_trie_root_and_nodes(receipts)
header = BlockHeader(
difficulty=1000000,
block_number=block_number,
gas_limit=3141592,
timestamp=int(time.time()),
receipt_root=root_hash,
)
return header, receipts, (root_hash, trie_root_and_data)
@to_tuple
def mk_headers(*counts):
for idx, num_receipts in enumerate(counts, 1):
yield mk_header_and_receipts(idx, num_receipts)
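# Note: @to_tuple materialises the generator, so mk_headers(1, 3, 2, 5, 4)
# returns five (header, receipts, trie root/nodes) bundles with block numbers
# 1..5 and 1, 3, 2, 5 and 4 receipts respectively.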
@pytest.mark.asyncio
async def test_eth_peer_get_receipts_round_trip_with_full_response(eth_peer_and_remote):
peer, remote = eth_peer_and_remote
headers_bundle = mk_headers(1, 3, 2, 5, 4)
headers, receipts, trie_roots_and_data = zip(*headers_bundle)
receipts_bundle = tuple(zip(receipts, trie_roots_and_data))
async def send_receipts():
remote.sub_proto.send_receipts(receipts)
await asyncio.sleep(0)
get_receipts_task = asyncio.ensure_future(peer.requests.get_receipts(headers))
asyncio.ensure_future(send_receipts())
response = await get_receipts_task
assert len(response) == len(headers)
assert response == receipts_bundle
@pytest.mark.asyncio
async def test_eth_peer_get_receipts_round_trip_with_partial_response(eth_peer_and_remote):
peer, remote = eth_peer_and_remote
headers_bundle = mk_headers(1, 3, 2, 5, 4)
headers, receipts, trie_roots_and_data = zip(*headers_bundle)
receipts_bundle = tuple(zip(receipts, trie_roots_and_data))
async def send_receipts():
remote.sub_proto.send_receipts((receipts[2], receipts[1], receipts[4]))
await asyncio.sleep(0)
get_receipts_task = asyncio.ensure_future(peer.requests.get_receipts(headers))
asyncio.ensure_future(send_receipts())
response = await get_receipts_task
assert len(response) == 3
assert response == (receipts_bundle[2], receipts_bundle[1], receipts_bundle[4])
@pytest.mark.asyncio
async def test_eth_peer_get_receipts_round_trip_with_noise(eth_peer_and_remote):
peer, remote = eth_peer_and_remote
headers_bundle = mk_headers(1, 3, 2, 5, 4)
headers, receipts, trie_roots_and_data = zip(*headers_bundle)
receipts_bundle = tuple(zip(receipts, trie_roots_and_data))
async def send_receipts():
remote.sub_proto.send_transactions([])
await asyncio.sleep(0)
remote.sub_proto.send_receipts(receipts)
await asyncio.sleep(0)
remote.sub_proto.send_transactions([])
await asyncio.sleep(0)
get_receipts_task = asyncio.ensure_future(peer.requests.get_receipts(headers))
asyncio.ensure_future(send_receipts())
response = await get_receipts_task
assert len(response) == len(headers)
assert response == receipts_bundle
@pytest.mark.asyncio
async def test_eth_peer_get_receipts_round_trip_no_match_invalid_response(eth_peer_and_remote):
peer, remote = eth_peer_and_remote
headers_bundle = mk_headers(1, 3, 2, 5, 4)
headers, receipts, trie_roots_and_data = zip(*headers_bundle)
receipts_bundle = tuple(zip(receipts, trie_roots_and_data))
wrong_headers = mk_headers(4, 3, 8)
_, wrong_receipts, _ = zip(*wrong_headers)
async def send_receipts():
remote.sub_proto.send_receipts(wrong_receipts)
await asyncio.sleep(0)
remote.sub_proto.send_receipts(receipts)
await asyncio.sleep(0)
get_receipts_task = asyncio.ensure_future(peer.requests.get_receipts(headers))
asyncio.ensure_future(send_receipts())
response = await get_receipts_task
assert len(response) == len(headers)
assert response == receipts_bundle
| 29.75 | 95 | 0.732419 |
4f621c944eb4f43856e9125ec61544464a97c9bf | 4,037 | py | Python | tests/test_whittaker.py | abdelq/pybaselines | 043aa7875efe1ca01c3e8e9ae7c57a67274aff06 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | tests/test_whittaker.py | abdelq/pybaselines | 043aa7875efe1ca01c3e8e9ae7c57a67274aff06 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | tests/test_whittaker.py | abdelq/pybaselines | 043aa7875efe1ca01c3e8e9ae7c57a67274aff06 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for pybaselines.whittaker.
@author: Donald Erb
Created on March 20, 2021
"""
from pybaselines import whittaker
from .conftest import get_data, AlgorithmTester
class TestAsLS(AlgorithmTester):
"""Class for testing asls baseline."""
func = whittaker.asls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
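# Note: the classes below follow the same AlgorithmTester template as TestAsLS,
# changing only `func` (TestIAsLS also exercises the optional x argument), so
# every Whittaker variant gets the same unchanged-data, output and list-input
# checks.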
class TestIAsLS(AlgorithmTester):
"""Class for testing iasls baseline."""
func = whittaker.iasls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, x, y, x)
def test_no_x(self):
super()._test_algorithm_no_x(with_args=(self.y, self.x), without_args=(self.y,))
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestAirPLS(AlgorithmTester):
"""Class for testing airpls baseline."""
func = whittaker.airpls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestArPLS(AlgorithmTester):
"""Class for testing arpls baseline."""
func = whittaker.arpls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestDrPLS(AlgorithmTester):
"""Class for testing drpls baseline."""
func = whittaker.drpls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestIArPLS(AlgorithmTester):
"""Class for testing iarpls baseline."""
func = whittaker.iarpls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestAsPLS(AlgorithmTester):
"""Class for testing aspls baseline."""
func = whittaker.aspls
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
class TestPsalsa(AlgorithmTester):
"""Class for testing psalsa baseline."""
func = whittaker.psalsa
def test_unchanged_data(self, data_fixture):
x, y = get_data()
super()._test_unchanged_data(data_fixture, y, None, y)
def test_output(self):
super()._test_output(self.y, self.y)
def test_list_input(self):
y_list = self.y.tolist()
super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))
| 26.735099 | 88 | 0.660144 |
4f5fb4a5bc17dcc886611a1ff2057ba73a35a0b9 | 5,093 | py | Python | life/grid.py | pji/life | 4b38e70725a6f000c92f3aea7ddc593bd43f2749 | ["MIT"] | null | null | null | life/grid.py | pji/life | 4b38e70725a6f000c92f3aea7ddc593bd43f2749 | ["MIT"] | null | null | null | life/grid.py | pji/life | 4b38e70725a6f000c92f3aea7ddc593bd43f2749 | ["MIT"] | null | null | null |
"""
grid
~~~~
A simple object for handling cells in Conway's Game of Life.
"""
from collections.abc import MutableSequence
from copy import copy
from random import choice
from typing import Generator, List
class Grid(MutableSequence):
def __init__(self, width:int, height:int, rule: str = 'b3/s23') -> None:
"""Initialize an instance of the class."""
self.rule = rule
self.width = width
self.height = height
self._data = self._make_empty_grid(self.width, self.height)
@property
def rule(self) -> str:
"""The B/S notation rule string for the variant of GoL."""
born = ''.join(str(n) for n in self._born)
survive = ''.join(str(n) for n in self._survive)
return f'B{born}/S{survive}'
@rule.setter
def rule(self, value:str):
rules = [s[1:] for s in value.split('/')]
self._born = [int(n) for n in rules[0]]
self._survive = [int(n) for n in rules[1]]
def __delitem__(self, key):
return self._data.__delitem__(key)
def __getitem__(self, key):
return self._data.__getitem__(key)
def __len__(self):
return self._data.__len__()
def __repr__(self):
cls = self.__class__.__name__
        return (f'{cls}(width={self.width}, height={self.height}, '
                f'rule={self.rule})')
def __setitem__(self, key, value):
return self._data.__setitem__(key, value)
def __str__(self):
rows = []
for row in self._data:
s = ''
for col in row:
if col:
s += 'X'
else:
s += '.'
rows.append(s)
return '\n'.join(rows)
def _gen_coordinates(self, height: int = None,
width: int = None) -> Generator:
"""A generator that returns each valid coordinate of the
grid.
"""
if not height:
height = self.height
if not width:
width = self.width
for row_index in range(height):
for cell_index in range(width):
yield row_index, cell_index
def _get_size_diff(self, size:int, new_size:int) -> tuple[int, int, int]:
delta = size - new_size
if delta >= 0:
return 0, new_size, delta
start = abs(delta // 2)
end = new_size - (abs(delta) - start)
return start, end, delta
def _make_empty_grid(self, width:int, height:int) -> list:
"""Create a blank 2D grid of the given dimensions."""
return [[False for col in range(width)] for row in range(height)]
def _normalize_row_length(self, rows):
"""Ensure each row has the same number of cells."""
width = max(len(row) for row in rows)
for row in rows:
while len(row) < width:
row.append(False)
return rows
def clear(self):
"""Set all cells to False."""
for row, col in self._gen_coordinates():
self._data[row][col] = False
def flip(self, row, col):
"""Flip the value of the cell at the given coordinates."""
self._data[row][col] = not self._data[row][col]
def insert(self, i, x):
return self._data.insert(i, x)
def neighbors(self, x, y):
"""Return the coordinates of the adjacent cells."""
mods = (-1, 0, 1)
rows = [(x + mod) % self.height for mod in mods]
cols = [(y + mod) % self.width for mod in mods]
coords = []
for row in rows:
coords.extend((row, col) for col in cols)
del coords[4]
return coords
def next_generation(self):
"""Calculate the next generation for the grid."""
counts = self._make_empty_grid(self.width, self.height)
for row, col in self._gen_coordinates():
if self[row][col]:
affected = self.neighbors(row, col)
for i, j in affected:
counts[i][j] += 1
new = self._make_empty_grid(self.width, self.height)
for row, col in self._gen_coordinates():
if self[row][col] and counts[row][col] in self._survive:
new[row][col] = True
if not self[row][col] and counts[row][col] in self._born:
new[row][col] = True
self._data = new
def randomize(self):
"""Randomly set each value in the grid to True or False."""
for row, col in self._gen_coordinates():
self._data[row][col] = choice([True, False])
def replace(self, new:List[list]):
"""Replace the current grid data with the given data."""
self.clear()
new = self._normalize_row_length(new)
r_start, r_end, r_delta = self._get_size_diff(self.width, len(new[0]))
c_start, c_end, c_delta = self._get_size_diff(self.height, len(new))
for i in range(c_start, c_end):
for j in range(r_start, r_end):
y = i + c_delta // 2
x = j + r_delta // 2
self._data[y][x] = new[i][j]
| 33.728477 | 78 | 0.557824 |
4f5f917823a9ead2ca1806de073a4fa69ab2a94b | 8,057 | py | Python | cvxpy/tests/test_cbc.py | tongni1975/cvxpy | 34349b5e41c124a6a1e32426e68af95b5044498c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-04-15T14:01:03.000Z | 2019-04-15T14:01:03.000Z | cvxpy/tests/test_cbc.py | tongni1975/cvxpy | 34349b5e41c124a6a1e32426e68af95b5044498c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/tests/test_cbc.py | tongni1975/cvxpy | 34349b5e41c124a6a1e32426e68af95b5044498c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy import *
from cvxpy.tests.base_test import BaseTest
class TestSolvers(BaseTest):
""" Unit tests for solver specific behavior. """
def setUp(self):
self.a = Variable(name='a')
self.b = Variable(name='b')
self.c = Variable(name='c')
self.x = Variable(2, name='x')
self.y = Variable(3, name='y')
self.z = Variable(2, name='z')
self.A = Variable((2,2), name='A')
self.B = Variable((2,2), name='B')
self.C = Variable((3,2), name='C')
def test_lp(self):
"""Tests basic LPs. (from test_elemental.py)
"""
if CBC in installed_solvers():
prob = Problem(Minimize(0), [self.x == 2])
prob.solve(verbose=False, solver=CBC)
self.assertAlmostEqual(prob.value, 0)
self.assertItemsAlmostEqual(self.x.value, [2, 2])
prob = Problem(Minimize(-self.a), [self.a <= 1])
prob.solve(verbose=False, solver=CBC)
self.assertAlmostEqual(prob.value, -1)
self.assertAlmostEqual(self.a.value, 1)
def test_lp_2(self):
"""Test a basic LP. (from test_solver.py::test_cvxopt_glpk)
"""
# Either the problem is solved or CBC is not installed.
if CBC in installed_solvers():
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(verbose=False, solver=CBC)
self.assertAlmostEqual(prob.value, 0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
# Example from http://cvxopt.org/userguide/coneprog.html?highlight=solvers.lp#cvxopt.solvers.lp
objective = Minimize(-4 * self.x[0] - 5 * self.x[1])
constraints = [2 * self.x[0] + self.x[1] <= 3,
self.x[0] + 2 * self.x[1] <= 3,
self.x[0] >= 0,
self.x[1] >= 0]
prob = Problem(objective, constraints)
prob.solve(verbose=False, solver=CBC)
self.assertAlmostEqual(prob.value, -9)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
else:
with self.assertRaises(Exception) as cm:
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(verbose=False, solver=CBC)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % CBC)
def test_mip(self):
"""Test a basic MILP with CBC. (from test_solver.py::test_cvxopt_glpk_mi)
"""
# Either the problem is solved or CBC is not installed.
if CBC in installed_solvers():
bool_var = Variable(boolean=True)
int_var = Variable(integer=True)
prob = Problem(Minimize(norm(self.x, 1)),
[self.x == bool_var, bool_var == 0])
prob.solve(solver=CBC, verbose=False)
self.assertAlmostEqual(prob.value, 0)
self.assertAlmostEqual(bool_var.value, 0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
# Example from http://cvxopt.org/userguide/coneprog.html?highlight=solvers.lp#cvxopt.solvers.lp
objective = Minimize(-4 * self.x[0] - 5 * self.x[1])
constraints = [2 * self.x[0] + self.x[1] <= int_var,
self.x[0] + 2 * self.x[1] <= 3*bool_var,
self.x[0] >= 0,
self.x[1] >= 0,
int_var == 3*bool_var,
int_var == 3]
prob = Problem(objective, constraints)
prob.solve(solver=CBC, verbose=False)
self.assertAlmostEqual(prob.value, -9)
self.assertAlmostEqual(int_var.value, 3)
self.assertAlmostEqual(bool_var.value, 1)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
else:
with self.assertRaises(Exception) as cm:
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(solver=CBC, verbose=False)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % CBC)
def test_hard_mip(self):
"""Test a hard knapsack problem with CBC.
"""
# Either the problem is solved or CBC is not installed.
if CBC in installed_solvers():
# Instance "knapPI_1_50_1000_1" from "http://www.diku.dk/~pisinger/genhard.c"
n = 50
c = 995
z = 8373
coeffs = [[1, 94, 485, 0], [2, 506, 326, 0], [3, 416, 248, 0],
[4, 992, 421, 0], [5, 649, 322, 0], [6, 237, 795, 0],
[7, 457, 43, 1], [8, 815, 845, 0], [9, 446, 955, 0],
[10, 422, 252, 0], [11, 791, 9, 1], [12, 359, 901, 0],
[13, 667, 122, 1], [14, 598, 94, 1], [15, 7, 738, 0],
[16, 544, 574, 0], [17, 334, 715, 0], [18, 766, 882, 0],
[19, 994, 367, 0], [20, 893, 984, 0], [21, 633, 299, 0],
[22, 131, 433, 0], [23, 428, 682, 0], [24, 700, 72, 1],
[25, 617, 874, 0], [26, 874, 138, 1], [27, 720, 856, 0],
[28, 419, 145, 0], [29, 794, 995, 0], [30, 196, 529, 0],
[31, 997, 199, 1], [32, 116, 277, 0], [33, 908, 97, 1],
[34, 539, 719, 0], [35, 707, 242, 0], [36, 569, 107, 0],
[37, 537, 122, 0], [38, 931, 70, 1], [39, 726, 98, 1],
[40, 487, 600, 0], [41, 772, 645, 0], [42, 513, 267, 0],
[43, 81, 972, 0], [44, 943, 895, 0], [45, 58, 213, 0],
[46, 303, 748, 0], [47, 764, 487, 0], [48, 536, 923, 0],
[49, 724, 29, 1], [50, 789, 674, 0]] # index, p / w / x
X = Variable(n, boolean=True)
prob = Problem(Maximize(sum(multiply([i[1] for i in coeffs], X))),
[sum(multiply([i[2] for i in coeffs], X)) <= c])
prob.solve(verbose=False, solver=CBC)
self.assertAlmostEqual(prob.value, z) # objective
else:
with self.assertRaises(Exception) as cm:
prob = Problem(Minimize(norm(self.x, 1)), [self.x == 0])
prob.solve(solver=CBC, verbose=False)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % CBC)
def test_options(self):
"""Test that all the CBC solver options work.
"""
        prob = Problem(Minimize(norm(self.x, 1)), [self.x == Variable(2, boolean=True)])
        if CBC in installed_solvers():
            for i in range(2):
# Some cut-generators seem to be buggy for now -> set to false
prob.solve(solver=CBC, verbose=True, GomoryCuts=True, MIRCuts=True,
MIRCuts2=True, TwoMIRCuts=True, ResidualCapacityCuts=True,
KnapsackCuts=True, FlowCoverCuts=True, CliqueCuts=True,
LiftProjectCuts=True, AllDifferentCuts=False, OddHoleCuts=True,
RedSplitCuts=False, LandPCuts=False, PreProcessCuts=False,
ProbingCuts=True, SimpleRoundingCuts=True)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
else:
with self.assertRaises(Exception) as cm:
prob.solve(solver=CBC)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % CBC)
| 48.245509 | 107 | 0.531463 |
4f6325ca06258b36efb7e207032af594613bbedb | 3,036 | py | Python | pywick/models/segmentation/lexpsp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | 2 | 2020-11-28T07:56:09.000Z | 2021-11-08T09:30:39.000Z | pywick/models/segmentation/lexpsp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | null | null | null | pywick/models/segmentation/lexpsp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | null | null | null | # Source: https://github.com/Lextal/pspnet-pytorch
"""
Implementation of `Pyramid Scene Parsing Network <https://arxiv.org/pdf/1612.01105>`_
"""
from .lex_extractors import *
__all__ = ['PSPNet']
extractor_models = {
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'densenet121': densenet
}
class PSPModule(nn.Module):
def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):
super().__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
self.relu = nn.ReLU()
def _make_stage(self, features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
return nn.Sequential(prior, conv)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear') for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return self.relu(bottle)
class PSPUpsample(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
def forward(self, x):
h, w = 2 * x.size(2), 2 * x.size(3)
p = F.upsample(input=x, size=(h, w), mode='bilinear')
return self.conv(p)
class PSPNet(nn.Module):
def __init__(self, num_classes=18, pretrained=True, backend='densenet121', sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, **kwargs):
super().__init__()
self.feats = extractor_models[backend](pretrained=pretrained)
self.psp = PSPModule(psp_size, 1024, sizes)
self.drop_1 = nn.Dropout2d(p=0.3)
self.up_1 = PSPUpsample(1024, 256)
self.up_2 = PSPUpsample(256, 64)
self.up_3 = PSPUpsample(64, 64)
self.drop_2 = nn.Dropout2d(p=0.15)
self.final = nn.Conv2d(64, num_classes, kernel_size=1)
# self.final = nn.Sequential(
# nn.Conv2d(64, num_classes, kernel_size=1),
# nn.LogSoftmax()
# )
self.classifier = nn.Sequential(
nn.Linear(deep_features_size, 256),
nn.ReLU(),
nn.Linear(256, num_classes)
)
def forward(self, x):
f, class_f = self.feats(x)
p = self.psp(f)
p = self.drop_1(p)
p = self.up_1(p)
p = self.drop_2(p)
p = self.up_2(p)
p = self.drop_2(p)
p = self.up_3(p)
p = self.drop_2(p)
# auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))
return self.final(p) #, self.classifier(auxiliary)
| 31.625 | 149 | 0.602767 |
4f61aae713fe036e676a22a311033f6c6d328f9c | 6,082 | py | Python | pylily/Lily/crawler/vd_archive2.py | joannachuang/2019_summer_joanna | 1773cbad88cc2b7ebf20cebbd97d2988919372e6 | [
"Apache-2.0"
] | null | null | null | pylily/Lily/crawler/vd_archive2.py | joannachuang/2019_summer_joanna | 1773cbad88cc2b7ebf20cebbd97d2988919372e6 | [
"Apache-2.0"
] | null | null | null | pylily/Lily/crawler/vd_archive2.py | joannachuang/2019_summer_joanna | 1773cbad88cc2b7ebf20cebbd97d2988919372e6 | [
"Apache-2.0"
] | 1 | 2019-07-09T08:39:47.000Z | 2019-07-09T08:39:47.000Z | #!/usr/bin/env python3.5
import os
import re
import gzip
import numpy
import pandas
import datetime
import Lily.ctao.hostmetadata as hmd
import Lily.ctao.database as cdb
class vd_archive:
def __init__(self):
self.ctaohost = hmd.hostmetadata()
today = datetime.datetime.today()
self.database_filename = self.ctaohost.warehouse + '/ctao_data_crawler_vehicledetect_{0}.sqlite'.format(today.strftime('%Y%m'))
self.database = cdb.database(self.database_filename)
self.sub_group = 'data_crawler_vd'
self.dict_data = {
'tpec_vddata':
['https://tcgbusfs.blob.core.windows.net/blobtisv/GetVDDATA.xml.gz',
'<ExchangeTime>(.*)</ExchangeTime>', '%Y/%m/%dT%H:%M:%S'],
'tpec_vd':
['https://tcgbusfs.blob.core.windows.net/blobtisv/GetVD.xml.gz',
'<vd:ExchangeTime>(.*)</vd:ExchangeTime>', '%Y/%m/%dT%H:%M:%S'],
'nfbx_1968':
['http://tisvcloud.freeway.gov.tw/xml/1min_incident_data_1968.xml',
'time="([^"]*)"', '%Y-%m-%d %H:%M:%S'],
'nfbx_rlx1':
['http://tisvcloud.freeway.gov.tw/roadlevel_value.xml.gz',
'updatetime="([^"]*)"', '%Y/%m/%d %H:%M:%S'],
'nfbx_rlx5':
['http://tisvcloud.freeway.gov.tw/roadlevel_value5.xml.gz',
'updatetime="([^"]*)"', '%Y/%m/%d %H:%M:%S'],
'nfbx_vdx1':
['http://tisvcloud.freeway.gov.tw/vd_value.xml.gz',
'updatetime="([^"]*)"', '%Y/%m/%d %H:%M:%S'],
'nfbx_vdx5':
['http://tisvcloud.freeway.gov.tw/vd_value5.xml.gz',
'updatetime="([^"]*)"', '%Y/%m/%d %H:%M:%S']}
#all opendata source
self.list_df = pandas.DataFrame.from_dict(self.dict_data, orient='index',
columns=['url', 'exchange_time_repattern', 'exchange_time_datetimepattern'])
self.list_df['gzip_context'] = numpy.random.bytes(1)
self.list_df['download_datetime'] = numpy.datetime64(datetime.datetime.now())
self.list_df['exchange_datetime'] = numpy.datetime64(datetime.datetime.now())
def urlfile_toBytes(self, url):
import io
import gzip
import requests
datafile = io.BytesIO()
response = requests.get(url , verify=True)
for chunk in response.iter_content(chunk_size=512 * 1024):
if chunk: # filter out keep-alive new chunks
datafile.write(chunk)
context = datafile.getvalue()
try:
if url.endswith('.gz') : context = gzip.decompress(context)
except:
context = 'except_'
return context
def current(self):
for key in self.list_df.index:
url = self.list_df.at[key, 'url']
xml_re = self.list_df.at[key, 'exchange_time_repattern']
day_pa = self.list_df.at[key, 'exchange_time_datetimepattern']
self.list_df.at[key, 'gzip_context' ] = numpy.random.bytes(1)
self.list_df.at[key, 'download_datetime'] = numpy.datetime64(datetime.datetime.now())
self.list_df.at[key, 'exchange_datetime'] = numpy.datetime64(datetime.datetime.now())
bintext = self.urlfile_toBytes(url)
context = str( bintext )
if re.findall(xml_re, context):
str_time = re.findall(xml_re, context)[0]
extime = datetime.datetime.strptime(str_time , day_pa)
self.list_df.at[key, 'exchange_datetime'] = extime
self.list_df.at[key, 'gzip_context'] = gzip.compress(bintext)
return self.list_df
def to_database(self):
df = self.list_df.drop( columns=['url', 'exchange_time_repattern', 'exchange_time_datetimepattern'] )
df.to_sql(self.sub_group, self.database.connect, if_exists='append', index=True, index_label='category')
self.database.connect.execute(
'''delete from {0} where length(gzip_context) = 1'''.format(self.sub_group) )
self.database.connect.execute(
'''delete from {0} where rowid not in (select max (rowid)
from {0} group by category, exchange_datetime)'''.format(self.sub_group) )
self.database.connect.commit()
def to_database_idv(self):
df = self.list_df.drop( columns=['url', 'exchange_time_repattern', 'exchange_time_datetimepattern'] )
for tableind in df.index:
tablename = str(tableind)
subdf = df.loc[tableind:tableind]
print (subdf)
subdf.to_sql(tablename, self.database.connect, if_exists='append', index=True, index_label='category')
self.database.connect.execute(
'''delete from {0} where length(gzip_context) = 1'''.format(tablename) )
self.database.connect.execute(
'''delete from {0} where rowid not in (select max (rowid)
from {0} group by category, exchange_datetime, gzip_context)'''.format(tablename) )
self.database.connect.commit()
def check_database(self):
df = pandas.read_sql_query ('''select * from {0} '''.format(self.sub_group), self.database.connect,
index_col=['category', 'exchange_datetime'],
parse_dates=['exchange_datetime', 'download_datetime'])
for key in df.index:
target_dir = self.ctaohost.factory + '/' + key[1].strftime('''%Y-%m-%d''')
target_file = key[1].strftime('''{0}_%Y%m%d_%H%M.gz'''.format(key[0]) )
if os.path.exists(target_dir) != True:
os.mkdir(target_dir)
with open(target_dir + '/' + target_file, 'wb') as xml:
xml.write (df.at[key,'gzip_context'])
if __name__ == '__console__' or __name__ == '__main__':
vd = vd_archive()
vd.current()
vd.to_database()
# vd.check_database()
| 38.987179 | 137 | 0.574482 |
4f61803850e9f1aa051cc40c76f543935265975f | 778 | py | Python | rlkit/envs/half_cheetah_rand_params_wrapper.py | BZSROCKETS/cemrl | 499939535794ee027f08b8a4133eefd0bb7abe14 | [
"MIT"
] | null | null | null | rlkit/envs/half_cheetah_rand_params_wrapper.py | BZSROCKETS/cemrl | 499939535794ee027f08b8a4133eefd0bb7abe14 | [
"MIT"
] | null | null | null | rlkit/envs/half_cheetah_rand_params_wrapper.py | BZSROCKETS/cemrl | 499939535794ee027f08b8a4133eefd0bb7abe14 | [
"MIT"
] | 2 | 2022-01-21T07:49:27.000Z | 2022-02-05T13:23:18.000Z | import numpy as np
from meta_rand_envs.half_cheetah_rand_params import HalfCheetahRandParamsEnv
from . import register_env
@register_env('cheetah-rand-params')
class HalfCheetahRandParamsWrappedEnv(HalfCheetahRandParamsEnv):
def __init__(self, n_tasks=2, randomize_tasks=True, hfield_mode='gentle', log_scale_limit=3.0, change_prob=0.01):
super(HalfCheetahRandParamsWrappedEnv, self).__init__(log_scale_limit=log_scale_limit, mode=hfield_mode, change_prob=change_prob)
self.tasks = self.sample_tasks(n_tasks)
self.reset_task(0)
def get_all_task_idx(self):
return range(len(self.tasks))
def reset_task(self, idx):
self._task = self.tasks[idx]
self._goal = idx
self.set_task(self._task)
self.reset() | 37.047619 | 137 | 0.744216 |
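

if __name__ == '__main__':
    # Illustrative rollout sketch, not part of the module. Assumes the
    # meta_rand_envs package and a working MuJoCo backend, plus the classic
    # gym step API returning (observation, reward, done, info).
    env = HalfCheetahRandParamsWrappedEnv(n_tasks=2)
    for idx in env.get_all_task_idx():
        env.reset_task(idx)
        obs, reward, done, info = env.step(env.action_space.sample())
        print(idx, reward)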
4f628ef870f0f9b11057faab606fe2c869261095 | 1,429 | py | Python | moderation/message_backends.py | EnvSys/django-moderation | afa80926354bdc263d6d9214eb983000334d77e2 | [
"BSD-3-Clause"
] | 97 | 2015-03-21T02:18:39.000Z | 2022-02-22T01:30:19.000Z | moderation/message_backends.py | EnvSys/django-moderation | afa80926354bdc263d6d9214eb983000334d77e2 | [
"BSD-3-Clause"
] | 85 | 2015-05-05T05:22:23.000Z | 2022-03-28T10:32:00.000Z | moderation/message_backends.py | EnvSys/django-moderation | afa80926354bdc263d6d9214eb983000334d77e2 | [
"BSD-3-Clause"
] | 70 | 2015-04-13T07:29:32.000Z | 2022-03-24T10:54:58.000Z | from django.conf import settings
from django.core.mail import send_mail, send_mass_mail
class BaseMessageBackend(object):
def send(self, **kwargs):
raise NotImplementedError
class BaseMultipleMessageBackend(BaseMessageBackend):
"""Used to send mail to multiple users"""
class SyncMessageBackend(BaseMessageBackend):
"""Synchronous backend"""
class AsyncMessageBackend(BaseMessageBackend):
"""Asynchronous backend"""
class EmailMessageBackend(SyncMessageBackend):
"""
Send the message through an email on the main thread
"""
def send(self, **kwargs):
subject = kwargs.get('subject', None)
message = kwargs.get('message', None)
recipient_list = kwargs.get('recipient_list', None)
send_mail(subject=subject,
message=message,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=recipient_list,
fail_silently=True)
class EmailMultipleMessageBackend(SyncMessageBackend):
"""
Send messages through emails on the main thread
"""
def send(self, datatuples, **kwargs):
send_mass_mail(
tuple(tuple(
d.get('subject', None),
d.get('message', None),
settings.DEFAULT_FROM_EMAIL,
d.get('recipient_list', None))
for d in datatuples),
fail_silently=True)
| 26.462963 | 59 | 0.636809 |
4f5dc092b354b03ddd4e81520091bdccb538a838 | 1,864 | py | Python | tests/www/views/test_views_task_norun.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | tests/www/views/test_views_task_norun.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | tests/www/views/test_views_task_norun.py | samkenxstream/airflow | 71c980a8ffb3563bf16d8a23a58de54c9e8cf556 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib.parse
import pytest
from airflow.utils import dates
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = dates.days_ago(2)
DEFAULT_VAL = urllib.parse.quote_plus(str(DEFAULT_DATE))
@pytest.fixture(scope="module", autouse=True)
def reset_dagruns():
"""Clean up stray garbage from other tests."""
clear_db_runs()
def test_task_view_no_task_instance(admin_client):
url = f"/task?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}"
resp = admin_client.get(url, follow_redirects=True)
assert resp.status_code == 200
html = resp.data.decode("utf-8")
assert "<h5>No Task Instance Available</h5>" in html
assert "<h5>Task Instance Attributes</h5>" not in html
def test_rendered_templates_view_no_task_instance(admin_client):
url = f"/rendered-templates?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}"
resp = admin_client.get(url, follow_redirects=True)
assert resp.status_code == 200
html = resp.data.decode("utf-8")
assert "Rendered Template" in html
| 36.54902 | 106 | 0.763948 |
4f63a2bb4d320efbc191d0f752f677beb1348328 | 19,031 | py | Python | src/onevision/models/enhancement/mbllen/configs/mbllen_lol.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/models/enhancement/mbllen/configs/mbllen_lol.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/models/enhancement/mbllen/configs/mbllen_lol.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""MBLLEN trained on LoL dataset.
NOTES:
- MBLLEN model requires input shape to be [:, 256, 256].
- Optimizer should be: dict(name="adam", lr=0.0001)
"""
from __future__ import annotations
import os
from onevision.cv import VisionBackend
from onevision.utils import pretrained_dir
__all__ = [
"config",
"model_fullname",
]
# MARK: - Basic Info
model_name = "mbllen"
# Model's name. Also, the root folder located inside `models_zoo_dir`.
data_name = "lol"
# Trained data name.
model_fullname = f"{model_name}_{data_name}"
# It represents the model with trained dataset.
version = 0
# Experiment version.
# MARK: - Dirs
root_dir = model_name
model_dir = os.path.join(pretrained_dir, root_dir, model_fullname)
# MARK: - Configs
callbacks = [
{
"name": "checkpoint_callback",
"model_dir": model_dir,
# Model's dir. The checkpoints will be save to
# `../<model_dir>/<version>/weights/`.
"version": version,
# Experiment version. If version is not specified the logger inspects
# the save directory for existing versions, then automatically assigns
# the next available version. If it is a string then it is used as the
# run-specific subdirectory name, otherwise `version_${version}` is
# used.
"filename": None,
# Checkpoint filename. Can contain named formatting options to be
# auto-filled. If `None`, it will be set to `epoch={epoch}.ckpt`.
# Default: `None`.
"auto_insert_metric_name": True,
# When `True`, the checkpoints filenames will contain the metric name.
# Default: `True`.
"monitor": "checkpoint/psnr/val_epoch", # "loss_epoch",
# Quantity to monitor. Default: `None` which will monitor `loss_epoch`.
"mode": "max",
# One of: [`min`, `max`]. For `acc`, this should be `max`, for `loss`
# this should be `min`, etc.
"verbose": True,
# Verbosity mode. Default: `False`.
"save_weights_only": False,
# If `True`, then only the model’s weights will be saved
# `model.save_weights(filepath)`, else the full model is saved
# `model.save(filepath)`.
"every_n_train_steps": None,
# Number of training steps between checkpoints.
# If `every_n_train_steps == None or every_n_train_steps == 0`, we skip
# saving during training. To disable, set `every_n_train_steps = 0`.
# This value must be `None` or non-negative. This must be mutually
# exclusive with `train_time_interval` and `every_n_epochs`.
# Default: `None`.
"every_n_epochs": 1,
# Number of epochs between checkpoints. If `every_n_epochs == None` or
# `every_n_epochs == 0`, we skip saving when the epoch ends. To
# disable, `set every_n_epochs = 0`. This value must be None or
# non-negative. Default: `1`.
"train_time_interval": None,
# Checkpoints are monitored at the specified time interval. For all
# practical purposes, this cannot be smaller than the amount of time it
# takes to process a single training batch. This is not guaranteed to
# execute at the exact time specified, but should be close. This must
# be mutually exclusive with `every_n_train_steps` and
# `every_n_epochs`. Default: `None`.
"save_on_train_epoch_end": True
# Whether to run checkpointing at the end of the training epoch. If
# this is `False`, then the check runs at the end of the validation.
# If `None` then skip saving. Default: `False`.
},
{
"name": "learning_rate_monitor",
"logging_interval": None,
# Set to `epoch` or `step` to log lr of all optimizers at the same
# interval, set to None to log at individual interval according to the
# interval key of each scheduler. Default: `None`.
"log_momentum": False,
# Option to also log the momentum values of the optimizer, if the
# optimizer has the momentum or betas attribute. Default: `False`.
},
{
"name": "rich_model_summary",
"max_depth": 1,
# Maximum depth of layer nesting that the summary will include.
# A value of 0 turns the layer summary off.
},
{
"name": "rich_progress_bar",
},
]
tb_logger = {
"save_dir": model_dir,
# Save directory.
"name": "",
# Experiment name. Default: `default`. If it is the empty string then no
# per-experiment subdirectory is used.
"version": version,
# Experiment version. If version is not specified the logger inspects the
# save directory for existing versions, then automatically assigns the
# next available version. If it is a string then it is used as the
# run-specific subdirectory name, otherwise `version_${version}` is used.
"sub_dir": None,
# Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
# then logs are saved in `/save_dir/version/sub_dir/`. Default: `None` in
# which logs are saved in `/save_dir/version/`.
"log_graph": False,
# Adds the computational graph to tensorboard. This requires that the
# user has defined the `self.example_input_array` attribute in their model.
"default_hp_metric": True,
# Enables a placeholder metric with key `hp_metric` when
# `log_hyperparams` is called without a metric (otherwise calls to
# log_hyperparams without a metric are ignored).
"prefix": "",
# A string to put at the beginning of metric keys.
}
trainer = {
"accelerator": "gpu",
# Supports passing different accelerator types ("cpu", "gpu", "tpu", "ipu", "hpu", "auto")
# as well as custom accelerator instances. Default: `gpu`.
"accumulate_grad_batches": None,
# Accumulates grads every k batches or as set up in the dict. Default: `None`.
"amp_backend": "native",
# Mixed precision backend to use (`native` or `apex`). Default: `native`.
"amp_level": None,
# Optimization level to use (O1, O2, etc...). By default it will be set
# to "O2" if `amp_backend` is set to `apex`.
"auto_lr_find": False,
# If set to `True`, will make trainer.tune() run a learning rate finder,
# trying to optimize initial learning for faster convergence.
# trainer.tune() method will set the suggested learning rate in self.lr
# or self.learning_rate in the LightningModule. To use a different key
# set a string instead of True with the key name. Default: `False`.
"auto_scale_batch_size": False,
# If set to `True`, will initially run a batch size finder trying to find
# the largest batch size that fits into memory. The result will be stored
# in self.batch_size in the LightningModule. Additionally, can be set to
# either power that estimates the batch size through a power search or
# binsearch that estimates the batch size through a binary search.
# Default: `False`.
"auto_select_gpus": False,
# If enabled and `gpus` is an integer, pick available gpus automatically.
# This is especially useful when GPUs are configured to be in “exclusive
# mode”, such that only one process at a time can access them.
# Default: `False`
"benchmark": False,
# If `True` enables cudnn.benchmark. Default: `False`.
"callbacks": None,
# Add a callback or list of callbacks. Default: `None`, will be defined
# when in code.
"check_val_every_n_epoch": 1,
# Check val every n train epochs. Default: `1`.
"default_root_dir": None,
# Default path for logs and weights when no logger/ckpt_callback passed.
# Default: `None`.
"detect_anomaly": False,
# Enable anomaly detection for the autograd engine. Default: `False`.
"deterministic": False,
# If true enables cudnn.deterministic. Default: `False`.
"devices": None,
# Will be mapped to either gpus, tpu_cores, num_processes or ipus,
# based on the accelerator type. Default: `None`.
"enable_checkpointing": False,
# If `True`, enable checkpointing. It will configure a default
# ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
# `callbacks`.
"enable_model_summary": True,
# Whether to enable model summarization by default.
"enable_progress_bar": True,
# Whether to enable to progress bar by default.
"fast_dev_run": False,
# Runs n if set to n (int) else 1 if set to True batch(es) of train,
# val and test to find any bugs (ie: a sort of unit test). Default: `False`.
"gpus": None,
# Number of gpus to train on (int) or which GPUs to train on (list or
# str) applied per node. Defined at runtime. Default: `None`.
"gradient_clip_val": None,
# Value at which to clip gradients. Passing `gradient_clip_val=None`
# disables gradient clipping. If using Automatic Mixed Precision (AMP), the
# gradients will be unscaled before. Default: `None`.
"gradient_clip_algorithm": None,
# Gradient clipping algorithm to use. Pass
# `gradient_clip_algorithm="value"` to clip by value,
# and `gradient_clip_algorithm="norm"` to clip by norm. By default it will
# be set to `norm`. Default: `None`.
"ipus": None,
# How many IPUs to train on. Default: `None`.
"limit_train_batches": 1.0,
# How much of training dataset to check
# (float = fraction, int = num_batches). Default: 1.0.
"limit_val_batches": 1.0,
# How much of validation dataset to check
# (float = fraction, int = num_batches). Default: 1.0.
"limit_test_batches": 1.0,
# How much of test dataset to check
# (float = fraction, int = num_batches). Default: 1.0.
"limit_predict_batches": 1.0,
# How much of prediction dataset to check
# (float = fraction, int = num_batches). Default: 1.0.
"logger": True,
# Logger (or iterable collection of loggers) for experiment tracking. A
# True value uses the default TensorBoardLogger. False will disable
# logging. If multiple loggers are provided and the save_dir property of
# that logger is not set, local files (checkpoints, profiler traces,
# etc.) are saved in default_root_dir rather than in the log_dir of any
# of the individual loggers. Default: `True`.
"log_every_n_steps": 50,
# How often to log within steps. Default: `50`.
"max_epochs": 200,
# Stop training once this number of epochs is reached. Disabled by
# default (None). If both max_epochs and max_steps are not specified,
# defaults to max_epochs = 1000.
"max_steps": -1,
# Stop training after this number of steps. Default: `-1`, disabled.
"max_time": None,
# Stop training after this amount of time has passed. Disabled by default
# (None). The time duration can be specified in the format DD:HH:MM:SS (
# days, hours, minutes seconds), as a datetime.timedelta, or a dictionary
# with keys that will be passed to datetime.timedelta. Default: `None`.
"min_epochs": 1,
# Force training for at least these many epochs. Disabled by default (
# None). If both min_epochs and min_steps are not specified, defaults to
# min_epochs = 1.
"min_steps": None,
# Force training for at least these number of steps. Default: `None`,
# disabled.
"move_metrics_to_cpu": False,
# Whether to force internal logged metrics to be moved to cpu. This can
# save some gpu memory, but can make training slower. Use with attention.
# Default: `False`.
"multiple_trainloader_mode": "max_size_cycle",
# How to loop over the datasets when there are multiple train loaders. In
# ‘max_size_cycle’ mode, the trainer ends one epoch when the largest
# dataset is traversed, and smaller datasets reload when running out of
# their data. In ‘min_size’ mode, all the datasets reload when reaching
# the minimum length of datasets. Default: `max_size_cycle`.
"num_nodes": 1,
# Number of GPU nodes for distributed training. Default: `1`. Defined at
# runtime. Default: `1`.
"num_processes": 1,
# Number of processes for distributed training with
# distributed_backend=”ddp_cpu”. Defined at runtime. Default: `1`.
"num_sanity_val_steps": 0,
# Sanity check runs n validation batches before starting the training
# routine. Set it to -1 to run all batches in all validation dataloaders.
# Default: `2`.
"overfit_batches": 0.0,
# Overfit a fraction of training data (float) or a set number of batches
# (int). Default: `0.0`.
"profiler": "simple",
# To profile individual steps during training and assist in identifying
# bottlenecks. One of: ["simple", "advanced", "pytorch", None].
# Default: `None`.
"plugins": None,
# Plugins allow modification of core behavior like ddp and amp, and enable
# custom lightning plugins. Default: `None`.
"precision": 32,
# Double precision (64), full precision (32), half precision (16) or
# bfloat16 precision (bf16). Can be used on CPU, GPU or TPUs. Default: `32`.
"reload_dataloaders_every_n_epochs": 0,
# Set to a non-negative integer to reload dataloaders every n epochs.
# Default: `0`.
"replace_sampler_ddp": True,
# Explicitly enables or disables sampler replacement. If not specified
# this will be toggled automatically when DDP is used. By default,
# it will add shuffle=True for train sampler and shuffle=False for
# val/test sampler. If you want to customize it, you can set
# replace_sampler_ddp=False and add your own distributed sampler.
"strategy": "dp",
# Previously known as distributed_backend (dp, ddp, ddp2, etc…). Can also
# take in an accelerator object for custom hardware. Default: `None`.
# Defined at runtime.
"sync_batchnorm": False,
# Synchronize batch norm layers between process groups/whole world.
# Default: `False`.
"tpu_cores": None,
# How many TPU cores to train on (1 or 8) / Single TPU to train on [1].
# Default: `None`.
"track_grad_norm": -1,
# `-1` no tracking. Otherwise, tracks that p-norm. May be set to `inf`
# infinity-norm. Default: `-1`.
"val_check_interval": 1.0,
# How often to check the validation set. Use float to check within a
# training epoch, use int to check every n steps (batches). Default: `1.0`.
}
inference = {
# "default_root_dir": infer_dir,
# Root dir to save predicted data.
"version": None,
# Experiment version. If version is not specified the logger inspects
# the save directory for existing versions, then automatically assigns
# the next available version. If it is a string then it is used as the
# run-specific subdirectory name, otherwise `version_${version}` is used.
"shape": [256, 256, 3],
# Input and output shape of the image as [H, W, C]. If `None`, use the
# input image shape.
"batch_size": 1,
# Batch size. Default: `1`.
"verbose": True,
# Verbosity mode. Default: `False`.
"save_image": True,
# Save predicted images. Default: `False`.
}
data = {
"name": data_name,
# Datasets" name.
"shape": [256, 256, 3],
# Image shape as [H, W, C]. This is compatible with OpenCV format.
"batch_size": 8,
# Number of samples in one forward & backward pass.
"caching_labels": True,
# Should overwrite the existing cached labels? Default: `False`.
"caching_images": False,
# Cache images into memory for faster training. Default: `False`.
"write_labels": False,
# After loading images and labels for the first time, we will convert it
# to our custom data format and write to files. If `True`, we will
# overwrite these files. Default: `False`.
"fast_dev_run": False,
# Take a small subset of the data for fast debug (i.e, like unit testing).
# Default: `False`.
"shuffle": True,
# Set to `True` to have the data reshuffled at every training epoch.
# Default: `True`.
"load_augment": {
"mosaic": 0.0,
"mixup": 0.0,
},
# Augmented loading policy.
"augment": {
"name": "paired_images_auto_augment",
# Name of the augmentation policy.
"policy": "enhancement",
# Augmentation policy. One of: [`enhancement`]. Default: `enhancement`.
"fill": None,
# Pixel fill value for the area outside the transformed image.
# If given a number, the value is used for all bands respectively.
"to_tensor": True,
# Convert a PIL Image or numpy.ndarray [H, W, C] in the range [0, 255]
# to a torch.FloatTensor of shape [C, H, W] in the range [0.0, 1.0].
# Default: `True`.
},
# Augmentation policy.
"vision_backend": VisionBackend.PIL,
# Vision backend option.
}
model = {
"name": model_name,
# Model's name.
"basename": model_name,
# Model's basename.
"fullname": model_fullname,
# Fullname of the model as: {name}_{data_name}. If `None`, the fullname
# will be determined when initializing the model.
"model_dir": model_dir,
# Model's save dir.
"version": version,
# Experiment version. If version is not specified the logger inspects the
# save directory for existing versions, then automatically assigns the
# next available version. If it is a string then it is used as the
# run-specific subdirectory name, otherwise `version_${version}` is used.
"channels": 8,
#
"kernel_size": 5,
#
"num_blocks": 10,
#
"shape": data["shape"],
# Image shape as [H, W, C].
"num_classes": None,
# Number of classes in the dataset that is used to train the model.
"class_labels": None,
# `ClassLabels` object contains all class-labels defined in the dataset.
"pretrained": False,
# Initialize weights from pretrained.
# - If `True`, use the original pretrained described by the author (
# usually, ImageNet or COCO). By default, it is the first element in the
# `model_urls` dictionary.
# - If `str` and is a file/path, then load weights from saved file.
# - In each inherited model, `pretrained` can be a dictionary's key to
# get the corresponding local file or url of the weight.
"out_indexes": -1,
# List of layers' indexes to extract features. This is called in
# `forward_features()` and is useful when the model is used as a
# component in another model.
# - If is a `tuple` or `list`, return an array of features.
# - If is a `int`, return only the feature from that layer's index.
# - If is `-1`, return the last layer's output.
# Default: `-1`.
"loss": None,
# Loss config.
"metrics": {
"train": [{"name": "psnr"}],
"val": [{"name": "psnr"}, {"name": "ssim"}],
"test": [{"name": "psnr"}, {"name": "ssim"}],
},
# Metrics' configs.
"optimizers": [
{
"optimizer": {"name": "adam", "lr": 0.0001},
"lr_scheduler": None,
"frequency": None,
},
],
# Optimizers' configs.
"debugger": {
"every_n_epochs": 10,
# Number of epochs between debugging. To disable, set
# `every_n_epochs=0`. Default: `1`.
"run_in_parallel": True,
# If `True` runs debugging process in a separated thread.
# Default: `True`.
"queue_size": 20,
# Debug queue size.
"save_max_n": 20,
# Maximum debugging items to be kept. Default: `50`.
"save_to_subdir": True,
# Save all debug images of the same epoch to a sub-directory naming
# after the epoch number. Default: `True`.
"image_quality": 95,
# Image quality to be saved. Default: `95`.
"verbose": False,
# If `True` shows the results on the screen. Default: `False`.
"show_max_n": 8,
# Maximum debugging items to be shown. Default: `8`.
"wait_time": 0.001,
# Pause some times before showing the next image. Default: `0.001`.
},
}
config = {
"callbacks": callbacks,
# Callbacks configs used during training.
"tb_logger": tb_logger,
# Tensorboard logger config.
"trainer": trainer,
# Trainer config.
"inference": inference,
# Inference config.
"data": data,
# Dataset config.
"model": model,
# Model config.
}
# MARK: - Simple test
if __name__ == "__main__":
print(config)
| 39.565489 | 91 | 0.705953 |
4f61c19d7261e6fa1a358b3ca3194a51b46160bf | 931 | py | Python | wagtail/wagtailembeds/models.py | thenewguy/wagtail | ecd8d22c04c46486b3c09b8eaeef1d0c96b0b288 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailembeds/models.py | thenewguy/wagtail | ecd8d22c04c46486b3c09b8eaeef1d0c96b0b288 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailembeds/models.py | thenewguy/wagtail | ecd8d22c04c46486b3c09b8eaeef1d0c96b0b288 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
EMBED_TYPES = (
('video', 'Video'),
('photo', 'Photo'),
('link', 'Link'),
('rich', 'Rich'),
)
@python_2_unicode_compatible
class Embed(models.Model):
url = models.URLField()
max_width = models.SmallIntegerField(null=True, blank=True)
type = models.CharField(max_length=10, choices=EMBED_TYPES)
html = models.TextField(blank=True)
title = models.TextField(blank=True)
author_name = models.TextField(blank=True)
provider_name = models.TextField(blank=True)
thumbnail_url = models.URLField(null=True, blank=True)
width = models.IntegerField(null=True, blank=True)
height = models.IntegerField(null=True, blank=True)
last_updated = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('url', 'max_width')
def __str__(self):
return self.url
| 29.09375 | 63 | 0.698174 |
4f63420e724094fb4261341a59db41ef8d83111d | 234 | py | Python | calcs/error_calculation_al.py | cosmolab/cosmogenic | 284c0b431d5143eb2247cfb0189f43402ddfdf1f | [
"BSD-2-Clause"
] | 2 | 2015-07-21T19:32:10.000Z | 2015-11-01T20:47:02.000Z | calcs/error_calculation_al.py | zploskey/cosmogenic | 284c0b431d5143eb2247cfb0189f43402ddfdf1f | [
"BSD-2-Clause"
] | 1 | 2018-05-17T16:57:51.000Z | 2018-05-17T16:57:52.000Z | calcs/error_calculation_al.py | zploskey/cosmogenic | 284c0b431d5143eb2247cfb0189f43402ddfdf1f | [
"BSD-2-Clause"
] | 1 | 2015-08-12T18:26:23.000Z | 2015-08-12T18:26:23.000Z | import numpy as np
# Rough fit to Greg Balco's error data on his blog.
#ends = np.array([[x_lo, ], [x_hi, 0.01]])
x = np.array([1e5, 1.1e8])
y = np.array([0.1, 0.01])
line = np.polyfit(np.log10(x), np.log10(y), 1)
print 10**line[1]
| 23.4 | 51 | 0.619658 |
4f628bc95017350e6b217f72799d5717837c8a2a | 733 | py | Python | src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/_params.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/_params.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 3 | 2021-03-26T00:48:20.000Z | 2022-03-29T22:05:39.000Z | src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/_params.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import register_cli_argument
# pylint: disable=line-too-long
register_cli_argument('configure', 'defaults', nargs='+', options_list=('--defaults', '-d'),
help="space separated 'name=value' pairs for common arguments defaults, e.g. '--defaults group=myRG web=myweb vm=myvm'. Use '' to clear the defaults, e.g. --defaults vm='' web=''")
| 66.636364 | 202 | 0.530696 |
4f61795a34d7b013f86db8f65c33fe57cdca8010 | 1,234 | py | Python | Chapter06/Exercise6.04/form_project/form_example/forms.py | 3h04m1/Web-Development-with-Django | 6a002b5407901a679d6f781be91027720bbec58c | [
"MIT"
] | 97 | 2021-03-01T12:54:30.000Z | 2022-03-28T02:57:26.000Z | Chapter06/Exercise6.04/form_project/form_example/forms.py | 3h04m1/Web-Development-with-Django | 6a002b5407901a679d6f781be91027720bbec58c | [
"MIT"
] | 81 | 2020-08-27T04:56:04.000Z | 2022-03-12T00:53:40.000Z | Chapter06/Exercise6.04/form_project/form_example/forms.py | 3h04m1/Web-Development-with-Django | 6a002b5407901a679d6f781be91027720bbec58c | [
"MIT"
] | 163 | 2020-12-25T14:38:38.000Z | 2022-03-30T10:31:40.000Z | from django import forms
RADIO_CHOICES = (
("Value One", "Value One Display"),
("Value Two", "Text For Value Two"),
("Value Three", "Value Three's Display Text")
)
BOOK_CHOICES = (
(
"Non-Fiction", (
("1", "Deep Learning with Keras"),
("2", "Web Development with Django")
)
),
(
"Fiction", (
("3", "Brave New World"),
("4", "The Great Gatsby")
)
)
)
class ExampleForm(forms.Form):
text_input = forms.CharField()
password_input = forms.CharField(widget=forms.PasswordInput)
checkbox_on = forms.BooleanField()
radio_input = forms.ChoiceField(choices=RADIO_CHOICES, widget=forms.RadioSelect)
favorite_book = forms.ChoiceField(choices=BOOK_CHOICES)
books_you_own = forms.MultipleChoiceField(choices=BOOK_CHOICES)
text_area = forms.CharField(widget=forms.Textarea)
integer_input = forms.IntegerField()
float_input = forms.FloatField()
decimal_input = forms.DecimalField(max_digits=3)
email_input = forms.EmailField()
date_input = forms.DateField(widget=forms.DateInput(attrs={"type": "date"}))
hidden_input = forms.CharField(widget=forms.HiddenInput, initial="Hidden Value")
| 31.641026 | 84 | 0.653971 |
4f633dcb2c43248f3cb074e1229790db8b254f1c | 4,933 | py | Python | sdk/search/azure-search-documents/azure/search/documents/_internal/_paging.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 1 | 2021-09-07T18:43:20.000Z | 2021-09-07T18:43:20.000Z | sdk/search/azure-search-documents/azure/search/documents/_internal/_paging.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/search/azure-search-documents/azure/search/documents/_internal/_paging.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import base64
import itertools
import json
from azure.core.paging import ItemPaged, PageIterator, ReturnType
from ._generated.models import SearchRequest
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Union
def convert_search_result(result):
ret = result.additional_properties
ret["@search.score"] = result.score
ret["@search.highlights"] = result.highlights
return ret
def pack_continuation_token(response):
api_version = "2020-06-30"
if response.next_page_parameters is not None:
token = {
"apiVersion": api_version,
"nextLink": response.next_link,
"nextPageParameters": response.next_page_parameters.serialize(),
}
return base64.b64encode(json.dumps(token).encode("utf-8"))
return None
def unpack_continuation_token(token):
unpacked_token = json.loads(base64.b64decode(token))
next_link = unpacked_token["nextLink"]
next_page_parameters = unpacked_token["nextPageParameters"]
next_page_request = SearchRequest.deserialize(next_page_parameters)
return next_link, next_page_request
class SearchItemPaged(ItemPaged[ReturnType]):
def __init__(self, *args, **kwargs):
super(SearchItemPaged, self).__init__(*args, **kwargs)
self._first_page_iterator_instance = None
def __next__(self):
# type: () -> ReturnType
if self._page_iterator is None:
first_iterator = self._first_iterator_instance()
self._page_iterator = itertools.chain.from_iterable(first_iterator)
return next(self._page_iterator)
def _first_iterator_instance(self):
if self._first_page_iterator_instance is None:
self._first_page_iterator_instance = self.by_page()
return self._first_page_iterator_instance
def get_facets(self):
# type: () -> Union[dict, None]
"""Return any facet results if faceting was requested.
"""
return self._first_iterator_instance().get_facets()
def get_coverage(self):
# type: () -> float
"""Return the covereage percentage, if `minimum_coverage` was
specificied for the query.
"""
return self._first_iterator_instance().get_coverage()
def get_count(self):
# type: () -> float
"""Return the count of results if `include_total_result_count` was
set for the query.
"""
return self._first_iterator_instance().get_count()
# The pylint error silenced below seems spurious, as the inner wrapper does, in
# fact, become a method of the class when it is applied.
def _ensure_response(f):
# pylint:disable=protected-access
def wrapper(self, *args, **kw):
if self._current_page is None:
self._response = self._get_next(self.continuation_token)
self.continuation_token, self._current_page = self._extract_data(
self._response
)
return f(self, *args, **kw)
return wrapper
class SearchPageIterator(PageIterator):
def __init__(self, client, initial_query, kwargs, continuation_token=None):
super(SearchPageIterator, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token,
)
self._client = client
self._initial_query = initial_query
self._kwargs = kwargs
self._facets = None
def _get_next_cb(self, continuation_token):
if continuation_token is None:
return self._client.documents.search_post(
search_request=self._initial_query.request, **self._kwargs
)
_next_link, next_page_request = unpack_continuation_token(continuation_token)
return self._client.documents.search_post(search_request=next_page_request, **self._kwargs)
def _extract_data_cb(self, response): # pylint:disable=no-self-use
continuation_token = pack_continuation_token(response)
results = [convert_search_result(r) for r in response.results]
return continuation_token, results
@_ensure_response
def get_facets(self):
facets = self._response.facets
if facets is not None and self._facets is None:
self._facets = {k: [x.as_dict() for x in v] for k, v in facets.items()}
return self._facets
@_ensure_response
def get_coverage(self):
return self._response.coverage
@_ensure_response
def get_count(self):
return self._response.count
| 34.256944 | 99 | 0.666532 |
4f5c6c8d0baf6c691635151913c34357618062fd | 7,007 | py | Python | tempest/api/baremetal/admin/base.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/baremetal/admin/base.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/baremetal/admin/base.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from tempest_lib import exceptions as lib_exc
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
# NOTE(adam_g): The baremetal API tests exercise operations such as enroll
# node, power on, power off, etc. Testing against real drivers (ie, IPMI)
# will require passing driver-specific data to Tempest (addresses,
# credentials, etc). Until then, only support testing against the fake driver,
# which has no external dependencies.
SUPPORTED_DRIVERS = ['fake']
# NOTE(jroll): resources must be deleted in a specific order, this list
# defines the resource types to clean up, and the correct order.
RESOURCE_TYPES = ['port', 'node', 'chassis']
def creates(resource):
"""Decorator that adds resources to the appropriate cleanup list."""
def decorator(f):
@functools.wraps(f)
def wrapper(cls, *args, **kwargs):
resp, body = f(cls, *args, **kwargs)
if 'uuid' in body:
cls.created_objects[resource].add(body['uuid'])
return resp, body
return wrapper
return decorator
class BaseBaremetalTest(test.BaseTestCase):
"""Base class for Baremetal API tests."""
@classmethod
def skip_checks(cls):
super(BaseBaremetalTest, cls).skip_checks()
if not CONF.service_available.ironic:
skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
raise cls.skipException(skip_msg)
if CONF.baremetal.driver not in SUPPORTED_DRIVERS:
skip_msg = ('%s skipped as Ironic driver %s is not supported for '
'testing.' %
(cls.__name__, CONF.baremetal.driver))
raise cls.skipException(skip_msg)
@classmethod
def setup_credentials(cls):
super(BaseBaremetalTest, cls).setup_credentials()
cls.mgr = clients.AdminManager()
@classmethod
def setup_clients(cls):
super(BaseBaremetalTest, cls).setup_clients()
cls.client = cls.mgr.baremetal_client
@classmethod
def resource_setup(cls):
super(BaseBaremetalTest, cls).resource_setup()
cls.driver = CONF.baremetal.driver
cls.power_timeout = CONF.baremetal.power_timeout
cls.created_objects = {}
for resource in RESOURCE_TYPES:
cls.created_objects[resource] = set()
@classmethod
def resource_cleanup(cls):
"""Ensure that all created objects get destroyed."""
try:
for resource in RESOURCE_TYPES:
uuids = cls.created_objects[resource]
delete_method = getattr(cls.client, 'delete_%s' % resource)
for u in uuids:
delete_method(u, ignore_errors=lib_exc.NotFound)
finally:
super(BaseBaremetalTest, cls).resource_cleanup()
@classmethod
@creates('chassis')
def create_chassis(cls, description=None, expect_errors=False):
"""
Wrapper utility for creating test chassis.
        :param description: A description of the chassis. If not supplied,
a random value will be generated.
:return: Created chassis.
"""
description = description or data_utils.rand_name('test-chassis-')
resp, body = cls.client.create_chassis(description=description)
return resp, body
@classmethod
@creates('node')
def create_node(cls, chassis_id, cpu_arch='x86', cpu_num=8, storage=1024,
memory=4096):
"""
Wrapper utility for creating test baremetal nodes.
:param cpu_arch: CPU architecture of the node. Default: x86.
:param cpu_num: Number of CPUs. Default: 8.
:param storage: Disk size. Default: 1024.
:param memory: Available RAM. Default: 4096.
:return: Created node.
"""
resp, body = cls.client.create_node(chassis_id, cpu_arch=cpu_arch,
cpu_num=cpu_num, storage=storage,
memory=memory, driver=cls.driver)
return resp, body
@classmethod
@creates('port')
def create_port(cls, node_id, address, extra=None, uuid=None):
"""
Wrapper utility for creating test ports.
:param address: MAC address of the port.
:param extra: Meta data of the port. If not supplied, an empty
dictionary will be created.
:param uuid: UUID of the port.
:return: Created port.
"""
extra = extra or {}
resp, body = cls.client.create_port(address=address, node_id=node_id,
extra=extra, uuid=uuid)
return resp, body
@classmethod
def delete_chassis(cls, chassis_id):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: Server response.
"""
resp, body = cls.client.delete_chassis(chassis_id)
if chassis_id in cls.created_objects['chassis']:
cls.created_objects['chassis'].remove(chassis_id)
return resp
@classmethod
def delete_node(cls, node_id):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: Server response.
"""
resp, body = cls.client.delete_node(node_id)
if node_id in cls.created_objects['node']:
cls.created_objects['node'].remove(node_id)
return resp
@classmethod
def delete_port(cls, port_id):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: Server response.
"""
resp, body = cls.client.delete_port(port_id)
if port_id in cls.created_objects['port']:
cls.created_objects['port'].remove(port_id)
return resp
def validate_self_link(self, resource, uuid, link):
"""Check whether the given self link formatted correctly."""
expected_link = "{base}/{pref}/{res}/{uuid}".format(
base=self.client.base_url,
pref=self.client.uri_prefix,
res=resource,
uuid=uuid)
self.assertEqual(expected_link, link)
| 32.742991 | 79 | 0.626516 |
4f6321242c22ce6fd0619ba9fc469db0326c9c58 | 872 | py | Python | niapy/tests/test_abc.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | ["MIT"] | null | null | null | niapy/tests/test_abc.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | ["MIT"] | 1 | 2021-08-13T07:52:40.000Z | 2021-08-16T08:52:20.000Z | niapy/tests/test_abc.py | chinmay3/NiaPy | b4e5c0f98063e2a9eebd8d750f0922cfca88bc55 | ["MIT"] | 2 | 2021-08-08T08:29:53.000Z | 2021-08-12T15:31:55.000Z |
# encoding=utf8
from niapy.algorithms.basic import ArtificialBeeColonyAlgorithm
from niapy.tests.test_algorithm import AlgorithmTestCase, MyProblem
class ABCTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = ArtificialBeeColonyAlgorithm
def test_custom(self):
abc_custom = self.algo(population_size=10, limit=2, seed=self.seed)
abc_customc = self.algo(population_size=10, limit=2, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, abc_custom, abc_customc, MyProblem())
def test_griewank(self):
abc_griewank = self.algo(population_size=10, seed=self.seed)
abc_griewankc = self.algo(population_size=10, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, abc_griewank, abc_griewankc)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 39.636364 | 88 | 0.755734 |
4f64242814520838bc1ddf1d4dca88a59fed0214 | 4,034 | py | Python | ogbg-code/utils2.py | animeshbchowdhury/DAGNN | 02062bd2b24c6a23ef1fa8093d082df72ece98cd | [
"MIT"
] | 38 | 2021-01-24T13:33:31.000Z | 2022-03-29T02:24:32.000Z | ogbg-code/utils2.py | animeshbchowdhury/DAGNN | 02062bd2b24c6a23ef1fa8093d082df72ece98cd | [
"MIT"
] | 3 | 2021-03-10T08:00:58.000Z | 2021-11-02T08:29:02.000Z | ogbg-code/utils2.py | animeshbchowdhury/DAGNN | 02062bd2b24c6a23ef1fa8093d082df72ece98cd | [
"MIT"
] | 9 | 2021-02-20T14:39:34.000Z | 2022-03-17T01:51:03.000Z |
import os
import torch
import statistics
class ASTNodeEncoder2(torch.nn.Module):
'''
Input:
            x: default node feature. The first and second columns represent node type and node attributes.
depth: The depth of the node in the AST.
Output:
emb_dim-dimensional vector
'''
def __init__(self, emb_dim, num_nodetypes, num_nodeattributes, max_depth):
super(ASTNodeEncoder2, self).__init__()
self.max_depth = max_depth
self.type_encoder = torch.nn.Embedding(num_nodetypes, emb_dim)
self.attribute_encoder = torch.nn.Embedding(num_nodeattributes, emb_dim)
# self.depth_encoder = torch.nn.Embedding(self.max_depth + 1, emb_dim)
def forward(self, x, depth):
depth[depth > self.max_depth] = self.max_depth
return self.type_encoder(x[:,0]) + self.attribute_encoder(x[:,1]) #+ self.depth_encoder(depth)
def augment_edge2(data):
'''
Input:
data: PyG data object
Output:
data (edges are augmented in the following ways):
data.edge_index: Added next-token edge. The inverse edges were also added.
data.edge_attr (torch.Long):
                data.edge_attr[:,0]: whether it is AST edge (0) or next-token edge (1)
data.edge_attr[:,1]: whether it is original direction (0) or inverse direction (1)
'''
##### AST edge
edge_index_ast = data.edge_index
edge_attr_ast = torch.zeros((edge_index_ast.size(1), 2))
##### Inverse AST edge
# edge_index_ast_inverse = torch.stack([edge_index_ast[1], edge_index_ast[0]], dim = 0)
# edge_attr_ast_inverse = torch.cat([torch.zeros(edge_index_ast_inverse.size(1), 1), torch.ones(edge_index_ast_inverse.size(1), 1)], dim = 1)
##### Next-token edge
## Obtain attributed nodes and get their indices in dfs order
# attributed_node_idx = torch.where(data.node_is_attributed.view(-1,) == 1)[0]
# attributed_node_idx_in_dfs_order = attributed_node_idx[torch.argsort(data.node_dfs_order[attributed_node_idx].view(-1,))]
## Since the nodes are already sorted in dfs ordering in our case, we can just do the following.
attributed_node_idx_in_dfs_order = torch.where(data.node_is_attributed.view(-1,) == 1)[0]
## build next token edge
# Given: attributed_node_idx_in_dfs_order
# [1, 3, 4, 5, 8, 9, 12]
# Output:
# [[1, 3, 4, 5, 8, 9]
# [3, 4, 5, 8, 9, 12]
edge_index_nextoken = torch.stack([attributed_node_idx_in_dfs_order[:-1], attributed_node_idx_in_dfs_order[1:]], dim = 0)
edge_attr_nextoken = torch.cat([torch.ones(edge_index_nextoken.size(1), 1), torch.zeros(edge_index_nextoken.size(1), 1)], dim = 1)
##### Inverse next-token edge
# edge_index_nextoken_inverse = torch.stack([edge_index_nextoken[1], edge_index_nextoken[0]], dim = 0)
# edge_attr_nextoken_inverse = torch.ones((edge_index_nextoken.size(1), 2))
data.edge_index = torch.cat([edge_index_ast, edge_index_nextoken], dim = 1)
data.edge_attr = torch.cat([edge_attr_ast, edge_attr_nextoken], dim = 0)
return data
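# Added note (illustrative, not part of the original module): after this call
# every original AST edge carries edge_attr [0., 0.] and every appended
# next-token edge carries [1., 0.]; the inverse-direction variants (second
# column = 1) are commented out above, so edge_attr[:, 1] stays 0 here.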
def summary_report(val_list):
return sum(val_list)/len(val_list), statistics.stdev(val_list) if len(val_list) > 1 else 0
def create_checkpoint(checkpoint_fn, epoch, model, optimizer, results):
checkpoint = {"epoch": epoch,
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"results": results}
torch.save(checkpoint, checkpoint_fn)
def remove_checkpoint(checkpoint_fn):
os.remove(checkpoint_fn)
def load_checkpoint(checkpoint_fn, model, optimizer):
checkpoint = torch.load(checkpoint_fn)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint['results'], checkpoint['epoch'], model, optimizer
def load_checkpoint_results(checkpoint_fn):
checkpoint = torch.load(checkpoint_fn)
return checkpoint['results']
| 36.672727 | 145 | 0.675508 |
4f6373e325c6aee5b0e1a8e1361a77c8013147c6 | 28,961 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_open_id_connect_provider_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 1 | 2020-03-05T18:10:35.000Z | 2020-03-05T18:10:35.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_open_id_connect_provider_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_open_id_connect_provider_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OpenIdConnectProviderOperations:
"""OpenIdConnectProviderOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name: str,
service_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs
) -> AsyncIterable["models.OpenIdConnectProviderCollection"]:
"""Lists of all the OpenId Connect Providers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OpenIdConnectProviderCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.OpenIdConnectProviderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OpenIdConnectProviderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OpenIdConnectProviderCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders'} # type: ignore
async def get_entity_tag(
self,
resource_group_name: str,
service_name: str,
opid: str,
**kwargs
) -> bool:
"""Gets the entity state (Etag) version of the openIdConnectProvider specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_name: str,
opid: str,
**kwargs
) -> "models.OpenidConnectProviderContract":
"""Gets specific OpenID Connect Provider without secrets.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OpenidConnectProviderContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.OpenidConnectProviderContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OpenidConnectProviderContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
opid: str,
parameters: "models.OpenidConnectProviderContract",
if_match: Optional[str] = None,
**kwargs
) -> "models.OpenidConnectProviderContract":
"""Creates or updates the OpenID Connect Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.OpenidConnectProviderContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OpenidConnectProviderContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.OpenidConnectProviderContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OpenidConnectProviderContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'OpenidConnectProviderContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'} # type: ignore
async def update(
self,
resource_group_name: str,
service_name: str,
opid: str,
if_match: str,
parameters: "models.OpenidConnectProviderUpdateContract",
**kwargs
) -> "models.OpenidConnectProviderContract":
"""Updates the specific OpenID Connect Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.OpenidConnectProviderUpdateContract
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OpenidConnectProviderContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.OpenidConnectProviderContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OpenidConnectProviderContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'OpenidConnectProviderUpdateContract')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'} # type: ignore
async def delete(
self,
resource_group_name: str,
service_name: str,
opid: str,
if_match: str,
**kwargs
) -> None:
"""Deletes specific OpenID Connect Provider of the API Management service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'} # type: ignore
async def list_secrets(
self,
resource_group_name: str,
service_name: str,
opid: str,
**kwargs
) -> "models.ClientSecretContract":
"""Gets the client secret details of the OpenID Connect Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param opid: Identifier of the OpenID Connect Provider.
:type opid: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClientSecretContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.ClientSecretContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ClientSecretContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.list_secrets.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'opid': self._serialize.url("opid", opid, 'str', max_length=256, min_length=0, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ClientSecretContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
list_secrets.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}/listSecrets'} # type: ignore
| 51.901434 | 219 | 0.663444 |
4f640a256f415021f5b601bc54754b7853efe122 | 8,913 | py | Python | tdrs-backend/tdpservice/security/models.py | riatzukiza/TANF-app | e34efc87e9703bab37db76b84cc47a041e3612a1 | [
"CC0-1.0"
] | 18 | 2020-03-25T19:57:12.000Z | 2021-07-26T15:37:50.000Z | tdrs-backend/tdpservice/security/models.py | raft-tech/TANF-app | a4ff39ca392591c4c218ad86df3bff5056d73152 | [
"CC0-1.0"
] | 1,465 | 2020-07-22T21:16:53.000Z | 2022-03-31T16:04:22.000Z | tdrs-backend/tdpservice/security/models.py | riatzukiza/TANF-app | e34efc87e9703bab37db76b84cc47a041e3612a1 | [
"CC0-1.0"
] | 15 | 2020-07-22T14:58:37.000Z | 2021-06-22T17:29:55.000Z | """Models for the tdpservice.security app."""
from hashlib import sha256
from io import StringIO
from os.path import join
from typing import Union
import logging
from django.contrib.admin.models import ContentType, LogEntry, ADDITION
from django.core.files.base import File
from django.db import models
from django.utils.timezone import now
from tdpservice.backends import DataFilesS3Storage
from tdpservice.data_files.models import DataFile
from tdpservice.users.models import User
logger = logging.getLogger(__name__)
def get_file_shasum(file: Union[File, StringIO]) -> str:
"""Derive the SHA256 checksum of a file."""
_hash = sha256()
# If the file has the `open` method it needs to be called, otherwise this
# input is a file-like object (ie. StringIO) and doesn't need to be opened.
if hasattr(file, 'open'):
f = file.open('rb')
else:
f = file
# For large files we need to read it in by chunks to prevent invalid hashes
if hasattr(f, 'multiple_chunks') and f.multiple_chunks():
for chunk in f.chunks():
_hash.update(chunk)
else:
content = f.read()
# If the content is returned as a string we must encode it to bytes
# or an error will be raised.
if isinstance(content, str):
content = content.encode('utf-8')
_hash.update(content)
# Ensure to reset the file so it can be read in further operations.
f.seek(0)
return _hash.hexdigest()
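# Added usage sketch (illustrative, not part of the original module): both
# Django File objects and in-memory buffers are accepted, e.g.
#   get_file_shasum(StringIO('header1,header2\n'))
# returns the hex SHA256 of the UTF-8 encoded text, and the buffer is
# seek(0)'d afterwards so it can still be read by later operations.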
def get_zap_s3_upload_path(instance, _):
"""Produce a unique upload path for ZAP reports stored in S3."""
return join(
f'owasp_reports/{instance.scanned_at.date()}/{instance.app_target}',
'owasp_report.html'
)
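# Added example (dates illustrative): a backend report scanned on 2021-06-01
# would be stored under 'owasp_reports/2021-06-01/tdrs-backend/owasp_report.html'.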
class ClamAVFileScanManager(models.Manager):
"""Extends object manager functionality for ClamAVFileScan model."""
def record_scan(
self,
file: Union[File, StringIO],
file_name: str,
msg: str,
result: 'ClamAVFileScan.Result',
uploaded_by: User
) -> 'ClamAVFileScan':
"""Create a new ClamAVFileScan instance with associated LogEntry."""
try:
file_shasum = get_file_shasum(file)
except (AttributeError, TypeError, ValueError) as err:
logger.error(f'Encountered error deriving file hash: {err}')
file_shasum = 'INVALID'
# Create the ClamAVFileScan instance.
av_scan = self.model.objects.create(
file_name=file_name,
file_size=(
file.size
if isinstance(file, File)
else len(file.getvalue())
),
file_shasum=file_shasum,
result=result,
uploaded_by=uploaded_by
)
# Create a new LogEntry that is tied to this model instance.
content_type = ContentType.objects.get_for_model(ClamAVFileScan)
LogEntry.objects.log_action(
user_id=uploaded_by.pk,
content_type_id=content_type.pk,
object_id=av_scan.pk,
object_repr=str(av_scan),
action_flag=ADDITION,
change_message=msg
)
return av_scan
class ClamAVFileScan(models.Model):
"""Represents a ClamAV virus scan performed for an uploaded file."""
class Meta:
"""Model Meta options."""
verbose_name = 'Clam AV File Scan'
class Result(models.TextChoices):
"""Represents the possible results from a completed ClamAV scan."""
CLEAN = 'CLEAN'
INFECTED = 'INFECTED'
ERROR = 'ERROR'
scanned_at = models.DateTimeField(auto_now_add=True)
file_name = models.TextField()
file_size = models.PositiveBigIntegerField(
help_text='The file size in bytes'
)
file_shasum = models.TextField(
help_text='The SHA256 checksum of the uploaded file'
)
result = models.CharField(
choices=Result.choices,
help_text='Scan result for uploaded file',
max_length=12
)
uploaded_by = models.ForeignKey(
User,
help_text='The user that uploaded the scanned file',
null=True,
on_delete=models.SET_NULL,
related_name='av_scans'
)
data_file = models.ForeignKey(
DataFile,
blank=True,
help_text='The resulting DataFile object, if this scan was clean',
null=True,
on_delete=models.SET_NULL,
related_name='av_scans'
)
objects = ClamAVFileScanManager()
def __str__(self) -> str:
"""Return string representation of model instance."""
return f'{self.file_name} ({self.file_size_humanized}) - {self.result}'
@property
def file_size_humanized(self) -> str:
"""Convert the file size into the largest human readable unit."""
size = self.file_size
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
break
size /= 1024.0
return f'{size:.{2}f}{unit}'
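    # Added example (illustrative): file_size=2621440 renders as '2.50MB';
    # sizes under 1024 bytes keep the plain 'B' unit, e.g. 500 -> '500.00B'.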
class OwaspZapScanManager(models.Manager):
"""Extends object manager functionality for OwaspZapScan model."""
def record_scan(
self,
app_target: str,
html_report: Union[File, StringIO],
fail_count: int,
pass_count: int,
warn_count: int,
) -> 'ClamAVFileScan':
"""Create a new OwaspZapScan instance with associated LogEntry."""
# A LogEntry must be tied to a user, but these records get created from
# nightly system level processes. To allow us to still capture these
# logs we will create a reserved system user that cannot log in and has
# no permissions or groups.
system_user, created = User.objects.get_or_create(username='system')
if created:
logger.debug('Created reserved system user')
# Create the OwaspZapScan instance.
zap_scan = self.model.objects.create(
app_target=app_target,
html_report=html_report,
scanned_at=now(),
fail_count=fail_count,
pass_count=pass_count,
warn_count=warn_count
)
# Format a message using the supplied metrics
msg = (
f'OWASP ZAP scan completed with result: {zap_scan.result}. '
f'FAIL: {fail_count}, WARN: {warn_count}, PASS: {pass_count}'
)
# Create a new LogEntry that is tied to this model instance.
content_type = ContentType.objects.get_for_model(OwaspZapScan)
LogEntry.objects.log_action(
user_id=system_user.pk,
content_type_id=content_type.pk,
object_id=zap_scan.pk,
object_repr=str(zap_scan),
action_flag=ADDITION,
change_message=msg
)
return zap_scan
class OwaspZapScan(models.Model):
"""Tracks the results of a scheduled run of the OWASP ZAP scan.
OWASP ZAP scan is an automated penetration testing tool which provides us
a security analysis of our deployed applications. These scans are run
nightly by Circle CI which triggers a Cloud Foundry Task to download
and store the results in this model.
Reference: https://www.zaproxy.org/
"""
class Meta:
"""Model Meta options."""
verbose_name = 'OWASP ZAP Scan'
class AppTarget(models.TextChoices):
"""The application that was scanned for this report."""
BACKEND = 'tdrs-backend'
FRONTEND = 'tdrs-frontend'
app_target = models.CharField(
choices=AppTarget.choices,
help_text='The application that was scanned',
max_length=32
)
html_report = models.FileField(
help_text='The generated HTML ZAP Scanning Report',
storage=DataFilesS3Storage,
upload_to=get_zap_s3_upload_path
)
scanned_at = models.DateTimeField(
auto_now_add=True,
help_text='The date and time this scan was processed'
)
fail_count = models.PositiveSmallIntegerField(
help_text='The number of alerts raised at FAIL level during the scan'
)
pass_count = models.PositiveIntegerField(
help_text='The number of passed rules during the scan'
)
warn_count = models.PositiveIntegerField(
help_text='The number of alerts raised at WARN level during the scan'
)
objects = OwaspZapScanManager()
def __str__(self):
"""Return the string representation of a model instance."""
return (
f'{self.get_app_target_display()}: {self.scanned_at.date()} '
f'({self.result})'
)
@property
def result(self) -> str:
"""Return a summarized result of the scan."""
if self.fail_count > 0:
return 'Failed'
elif self.warn_count > 0:
return 'Warning'
elif self.pass_count > 0:
return 'Passed'
else:
return 'Error'
| 31.4947 | 79 | 0.631662 |
4f62d7c59a0ab82657c562f3e6d2aab29effe8c0 | 4,005 | py | Python | ExtentionPackages/paramiko/pipe.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | 14 | 2018-02-16T12:00:09.000Z | 2021-09-15T09:37:13.000Z | ExtentionPackages/paramiko/pipe.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | 269 | 2015-01-09T15:41:29.000Z | 2020-11-16T09:29:57.000Z | ExtentionPackages/paramiko/pipe.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | 10 | 2017-06-18T08:29:10.000Z | 2021-06-07T12:16:27.000Z | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Abstraction of a one-way pipe where the read end can be used in
`select.select`. Normally this is trivial, but Windows makes it nearly
impossible.
The pipe acts like an Event, which can be set or cleared. When set, the pipe
will trigger as readable in `select <select.select>`.
"""
import sys
import os
import socket
from paramiko.py3compat import b
def make_pipe():
if sys.platform[:3] != 'win':
p = PosixPipe()
else:
p = WindowsPipe()
return p
class PosixPipe (object):
def __init__(self):
self._rfd, self._wfd = os.pipe()
self._set = False
self._forever = False
self._closed = False
def close(self):
os.close(self._rfd)
os.close(self._wfd)
# used for unit tests:
self._closed = True
def fileno(self):
return self._rfd
def clear(self):
if not self._set or self._forever:
return
os.read(self._rfd, 1)
self._set = False
def set(self):
if self._set or self._closed:
return
self._set = True
os.write(self._wfd, b'*')
def set_forever(self):
self._forever = True
self.set()
class WindowsPipe (object):
"""
On Windows, only an OS-level "WinSock" may be used in select(), but reads
and writes must be to the actual socket object.
"""
def __init__(self):
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('127.0.0.1', 0))
serv.listen(1)
# need to save sockets in _rsock/_wsock so they don't get closed
self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._rsock.connect(('127.0.0.1', serv.getsockname()[1]))
self._wsock, addr = serv.accept()
serv.close()
self._set = False
self._forever = False
self._closed = False
def close(self):
self._rsock.close()
self._wsock.close()
# used for unit tests:
self._closed = True
def fileno(self):
return self._rsock.fileno()
def clear (self):
if not self._set or self._forever:
return
self._rsock.recv(1)
self._set = False
def set (self):
if self._set or self._closed:
return
self._set = True
self._wsock.send(b'*')
def set_forever (self):
self._forever = True
self.set()
class OrPipe (object):
def __init__(self, pipe):
self._set = False
self._partner = None
self._pipe = pipe
def set(self):
self._set = True
if not self._partner._set:
self._pipe.set()
def clear(self):
self._set = False
if not self._partner._set:
self._pipe.clear()
def make_or_pipe(pipe):
"""
wraps a pipe into two pipe-like objects which are "or"d together to
affect the real pipe. if either returned pipe is set, the wrapped pipe
is set. when both are cleared, the wrapped pipe is cleared.
"""
p1 = OrPipe(pipe)
p2 = OrPipe(pipe)
p1._partner = p2
p2._partner = p1
return p1, p2
| 26.7 | 79 | 0.617728 |
4f5d2687a6c2a836773f8227350fbc98dfd4dbb2 | 782 | py | Python | lang/python/bottle/websocket/websocket.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 8 | 2015-06-07T13:25:48.000Z | 2022-03-22T23:14:50.000Z | lang/python/bottle/websocket/websocket.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 30 | 2016-01-29T01:36:41.000Z | 2018-09-19T07:01:22.000Z | lang/python/bottle/websocket/websocket.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | null | null | null | import time
from bottle import request, Bottle, abort
app = Bottle()
@app.route('/websocket')
def handle_websocket():
wsock = request.environ.get('wsgi.websocket')
if not wsock:
abort(400, 'Expected WebSocket request.')
message = wsock.receive()
cnt = 0
while True:
try:
print "hit", cnt
message = "no %d" % (cnt)
wsock.send("Your message was: %r" % message)
time.sleep(3)
cnt += 1
except WebSocketError:
break
from gevent.pywsgi import WSGIServer
from geventwebsocket import WebSocketError
from geventwebsocket.handler import WebSocketHandler
server = WSGIServer(("0.0.0.0", 8080), app,
handler_class=WebSocketHandler)
server.serve_forever()
| 26.965517 | 56 | 0.62532 |
4f64e5625b420e55f150dc0d3bfa1b380ec90dff | 5,368 | py | Python | kubernetes/client/models/v1_role_ref.py | knkgun/python | f39b26b4ff446e206e468d8e13940a5df458b6cd | [
"Apache-2.0"
] | 1 | 2022-02-07T21:57:20.000Z | 2022-02-07T21:57:20.000Z | kubernetes/client/models/v1_role_ref.py | knkgun/python | f39b26b4ff446e206e468d8e13940a5df458b6cd | [
"Apache-2.0"
] | 1 | 2022-03-01T03:37:57.000Z | 2022-03-01T03:37:57.000Z | kubernetes/client/models/v1_role_ref.py | knkgun/python | f39b26b4ff446e206e468d8e13940a5df458b6cd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.22
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1RoleRef(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name'
}
def __init__(self, api_group=None, kind=None, name=None, local_vars_configuration=None): # noqa: E501
"""V1RoleRef - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._kind = None
self._name = None
self.discriminator = None
self.api_group = api_group
self.kind = kind
self.name = name
@property
def api_group(self):
"""Gets the api_group of this V1RoleRef. # noqa: E501
APIGroup is the group for the resource being referenced # noqa: E501
:return: The api_group of this V1RoleRef. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this V1RoleRef.
APIGroup is the group for the resource being referenced # noqa: E501
:param api_group: The api_group of this V1RoleRef. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and api_group is None: # noqa: E501
raise ValueError("Invalid value for `api_group`, must not be `None`") # noqa: E501
self._api_group = api_group
@property
def kind(self):
"""Gets the kind of this V1RoleRef. # noqa: E501
Kind is the type of resource being referenced # noqa: E501
:return: The kind of this V1RoleRef. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1RoleRef.
Kind is the type of resource being referenced # noqa: E501
:param kind: The kind of this V1RoleRef. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1RoleRef. # noqa: E501
Name is the name of resource being referenced # noqa: E501
:return: The name of this V1RoleRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1RoleRef.
Name is the name of resource being referenced # noqa: E501
:param name: The name of this V1RoleRef. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RoleRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RoleRef):
return True
return self.to_dict() != other.to_dict()
| 29.494505 | 124 | 0.584948 |
4f6436c128fcd9956e3cedaef9da2738f7011b1a | 446 | py | Python | evalQuadratic.py | ahmedkareem999/MITx-6.00.1x | a383fe33c29959b65c56c6a054e4830243862ca8 | [
"MIT"
] | 28 | 2018-10-02T09:15:03.000Z | 2022-02-07T17:26:17.000Z | evalQuadratic.py | ahmedkareem999/MITx-6.00.1x | a383fe33c29959b65c56c6a054e4830243862ca8 | [
"MIT"
] | null | null | null | evalQuadratic.py | ahmedkareem999/MITx-6.00.1x | a383fe33c29959b65c56c6a054e4830243862ca8 | [
"MIT"
] | 27 | 2018-05-25T14:04:40.000Z | 2022-03-29T17:17:30.000Z | '''
Week-2:Eval Quadratic
Write a Python function, evalQuadratic(a, b, c, x), that returns the value of the quadratic a⋅x2+b⋅x+c.
This function takes in four numbers and returns a single number.
'''
#code
def evalQuadratic(a, b, c, x):
'''
a, b, c: numerical values for the coefficients of a quadratic equation
x: numerical value at which to evaluate the quadratic.
'''
# Your code here
return (a * (x ** 2) + b * x + c)
| 29.733333 | 103 | 0.661435 |
4f6482e0a75ad7d2e2bc3440e9257cf8d9250e41 | 13,213 | py | Python | scanning.py | GemHunt/RealTimeCoinID | 26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8 | [
"MIT"
] | null | null | null | scanning.py | GemHunt/RealTimeCoinID | 26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8 | [
"MIT"
] | null | null | null | scanning.py | GemHunt/RealTimeCoinID | 26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8 | [
"MIT"
] | null | null | null | import numpy as np
import os
import serial
import time
import sys
import cv2
import cv2.cv as cv
import cPickle as pickle
def get_filename(coin_id, image_id):
dir = '/home/pkrush/cents-test/' + str(coin_id / 100) + '/'
filename = dir + str(coin_id).zfill(5) + str(image_id).zfill(2) + '.png'
return filename
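# Added example (illustrative): with coin_id=123 and image_id=4 this returns
# '/home/pkrush/cents-test/1/0012304.png' (Python 2 integer division buckets
# coins into one directory per 100 coin ids).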
def read_from_cameras(top_camera, bottom_camera):
ret, top = top_camera.read()
ret, bottom = bottom_camera.read()
    if top is None:  # "is None" avoids an element-wise comparison when a numpy frame is returned
        raise ValueError('A frame from the top camera came up None')
    if bottom is None:
        raise ValueError('A frame from the bottom camera came up None')
return top, bottom
def deskew(src, pixel_shift):
src_tri = np.zeros((3, 2), dtype=np.float32)
dst_tri = np.zeros((3, 2), dtype=np.float32)
rows = src.shape[0]
cols = src.shape[1]
# Set your 3 points to calculate the Affine Transform
src_tri[1] = [cols - 1, 0]
src_tri[2] = [0, rows - 1]
# dstTri is the same except the bottom is moved over shiftpixels:
dst_tri[1] = src_tri[1]
dst_tri[2] = [pixel_shift, rows - 1]
# Get the Affine Transform
warp_mat = cv2.getAffineTransform(src_tri, dst_tri)
## Apply the Affine Transform just found to the src image
cv2.warpAffine(src, warp_mat, (cols, rows), src, cv2.INTER_CUBIC)
return src
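# Added note (illustrative): the affine transform above is a horizontal shear.
# The top edge of the frame stays fixed while the bottom-left corner moves from
# (0, rows-1) to (pixel_shift, rows-1), so a positive pixel_shift slides the
# bottom row of pixels to the right relative to the top row.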
def scan(top_camera, bottom_camera, ser):
top_captures = []
bottom_captures = []
for count in range(0, 62):
top, bottom = read_from_cameras(top_camera, bottom_camera)
if count > 4:
top_captures.append(top)
bottom_captures.append(bottom)
led = count / 2
if led < 29:
ser.write(str(led) + "\n")
cv.WaitKey(1)
return top_captures, bottom_captures
def save(captures, coin_id):
count = 0
crop_radius = 224
border_expansion = 30
center_list = []
resized = []
start_time = time.time()
for frame in captures:
if coin_id % 2 == 0:
ratio = .41
else:
ratio = .46
frame_width = int(1920 * ratio)
frame_height = int(1080 * ratio)
frame = cv2.resize(frame, (frame_width, frame_height), interpolation=cv2.INTER_AREA)
blank_image = np.zeros((frame_height + border_expansion * 2, frame_width + border_expansion * 2, 3), np.uint8)
blank_image[border_expansion:frame_height + border_expansion,
border_expansion:frame_width + border_expansion] = frame
frame = blank_image
resized.append(frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if coin_id % 2 == 0:
circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 2000, param1=45, param2=25, minRadius=222,
maxRadius=226)
else:
circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 2000, param1=45, param2=25, minRadius=222,
maxRadius=226)
if circles is None:
continue
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
center_x = i[0]
center_y = i[1]
coin_radius = i[2]
cv2.circle(gray, (center_x, center_y), 2, (0, 0, 255), 1)
cv2.circle(gray, (center_x, center_y), coin_radius, (0, 0, 255), 1)
center_list.append([center_x, center_y, coin_radius])
total_center_x = 0
total_center_y = 0
total_radius = 0
# print '1 In %s seconds' % (time.time() - start_time,)
for center_x, center_y, coin_radius in center_list:
# print center_x, center_y, coin_radius
total_center_x += center_x
total_center_y += center_y
total_radius += coin_radius
#print '2 In %s seconds' % (time.time() - start_time,)
if len(center_list) == 0:
return False
# raise ValueError(str(coin_id) + 'had no detected circles')
#print '3 In %s seconds' % (time.time() - start_time,)
average_center_x = float(total_center_x) / len(center_list)
average_center_y = float(total_center_y) / len(center_list)
average_radius = float(total_radius) / len(center_list)
resized_height,resized_width,channels = frame.shape
    # use integer pixel coordinates so the numpy slicing below stays valid
    crop_top = int(round(average_center_y)) - crop_radius
    crop_bottom = int(round(average_center_y)) + crop_radius
    crop_left = int(round(average_center_x)) - crop_radius
    crop_right = int(round(average_center_x)) + crop_radius
bad_crop = ' is Bad. X&Y:' + str(average_center_x) + "," + str(average_center_y) + ' Frame Width:' + str(resized_width) + ' Frame Height:' + str(resized_height)
if crop_left < 0:
print str(crop_left) + ' crop_left' + bad_crop + '\n\n\n'
#return False
if crop_right > resized_width:
print str(crop_right) + ' crop_right' + bad_crop + '\n\n\n'
#return False
if crop_top < 0:
print str(crop_top) + ' crop_top' + bad_crop + '\n\n\n'
#return False
if crop_bottom > resized_height:
print str(crop_bottom) + ' crop_bottom' + bad_crop + '\n\n\n'
#return False
# dir = '/media/pkrush/Seagate Backup Plus Drive/cents_2/' + str(coin_id/100) + '/'
dir = '/home/pkrush/cents-test/' + str(coin_id / 100) + '/'
if not os.path.exists(dir):
os.mkdir(dir)
#print '5 In %s seconds' % (time.time() - start_time,)
for frame in resized:
crop = frame[crop_top:crop_bottom, crop_left:crop_right]
cv2.imwrite(dir + str(coin_id).zfill(5) + str(count).zfill(2) + '.png', crop)
count += 1
#print '6 In %s seconds' % (time.time() - start_time,)
return True
def get_moving_center_x(frame, ratio, deskew_pixels, frame_name, frame_id):
frame_width = int(1920 * ratio)
frame_height = int(1080 * ratio)
# print '3 In %s seconds' % (time.time() - start_time,)
frame = cv2.resize(frame, (frame_width, frame_height), interpolation=cv2.INTER_AREA)
# print '4 In %s seconds' % (time.time() - start_time,)
height_expansion_amount = 40
blank_image = np.zeros((frame_height + height_expansion_amount, frame_width, 3), np.uint8)
blank_image[height_expansion_amount / 2:frame_height + height_expansion_amount / 2, 0:frame_width] = frame
frame = blank_image
# frame = frame[460:,40:1040]
deskewed = deskew(frame, deskew_pixels)
gray = cv2.cvtColor(deskewed, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 300, param1=45, param2=25, minRadius=52, maxRadius=58)
if circles is None:
cv2.imshow(frame_name, frame)
return 0
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
center_x = i[0]
center_y = i[1]
crop_radius = i[2]
cv2.circle(frame, (center_x, center_y), 2, (0, 0, 255), 1)
cv2.circle(frame, (center_x, center_y), crop_radius, (0, 0, 255), 1)
# print circles
cv2.imshow(frame_name, frame)
cv2.imwrite('/home/pkrush/cents-circle-detect/' + str(frame_id).zfill(6) + frame_name + '.png', frame)
return center_x * (1 / ratio)
def get_cameras():
top_camera = None
bottom_camera = None
for camera_id in range(0, 4):
cap = cv2.VideoCapture(camera_id)
cap.set(3, 1920)
cap.set(4, 1080)
if cap.get(3) == 1920:
if top_camera is None:
top_camera = cap
else:
bottom_camera = cap
top, bottom = read_from_cameras(top_camera, bottom_camera)
if bottom[170, 170, 0] == bottom[170, 170, 1] == bottom[170, 170, 2]:
temp_camera = top_camera
top_camera = bottom_camera
bottom_camera = temp_camera
return top_camera, bottom_camera
# This is a one-time function, kept because the initial scanning had issues:
# 237 sets of 2 were bad and 2500 were good, leaving 5000 good sets of 57 images for 2500 coins.
def save_good_coin_ids():
good_coin_ids = set()
bad_coin_ids = set()
for coin_id in range(0, 5458, 2):
good_coin_ids.add(coin_id)
for side in [0, 3]:
for image_id in range(0, 56):
filename = get_filename(coin_id + side, image_id)
if not os.path.isfile(filename):
bad_coin_ids.add(coin_id)
continue
if os.path.getsize(filename) == 0:
bad_coin_ids.add(coin_id)
continue
test_image = cv2.imread(filename)
if test_image is None:
bad_coin_ids.add(coin_id)
continue
width, height, channels = test_image.shape
if not width == height == 448:
bad_coin_ids.add(coin_id)
continue
good_coin_ids = good_coin_ids - bad_coin_ids
for start_id in coin_id_starts:
if start_id != 0:
#-2 is bad: Why;
#-2 bad the for top coin_id is good,
#-1 good the bottom of -4 good,
#0 good top coin_id is good,
#1 bad bottom will never be read as it's the back of -2
#2 good top is new the back of 0
#3 good bottom is the back of #0
bad_coin_ids.add(start_id - 2)
print len(bad_coin_ids)
print len(good_coin_ids)
good_coin_ids.difference(bad_coin_ids)
home_dir = '/home/pkrush/cent-models/'
data_dir = home_dir + 'metadata/'
back_sides = set()
for coin_id in good_coin_ids:
back_sides.add(coin_id + 3)
good_coin_ids = good_coin_ids.union(back_sides)
print len(good_coin_ids)
pickle.dump(good_coin_ids, open(data_dir + 'seed_image_ids.pickle', "wb"))
pickle.dump(good_coin_ids, open(data_dir + 'test_image_ids.pickle', "wb"))
coin_id_starts = [0, 380, 1152, 1972, 2674, 2780, 2846, 2946, 3330, 5448]
def get_start_coin_id():
return coin_id_starts[len(coin_id_starts) - 1]
coin_id = get_start_coin_id()
top_camera, bottom_camera = get_cameras()
# files = glob.glob('/home/pkrush/cents-circle-detect/*')
# for f in files:
# os.remove(f)
# files = glob.glob('/home/pkrush/cents-test/*')
# for f in files:
# os.remove(f)
start_time = time.time()
ser = serial.Serial(port='/dev/ttyUSB0', baudrate=115200)
ser.write(str(102) + "\n")
cv.WaitKey(2)
ser.write(str(104) + "\n")
cv.WaitKey(2)
frame_count = 0
last_scan_frame_count = -100
found_coin = False
top_belt_on = True
bottom_belt_on = True
while (True):
status = ''
if top_belt_on and bottom_belt_on:
# This might be overkill to keep turning them on:
ser.write(str(102) + "\n")
cv.WaitKey(1)
ser.write(str(104) + "\n")
cv.WaitKey(1)
top, bottom = read_from_cameras(top_camera, bottom_camera)
after_scan_frame_delay = 30
if frame_count - last_scan_frame_count < after_scan_frame_delay:
frame_count += 1
continue
if top_belt_on:
center_x = get_moving_center_x(top, .1, 8, 'Top', frame_count)
if center_x != 0:
status += 'top' + ' ' + str(center_x) + '-'
if top_belt_on and center_x < 1691:
top_belt_on = False
status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
ser.write(str(105) + "\n")
cv.WaitKey(1)
ser.write(str(106) + "\n")
cv.WaitKey(10)
ser.write(str(107) + "\n")
cv.WaitKey(1)
status += 'Top belt off, reset hopper'
if bottom_belt_on:
center_x = get_moving_center_x(bottom, .11, -8, 'Bot', frame_count)
if center_x != 0:
status += 'bottom' + ' ' + str(center_x) + '-'
if bottom_belt_on and center_x > 0:
bottom_belt_on = False
status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
ser.write(str(103) + "\n")
cv.WaitKey(1)
status += 'Bottom belt off-'
if top_belt_on == False and bottom_belt_on == False:
# if first_top_scanned == True:
status += 'Scanning ' + str(coin_id) + ' with the LED lights-'
last_scan_frame_count = frame_count
top_captures, bottom_captures = scan(top_camera, bottom_camera, ser)
# t = threading.Thread(target=save, args=(top_captures, coin_id))
# t.start()
# t = threading.Thread(target=save, args=(bottom_captures, coin_id + 1))
# t.start()
# print 'pre save In %s seconds' % (time.time() - start_time,)
top_save = save(top_captures, coin_id)
bottom_save = save(bottom_captures, coin_id + 1)
# print 'save In %s seconds' % (time.time() - start_time,)
if top_save and bottom_save:
coin_id += 2
status += 'Cycle In %s seconds' % (time.time() - start_time,)
start_time = time.time()
status += 'Both belts on-'
top_belt_on = True
bottom_belt_on = True
status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
if status != '':
print frame_count, status
frame_count +=1
#ser.write(str(102) + "\n")
# cv.WaitKey(3500)
cv.WaitKey(35)
#ser.write(str(100) + "\n")
cv.WaitKey(100)
#ser.write(str(101) + "\n")
top_camera.release()
bottom_camera.release()
cv2.destroyAllWindows()
| 34.862797 | 165 | 0.608416 |
4f5748c6834773f51976d2e5ee5cfe11f2411544 | 6,626 | py | Python | cstar/remote_paramiko.py | rjablonovsky/cstar | d2c549b38b90fe90019edfbc8fb404f2f955dd44 | [
"Apache-2.0"
] | 1 | 2021-06-06T10:34:55.000Z | 2021-06-06T10:34:55.000Z | cstar/remote_paramiko.py | rjablonovsky/cstar | d2c549b38b90fe90019edfbc8fb404f2f955dd44 | [
"Apache-2.0"
] | null | null | null | cstar/remote_paramiko.py | rjablonovsky/cstar | d2c549b38b90fe90019edfbc8fb404f2f955dd44 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paramiko.client
import re
from cstar.output import err, debug, msg
from cstar.exceptions import BadSSHHost, BadEnvironmentVariable, NoHostsSpecified
from cstar.executionresult import ExecutionResult
from pkg_resources import resource_string
PING_COMMAND = "echo ping"
_alnum_re = re.compile(r"[^a-zA-Z0-9\|_]")
class RemoteParamiko(object):
def __init__(self, hostname, ssh_username=None, ssh_password=None, ssh_identity_file=None):
if hasattr(hostname, "ip"):
self.hostname = hostname.ip
else:
self.hostname = hostname
if not self.hostname:
raise NoHostsSpecified("No SSH host specified")
self.ssh_username = ssh_username
self.ssh_password = ssh_password
self.ssh_identity_file = ssh_identity_file
self.client = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def _connect(self):
if self.client:
# Ensure underlying client is still a valid open connection
try:
stdin, stdout, stderr = self.client.exec_command(PING_COMMAND)
except (ConnectionResetError, paramiko.ssh_exception.SSHException):
# ConnectionResetError is raised when a connection was established but then broken
# paramiko.ssh_exception.SSHException is raised if the connection was known to be broken
self.client = None
if not self.client:
try:
self.client = paramiko.client.SSHClient()
pkey = None
                if self.ssh_identity_file is not None:
pkey = paramiko.RSAKey.from_private_key_file(self.ssh_identity_file, None)
debug("Username : ", self.ssh_username)
debug("Id file: ", self.ssh_identity_file)
self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
self.client.connect(self.hostname, compress=True, username=self.ssh_username, password=self.ssh_password, pkey=pkey)
except:
self.client = None
raise BadSSHHost("Could not establish an SSH connection to host %s" % (self.hostname,))
def run_job(self, file, jobid, timeout=None, env={}):
try:
self._connect()
transport = self.client.get_transport()
session = transport.open_session()
paramiko.agent.AgentRequestHandler(session)
dir = ".cstar/remote-jobs/" + jobid
self.run(("mkdir", "-p", dir))
self.put_command(file, "%s/job" % (dir,))
# Manually insert environment into script, since passing env into exec_command leads to it being
# ignored on most ssh servers. :-(
for key in env:
if _alnum_re.search(key):
raise BadEnvironmentVariable(key)
env_str = " ".join(key + "=" + self.escape(value) for key, value in env.items())
remote_script = resource_string('cstar.resources', 'scripts/remote_job.sh')
wrapper = remote_script.decode("utf-8") % (env_str,)
self.write_command(wrapper, "%s/wrapper" % (dir,))
cmd = """
cd %s
nohup ./wrapper
""" % (self.escape(dir),)
stdin, stdout, stderr = self.client.exec_command(cmd, timeout=timeout)
stdout.channel.recv_exit_status()
real_output = self.read_file(dir + "/stdout")
real_error = self.read_file(dir + "/stderr")
real_status = int(self.read_file(dir + "/status"))
return ExecutionResult(cmd, real_status, real_output, real_error)
except (ConnectionResetError, paramiko.ssh_exception.SSHException):
raise BadSSHHost("SSH connection to host %s was reset" % (self.hostname,))
def get_job_status(self, jobid):
pass
def run(self, argv):
try:
self._connect()
cmd = " ".join(self.escape(s) for s in argv)
stdin, stdout, stderr = self.client.exec_command(cmd)
status = stdout.channel.recv_exit_status()
out = stdout.read()
error = stderr.read()
if status != 0:
err("Command %s failed with status %d on host %s" % (cmd, status, self.hostname))
else:
debug("Command %s succeeded on host %s, output was %s and %s" %
(cmd, self.hostname, str(out, 'utf-8'), str(error, 'utf-8')))
return ExecutionResult(cmd, status, str(out, 'utf-8'), str(error, 'utf-8'))
except (ConnectionResetError, paramiko.ssh_exception.SSHException):
self.client = None
raise BadSSHHost("SSH connection to host %s was reset" % (self.hostname,))
@staticmethod
def escape(input):
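        # Examples (illustrative): escape("ls") -> ls ; escape("a b") -> 'a b' ;
        # escape("it's") -> 'it'\''s' (quoted so it is safe on a shell command line)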
if _alnum_re.search(input):
return "'" + input.replace("'", r"'\''") + "'"
return input
def read_file(self, remotepath):
self._connect()
with self.client.open_sftp() as ftp_client:
with ftp_client.file(remotepath, 'r') as f:
return str(f.read(), 'utf-8')
def put_file(self, localpath, remotepath):
self._connect()
with self.client.open_sftp() as ftp_client:
ftp_client.put(localpath, remotepath)
def put_command(self, localpath, remotepath):
self._connect()
with self.client.open_sftp() as ftp_client:
ftp_client.put(localpath, remotepath)
ftp_client.chmod(remotepath, 0o755)
def write_command(self, definition, remotepath):
self._connect()
with self.client.open_sftp() as ftp_client:
with ftp_client.open(remotepath, 'w') as f:
f.write(definition)
ftp_client.chmod(remotepath, 0o755)
def mkdir(self, path):
self.run("mkdir " + path)
def close(self):
if self.client:
self.client.close()
self.client = None
| 38.976471 | 132 | 0.618171 |
4f5cf3db3f4b120de456c9010dc4c295c268c92b | 3,037 | py | Python | python-code/dlib-learning/dlib_face_recognization.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | 13 | 2018-09-07T02:29:07.000Z | 2021-06-18T08:40:09.000Z | python-code/dlib-learning/dlib_face_recognization.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | null | null | null | python-code/dlib-learning/dlib_face_recognization.py | juxiangwu/image-processing | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | [
"Apache-2.0"
] | 4 | 2019-06-20T00:09:39.000Z | 2021-07-15T10:14:36.000Z | import cv2
import os
import face
import numpy as np
def find_faces(image, cascade_file = "haarcascade_frontalface_alt.xml"):
if not os.path.isfile(cascade_file):
raise RuntimeError("%s: not found" % cascade_file)
cascade = cv2.CascadeClassifier(cascade_file)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = cascade.detectMultiScale(gray,
# detector options
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (24, 24))
i = 0
results = []
for (x, y, w, h) in faces:
i += 1
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
temp = image[y:y+h, x:x+w, :]
results.append(temp)
return results, image
def camera_realtime(file, savevideo):
camera = cv2.VideoCapture(file)
print('Press [ESC] to quit demo')
elapsed = int()
cv2.namedWindow('', 0)
_, frame = camera.read()
height, width, _ = frame.shape
cv2.resizeWindow('', width, height)
if savevideo:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
if file == 0:
fps = 10
if fps < 1:
fps = 1
else:
fps = round(camera.get(cv2.CAP_PROP_FPS))
videoWriter = cv2.VideoWriter('video.avi', fourcc, fps, (width, height))
while camera.isOpened():
_, frame = camera.read()
if frame is None:
print ('\nEnd of Video'+'\n'+'----------------------')
break
faces, processed = find_faces(frame)
elapsed += 1
if elapsed % 5 == 0:
for each in faces:
recognize_result = face.recognize_face(each,descriptors,names)
if recognize_result == -1:
                    print('this person does not exist in candidate-faces')
                    cv2.putText(processed,'this person does not exist in candidate-faces',(50,400),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0),1)
elif recognize_result == 0:
print('The operation failed, please try again')
else:
dict_candidate[recognize_result]+=1
person = 0
for name in names:
person += 1
cv2.putText(processed,name+' appears: '+str(dict_candidate[name])+' times',(50,50*person),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0),1)
if savevideo:
videoWriter.write(processed)
cv2.imshow('', processed)
choice = cv2.waitKey(1)
if choice&0xFF == 27: break
if savevideo:
videoWriter.release()
camera.release()
cv2.destroyAllWindows()
#-------------------main-----------------------
names, paths = face.scan_images('candidate-faces')
count = np.zeros(len(names))
dict_candidate = dict(zip(names,count))
descriptors = face.get_descriptors(paths)
camera_realtime(0,True) | 31.309278 | 143 | 0.539019 |
4f652ada51a55adde27d9b0c811e347e95bafe7d | 6,794 | py | Python | tests/integration/chat/v2/test_credential.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | tests/integration/chat/v2/test_credential.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | tests/integration/chat/v2/test_credential.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class CredentialTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.credentials.list()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v2/Credentials',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credentials": [
{
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Test slow create",
"type": "apn",
"sandbox": "False",
"date_created": "2015-10-07T17:50:01Z",
"date_updated": "2015-10-07T17:50:01Z",
"url": "https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"page": 0,
"page_size": 1,
"first_page_url": "https://chat.twilio.com/v2/Credentials?PageSize=1&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Credentials?PageSize=1&Page=0",
"next_page_url": null,
"key": "credentials"
}
}
'''
))
actual = self.client.chat.v2.credentials.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"credentials": [],
"meta": {
"page": 0,
"page_size": 1,
"first_page_url": "https://chat.twilio.com/v2/Credentials?PageSize=1&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Credentials?PageSize=1&Page=0",
"next_page_url": null,
"key": "credentials"
}
}
'''
))
actual = self.client.chat.v2.credentials.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.credentials.create(type="gcm")
values = {
'Type': "gcm",
}
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v2/Credentials',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Test slow create",
"type": "apn",
"sandbox": "False",
"date_created": "2015-10-07T17:50:01Z",
"date_updated": "2015-10-07T17:50:01Z",
"url": "https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v2.credentials.create(type="gcm")
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Test slow create",
"type": "apn",
"sandbox": "False",
"date_created": "2015-10-07T17:50:01Z",
"date_updated": "2015-10-07T17:50:01Z",
"url": "https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Test slow create",
"type": "apn",
"sandbox": "False",
"date_created": "2015-10-07T17:50:01Z",
"date_updated": "2015-10-07T17:50:01Z",
"url": "https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://chat.twilio.com/v2/Credentials/CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.chat.v2.credentials(sid="CRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
self.assertTrue(actual)
| 32.980583 | 106 | 0.53665 |
4f64420c27426aa99965211bd655d4513653e35c | 2,331 | py | Python | maintupdate.py | diegmonti/labs-tools-incolabot | e9362f7a8a16e5bd319dd1d311714320c60af254 | [
"MIT"
] | 1 | 2017-04-09T23:25:10.000Z | 2017-04-09T23:25:10.000Z | maintupdate.py | dmm42/labs-tools-incolabot | 8d3a28e25ddcd7d547c3854d7b673990fdabb426 | [
"MIT"
] | 1 | 2017-08-04T11:50:47.000Z | 2017-08-14T12:13:52.000Z | maintupdate.py | dmm42/labs-tools-incolabot | 8d3a28e25ddcd7d547c3854d7b673990fdabb426 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pywikibot
import csv
site = pywikibot.Site('it', 'wikipedia')
with open('/data/project/maintgraph/bot.csv', 'r') as f:
reader = csv.reader(f)
line = next(reader)
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Controllare_copyright")
page.put(line[5], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Verificare_enciclopedicità")
page.put(line[8], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Aiutare")
page.put(line[2], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Pagine_orfane")
page.put(line[12], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Voci_non_neutrali")
page.put(line[22], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Senza_fonti")
page.put(line[14], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Controllare")
page.put(line[4], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Wikificare")
page.put(line[24], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Unire")
page.put(line[21], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Tradurre")
page.put(line[20], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Stub")
page.put(line[18], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Aggiungere_template")
page.put(line[1], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Categorizzare")
page.put(line[3], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Correggere")
page.put(line[6], u"Bot: aggiorno statistiche manutenzioni")
page = pywikibot.Page(site, u"Template:Situazione_lavoro_sporco/Localismo")
page.put(line[10], u"Bot: aggiorno statistiche manutenzioni")
| 41.625 | 92 | 0.788503 |
4f6507ebb14f7a065688bfa3e3f6a15567427914 | 13,731 | py | Python | switch_model/hawaii/psip_2016_04.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_model/hawaii/psip_2016_04.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_model/hawaii/psip_2016_04.py | ashutosh-pande/switch3 | 769d25a42c8323f24740567aa15c980f905a03e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from __future__ import division
import os
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument('--psip-force', action='store_true', default=True,
help="Force following of PSIP plans (retiring AES and building certain technologies).")
argparser.add_argument('--psip-relax', dest='psip_force', action='store_false',
help="Relax PSIP plans, to find a more optimal strategy.")
argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False,
help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).")
argparser.add_argument('--force-build', nargs=3, default=None,
help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.")
def define_components(m):
###################
# resource rules to match HECO's 2016-04-01 PSIP
##################
# decide whether to enforce the PSIP preferred plan
# if an environment variable is set, that takes precedence
# (e.g., on a cluster to override options.txt)
psip_env_var = os.environ.get('USE_PSIP_PLAN')
if psip_env_var is None:
# no environment variable; use the --psip-relax flag
psip = m.options.psip_force
elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]:
psip = True
elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]:
psip = False
else:
raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var))
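    # e.g. running with USE_PSIP_PLAN=1 in the environment forces the PSIP plan even if
    # --psip-relax was given on the command line or in options.txt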
if psip:
print "Using PSIP construction plan."
else:
print "Relaxing PSIP construction plan."
# force conversion to LNG in 2021
# force use of containerized LNG
# don't allow addition of anything other than those specified here
# force retirement of AES in 2021
# targets for individual generation technologies
# (year, technology, MW added)
# TODO: allow either CentralFixedPV or CentralTrackingPV for utility-scale solar
# (not urgent now, since CentralFixedPV is not currently in the model)
# technologies that are definitely being built (we assume near-term
# are underway and military projects are being built for their own
# reasons)
technology_targets_definite = [
(2016, 'CentralTrackingPV', 27.6), # Waianae Solar by Eurus Energy America
(2018, 'IC_Schofield', 54.0),
(2018, 'IC_Barge', 100.0), # JBPHH plant
(2021, 'IC_MCBH', 27.0),
# Distributed PV from Figure J-19
(2016, 'DistPV', 443.993468266547 - 210), # net of 210 MW of pre-existing DistPV
(2017, 'DistPV', 92.751756737742),
(2018, 'DistPV', 27.278236032368),
(2019, 'DistPV', 26.188129564885),
]
# technologies proposed in PSIP but which may not be built if a
# better plan is found
technology_targets_psip = [
(2018, 'OnshoreWind', 24), # NPM wind
(2018, 'CentralTrackingPV', 109.6), # replacement for canceled SunEdison projects
(2018, 'OnshoreWind', 10), # CBRE wind
(2018, 'CentralTrackingPV', 15), # CBRE PV
(2020, 'OnshoreWind', 30),
(2020, 'CentralTrackingPV', 60),
(2021, 'CC_383', 383.0),
(2030, 'CentralTrackingPV', 100),
(2030, 'OffshoreWind', 200),
(2040, 'CentralTrackingPV', 200),
(2040, 'OffshoreWind', 200),
(2045, 'CentralTrackingPV', 300),
(2045, 'OffshoreWind', 400),
(2020, 'DistPV', 21.8245069017911),
(2021, 'DistPV', 15.27427771741),
(2022, 'DistPV', 12.0039583149589),
(2023, 'DistPV', 10.910655054315),
(2024, 'DistPV', 10.913851847475),
(2025, 'DistPV', 10.910655054316),
(2026, 'DistPV', 9.82054858683205),
(2027, 'DistPV', 10.910655054316),
(2028, 'DistPV', 10.910655054315),
(2029, 'DistPV', 14.1873680430859),
(2030, 'DistPV', 9.82054858683205),
(2031, 'DistPV', 10.913851847475),
(2032, 'DistPV', 9.82054858683193),
(2033, 'DistPV', 14.1841712499261),
(2034, 'DistPV', 7.64033565186492),
(2035, 'DistPV', 13.094064782442),
(2036, 'DistPV', 9.82054858683205),
(2037, 'DistPV', 10.9202454337949),
(2038, 'DistPV', 9.66989970917803),
(2039, 'DistPV', 12.1514103994531),
(2040, 'DistPV', 12.2397218104919),
(2041, 'DistPV', 11.7673956211361),
(2042, 'DistPV', 10.9106550543149),
(2043, 'DistPV', 9.82054858683205),
(2044, 'DistPV', 15.27747451057),
(2045, 'DistPV', 10.291675978754),
]
if m.options.force_build is not None:
b = list(m.options.force_build)
b[0] = int(b[0]) # year
b[2] = float(b[2]) # quantity
b = tuple(b)
print "Forcing build: {}".format(b)
technology_targets_definite.append(b)
if psip:
technology_targets = technology_targets_definite + technology_targets_psip
else:
technology_targets = technology_targets_definite
def technology_target_init(m, per, tech):
"""Find the amount of each technology that is targeted to be built by the end of each period."""
start = 2000 if per == m.PERIODS.first() else per
end = per + m.period_length_years[per]
target = sum(
mw for (tyear, ttech, mw) in technology_targets
if ttech == tech and start <= tyear and tyear < end
)
return target
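    # Illustrative example: for a 5-year period starting in 2021, targets dated 2021-2025
    # are summed; the first period also absorbs any targets dated back to 2000.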
m.technology_target = Param(m.PERIODS, m.GENERATION_TECHNOLOGIES, initialize=technology_target_init)
# with PSIP: BuildGen is zero except for technology_targets
# (sum during each period or before first period)
# without PSIP: BuildGen is >= definite targets
def Enforce_Technology_Target_rule(m, per, tech):
"""Enforce targets for each technology; exact target for PSIP cases, minimum target for non-PSIP."""
def adjust_psip_credit(g, target):
if g in m.DISCRETELY_SIZED_GENS and target > 0.0:
# Rescale so that the n integral units that come closest
# to the target gets counted as the n.n fractional units
# needed to exactly meet the target.
# This is needed because some of the targets are based on
# nominal unit sizes rather than actual max output.
return (target / m.gen_unit_size[g]) / round(target / m.gen_unit_size[g])
else:
return 1.0
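        # Illustrative numbers (not from the PSIP data): with gen_unit_size = 100 MW and
        # a 180 MW target, the nearest discrete build is 2 units = 200 MW; the credit
        # factor is (180/100) / round(180/100) = 1.8/2 = 0.9, so 200 MW * 0.9 counts as
        # exactly the 180 MW target in the constraint below.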
target = m.technology_target[per, tech]
build = sum(
m.BuildGen[g, per] * adjust_psip_credit(g, target)
for g in m.GENERATION_PROJECTS
if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS
)
if type(build) is int and build == 0:
# no matching projects found
if target == 0:
return Constraint.Skip
else:
print(
"WARNING: target was set for {} in {}, but no matching projects are available. "
"Model will be infeasible.".format(tech, per)
)
return Constraint.Infeasible
elif psip:
return (build == target)
elif m.options.psip_minimal_renewables and any(txt in tech for txt in ["PV", "Wind", "Solar"]):
# only build the specified amount of renewables, no more
return (build == target)
else:
# treat the target as a lower bound
return (build >= target)
m.Enforce_Technology_Target = Constraint(
m.PERIODS, m.GENERATION_TECHNOLOGIES, rule=Enforce_Technology_Target_rule
)
aes_g = 'Oahu_AES'
aes_size = 180
aes_bld_year = 1992
m.AES_OPERABLE_PERIODS = Set(initialize = lambda m:
m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year]
)
m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary)
m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp:
Constraint.Skip if (aes_g, tp) not in m.GEN_TPS
else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size)
)
m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per:
0.0 if per not in m.AES_OPERABLE_PERIODS
else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year]
)
m.Cost_Components_Per_Period.append('AESDeactivateFixedCost')
if psip:
# keep AES active until 2022 or just before; deactivate after that
m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per:
(m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022
else (m.OperateAES[per] == 0)
)
# before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels
# period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG
# no biodiesel before 2040 (then phased in fast enough to meet the RPS)
m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [
(rfm, per)
for per in m.PERIODS if per + m.period_length_years[per] <= 2040
            for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel[rfm] == 'Biodiesel'
])
m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per:
m.FuelConsumptionInMarket[rfm, per] == 0
)
# # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 2016-04 PSIP fig. 5-5)
# # Note: this is needed because we assume HECO plans to burn LNG in the future
# # even in scenarios where it costs more than oil.
# m.PSIP_HIGH_LNG_PERIODS = Set(initialize=lambda m:
# [per for per in m.PERIODS if per + m.period_length_years[per] > 2021 and per < 2045]
# )
# m.OilProductionGWhPerYear = Expression(m.PERIODS, rule=lambda m, per:
# sum(
# m.DispatchGenByFuel[g, tp, f] * m.tp_weight_in_year[tp] * 0.001 # convert from MWh to GWh
# for f in ['Diesel', 'LSFO', 'LSFO-Diesel-Blend']
# for g in m.GENS_BY_FUEL[f]
# for tp in m.TPS_IN_PERIOD[per] if (g, tp) in m.GEN_TPS
# )
# )
# m.Upper_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per:
# (m.OilProductionGWhPerYear[per] <= 300)
# if per + 0.5 * m.period_length_years[per] >= 2021
# else
# Constraint.Skip
# )
# # lower limit is in place to roughly reflect HECO's plan
# m.Lower_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per:
# (m.OilProductionGWhPerYear[per] >= 100)
# if per + m.period_length_years[per] < 2040 # relax constraint if period ends after 2040
# else
# Constraint.Skip
# )
# force LNG conversion in 2021 (modeled on similar constraint in lng_conversion.py)
# This could have extra code to skip the constraint if there are no periods after 2021,
# but it is unlikely ever to be run that way.
# Note: this is not needed if some plants are forced to run on LNG
# NOTE: this is no longer used; use '--force-lng-tier container' instead
# m.PSIP_Force_LNG_Conversion = Constraint(m.LOAD_ZONES, rule=lambda m, z:
# m.ConvertToLNG[
# z,
# min(per for per in m.PERIODS if per + m.period_length_years[per] > 2021)
# ] == 1
# )
# # Kahe 5, Kahe 6, Kalaeloa and CC_383 only burn LNG after 2021
# # This is not used because it creates a weird situation where HECO runs less-efficient non-LNG
# # plants instead of more efficient LNG-capable plants on oil.
# # there may be a faster way to build this, but it's not clear what
# m.PSIP_Force_LNG_Use = Constraint(m.GEN_TP_FUELS, rule=lambda m, g, tp, fuel:
# (m.GenFuelUseRate[g, tp, fuel] == 0)
# if g in m.LNG_CONVERTED_PLANTS
# and fuel != 'LNG'
# and m.tp_period[tp] + m.period_length_years[m.tp_period[tp]] > 2021
# else
# Constraint.Skip
# )
# don't allow construction of any advanced technologies (e.g., batteries, pumped hydro, fuel cells)
advanced_tech_vars = [
"BuildBattery",
"BuildPumpedHydroMW", "BuildAnyPumpedHydro",
"BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg",
"BuildFuelCellMW",
]
def no_advanced_tech_rule_factory(v):
return lambda m, *k: (getattr(m, v)[k] == 0)
for v in advanced_tech_vars:
try:
var = getattr(m, v)
setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v)))
except AttributeError:
pass # model doesn't have this var
# # don't allow any changes to the fuel market, including bulk LNG
# # not used now; use "--force-lng-tier container" instead
# m.PSIP_Deactivate_Limited_RFM_Supply_Tiers = Constraint(m.RFM_SUPPLY_TIERS,
# rule=lambda m, r, p, st:
# Constraint.Skip if (m.rfm_supply_tier_limit[r, p, st] == float('inf'))
# else (m.RFMSupplyTierActivate[r, p, st] == 0)
# )
| 47.348276 | 169 | 0.603161 |
4f65b0334398d459cbefbe4b59d549552caacbaa | 18,360 | py | Python | src/spring/azext_spring/commands.py | LGDoor/azure-cli-extensions | 570a7c181999c1dd160d48f8454aab6cea057a20 | [
"MIT"
] | null | null | null | src/spring/azext_spring/commands.py | LGDoor/azure-cli-extensions | 570a7c181999c1dd160d48f8454aab6cea057a20 | [
"MIT"
] | null | null | null | src/spring/azext_spring/commands.py | LGDoor/azure-cli-extensions | 570a7c181999c1dd160d48f8454aab6cea057a20 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_spring._utils import handle_asc_exception
from ._client_factory import (cf_spring_20220501preview,
cf_spring_20220301preview,
cf_spring_20220101preview,
cf_spring_20201101preview,
cf_config_servers)
from ._transformers import (transform_spring_table_output,
transform_app_table_output,
transform_spring_deployment_output,
transform_spring_certificate_output,
transform_spring_custom_domain_output,
transform_application_configuration_service_output,
transform_service_registry_output,
transform_spring_cloud_gateway_output,
transform_api_portal_output)
from ._marketplace import (transform_marketplace_plan_output)
from ._validators_enterprise import (validate_gateway_update, validate_api_portal_update)
from ._app_managed_identity_validator import (validate_app_identity_remove_or_warning,
validate_app_identity_assign_or_warning)
# pylint: disable=too-many-statements
def load_command_table(self, _):
spring_routing_util = CliCommandType(
operations_tmpl='azext_spring.spring_instance#{}',
client_factory=cf_spring_20220501preview
)
app_command = CliCommandType(
operations_tmpl='azext_spring.app#{}',
client_factory=cf_spring_20220501preview
)
app_managed_identity_command = CliCommandType(
operations_tmpl='azext_spring.app_managed_identity#{}',
client_factory=cf_spring_20220301preview
)
service_registry_cmd_group = CliCommandType(
operations_tmpl='azext_spring.service_registry#{}',
client_factory=cf_spring_20220101preview
)
builder_cmd_group = CliCommandType(
operations_tmpl="azext_spring._build_service#{}",
client_factory=cf_spring_20220101preview
)
buildpack_binding_cmd_group = CliCommandType(
operations_tmpl="azext_spring.buildpack_binding#{}",
client_factory=cf_spring_20220101preview
)
application_configuration_service_cmd_group = CliCommandType(
operations_tmpl='azext_spring.application_configuration_service#{}',
client_factory=cf_spring_20220101preview
)
gateway_cmd_group = CliCommandType(
operations_tmpl='azext_spring.gateway#{}',
client_factory=cf_spring_20220101preview
)
gateway_custom_domain_cmd_group = CliCommandType(
operations_tmpl='azext_spring.gateway#{}',
client_factory=cf_spring_20220101preview
)
gateway_route_config_cmd_group = CliCommandType(
operations_tmpl='azext_spring.gateway#{}',
client_factory=cf_spring_20220101preview
)
api_portal_cmd_group = CliCommandType(
operations_tmpl='azext_spring.api_portal#{}',
client_factory=cf_spring_20220101preview
)
api_portal_custom_domain_cmd_group = CliCommandType(
operations_tmpl='azext_spring.api_portal#{}',
client_factory=cf_spring_20220101preview
)
with self.command_group('spring', custom_command_type=spring_routing_util,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'spring_create', supports_no_wait=True)
g.custom_command('list-marketplace-plan', 'spring_list_marketplace_plan',
is_preview=True,
table_transformer=transform_marketplace_plan_output)
with self.command_group('spring', client_factory=cf_spring_20220501preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('update', 'spring_update', supports_no_wait=True)
g.custom_command('delete', 'spring_delete', supports_no_wait=True)
g.custom_command('start', 'spring_start', supports_no_wait=True)
g.custom_command('stop', 'spring_stop', supports_no_wait=True)
g.custom_command('list', 'spring_list', table_transformer=transform_spring_table_output)
g.custom_show_command('show', 'spring_get', table_transformer=transform_spring_table_output)
with self.command_group('spring test-endpoint', client_factory=cf_spring_20220101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('enable ', 'enable_test_endpoint')
g.custom_show_command('disable ', 'disable_test_endpoint')
g.custom_command('renew-key', 'regenerate_keys')
g.custom_command('list', 'list_keys')
with self.command_group('spring config-server', client_factory=cf_config_servers,
exception_handler=handle_asc_exception) as g:
g.custom_command('set', 'config_set', supports_no_wait=True)
g.custom_command('clear', 'config_delete')
g.custom_show_command('show', 'config_get')
with self.command_group('spring config-server git', client_factory=cf_config_servers,
supports_local_cache=True, exception_handler=handle_asc_exception) as g:
g.custom_command('set', 'config_git_set')
g.custom_command('repo add', 'config_repo_add')
g.custom_command('repo remove', 'config_repo_delete')
g.custom_command('repo update', 'config_repo_update')
g.custom_command('repo list', 'config_repo_list')
with self.command_group('spring app', custom_command_type=app_command,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'app_create')
g.custom_command('update', 'app_update', supports_no_wait=True)
with self.command_group('spring app', client_factory=cf_spring_20220501preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('set-deployment', 'app_set_deployment',
supports_no_wait=True)
g.custom_command('unset-deployment', 'app_unset_deployment',
supports_no_wait=True)
g.custom_command('scale', 'app_scale', supports_no_wait=True)
g.custom_command('show-deploy-log', 'app_get_build_log')
g.custom_command('delete', 'app_delete')
g.custom_command('list', 'app_list',
table_transformer=transform_app_table_output)
g.custom_show_command(
'show', 'app_get', table_transformer=transform_app_table_output)
g.custom_command('start', 'app_start', supports_no_wait=True)
g.custom_command('stop', 'app_stop', supports_no_wait=True)
g.custom_command('restart', 'app_restart', supports_no_wait=True)
g.custom_command('logs', 'app_tail_log')
g.custom_command('append-persistent-storage', 'app_append_persistent_storage')
g.custom_command('append-loaded-public-certificate', 'app_append_loaded_public_certificate')
with self.command_group('spring app identity', custom_command_type=app_managed_identity_command,
exception_handler=handle_asc_exception) as g:
g.custom_command('assign', 'app_identity_assign', validator=validate_app_identity_assign_or_warning)
g.custom_command('remove', 'app_identity_remove', validator=validate_app_identity_remove_or_warning)
g.custom_command('force-set', 'app_identity_force_set', is_preview=True)
g.custom_show_command('show', 'app_identity_show')
with self.command_group('spring app log', client_factory=cf_spring_20220101preview,
deprecate_info=g.deprecate(redirect='az spring app logs', hide=True),
exception_handler=handle_asc_exception) as g:
g.custom_command('tail', 'app_tail_log')
with self.command_group('spring app', custom_command_type=app_command, client_factory=cf_spring_20220501preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('deploy', 'app_deploy', supports_no_wait=True)
with self.command_group('spring app deployment', custom_command_type=app_command, client_factory=cf_spring_20220501preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'deployment_create', supports_no_wait=True)
with self.command_group('spring app deployment', client_factory=cf_spring_20220501preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('list', 'deployment_list',
table_transformer=transform_spring_deployment_output)
g.custom_show_command(
'show', 'deployment_get', table_transformer=transform_spring_deployment_output)
g.custom_command('delete', 'deployment_delete', supports_no_wait=True)
g.custom_command('generate-heap-dump', 'deployment_generate_heap_dump')
g.custom_command('generate-thread-dump', 'deployment_generate_thread_dump')
g.custom_command('start-jfr', 'deployment_start_jfr')
with self.command_group('spring app binding', client_factory=cf_spring_20220101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('list', 'binding_list')
g.custom_show_command('show', 'binding_get')
g.custom_command('cosmos add', 'binding_cosmos_add')
g.custom_command('cosmos update', 'binding_cosmos_update')
g.custom_command('mysql add', 'binding_mysql_add')
g.custom_command('mysql update', 'binding_mysql_update')
g.custom_command('redis add', 'binding_redis_add')
g.custom_command('redis update', 'binding_redis_update')
g.custom_show_command('remove', 'binding_remove')
with self.command_group('spring storage', client_factory=cf_spring_20220101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('list', 'storage_list')
g.custom_show_command('show', 'storage_get')
g.custom_command('add', 'storage_add')
g.custom_command('update', 'storage_update')
g.custom_command('remove', 'storage_remove')
g.custom_command('list-persistent-storage', "storage_list_persistent_storage", table_transformer=transform_app_table_output)
with self.command_group('spring certificate', client_factory=cf_spring_20220101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('add', 'certificate_add')
g.custom_show_command('show', 'certificate_show', table_transformer=transform_spring_certificate_output)
g.custom_command('list', 'certificate_list', table_transformer=transform_spring_certificate_output)
g.custom_command('remove', 'certificate_remove')
g.custom_command('list-reference-app', 'certificate_list_reference_app', table_transformer=transform_app_table_output)
with self.command_group('spring app custom-domain', client_factory=cf_spring_20220101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('bind', 'domain_bind')
g.custom_show_command('show', 'domain_show', table_transformer=transform_spring_custom_domain_output)
g.custom_command('list', 'domain_list', table_transformer=transform_spring_custom_domain_output)
g.custom_command('update', 'domain_update')
g.custom_command('unbind', 'domain_unbind')
with self.command_group('spring app-insights',
client_factory=cf_spring_20201101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('update', 'app_insights_update', supports_no_wait=True)
g.custom_show_command('show', 'app_insights_show')
with self.command_group('spring service-registry',
custom_command_type=service_registry_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_show_command('show', 'service_registry_show',
table_transformer=transform_service_registry_output)
g.custom_command('bind', 'service_registry_bind')
g.custom_command('unbind', 'service_registry_unbind')
with self.command_group('spring application-configuration-service',
custom_command_type=application_configuration_service_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_command('clear', 'application_configuration_service_clear')
g.custom_show_command('show', 'application_configuration_service_show',
table_transformer=transform_application_configuration_service_output)
g.custom_command('bind', 'application_configuration_service_bind')
g.custom_command('unbind', 'application_configuration_service_unbind')
with self.command_group('spring application-configuration-service git repo',
custom_command_type=application_configuration_service_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_command('add', 'application_configuration_service_git_add')
g.custom_command('update', 'application_configuration_service_git_update')
g.custom_command('remove', 'application_configuration_service_git_remove')
g.custom_command('list', 'application_configuration_service_git_list')
with self.command_group('spring gateway',
custom_command_type=gateway_cmd_group,
exception_handler=handle_asc_exception,
is_preview=True) as g:
g.custom_show_command('show', 'gateway_show', table_transformer=transform_spring_cloud_gateway_output)
g.custom_command('update', 'gateway_update', validator=validate_gateway_update, supports_no_wait=True)
g.custom_command('clear', 'gateway_clear', supports_no_wait=True)
with self.command_group('spring gateway custom-domain',
custom_command_type=gateway_custom_domain_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_show_command('show', 'gateway_custom_domain_show',
table_transformer=transform_spring_custom_domain_output)
g.custom_command('list', 'gateway_custom_domain_list',
table_transformer=transform_spring_custom_domain_output)
g.custom_command('bind', 'gateway_custom_domain_update')
g.custom_command('unbind', 'gateway_custom_domain_unbind')
g.custom_command('update', 'gateway_custom_domain_update')
with self.command_group('spring gateway route-config',
custom_command_type=gateway_route_config_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_show_command('show', 'gateway_route_config_show')
g.custom_command('list', 'gateway_route_config_list')
g.custom_command('create', 'gateway_route_config_create')
g.custom_command('update', 'gateway_route_config_update')
g.custom_command('remove', 'gateway_route_config_remove')
with self.command_group('spring api-portal',
custom_command_type=api_portal_cmd_group,
exception_handler=handle_asc_exception,
is_preview=True) as g:
g.custom_show_command('show', 'api_portal_show', table_transformer=transform_api_portal_output)
g.custom_command('update', 'api_portal_update', validator=validate_api_portal_update)
g.custom_command('clear', 'api_portal_clear')
with self.command_group('spring api-portal custom-domain',
custom_command_type=api_portal_custom_domain_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_show_command('show', 'api_portal_custom_domain_show',
table_transformer=transform_spring_custom_domain_output)
g.custom_command('list', 'api_portal_custom_domain_list',
table_transformer=transform_spring_custom_domain_output)
g.custom_command('bind', 'api_portal_custom_domain_update')
g.custom_command('unbind', 'api_portal_custom_domain_unbind')
g.custom_command('update', 'api_portal_custom_domain_update')
with self.command_group('spring build-service builder',
custom_command_type=builder_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'create_or_update_builder', supports_no_wait=True)
g.custom_command('update', 'create_or_update_builder', supports_no_wait=True)
g.custom_show_command('show', 'builder_show')
g.custom_command('delete', 'builder_delete', supports_no_wait=True, confirmation=True)
with self.command_group('spring build-service builder buildpack-binding',
custom_command_type=buildpack_binding_cmd_group,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'create_or_update_buildpack_binding')
g.custom_command('set', 'create_or_update_buildpack_binding')
g.custom_show_command('show', 'buildpack_binding_show')
g.custom_command('list', 'buildpack_binding_list')
g.custom_command('delete', 'buildpack_binding_delete', confirmation=True)
with self.command_group('spring build-service', exception_handler=handle_asc_exception):
pass
with self.command_group('spring', exception_handler=handle_asc_exception):
pass
| 57.018634 | 132 | 0.68927 |
4f634b7ec8d5e8a54974d33567aa7d12d24035e9 | 10,931 | py | Python | pythonFiles/tests/testing_tools/adapter/test_pytest.py | selimb/vscode-python | ce6acf8bc1264c957dda844b3870234b69b51bbe | [
"MIT"
] | null | null | null | pythonFiles/tests/testing_tools/adapter/test_pytest.py | selimb/vscode-python | ce6acf8bc1264c957dda844b3870234b69b51bbe | [
"MIT"
] | null | null | null | pythonFiles/tests/testing_tools/adapter/test_pytest.py | selimb/vscode-python | ce6acf8bc1264c957dda844b3870234b69b51bbe | [
"MIT"
] | null | null | null | import os.path
import unittest
from ...util import Stub, StubProxy
from testing_tools.adapter.errors import UnsupportedCommandError
from testing_tools.adapter.info import TestInfo, TestPath
from testing_tools.adapter.pytest import (
discover, add_cli_subparser, TestCollector
)
class StubSubparsers(StubProxy):
def __init__(self, stub=None, name='subparsers'):
super(StubSubparsers, self).__init__(stub, name)
def add_parser(self, name):
self.add_call('add_parser', None, {'name': name})
return self.return_add_parser
class StubArgParser(StubProxy):
def __init__(self, stub=None):
super(StubArgParser, self).__init__(stub, 'argparser')
def add_argument(self, *args, **kwargs):
self.add_call('add_argument', args, kwargs)
class StubPyTest(StubProxy):
def __init__(self, stub=None):
super(StubPyTest, self).__init__(stub, 'pytest')
self.return_main = 0
def main(self, args, plugins):
self.add_call('main', None, {'args': args, 'plugins': plugins})
return self.return_main
class StubPlugin(StubProxy):
def __init__(self, stub=None):
super(StubPlugin, self).__init__(stub, 'plugin')
def __getattr__(self, name):
if not name.startswith('pytest_'):
raise AttributeError(name)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class FakeFunc(object):
def __init__(self, name):
self.__name__ = name
class FakeMarker(object):
def __init__(self, name):
self.name = name
class StubPytestItem(StubProxy):
def __init__(self, stub=None, **attrs):
super(StubPytestItem, self).__init__(stub, 'pytest.Item')
self.__dict__.update(attrs)
if 'own_markers' not in attrs:
self.own_markers = ()
def __getattr__(self, name):
self.add_call(name + ' (attr)', None, None)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class StubPytestSession(StubProxy):
def __init__(self, stub=None):
super(StubPytestSession, self).__init__(stub, 'pytest.Session')
def __getattr__(self, name):
self.add_call(name + ' (attr)', None, None)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
class StubPytestConfig(StubProxy):
def __init__(self, stub=None):
super(StubPytestConfig, self).__init__(stub, 'pytest.Config')
def __getattr__(self, name):
self.add_call(name + ' (attr)', None, None)
def func(*args, **kwargs):
self.add_call(name, args or None, kwargs or None)
return func
##################################
# tests
class AddCLISubparserTests(unittest.TestCase):
def test_discover(self):
stub = Stub()
subparsers = StubSubparsers(stub)
parser = StubArgParser(stub)
subparsers.return_add_parser = parser
add_cli_subparser('discover', 'pytest', subparsers)
self.assertEqual(stub.calls, [
('subparsers.add_parser', None, {'name': 'pytest'}),
])
def test_unsupported_command(self):
subparsers = StubSubparsers(name=None)
subparsers.return_add_parser = None
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('run', 'pytest', subparsers)
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('debug', 'pytest', subparsers)
with self.assertRaises(UnsupportedCommandError):
add_cli_subparser('???', 'pytest', subparsers)
self.assertEqual(subparsers.calls, [
('add_parser', None, {'name': 'pytest'}),
('add_parser', None, {'name': 'pytest'}),
('add_parser', None, {'name': 'pytest'}),
])
class DiscoverTests(unittest.TestCase):
DEFAULT_ARGS = [
'-pno:terminal',
'--collect-only',
]
def test_basic(self):
stub = Stub()
pytest = StubPyTest(stub)
plugin = StubPlugin(stub)
expected = []
plugin.discovered = expected
discovered = discover([], _pytest_main=pytest.main, _plugin=plugin)
self.assertEqual(discovered, expected)
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
])
def test_failure(self):
stub = Stub()
pytest = StubPyTest(stub)
pytest.return_main = 2
plugin = StubPlugin(stub)
with self.assertRaises(Exception):
discover([], _pytest_main=pytest.main, _plugin=plugin)
self.assertEqual(stub.calls, [
('pytest.main', None, {'args': self.DEFAULT_ARGS,
'plugins': [plugin]}),
])
class CollectorTests(unittest.TestCase):
def test_modifyitems(self):
stub = Stub()
session = StubPytestSession(stub)
config = StubPytestConfig(stub)
collector = TestCollector()
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile1 = './test_spam.py'.replace('/', os.path.sep)
relfile2 = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
collector.pytest_collection_modifyitems(session, config, [
StubPytestItem(
stub,
nodeid='test_spam.py::SpamTests::test_one',
name='test_one',
location=('test_spam.py', 12, 'SpamTests.test_one'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_one'),
),
StubPytestItem(
stub,
nodeid='test_spam.py::SpamTests::test_other',
name='test_other',
location=('test_spam.py', 19, 'SpamTests.test_other'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_other'),
),
StubPytestItem(
stub,
nodeid='test_spam.py::test_all',
name='test_all',
location=('test_spam.py', 144, 'test_all'),
fspath=os.path.join(testroot, 'test_spam.py'),
function=FakeFunc('test_all'),
),
StubPytestItem(
stub,
nodeid=relfile2 + '::All::BasicTests::test_first',
name='test_first',
location=(relfile2, 31, 'All.BasicTests.test_first'),
fspath=os.path.join(testroot, relfile2),
function=FakeFunc('test_first'),
),
StubPytestItem(
stub,
nodeid=relfile2 + '::All::BasicTests::test_each[1+2-3]',
name='test_each[1+2-3]',
location=(relfile2, 62, 'All.BasicTests.test_each[1+2-3]'),
fspath=os.path.join(testroot, relfile2),
function=FakeFunc('test_each'),
own_markers=[FakeMarker(v) for v in [
# supported
'skip', 'skipif', 'xfail',
# duplicate
'skip',
# ignored (pytest-supported)
'parameterize', 'usefixtures', 'filterwarnings',
# ignored (custom)
'timeout',
]],
),
])
self.maxDiff = None
self.assertEqual(collector.discovered, [
TestInfo(
id='test_spam.py::SpamTests::test_one',
name='test_one',
path=TestPath(
root=testroot,
relfile=relfile1,
func='SpamTests.test_one',
sub=None,
),
lineno=12,
markers=None,
),
TestInfo(
id='test_spam.py::SpamTests::test_other',
name='test_other',
path=TestPath(
root=testroot,
relfile=relfile1,
func='SpamTests.test_other',
sub=None,
),
lineno=19,
markers=None,
),
TestInfo(
id='test_spam.py::test_all',
name='test_all',
path=TestPath(
root=testroot,
relfile=relfile1,
func='test_all',
sub=None,
),
lineno=144,
markers=None,
),
TestInfo(
id=relfile2 + '::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=relfile2,
func='All.BasicTests.test_first',
sub=None,
),
lineno=31,
markers=None,
),
TestInfo(
id=relfile2 + '::All::BasicTests::test_each[1+2-3]',
name='test_each[1+2-3]',
path=TestPath(
root=testroot,
relfile=relfile2,
func='All.BasicTests.test_each',
sub=['[1+2-3]'],
),
lineno=62,
markers=['expected-failure', 'skip', 'skip-if'],
),
])
self.assertEqual(stub.calls, [])
def test_finish(self):
stub = Stub()
session = StubPytestSession(stub)
testroot = '/a/b/c'.replace('/', os.path.sep)
relfile = 'x/y/z/test_eggs.py'.replace('/', os.path.sep)
session.items = [
StubPytestItem(
stub,
nodeid=relfile + '::SpamTests::test_spam',
name='test_spam',
location=(relfile, 12, 'SpamTests.test_spam'),
fspath=os.path.join(testroot, relfile),
function=FakeFunc('test_spam'),
),
]
collector = TestCollector()
collector.pytest_collection_finish(session)
self.maxDiff = None
self.assertEqual(collector.discovered, [
TestInfo(
id=relfile + '::SpamTests::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='SpamTests.test_spam',
sub=None,
),
lineno=12,
markers=None,
),
])
self.assertEqual(stub.calls, [])
| 31.961988 | 75 | 0.515323 |
4f6406b74f1186ce276ff343e20db1f3ef2e3991 | 3,397 | py | Python | python/dllib/examples/nnframes/xgboost/xgboost_classifier.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null | python/dllib/examples/nnframes/xgboost/xgboost_classifier.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null | python/dllib/examples/nnframes/xgboost/xgboost_classifier.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | ["Apache-2.0"] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import OptionParser
import numpy as np
from bigdl.dllib.nn.criterion import *
from bigdl.dllib.nn.layer import *
from bigdl.dllib.optim.optimizer import *
from numpy.testing import assert_allclose
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.tuning import ParamGridBuilder
from pyspark.sql.types import *
from pyspark.ml.feature import VectorAssembler
from bigdl.dllib.nncontext import *
from bigdl.dllib.feature.common import *
from bigdl.dllib.feature.image import *
from bigdl.dllib.keras import layers as ZLayer
from bigdl.dllib.keras.models import Model as ZModel
from bigdl.dllib.keras.optimizers import Adam as KAdam
from bigdl.dllib.nnframes import *
from bigdl.dllib.utils.tf import *
import csv
import errno
def process(filepath, demo):
sparkConf = init_spark_conf().setMaster("local[1]").setAppName("testXGBClassifier")
sc = init_nncontext(sparkConf)
sqlContext = SQLContext(sc)
if demo:
data = sc.parallelize([
(1.0, 2.0, 3.0, 4.0, 7.0, 1),
(1.0, 3.0, 8.0, 2.0, 5.0, 0),
(2.0, 3.4, 5.0, 2.0, 4.0, 1)
])
N = 6
train_data = data
test_data = data
else:
dataset = np.loadtxt(filepath, delimiter=',')
print(type(dataset))
M, N = dataset.shape
train_X = dataset[:(int)(0.8 * M), :]
test_X = dataset[(int)(0.8 * M):, :]
train_data = sc.parallelize(train_X.tolist())
test_data = sc.parallelize(test_X.tolist())
columns = ["f" + str(i) for i in range(1, N)]
features = ["f" + str(i) for i in range(1, N)]
columns.append("label")
df1 = train_data.toDF(columns)
vecasembler = VectorAssembler(inputCols=features, outputCol="features")
traindf = vecasembler.transform(df1).select("features", "label").cache()
traindf.show()
df2 = test_data.toDF(columns)
testdf = vecasembler.transform(df2).select("features", "label").cache()
testdf.show()
xgbCf0 = XGBClassifier()
xgbCf0.setNthread(1)
xgbCf0.setNumRound(10)
xgbCf0.setMissing(0)
xgbmodel = xgbCf0.fit(traindf)
xgbmodel.setFeaturesCol("features")
y0 = xgbmodel.transform(testdf)
print(y0)
y0.show()
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", "--file-path", type=str, dest="data_path",
default=".", help="Path where data are stored")
parser.add_option("-d", "--demo", action="store_true", dest="demo", default=False)
parser.add_option("-m", "--master", type=str, dest="the master choice")
(option, args) = parser.parse_args(sys.argv)
if option.data_path is None:
errno("data path is not specified")
datapath = option.data_path
process(datapath, option.demo)
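# ---------------------------------------------------------------------------
# Editor's note: illustrative invocations (an assumption, not part of the
# original example).  The script expects a comma-separated numeric CSV whose
# last column holds the 0/1 label:
#   python xgboost_classifier.py -f /path/to/data.csv
#   python xgboost_classifier.py --demo   # use the tiny built-in sample instead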
| 35.757895 | 87 | 0.681778 |
4f5a4debd3536da66dbdb2d4fa44363b52599762 | 216 | py | Python | app/v2/notifications/__init__.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | ["MIT"] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | app/v2/notifications/__init__.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | ["MIT"] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | app/v2/notifications/__init__.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | ["MIT"] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | from flask import Blueprint
from app.v2.errors import register_errors
v2_notification_blueprint = Blueprint("v2_notifications", __name__, url_prefix='/v2/notifications')
register_errors(v2_notification_blueprint)
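# Editor's note: illustrative registration sketch (not part of the original
# module); the Flask application object below is an assumption:
#   from flask import Flask
#   application = Flask(__name__)
#   application.register_blueprint(v2_notification_blueprint)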
| 27 | 99 | 0.847222 |
4f650be65b34a33ffd4d6bde5da28c827ee4fba6 | 11,694 | py | Python | main.py | martinetoering/3D-ResNets-PyTorch-TimeCycle | b8a8b43c2a51848284e1d41d0452855e631b7df2 | ["MIT"] | null | null | null | main.py | martinetoering/3D-ResNets-PyTorch-TimeCycle | b8a8b43c2a51848284e1d41d0452855e631b7df2 | ["MIT"] | null | null | null | main.py | martinetoering/3D-ResNets-PyTorch-TimeCycle | b8a8b43c2a51848284e1d41d0452855e631b7df2 | ["MIT"] | null | null | null | import os
import sys
import json
import numpy as np
import torch
from torch import nn
from torch.optim import lr_scheduler
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.nn.functional as F
import argparse
import random
import pickle
import scipy.misc
import models.videos.model_simple as models
from opts import parse_opts
from geotnf.transformation import GeometricTnf
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset_utils import Logger
from datasets.hmdb51 import HMDB51
from train import train_epoch
from validation import val_epoch
import test
import eval_hmdb51
def partial_load(pretrained_dict, model):
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
    model.load_state_dict(model_dict)
def get_params(opt):
params = {}
params['filelist'] = opt.list
params['imgSize'] = 256
params['imgSize2'] = 320
params['cropSize'] = 240
params['cropSize2'] = 80
params['offset'] = 0
state = {k: v for k, v in opt._get_kwargs()}
print('\n')
params['predDistance'] = state['predDistance']
print('predDistance: ' + str(params['predDistance']))
params['batch_size'] = state['batch_size']
print('batch_size: ' + str(params['batch_size']) )
print('temperature: ' + str(state['T']))
params['gridSize'] = state['gridSize']
print('gridSize: ' + str(params['gridSize']) )
params['n_classes'] = state['n_classes']
print('n_classes: ' + str(params['n_classes']) )
params['videoLen'] = state['videoLen']
print('videoLen: ' + str(params['videoLen']) )
return params, state
if __name__ == '__main__':
opt = parse_opts()
print("Gpu ID's:", opt.gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
print("Torch version:", torch.__version__)
print("Train, val, test, evaluate:", not opt.no_train, opt.no_val, not opt.no_test, not opt.no_eval)
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
split_list = opt.list.split("_")[1][0]
split_annotation = opt.annotation_path.split("_")[1][0]
if split_list != split_annotation:
print("Please provide list and annotation for same split")
exit()
split = (opt.annotation_path.split(".")[0]).split("/")[-1]
print("Split of HMDB51:", split)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.list = os.path.join(opt.root_path, opt.list)
folder = opt.result_path
opt.result_path = os.path.join(opt.root_path, opt.result_path + "_" + split)
if not os.path.isdir(opt.result_path):
os.mkdir(opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
params, state = get_params(opt)
print("Result path:", opt.result_path)
print("Resume path:", opt.resume_path)
print("Video path:", opt.video_path)
print("Annotation path:", opt.annotation_path)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
print("Architecture:", opt.arch)
# Random seed
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if not opt.no_cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
print("\n")
print("Sample Size:", opt.sample_size)
print("Video Len:", opt.videoLen)
print("Frame Gap:", opt.frame_gap)
print("Pred Distance:", opt.predDistance)
print("Sample Duration:", opt.sample_duration)
print("TimeCycle weight:", opt.timecycle_weight)
print("Binary classification weight:", opt.binary_class_weight)
model = models.CycleTime(class_num=params['n_classes'],
trans_param_num=3,
frame_gap=opt.frame_gap,
videoLen=opt.videoLen,
sample_duration=opt.sample_duration,
pretrained=opt.pretrained_imagenet,
temporal_out=params['videoLen'],
T=opt.T,
hist=opt.hist,
batch_size=opt.batch_size)
if not opt.no_cuda:
model = model.cuda()
cudnn.benchmark = False
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
criterion = nn.CrossEntropyLoss()
if not opt.no_cuda:
criterion = criterion.cuda()
print('Weight_decay: ' + str(opt.weight_decay))
print('Beta1: ' + str(opt.momentum))
print("\n")
print("LOADING PRETRAIN/RESUME AND LOGGER")
print("\n")
optimizer = optim.Adam(model.parameters(),
lr=opt.learning_rate,
betas=(opt.momentum, 0.999),
weight_decay=opt.weight_decay)
print("\n")
print("Adam Optimizer made")
if opt.pretrain_path:
# Load checkpoint.
print('Loading pretrained model {}'.format(opt.pretrain_path))
assert os.path.isfile(opt.pretrain_path), 'No pretrain directory found'
checkpoint = torch.load(opt.pretrain_path)
partial_load(checkpoint['state_dict'], model)
del checkpoint
if opt.resume_path:
# Load checkpoint.
print('Loading checkpoint {}'.format(opt.resume_path))
assert os.path.isfile(opt.resume_path), 'No checkpoint directory found'
checkpoint = torch.load(opt.resume_path)
assert opt.arch == checkpoint['arch']
opt.begin_epoch = checkpoint['epoch']
partial_load(checkpoint['state_dict'], model)
if not opt.no_train:
optimizer.load_state_dict(checkpoint['optimizer'])
train_log_file = 'train_resume_{}.log'.format(opt.begin_epoch)
train_batch_log_file = 'train_batch_resume_{}.log'.format(opt.begin_epoch)
val_log_file = 'val_resume_{}.log'.format(opt.begin_epoch)
opts_file = os.path.join(opt.result_path, 'opts_resume_{}.json'.format(opt.begin_epoch))
del checkpoint
else:
train_log_file = 'train.log'
train_batch_log_file = 'train_batch.log'
val_log_file = 'val.log'
opts_file = os.path.join(opt.result_path, 'opts.json')
if not opt.no_train:
# Save opts
print("\n")
print("Save opts at", opts_file)
with open(opts_file, 'w') as opt_file:
json.dump(vars(opt), opt_file)
print("\n")
print("TRAINING")
print("\n")
train_logger = Logger(
os.path.join(opt.result_path, train_log_file),
['epoch', 'loss', 'loss_hmdb_class', 'loss_timecycle', 'loss_bin_class', 'acc', 'acc_bin', 'lr', 'loss_sim', 'theta_loss', 'theta_skip_loss'])
train_batch_logger = Logger(
os.path.join(opt.result_path, train_batch_log_file),
['epoch', 'batch', 'iter', 'loss_hmdb_class', 'loss_timecycle', 'loss_bin_class', 'acc', 'acc_bin', 'lr', 'loss_sim', 'theta_loss', 'theta_skip_loss'])
target_transform = ClassLabel()
geometric_transform = GeometricTnf(
'affine',
out_h=params['cropSize2'],
out_w=params['cropSize2'],
use_cuda = False)
training_data = HMDB51(
params,
opt.video_path,
opt.annotation_path,
'training',
frame_gap=opt.frame_gap,
sample_duration=opt.sample_duration,
target_transform=target_transform,
geometric_transform=geometric_transform)
print("Training data obtained")
train_loader = torch.utils.data.DataLoader(
training_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_threads,
pin_memory=True)
print("Train loader made")
print("Learning rate:", opt.learning_rate)
print("Momentum:", opt.momentum)
print("Weight decay:", opt.weight_decay)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer,
'min',
patience=opt.lr_patience)
print("Lr_patience", opt.lr_patience)
print("\n")
if not opt.no_val:
print("VALIDATION")
print("\n")
val_logger = Logger(
os.path.join(opt.result_path, val_log_file), ['epoch', 'loss', 'acc'])
target_transform = ClassLabel()
validation_data = HMDB51(
params,
opt.video_path,
opt.annotation_path,
'validation',
sample_duration=opt.sample_duration,
n_samples_for_each_video=opt.n_val_samples,
target_transform=target_transform)
print("Validation data loaded")
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
print("Validation loader done")
#print("MODEL:", model.state_dict().keys())
print("\n")
print("RUNNING")
print("\n")
for i in range(opt.begin_epoch, opt.n_epochs + 1):
if not opt.no_train:
train_epoch(i, params, train_loader, model, criterion, optimizer, opt, train_logger, train_batch_logger)
if not opt.no_val:
validation_loss = val_epoch(i, params, val_loader, model, criterion, opt, val_logger)
if not opt.no_train and not opt.no_val:
scheduler.step(validation_loss)
if not opt.no_test:
print("\n")
print("TESTING")
target_transform = VideoID()
test_data = HMDB51(
params,
opt.video_path,
opt.annotation_path,
"validation",
sample_duration=opt.sample_duration,
n_samples_for_each_video=0,
target_transform=target_transform)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
if not opt.no_train and not opt.no_val:
epoch = opt.n_epochs
else:
epoch = opt.begin_epoch - 1
val_json_name = str(epoch)
test.test(test_loader, model, opt, test_data.class_names, val_json_name)
if not opt.no_eval:
print("\n")
print("EVALUATING")
if not opt.no_train and not opt.no_val:
epoch = opt.n_epochs
else:
epoch = opt.begin_epoch - 1
eval_path = opt.result_path + '/' + "results" + '_' + str(epoch) + '.txt'
print("File:", eval_path)
prediction_file = os.path.join(opt.result_path, 'val_{}.json'.format(val_json_name))
subset = "validation"
epoch, accuracy, error = eval_hmdb51.eval_hmdb51(eval_path, opt.annotation_path, prediction_file, subset, opt.top_k, epoch)
print("Results for epoch ", epoch, "are: acc:", accuracy, "err@", opt.top_k, ":", error)
| 30.374026 | 162 | 0.616384 |
4f64c931d60322b9c5c32a84b96bf26e55e8bdab | 889 | py | Python | Join/spatial_joins.py | monocilindro/qgis-earthengine-examples | 82aea8926d34ed3f4ad4a4a345ddbd225819d28f | ["MIT"] | 646 | 2019-12-03T06:09:03.000Z | 2022-03-28T03:37:08.000Z | Join/spatial_joins.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | ["MIT"] | 10 | 2019-12-30T03:42:44.000Z | 2021-05-22T07:34:07.000Z | Join/spatial_joins.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | ["MIT"] | 219 | 2019-12-06T02:20:53.000Z | 2022-03-30T15:14:27.000Z | import ee
from ee_plugin import Map
# Load a primary 'collection': protected areas (Yosemite National Park).
primary = ee.FeatureCollection("WCMC/WDPA/current/polygons") \
.filter(ee.Filter.eq('NAME', 'Yosemite National Park'))
# Load a secondary 'collection': power plants.
powerPlants = ee.FeatureCollection('WRI/GPPD/power_plants')
# Define a spatial filter, with distance 100 km.
distFilter = ee.Filter.withinDistance(**{
'distance': 100000,
'leftField': '.geo',
'rightField': '.geo',
'maxError': 10
})
# Define a saveAll join.
distSaveAll = ee.Join.saveAll(**{
'matchesKey': 'points',
'measureKey': 'distance'
})
# Apply the join.
spatialJoined = distSaveAll.apply(primary, powerPlants, distFilter)
# Print the result.
# print(spatialJoined.getInfo())
Map.centerObject(spatialJoined, 10)
Map.addLayer(ee.Image().paint(spatialJoined, 1, 3), {}, 'Spatial Joined')
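# Editor's note: illustrative follow-up (not part of the original example).
# Each joined feature stores its matching power plants under the 'points'
# property configured in the saveAll join above, e.g.:
#   first = ee.Feature(spatialJoined.first())
#   print(first.get('points').getInfo())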
| 27.78125 | 73 | 0.72216 |
4f62fd84fdb98ec9b8bd3fc21367d2dba7ae21cd | 9,648 | py | Python | venv/Lib/site-packages/nbdime/tests/test_git_diffdriver.py | PeerHerholz/guideline_jupyter_book | ce445e4be0d53370b67708a22550565b90d71ac6 | ["BSD-3-Clause"] | 2 | 2021-02-16T16:17:07.000Z | 2021-11-08T20:27:13.000Z | venv/Lib/site-packages/nbdime/tests/test_git_diffdriver.py | PeerHerholz/guideline_jupyter_book | ce445e4be0d53370b67708a22550565b90d71ac6 | ["BSD-3-Clause"] | null | null | null | venv/Lib/site-packages/nbdime/tests/test_git_diffdriver.py | PeerHerholz/guideline_jupyter_book | ce445e4be0d53370b67708a22550565b90d71ac6 | ["BSD-3-Clause"] | 4 | 2020-11-14T17:05:36.000Z | 2020-11-16T18:44:54.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import unicode_literals
import io
import os
from os.path import join as pjoin
import sys
import mock
import pytest
from tornado import ioloop
from nbdime.vcs.git.diffdriver import main as gdd_main
from nbdime.prettyprint import file_timestamp
from nbdime.utils import locate_gitattributes
from .utils import WEB_TEST_TIMEOUT, call
# Expected output includes coloring characters
expected_output = """nbdiff {0} {1}
--- {0} {2}
+++ {1} {3}
## modified /cells/0/outputs/0/data/text/plain:
- 6
+ 3
## modified /cells/0/source:
@@ -1,3 +1,3 @@
-def foe(x, y):
+def foo(x, y):
return x + y
-foe(3, 2)
+foo(1, 2)
## modified /cells/1/source:
@@ -1,3 +1,3 @@
-def foo(x, y):
+def foe(x, y):
return x * y
-foo(1, 2)
+foe(1, 2)
"""
expected_source_only = """nbdiff {0} {1}
--- {0} {2}
+++ {1} {3}
## modified /cells/0/source:
@@ -1,3 +1,3 @@
-def foe(x, y):
+def foo(x, y):
return x + y
-foe(3, 2)
+foo(1, 2)
## modified /cells/1/source:
@@ -1,3 +1,3 @@
-def foo(x, y):
+def foe(x, y):
return x * y
-foo(1, 2)
+foe(1, 2)
"""
expected_no_filter = """nbdiff {0} {1}
--- {0} {2}
+++ {1} {3}
## inserted before /cells/2:
+ code cell:
+ execution_count: 2
+ metadata (known keys):
+ collapsed: False
+ source:
+ x
## deleted /cells/3:
- code cell:
- execution_count: 2
- metadata (known keys):
- collapsed: False
- source:
- x
## inserted before /cells/5:
+ code cell:
+ metadata (known keys):
+ collapsed: False
+ source:
+ x
## deleted /cells/6:
- code cell:
- metadata (known keys):
- collapsed: False
- source:
- x
"""
expected_strip_output_filter = """nbdiff {0} {1}
--- {0} {2}
+++ {1} {3}
## inserted before /cells/2:
+ code cell:
+ execution_count: 2
+ metadata (known keys):
+ collapsed: False
+ source:
+ x
## deleted /cells/3:
- code cell:
- execution_count: 2
- metadata (known keys):
- collapsed: False
- source:
- x
- outputs:
- output 0:
- output_type: execute_result
- execution_count: 2
- data:
- text/plain: 3
## replaced (type changed from int to NoneType) /cells/5/execution_count:
- 4
+ None
## deleted /cells/5/outputs/0:
- output:
- output_type: execute_result
- execution_count: 4
- data:
- text/plain: 5
## replaced (type changed from NoneType to int) /cells/6/execution_count:
- None
+ 4
"""
expected_helper_filter = """nbdiff {0} {1}
--- {0} {2}
+++ {1} {3}
## inserted before /cells/2:
+ code cell:
+ execution_count: 2
+ metadata (known keys):
+ collapsed: False
+ source:
+ x
## deleted /cells/3:
- code cell:
- execution_count: 2
- metadata (known keys):
- collapsed: False
- source:
- x
## inserted before /cells/5:
+ code cell:
+ metadata (known keys):
+ collapsed: False
+ source:
+ x
## inserted before /cells/6:
+ raw cell:
+ source:
+ nbdime test filter marker
## deleted /cells/6:
- code cell:
- metadata (known keys):
- collapsed: False
- source:
- x
"""
def test_git_diff_driver(filespath, capsys, needs_git, reset_notebook_diff):
# Simulate a call from `git diff` to check basic driver functionality
fn1 = pjoin(filespath, 'foo--1.ipynb')
fn2 = pjoin(filespath, 'foo--2.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--no-color',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_output.format(fn1, fn2, t1, t2)
def test_git_diff_driver_flags(filespath, capsys, needs_git, reset_notebook_diff):
# Simulate a call from `git diff` to check basic driver functionality
fn1 = pjoin(filespath, 'foo--1.ipynb')
fn2 = pjoin(filespath, 'foo--2.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff', '-s',
'--no-color',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_source_only.format(fn1, fn2, t1, t2)
def test_git_diff_driver_ignore_flags(filespath, capsys, needs_git, reset_notebook_diff):
# Simulate a call from `git diff` to check basic driver functionality
fn1 = pjoin(filespath, 'foo--1.ipynb')
fn2 = pjoin(filespath, 'foo--2.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--no-color',
'-O',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_source_only.format(fn1, fn2, t1, t2)
def _config_filter_driver(name, capsys):
path = os.path.abspath(pjoin(os.path.dirname(__file__), 'filters', '%s.py' % name))
base_cmd = '%s %s' % (sys.executable, path)
gitattr = locate_gitattributes()
with io.open(gitattr, 'a', encoding="utf8") as f:
f.write(u'\n*.ipynb\tfilter=%s\n' % (name,))
with capsys.disabled():
call('git config --local --add filter.%s.clean "%s clean"' % (name, base_cmd))
call('git config --local --add filter.%s.smudge "%s smudge"' % (name, base_cmd))
def test_git_diff_driver_noop_filter(git_repo, filespath, capsys, reset_notebook_diff):
_config_filter_driver('noop', capsys)
fn1 = pjoin(git_repo, 'diff.ipynb')
fn2 = pjoin(filespath, 'src-and-output--1.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--use-filter',
'--no-color',
'-O',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_no_filter.format(fn1, fn2, t1, t2)
def test_git_diff_driver_strip_outputs_filter(git_repo, filespath, capsys, reset_notebook_diff):
_config_filter_driver('strip_outputs', capsys)
fn1 = pjoin(git_repo, 'diff.ipynb')
fn2 = pjoin(filespath, 'src-and-output--1.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--use-filter',
'--no-color',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_strip_output_filter.format(fn1, fn2, t1, t2)
def test_git_diff_driver_add_helper_filter(git_repo, filespath, capsys, reset_notebook_diff):
_config_filter_driver('add_helper', capsys)
fn1 = pjoin(git_repo, 'diff.ipynb')
fn2 = pjoin(filespath, 'src-and-output--1.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--use-filter',
'--no-color',
'-O',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_helper_filter.format(fn1, fn2, t1, t2)
def test_git_diff_driver_no_filter_without_flag(git_repo, filespath, capsys, reset_notebook_diff):
_config_filter_driver('add_helper', capsys)
fn1 = pjoin(git_repo, 'diff.ipynb')
fn2 = pjoin(filespath, 'src-and-output--1.ipynb')
t1 = file_timestamp(fn1)
t2 = file_timestamp(fn2)
mock_argv = [
'/mock/path/git-nbdiffdriver', 'diff',
'--no-color',
'-O',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644']
with mock.patch('sys.argv', mock_argv):
r = gdd_main()
assert r == 0
cap_out = capsys.readouterr()[0]
assert cap_out == expected_no_filter.format(fn1, fn2, t1, t2)
@pytest.mark.timeout(timeout=WEB_TEST_TIMEOUT)
def test_git_web_diff_driver(filespath, unique_port, reset_log, ioloop_patch, reset_notebook_diff):
# Simulate a call from `git diff` to check basic driver functionality
fn1 = os.path.join(filespath, 'foo--1.ipynb')
fn2 = os.path.join(filespath, 'foo--2.ipynb')
loop = ioloop.IOLoop.current()
loop.call_later(0, loop.stop)
mock_argv = [
'git-nbdiffdriver', 'webdiff',
fn1,
fn1, 'invalid_mock_checksum', '100644',
fn2, 'invalid_mock_checksum', '100644',
'--browser=disabled', '--port=%i' % unique_port]
with mock.patch('sys.argv', mock_argv):
# This simply checks that the function returns 0,
# but assumes that the function is routed to the web
# diff entrypoint
r = gdd_main()
assert r == 0
| 25.389474 | 99 | 0.613599 |
4f65e433a562abd56b15b365e1521908fdf5be6a | 9,339 | py | Python | tensorflow/python/framework/sparse_tensor.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | null | null | null | tensorflow/python/framework/sparse_tensor.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | null | null | null | tensorflow/python/framework/sparse_tensor.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | 1 | 2018-06-05T05:02:16.000Z | 2018-06-05T05:02:16.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
# pylint: disable=protected-access
_TensorLike = ops._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
class SparseTensor(_TensorLike):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the dense_shape of the sparse tensor. Takes a list indicating the number of
elements in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse_reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not (isinstance(sparse_tensor_value, SparseTensor) or
isinstance(sparse_tensor_value, SparseTensorValue)):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
Returns:
A `SparseTensor`.
"""
with ops.name_scope(None, "SparseTensor",
[indices, values, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(
values, name="values", as_ref=True)
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.get_shape().with_rank(2)
values_shape = values.get_shape().with_rank(1)
dense_shape_shape = dense_shape.get_shape().with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape[0].merge_with(values_shape[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape[1].merge_with(dense_shape_shape[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
SparseTensorValue = collections.namedtuple(
"SparseTensorValue", ["indices", "values", "dense_shape"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
"Sparse dtype: requested = %s, actual = %s" % (
dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(
value, dtype=dtype, name=name)
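# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch (not part of the original module).
# It only uses names defined above and is never called here; the literal
# values mirror the 3x4 example from the SparseTensor docstring.
def _sparse_tensor_usage_example():
  """Builds a small SparseTensor and round-trips a SparseTensorValue."""
  st = SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
  value = SparseTensorValue(indices=[[0, 0]], values=[7], dense_shape=[3, 4])
  # convert_to_tensor_or_sparse_tensor keeps sparse inputs sparse; dense inputs
  # would go through the regular Tensor conversion path instead.
  converted = convert_to_tensor_or_sparse_tensor(value)
  return st, converted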
| 36.480469 | 80 | 0.69879 |
4f63483a68872455a963d0f99e9ce0a35fd6eb6a | 13,733 | py | Python | references/optical_flow/train.py | sallysyw/vision | bf073e785528970e6a1605e411e4fc382d686dc7 | ["BSD-3-Clause"] | 12,063 | 2017-01-18T19:58:38.000Z | 2022-03-31T23:08:44.000Z | references/optical_flow/train.py | zhiqwang/vision | 578c1546c328c68e601bfd2e2309ed175ee343d9 | ["BSD-3-Clause"] | 4,673 | 2017-01-18T21:30:03.000Z | 2022-03-31T20:58:33.000Z | references/optical_flow/train.py | zhiqwang/vision | 578c1546c328c68e601bfd2e2309ed175ee343d9 | ["BSD-3-Clause"] | 7,132 | 2017-01-18T18:12:23.000Z | 2022-03-31T21:19:10.000Z | import argparse
import warnings
from math import ceil
from pathlib import Path
import torch
import torchvision.models.optical_flow
import utils
from presets import OpticalFlowPresetTrain, OpticalFlowPresetEval
from torchvision.datasets import KittiFlow, FlyingChairs, FlyingThings3D, Sintel, HD1K
try:
from torchvision.prototype import models as PM
from torchvision.prototype.models import optical_flow as PMOF
except ImportError:
PM = PMOF = None
def get_train_dataset(stage, dataset_root):
if stage == "chairs":
transforms = OpticalFlowPresetTrain(crop_size=(368, 496), min_scale=0.1, max_scale=1.0, do_flip=True)
return FlyingChairs(root=dataset_root, split="train", transforms=transforms)
elif stage == "things":
transforms = OpticalFlowPresetTrain(crop_size=(400, 720), min_scale=-0.4, max_scale=0.8, do_flip=True)
return FlyingThings3D(root=dataset_root, split="train", pass_name="both", transforms=transforms)
elif stage == "sintel_SKH": # S + K + H as from paper
crop_size = (368, 768)
transforms = OpticalFlowPresetTrain(crop_size=crop_size, min_scale=-0.2, max_scale=0.6, do_flip=True)
things_clean = FlyingThings3D(root=dataset_root, split="train", pass_name="clean", transforms=transforms)
sintel = Sintel(root=dataset_root, split="train", pass_name="both", transforms=transforms)
kitti_transforms = OpticalFlowPresetTrain(crop_size=crop_size, min_scale=-0.3, max_scale=0.5, do_flip=True)
kitti = KittiFlow(root=dataset_root, split="train", transforms=kitti_transforms)
hd1k_transforms = OpticalFlowPresetTrain(crop_size=crop_size, min_scale=-0.5, max_scale=0.2, do_flip=True)
hd1k = HD1K(root=dataset_root, split="train", transforms=hd1k_transforms)
# As future improvement, we could probably be using a distributed sampler here
# The distribution is S(.71), T(.135), K(.135), H(.02)
return 100 * sintel + 200 * kitti + 5 * hd1k + things_clean
elif stage == "kitti":
transforms = OpticalFlowPresetTrain(
# resize and crop params
crop_size=(288, 960),
min_scale=-0.2,
max_scale=0.4,
stretch_prob=0,
# flip params
do_flip=False,
# jitter params
brightness=0.3,
contrast=0.3,
saturation=0.3,
hue=0.3 / 3.14,
asymmetric_jitter_prob=0,
)
return KittiFlow(root=dataset_root, split="train", transforms=transforms)
else:
raise ValueError(f"Unknown stage {stage}")
@torch.no_grad()
def _validate(model, args, val_dataset, *, padder_mode, num_flow_updates=None, batch_size=None, header=None):
"""Helper function to compute various metrics (epe, etc.) for a model on a given dataset.
We process as many samples as possible with ddp, and process the rest on a single worker.
"""
batch_size = batch_size or args.batch_size
model.eval()
sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=sampler,
batch_size=batch_size,
pin_memory=True,
num_workers=args.num_workers,
)
num_flow_updates = num_flow_updates or args.num_flow_updates
def inner_loop(blob):
if blob[0].dim() == 3:
# input is not batched so we add an extra dim for consistency
blob = [x[None, :, :, :] if x is not None else None for x in blob]
image1, image2, flow_gt = blob[:3]
valid_flow_mask = None if len(blob) == 3 else blob[-1]
image1, image2 = image1.cuda(), image2.cuda()
padder = utils.InputPadder(image1.shape, mode=padder_mode)
image1, image2 = padder.pad(image1, image2)
flow_predictions = model(image1, image2, num_flow_updates=num_flow_updates)
flow_pred = flow_predictions[-1]
flow_pred = padder.unpad(flow_pred).cpu()
metrics, num_pixels_tot = utils.compute_metrics(flow_pred, flow_gt, valid_flow_mask)
# We compute per-pixel epe (epe) and per-image epe (called f1-epe in RAFT paper).
# per-pixel epe: average epe of all pixels of all images
# per-image epe: average epe on each image independently, then average over images
for name in ("epe", "1px", "3px", "5px", "f1"): # f1 is called f1-all in paper
logger.meters[name].update(metrics[name], n=num_pixels_tot)
logger.meters["per_image_epe"].update(metrics["epe"], n=batch_size)
logger = utils.MetricLogger()
for meter_name in ("epe", "1px", "3px", "5px", "per_image_epe", "f1"):
logger.add_meter(meter_name, fmt="{global_avg:.4f}")
num_processed_samples = 0
for blob in logger.log_every(val_loader, header=header, print_freq=None):
inner_loop(blob)
num_processed_samples += blob[0].shape[0] # batch size
num_processed_samples = utils.reduce_across_processes(num_processed_samples)
print(
f"Batch-processed {num_processed_samples} / {len(val_dataset)} samples. "
"Going to process the remaining samples individually, if any."
)
if args.rank == 0: # we only need to process the rest on a single worker
for i in range(num_processed_samples, len(val_dataset)):
inner_loop(val_dataset[i])
logger.synchronize_between_processes()
print(header, logger)
def validate(model, args):
val_datasets = args.val_dataset or []
if args.weights:
weights = PM.get_weight(args.weights)
preprocessing = weights.transforms()
else:
preprocessing = OpticalFlowPresetEval()
for name in val_datasets:
if name == "kitti":
# Kitti has different image sizes so we need to individually pad them, we can't batch.
# see comment in InputPadder
if args.batch_size != 1 and args.rank == 0:
warnings.warn(
f"Batch-size={args.batch_size} was passed. For technical reasons, evaluating on Kitti can only be done with a batch-size of 1."
)
val_dataset = KittiFlow(root=args.dataset_root, split="train", transforms=preprocessing)
_validate(
model, args, val_dataset, num_flow_updates=24, padder_mode="kitti", header="Kitti val", batch_size=1
)
elif name == "sintel":
for pass_name in ("clean", "final"):
val_dataset = Sintel(
root=args.dataset_root, split="train", pass_name=pass_name, transforms=preprocessing
)
_validate(
model,
args,
val_dataset,
num_flow_updates=32,
padder_mode="sintel",
header=f"Sintel val {pass_name}",
)
else:
warnings.warn(f"Can't validate on {val_dataset}, skipping.")
def train_one_epoch(model, optimizer, scheduler, train_loader, logger, args):
for data_blob in logger.log_every(train_loader):
optimizer.zero_grad()
image1, image2, flow_gt, valid_flow_mask = (x.cuda() for x in data_blob)
flow_predictions = model(image1, image2, num_flow_updates=args.num_flow_updates)
loss = utils.sequence_loss(flow_predictions, flow_gt, valid_flow_mask, args.gamma)
metrics, _ = utils.compute_metrics(flow_predictions[-1], flow_gt, valid_flow_mask)
metrics.pop("f1")
logger.update(loss=loss, **metrics)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
scheduler.step()
def main(args):
utils.setup_ddp(args)
if args.weights:
model = PMOF.__dict__[args.model](weights=args.weights)
else:
model = torchvision.models.optical_flow.__dict__[args.model](pretrained=args.pretrained)
model = model.to(args.local_rank)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
if args.resume is not None:
d = torch.load(args.resume, map_location="cpu")
model.load_state_dict(d, strict=True)
if args.train_dataset is None:
# Set deterministic CUDNN algorithms, since they can affect epe a fair bit.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
validate(model, args)
return
print(f"Parameter Count: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
torch.backends.cudnn.benchmark = True
model.train()
if args.freeze_batch_norm:
utils.freeze_batch_norm(model.module)
train_dataset = get_train_dataset(args.train_dataset, args.dataset_root)
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True, drop_last=True)
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=sampler,
batch_size=args.batch_size,
pin_memory=True,
num_workers=args.num_workers,
)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, eps=args.adamw_eps)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer=optimizer,
max_lr=args.lr,
epochs=args.epochs,
steps_per_epoch=ceil(len(train_dataset) / (args.world_size * args.batch_size)),
pct_start=0.05,
cycle_momentum=False,
anneal_strategy="linear",
)
logger = utils.MetricLogger()
done = False
for current_epoch in range(args.epochs):
print(f"EPOCH {current_epoch}")
sampler.set_epoch(current_epoch) # needed, otherwise the data loading order would be the same for all epochs
train_one_epoch(
model=model,
optimizer=optimizer,
scheduler=scheduler,
train_loader=train_loader,
logger=logger,
args=args,
)
# Note: we don't sync the SmoothedValues across processes, so the printed metrics are just those of rank 0
print(f"Epoch {current_epoch} done. ", logger)
if args.rank == 0:
# TODO: Also save the optimizer and scheduler
torch.save(model.state_dict(), Path(args.output_dir) / f"{args.name}_{current_epoch}.pth")
torch.save(model.state_dict(), Path(args.output_dir) / f"{args.name}.pth")
if current_epoch % args.val_freq == 0 or done:
validate(model, args)
model.train()
if args.freeze_batch_norm:
utils.freeze_batch_norm(model.module)
def get_args_parser(add_help=True):
parser = argparse.ArgumentParser(add_help=add_help, description="Train or evaluate an optical-flow model.")
parser.add_argument(
"--name",
default="raft",
type=str,
help="The name of the experiment - determines the name of the files where weights are saved.",
)
parser.add_argument(
"--output-dir", default="checkpoints", type=str, help="Output dir where checkpoints will be stored."
)
parser.add_argument(
"--resume",
type=str,
help="A path to previously saved weights. Used to re-start training from, or evaluate a pre-saved model.",
)
parser.add_argument("--num-workers", type=int, default=12, help="Number of workers for the data loading part.")
parser.add_argument(
"--train-dataset",
type=str,
help="The dataset to use for training. If not passed, only validation is performed (and you probably want to pass --resume).",
)
parser.add_argument("--val-dataset", type=str, nargs="+", help="The dataset(s) to use for validation.")
parser.add_argument("--val-freq", type=int, default=2, help="Validate every X epochs")
parser.add_argument("--epochs", type=int, default=20, help="The total number of epochs to train.")
parser.add_argument("--batch-size", type=int, default=2)
parser.add_argument("--lr", type=float, default=0.00002, help="Learning rate for AdamW optimizer")
parser.add_argument("--weight-decay", type=float, default=0.00005, help="Weight decay for AdamW optimizer")
parser.add_argument("--adamw-eps", type=float, default=1e-8, help="eps value for AdamW optimizer")
parser.add_argument(
"--freeze-batch-norm", action="store_true", help="Set BatchNorm modules of the model in eval mode."
)
parser.add_argument(
"--model", type=str, default="raft_large", help="The name of the model to use - either raft_large or raft_small"
)
# TODO: resume, pretrained, and weights should be in an exclusive arg group
parser.add_argument("--pretrained", action="store_true", help="Whether to use pretrained weights")
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load.")
parser.add_argument(
"--num_flow_updates",
type=int,
default=12,
help="number of updates (or 'iters') in the update operator of the model.",
)
parser.add_argument("--gamma", type=float, default=0.8, help="exponential weighting for loss. Must be < 1.")
parser.add_argument("--dist-url", default="env://", help="URL used to set up distributed training")
parser.add_argument(
"--dataset-root",
help="Root folder where the datasets are stored. Will be passed as the 'root' parameter of the datasets.",
required=True,
)
return parser
if __name__ == "__main__":
args = get_args_parser().parse_args()
Path(args.output_dir).mkdir(exist_ok=True)
main(args)
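# ---------------------------------------------------------------------------
# Editor's note: example invocations (assumptions, not part of the original
# file).  A torchrun-style launcher is assumed so that setup_ddp() can read
# the usual env:// distributed variables:
#   torchrun --nproc_per_node=8 train.py --dataset-root /data/flow \
#       --train-dataset chairs --val-dataset sintel --name raft_chairs
#   torchrun --nproc_per_node=1 train.py --dataset-root /data/flow \
#       --val-dataset kitti --batch-size 1 --resume checkpoints/raft.pth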
| 39.690751 | 147 | 0.658487 |
4f66377d14c5266724ecbe92e1019570a63166c5 | 2,545 | py | Python | demos/web8/server.py | thomas-brandeho/rpyc | 98f8f5cd0b4ad11c2fbfa3092031d7c46b5b447d | ["MIT"] | 238 | 2020-09-02T22:26:44.000Z | 2022-03-31T17:49:55.000Z | demos/web8/server.py | thomas-brandeho/rpyc | 98f8f5cd0b4ad11c2fbfa3092031d7c46b5b447d | ["MIT"] | 87 | 2020-09-02T20:10:35.000Z | 2022-03-16T16:49:47.000Z | demos/web8/server.py | thomas-brandeho/rpyc | 98f8f5cd0b4ad11c2fbfa3092031d7c46b5b447d | ["MIT"] | 40 | 2020-09-13T19:53:51.000Z | 2022-03-21T09:17:48.000Z | import rpyc
from rpyc.utils.server import ThreadedServer
import time
import threading
class Web8Service(rpyc.Service):
def on_connect(self, conn):
self._conn = conn
def exposed_get_page(self, gtk, content, page):
self.gtk = gtk
self.content = content
page = page.replace(" ", "_").lower()
pagefunc = getattr(self, f"page_{page}", None)
if pagefunc:
pagefunc()
else:
lbl1 = self.gtk.Label(f"Page {page!r} does not exist")
lbl1.show()
self.content.pack_start(lbl1)
def page_main(self):
counter = [0]
lbl1 = self.gtk.Label("Hello mate, this is the main page")
lbl1.show()
self.content.pack_start(lbl1)
def on_btn1_clicked(src):
counter[0] += 1
lbl2.set_text(f"You have clicked the button {counter[0]} times")
btn1 = self.gtk.Button("Add 1")
btn1.connect("clicked", on_btn1_clicked)
btn1.show()
self.content.pack_start(btn1)
lbl2 = self.gtk.Label("You have clicked the button 0 times")
lbl2.show()
self.content.pack_start(lbl2)
def on_btn2_clicked(src):
self._conn.root.navigate("/hello_world")
btn2 = self.gtk.Button("Go to the 'hello world' page")
btn2.connect("clicked", on_btn2_clicked)
btn2.show()
self.content.pack_start(btn2)
active = [False]
def bg_timer_thread():
while active[0]:
rpyc.async_(lbl3.set_text)(f"Server time is: {time.ctime()}")
time.sleep(1)
bg_thread = [None]
def on_btn3_clicked(src):
if btn3.get_label() == "Start timer":
bg_thread[0] = threading.Thread(target=bg_timer_thread)
active[0] = True
bg_thread[0].start()
btn3.set_label("Stop timer")
else:
active[0] = False
bg_thread[0].join()
btn3.set_label("Start timer")
btn3 = self.gtk.Button("Start timer")
btn3.connect("clicked", on_btn3_clicked)
btn3.show()
self.content.pack_start(btn3)
lbl3 = self.gtk.Label("Server time is: ?")
lbl3.show()
self.content.pack_start(lbl3)
def page_hello_world(self):
lbl = self.gtk.Label("Hello world!")
lbl.show()
self.content.pack_start(lbl)
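# ---------------------------------------------------------------------------
# Editor's note: minimal client-side sketch (not part of the original demo).
# It assumes PyGTK 2.x is available and that the client exposes a `navigate`
# method, which the page handlers above reach through self._conn.root.navigate().
def _example_client(page="Main"):
    import gtk  # PyGTK runtime is an assumption of this sketch

    class _ClientService(rpyc.Service):
        def exposed_navigate(self, path):
            # A real client would rebuild the page; here we only log the request.
            print("navigate requested:", path)

    conn = rpyc.connect("localhost", 18833, service=_ClientService)
    rpyc.BgServingThread(conn)  # serve server->client GTK callbacks in the background
    window = gtk.Window()
    content = gtk.VBox()
    window.add(content)
    conn.root.get_page(gtk, content, page)
    window.show_all()
    gtk.main()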
if __name__ == "__main__":
t = ThreadedServer(Web8Service, port=18833)
t.start()
| 28.595506 | 77 | 0.565815 |
4f617c0995bb7a9ea58d5485e75d342634754378 | 1,051 | py | Python | setup.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | ["MIT"] | 2 | 2022-03-28T17:27:21.000Z | 2022-03-29T22:27:15.000Z | setup.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | ["MIT"] | null | null | null | setup.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | ["MIT"] | 1 | 2022-03-28T14:47:53.000Z | 2022-03-28T14:47:53.000Z | from locale import currency
import pathlib
from setuptools import setup
curr_dir_path = pathlib.Path(__file__).parent
readme_content = (curr_dir_path/"README.md").read_text()
setup(
name="sciencer",
version="0.1.3",
description="A smarter way to find new articles",
long_description=readme_content,
long_description_content_type="text/markdown",
url="https://github.com/SciencerIO/sciencer-toolkit",
author="SciencerIO",
author_email="diogo.rato.11@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
"Topic :: Utilities"
],
packages=["sciencer", "sciencer.collectors", "sciencer.expanders",
"sciencer.providers", "sciencer.filters", "sciencer.utils"],
include_package_data=True,
install_requires=["requests"],
)
| 31.848485 | 74 | 0.671741 |
4f6090b9e6ea3a4915a1157e3cbdc0d329c6a415 | 2,794 | py | Python | max_generator.py | nikos-daniilidis/max-mle | a2e22b2daea41a9575b8d626bd50bbfa0d84a95e | ["MIT"] | null | null | null | max_generator.py | nikos-daniilidis/max-mle | a2e22b2daea41a9575b8d626bd50bbfa0d84a95e | ["MIT"] | null | null | null | max_generator.py | nikos-daniilidis/max-mle | a2e22b2daea41a9575b8d626bd50bbfa0d84a95e | ["MIT"] | null | null | null | from __future__ import print_function
from warnings import warn
import numpy as np
class MaxSelectGenerator(object):
def __init__(self, locs, scales, base_event="normal"):
"""
Initialize a sequence of event generators where each event follows some base distribution
with loc and scale parameters.
:param locs: list of float. The mean parameters of the event generators.
:param scales: list of float. The standard deviation parameters of the event generators.
:param base_event: str.
"""
assert isinstance(locs, list)
assert isinstance(scales, list)
assert base_event in ["normal"]
assert len(locs) == len(scales), "Length of locs must equal length of scales."
self._num_generators = len(locs)
self._locs = locs
self._scales = scales
if base_event == "normal":
def _gen(n):
return np.random.normal(loc=0, scale=1., size=n)
self._gen = _gen
else:
warn("%s base_event is not supported. Reverting to uniform." % base_event)
def _gen(n):
return np.random.uniform(low=0., high=1., size=n)
self._gen = _gen
def get_all_events(self, n):
"""
Generate n events for all the streams. For efficiency, generate n*_num_generators events
following an underlying distribution, and offset/scale accordingly for each stream.
:param n: int. The number of events.
:return: numpy array. The events. Rows are event realizations, columns are generator streams.
"""
x = np.reshape(self._gen(n*self._num_generators), newshape=(n, self._num_generators))
v = np.array(self._scales)
m = np.ones(shape=(n, self._num_generators)) * np.array(self._locs)
return x * v + m
def select_events(self, n):
"""
Generate n events using get_all_events. For each event, select the maximum.
:param n: int. The number of events.
:return: tuple (numpy array, numpy array). The scores and stream indices for the maxima.
"""
es = self.get_all_events(n)
ixs = np.argmax(es, axis=1)
return es[np.arange(0, len(ixs), 1), ixs], ixs
def locs(self):
return self._locs
def scales(self):
return self._scales
if __name__ == "__main__":
# check that locs work as expected
#g = MaxSelectGenerator(locs=[0., 1., 2.], scales=[0.01, 0.01, 0.01])
#es = g.get_all_events(10)
#print(es)
# check that scales work as expected
g = MaxSelectGenerator(locs=[1., 1., 1.], scales=[0.001, 0.01, 0.1])
# es = g.get_all_events(10)
# print(es)
es, ixs = g.select_events(10)
print(ixs)
print(es)
    print(es[ixs==0])
 | 37.253333 | 101 | 0.620258 |
4f63fca95cd4100538b1077328ce3654b8da801f | 35,825 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_agent_pools_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_agent_pools_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_agent_pools_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations(object):
"""AgentPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AgentPoolListResult"]
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster. The operation returns properties
of each agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_12_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AgentPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
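    # Illustrative usage sketch (not part of the generated client). Assumes an
    # already configured ContainerServiceClient named `client`; the resource
    # group and cluster names are hypothetical placeholders. The returned
    # ItemPaged is lazy, so pages are only fetched while iterating:
    #
    #     for agent_pool in client.agent_pools.list("my-rg", "my-aks-cluster"):
    #         print(agent_pool.name, agent_pool.count)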
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AgentPool"
"""Gets the agent pool.
Gets the details of the agent pool by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_12_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
parameters, # type: "_models.AgentPool"
**kwargs # type: Any
):
# type: (...) -> "_models.AgentPool"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AgentPool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
parameters, # type: "_models.AgentPool"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AgentPool"]
"""Creates or updates an agent pool.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: Parameters supplied to the Create or Update an agent pool operation.
:type parameters: ~azure.mgmt.containerservice.v2020_12_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_12_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
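    # Illustrative usage sketch (not part of the generated client). Assumes a
    # configured ContainerServiceClient named `client`; names and VM size are
    # hypothetical placeholders. begin_* methods return an LROPoller; call
    # .result() to block until the long-running operation completes:
    #
    #     poller = client.agent_pools.begin_create_or_update(
    #         "my-rg", "my-aks-cluster", "nodepool1",
    #         parameters=_models.AgentPool(count=3, vm_size="Standard_DS2_v2", mode="User"),
    #     )
    #     agent_pool = poller.result()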
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an agent pool.
Deletes the agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
def get_upgrade_profile(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AgentPoolUpgradeProfile"
"""Gets upgrade profile for an agent pool.
Gets the details of the upgrade profile for an agent pool with a specified resource group and
managed cluster name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_12_01.models.AgentPoolUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_upgrade_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore
def get_available_agent_pool_versions(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AgentPoolAvailableVersions"
"""Gets a list of supported versions for the specified agent pool.
Gets a list of supported versions for the specified agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_12_01.models.AgentPoolAvailableVersions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_available_agent_pool_versions.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore
def _upgrade_node_image_version_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.AgentPool"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AgentPool"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self._upgrade_node_image_version_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_upgrade_node_image_version_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
def begin_upgrade_node_image_version(
self,
resource_group_name, # type: str
resource_name, # type: str
agent_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AgentPool"]
"""Upgrade node image version of an agent pool to the latest.
Upgrade node image version of an agent pool to the latest.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_12_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._upgrade_node_image_version_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'agentPoolName': self._serialize.url("agent_pool_name", agent_pool_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_upgrade_node_image_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
| 50.887784 | 263 | 0.665652 |
4f659ef0f1710c71e1dbb1e65f87ac12e6639b3c | 4,153 | py | Python | tests/unit/test_rolling.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_rolling.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_rolling.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | '''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_rolling.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_rolling.py
'''
import pandas as pd
import unittest
import cudf
import os
from gquant.dataframe_flow.task import load_modules
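# NOTE: the MODULEPATH environment variable must point at the directory that
# contains `rapids_modules`; if it is unset, os.getenv returns None and the
# string concatenation below raises a TypeError.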
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import Rolling, Ewm
from .utils import make_orderer, error_function
import numpy as np
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestRolling(unittest.TestCase):
def setUp(self):
array_len = int(1e4)
self.average_window = 300
random_array = np.random.rand(array_len)
df = cudf.DataFrame()
df['in'] = random_array
pdf = pd.DataFrame()
pdf['in'] = random_array
# ignore importlib warnings.
self._pandas_data = pdf
self._cudf_data = df
def tearDown(self):
pass
@ordered
def test_rolling_functions(self):
'''Test rolling window method'''
gpu_result = Rolling(self.average_window, self._cudf_data['in']).mean()
cpu_result = self._pandas_data[
'in'].rolling(self.average_window).mean()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).max()
cpu_result = self._pandas_data['in'].rolling(self.average_window).max()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).min()
cpu_result = self._pandas_data['in'].rolling(self.average_window).min()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).sum()
cpu_result = self._pandas_data['in'].rolling(self.average_window).sum()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).std()
cpu_result = self._pandas_data['in'].rolling(self.average_window).std()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
gpu_result = Rolling(self.average_window, self._cudf_data['in']).var()
cpu_result = self._pandas_data['in'].rolling(self.average_window).var()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ewm_functions(self):
'''Test exponential moving average method'''
gpu_result = Ewm(self.average_window, self._cudf_data['in']).mean()
cpu_result = self._pandas_data[
'in'].ewm(span=self.average_window,
min_periods=self.average_window).mean()
err = error_function(cudf.Series(gpu_result, nan_as_null=False),
cpu_result)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
| 35.194915 | 79 | 0.633518 |
4f6001aa3f7c3e4f099018653918b407689b0e24 | 6,901 | py | Python | pywick/models/segmentation/denseaspp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | 2 | 2020-11-28T07:56:09.000Z | 2021-11-08T09:30:39.000Z | pywick/models/segmentation/denseaspp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | null | null | null | pywick/models/segmentation/denseaspp.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | null | null | null | # Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/models/denseaspp.py (License: Apache 2.0)
"""
Implementation of `DenseASPP for Semantic Segmentation in Street Scenes <http://openaccess.thecvf.com/content_cvpr_2018/papers/Yang_DenseASPP_for_Semantic_CVPR_2018_paper.pdf>`_
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pywick.models.segmentation.da_basenets.densenet import *
from pywick.models.segmentation.da_basenets.fcn import _FCNHead
__all__ = ['DenseASPP', 'DenseASPP_121', 'DenseASPP_161', 'DenseASPP_169', 'DenseASPP_201']
class DenseASPP(nn.Module):
def __init__(self, num_classes, pretrained=True, backbone='densenet161', aux=False, dilate_scale=8, **kwargs):
super(DenseASPP, self).__init__()
self.nclass = num_classes
self.aux = aux
self.dilate_scale = dilate_scale
if backbone == 'densenet121':
self.pretrained = dilated_densenet121(dilate_scale, pretrained=pretrained, **kwargs)
elif backbone == 'densenet161':
self.pretrained = dilated_densenet161(dilate_scale, pretrained=pretrained, **kwargs)
elif backbone == 'densenet169':
self.pretrained = dilated_densenet169(dilate_scale, pretrained=pretrained, **kwargs)
elif backbone == 'densenet201':
self.pretrained = dilated_densenet201(dilate_scale, pretrained=pretrained, **kwargs)
else:
raise RuntimeError('unknown backbone: {}'.format(backbone))
in_channels = self.pretrained.num_features
self.head = _DenseASPPHead(in_channels, num_classes, **kwargs)
if aux:
self.auxlayer = _FCNHead(in_channels, num_classes, **kwargs)
self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])
def forward(self, x):
size = x.size()[2:]
features = self.pretrained.features(x)
if self.dilate_scale > 8:
features = F.interpolate(features, scale_factor=2, mode='bilinear', align_corners=True)
outputs = []
x = self.head(features)
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(features)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return tuple(outputs)
else:
return outputs[0]
class _DenseASPPHead(nn.Module):
def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
super(_DenseASPPHead, self).__init__()
self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs)
self.block = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(in_channels + 5 * 64, nclass, 1)
)
def forward(self, x):
x = self.dense_aspp_block(x)
return self.block(x)
class _DenseASPPConv(nn.Sequential):
def __init__(self, in_channels, inter_channels, out_channels, atrous_rate,
drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPConv, self).__init__()
self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)),
self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu1', nn.ReLU(True)),
self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)),
self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu2', nn.ReLU(True)),
self.drop_rate = drop_rate
def forward(self, x):
features = super(_DenseASPPConv, self).forward(x)
if self.drop_rate > 0:
features = F.dropout(features, p=self.drop_rate, training=self.training)
return features
class _DenseASPPBlock(nn.Module):
def __init__(self, in_channels, inter_channels1, inter_channels2,
norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPBlock, self).__init__()
self.aspp_3 = _DenseASPPConv(in_channels, inter_channels1, inter_channels2, 3, 0.1,
norm_layer, norm_kwargs)
self.aspp_6 = _DenseASPPConv(in_channels + inter_channels2 * 1, inter_channels1, inter_channels2, 6, 0.1,
norm_layer, norm_kwargs)
self.aspp_12 = _DenseASPPConv(in_channels + inter_channels2 * 2, inter_channels1, inter_channels2, 12, 0.1,
norm_layer, norm_kwargs)
self.aspp_18 = _DenseASPPConv(in_channels + inter_channels2 * 3, inter_channels1, inter_channels2, 18, 0.1,
norm_layer, norm_kwargs)
self.aspp_24 = _DenseASPPConv(in_channels + inter_channels2 * 4, inter_channels1, inter_channels2, 24, 0.1,
norm_layer, norm_kwargs)
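    # forward() concatenates each branch's output back onto its input before the
    # next branch runs, so every aspp_* above expects inter_channels2 more input
    # channels than the previous one (DenseNet-style dense connectivity).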
def forward(self, x):
aspp3 = self.aspp_3(x)
x = torch.cat([aspp3, x], dim=1)
aspp6 = self.aspp_6(x)
x = torch.cat([aspp6, x], dim=1)
aspp12 = self.aspp_12(x)
x = torch.cat([aspp12, x], dim=1)
aspp18 = self.aspp_18(x)
x = torch.cat([aspp18, x], dim=1)
aspp24 = self.aspp_24(x)
x = torch.cat([aspp24, x], dim=1)
return x
def get_denseaspp(num_classes=1, backbone='densenet169', pretrained=True, **kwargs):
r"""DenseASPP
Parameters
----------
    num_classes : int, default 1
        Number of segmentation classes to predict.
    backbone : str, default 'densenet169'
        Backbone network; one of 'densenet121', 'densenet161', 'densenet169' or 'densenet201'.
    pretrained : bool, default True
        Whether to load an ImageNet-pretrained backbone network.
"""
return DenseASPP(num_classes=num_classes, pretrained=pretrained, backbone=backbone, **kwargs)
def DenseASPP_121(num_classes=1, **kwargs):
return get_denseaspp(num_classes=num_classes, backbone='densenet121', **kwargs)
def DenseASPP_161(num_classes=1, **kwargs):
return get_denseaspp(num_classes=num_classes, backbone='densenet161', **kwargs)
def DenseASPP_169(num_classes=1, **kwargs):
return get_denseaspp(num_classes=num_classes, backbone='densenet169', **kwargs)
def DenseASPP_201(num_classes=1, **kwargs):
return get_denseaspp(num_classes=num_classes, backbone='densenet201', **kwargs)
if __name__ == '__main__':
img = torch.randn(2, 3, 480, 480)
model = DenseASPP_121()
outputs = model(img)
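    # Illustrative shape check (assumes the pretrained backbone weights can be
    # loaded): with aux disabled the model returns a single tensor whose spatial
    # size matches the input, i.e. torch.Size([2, 1, 480, 480]) here.
    print(outputs.size())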
| 41.077381 | 177 | 0.663237 |