blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 220
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 257
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a0ba64b046817c1d4b87a37d70ac854c54c543fe
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_192/ch160_2020_06_19_20_23_54_764349.py
|
e8c31d19b6bb9456664ada3169ee602ac3e1ff52
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
import math

def max_bhaskara_error(degrees=range(0, 90)):
    """Return the largest absolute error of Bhaskara I's sine approximation.

    Compares math.sin (fed radians — the original compared sin of the raw
    degree value, which made the two formulas incomparable) against
    Bhaskara's rational approximation, which takes degrees, over *degrees*.
    """
    worst = 0.0
    for deg in degrees:
        exact = math.sin(math.radians(deg))
        bhaskara = (4 * deg * (180 - deg)) / (40500 - deg * (180 - deg))
        # Track the running maximum instead of calling max() on a single
        # float (the original `max(abs(modulo))` was a TypeError and was
        # also missing a closing parenthesis).
        worst = max(worst, abs(bhaskara - exact))
    return worst

print(max_bhaskara_error())
|
[
"you@example.com"
] |
you@example.com
|
d43eb90092c329955661b847f2018aa00ba189bc
|
883a1d6cb2bcd15ab46567f6fed363ea08401340
|
/project0001api/profile/urls.py
|
4835934555331006b6affe2f36e4b09cc4c8dbe4
|
[
"MIT"
] |
permissive
|
Hir0v0/SNS-API
|
a08ebaf99dc8fd43a8e1eda99f27dbf56c5145eb
|
7616494ccb59bd94e0999bb40ee576557d2cc4f2
|
refs/heads/main
| 2023-02-24T17:53:30.253471 | 2021-01-08T11:41:28 | 2021-01-08T11:41:28 | 327,885,932 | 0 | 0 |
MIT
| 2021-01-08T11:41:30 | 2021-01-08T11:37:55 |
Python
|
UTF-8
|
Python
| false | false | 129 |
py
|
from django.urls import path
from . import views
# URL namespace for reversing, e.g. {% url 'profile:...' %}.
app_name="profile"
urlpatterns = [
    # App root: delegate to the class-based ProfileView.
    path('', views.ProfileView.as_view()),
]
|
[
"charopevez@gmail.com"
] |
charopevez@gmail.com
|
f02d95a17097355407275c3eebcb4950b7794ea0
|
6395987515664fd475fc91398bae06f2d7c1465c
|
/assign/14-class object/class object5.py
|
5b7f3cb72a836eed377ac3d7b357838a3307345e
|
[] |
no_license
|
amanmishra98/python-programs
|
984e1f503d04983802aec14ef7f3b2968cdebb60
|
e8e90e8ae38b0b4058fa978d5bced943ac995e91
|
refs/heads/master
| 2020-04-23T07:00:20.740648 | 2020-03-20T08:14:47 | 2020-03-20T08:14:47 | 170,993,284 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
#2.WAPS to define a class Employee with instance variable empid,empname,salary.
#define constructor to initialize member variable.define fun to shhow data.
class Employee:
    """Prompt for employee details, store them as instance variables, and echo them."""

    def __init__(self):
        # Store the values on self — the exercise asks for *instance
        # variables*; the original bound plain locals that were lost
        # as soon as __init__ returned.
        self.empid = int(input("enter employee id\n"))  # fixed "emplloyee" typo
        self.empname = input("enter employee name\n")
        self.salary = int(input("enter employee salary\n"))
        print(self.empid, self.empname, self.salary)

c1 = Employee()
|
[
"noreply@github.com"
] |
amanmishra98.noreply@github.com
|
ce01006fc28f38174aeae02dffe49f0214c5ae14
|
9554891e5e91fa9d7f75df0f28ae1d220c552478
|
/tests/settings.py
|
0bfc93f139030f93750f7d8315cca6601c124b85
|
[
"MIT"
] |
permissive
|
kmmbvnr/django-polymodels
|
2e79cd72c68935a7e83953e0864ced1cb4a530c5
|
7a9b64b1851fea23a64d3d9421a69911e1669a49
|
refs/heads/master
| 2022-06-21T04:27:15.836175 | 2020-05-07T03:12:18 | 2020-05-07T10:36:06 | 261,932,926 | 1 | 0 |
MIT
| 2020-05-07T02:44:49 | 2020-05-07T02:44:48 | null |
UTF-8
|
Python
| false | false | 245 |
py
|
from __future__ import unicode_literals
# Minimal Django settings used only when running the test suite.
SECRET_KEY = 'not-anymore'  # dummy value; nothing here needs real secrecy
DATABASES = {
    'default': {
        # sqlite backend; NAME omitted — presumably the test runner
        # supplies/derives it (confirm against the project's tox/CI setup).
        'ENGINE': 'django.db.backends.sqlite3',
    },
}
INSTALLED_APPS = [
    # contenttypes is required by polymodels' content-type lookups.
    'django.contrib.contenttypes',
    'polymodels',
    'tests',
]
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
2958f0f909860b6534a0178f12383d7da22b1669
|
4bd4bacecee33cada173e427b5ecb1d758bafaad
|
/src/scalarizr/externals/chef/auth.py
|
ceb60cae41ffdfa7437210aa80b15e234cc31fef
|
[] |
no_license
|
kenorb-contrib/scalarizr
|
3f2492b20910c42f6ab38749545fdbb79969473f
|
3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83
|
refs/heads/master
| 2022-11-26T10:00:58.706301 | 2017-11-02T16:41:34 | 2017-11-02T16:41:34 | 108,550,233 | 0 | 2 | null | 2020-07-24T11:05:36 | 2017-10-27T13:33:46 |
Python
|
UTF-8
|
Python
| false | false | 2,435 |
py
|
from __future__ import with_statement
import base64
import datetime
import hashlib
import re
def _ruby_b64encode(value):
"""The Ruby function Base64.encode64 automatically breaks things up
into 60-character chunks.
"""
b64 = base64.b64encode(value)
for i in xrange(0, len(b64), 60):
yield b64[i:i+60]
def ruby_b64encode(value):
    """Base64-encode *value* as Ruby's Base64.encode64 would: 60-char lines."""
    chunks = _ruby_b64encode(value)
    return '\n'.join(chunks)
def sha1_base64(value):
    """An implementation of Mixlib::Authentication::Digester: SHA1 then b64."""
    digest = hashlib.sha1(value).digest()
    return ruby_b64encode(digest)
class UTC(datetime.tzinfo):
    """Concrete UTC tzinfo (the py2 stdlib ships no ready-made instance)."""

    ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        return self.ZERO

    def dst(self, dt):
        return self.ZERO

    def tzname(self, dt):
        return 'UTC'

utc = UTC()

def canonical_time(timestamp):
    """Render *timestamp* in UTC as an ISO-8601 string with a 'Z' suffix.

    Aware datetimes are converted to UTC first; microseconds are dropped.
    """
    ts = timestamp
    if ts.tzinfo is not None:
        ts = ts.astimezone(utc).replace(tzinfo=None)
    ts = ts.replace(microsecond=0)
    return ts.isoformat() + 'Z'
canonical_path_regex = re.compile(r'/+')

def canonical_path(path):
    """Collapse repeated slashes and drop any trailing slash (root '/' kept)."""
    collapsed = canonical_path_regex.sub('/', path)
    if len(collapsed) <= 1:
        return collapsed
    return collapsed.rstrip('/')
def canonical_request(http_method, path, hashed_body, timestamp, user_id):
    """Build the canonical string that gets signed for Opscode authentication."""
    # Canonicalize request parameters before assembling the message.
    method = http_method.upper()
    if isinstance(timestamp, datetime.datetime):
        timestamp = canonical_time(timestamp)
    hashed_path = sha1_base64(canonical_path(path))
    lines = [
        'Method:%s' % method,
        'Hashed Path:%s' % hashed_path,
        'X-Ops-Content-Hash:%s' % hashed_body,
        'X-Ops-Timestamp:%s' % timestamp,
        'X-Ops-UserId:%s' % user_id,
    ]
    return '\n'.join(lines)
def sign_request(key, http_method, path, body, host, timestamp, user_id):
    """Generate the needed headers for the Opscode authentication protocol.

    NOTE(review): *host* is accepted but unused, matching the protocol
    helper's historical signature.
    """
    timestamp = canonical_time(timestamp)
    hashed_body = sha1_base64(body or '')
    # Simple, unsigned headers first.
    headers = {
        'x-ops-sign': 'version=1.0',
        'x-ops-userid': user_id,
        'x-ops-timestamp': timestamp,
        'x-ops-content-hash': hashed_body,
    }
    # RSA-sign the canonical request and spread the 60-char base64 lines
    # across numbered x-ops-authorization-N headers.
    canonical = canonical_request(http_method, path, hashed_body, timestamp, user_id)
    for index, chunk in enumerate(_ruby_b64encode(key.private_encrypt(canonical)), 1):
        headers['x-ops-authorization-%s' % index] = chunk
    return headers
|
[
"kenorb@users.noreply.github.com"
] |
kenorb@users.noreply.github.com
|
bb7ba31c6870e08fd7fc4b51b79227f4c6111083
|
66e1c9f6598a47916d532c0a6d06b29a1a18796f
|
/libLGTV_serial.py
|
0b83b9e001f8d62e93537b000fc9798a76f58cc6
|
[] |
no_license
|
Paulo-Branco/libLGTV_serial
|
cde7770a52d355f864840eab523c828b480c8f1b
|
1d327854e5170413b0782693e5f749377a060fb3
|
refs/heads/master
| 2021-04-15T05:57:01.748561 | 2018-03-26T23:00:56 | 2018-03-26T23:20:04 | 126,894,417 | 0 | 0 | null | 2018-03-26T21:52:27 | 2018-03-26T21:52:25 | null |
UTF-8
|
Python
| false | false | 11,035 |
py
|
# -*- coding: utf-8 -*-
import re
import serial
import os
import time
import tempfile
from filelock import FileLock
# from pprint import pprint
# Serial command tables, keyed by model-family suffix.  Each code is the
# raw 2-letter command, set id, and data byte sent over RS-232.
actual_codes = {}
# Commands shared by every supported family.
common_codes = {
    'aspect43' : b"kc 00 01",
    'aspect169' : b"kc 00 02",
    'aspectstatus' : b"kc 00 ff",
    'poweroff' : b"ka 00 00",
    'poweron' : b"ka 00 01",
    'powerstatus' : b"ka 00 ff",
    'mute' : b"ke 00 00",
    'unmute' : b"ke 00 01",
    'mutestatus' : b"ke 00 ff"
}
# LW300C family: adds lock/OSD toggles and level queries (data 'ff' = query).
actual_codes['LW300C_etc'] = common_codes.copy()
actual_codes['LW300C_etc'].update({
    'lockstatus' : b"km 00 ff",
    'lockon' : b"km 00 01",
    'lockoff' : b"km 00 00",
    'osdon' : b"kl 00 01",
    'osdoff' : b"kl 00 00",
    'volumelevel' : b"kf 00 ff",
    'brightnesslevel' : b"kh 00 ff",
    'contrastlevel' : b"kg 00 ff",
    'backlightlevel' : b"mg 00 ff",
    'colorlevel' : b"ki 00 ff",
    'colortemperaturelevel' : b"xu 00 ff",
    'sharpnesslevel' : b"kk 00 ff",
    'balancelevel' : b"kt 00 ff"
})
# LK450 family: input selection via the 'xb' command.
actual_codes['LK450_etc'] = common_codes.copy()
actual_codes['LK450_etc'].update({
    'inputdigitalantenna' : b"xb 00 00",
    'inputdigitalcable' : b"xb 00 01",
    'inputanalogantenna' : b"xb 00 10",
    'inputanalogcable' : b"xb 00 11",
    'inputav1' : b"xb 00 20",
    'inputav2' : b"xb 00 21",
    'inputcomp1' : b"xb 00 40",
    'inputcomp2' : b"xb 00 41",
    'inputrgbpc' : b"xb 00 60",
    'inputhdmi1' : b"xb 00 90",
    'inputhdmi2' : b"xb 00 91",
    'inputhdmi3' : b"xb 00 92",
    'inputhdmi4' : b"xb 00 93",
    'inputstatus' : b"xb 00 ff"
})
# PJ250 plasma family: like LK450 but no HDMI4.
actual_codes['PJ250_etc'] = common_codes.copy()
actual_codes['PJ250_etc'].update({
    'inputdtvantenna' : b"xb 00 00",
    'inputdtvcable' : b"xb 00 01",
    'inputanalogantenna' : b"xb 00 10",
    'inputanalogcable' : b"xb 00 11",
    'inputav1' : b"xb 00 20",
    'inputav2' : b"xb 00 21",
    'inputcomp1' : b"xb 00 40",
    'inputcomp2' : b"xb 00 41",
    'inputrgbpc' : b"xb 00 60",
    'inputhdmi1' : b"xb 00 90",
    'inputhdmi2' : b"xb 00 91",
    'inputhdmi3' : b"xb 00 92",
    'inputstatus' : b"xb 00 ff"
})
# LE5300 LED family: single DTV input, single component input.
actual_codes['LE5300_etc'] = common_codes.copy()
actual_codes['LE5300_etc'].update({
    'inputdtv' : b"xb 00 00",
    'inputanalogantenna' : b"xb 00 10",
    'inputanalogcable' : b"xb 00 11",
    'inputav1' : b"xb 00 20",
    'inputav2' : b"xb 00 21",
    'inputcomp' : b"xb 00 40",
    'inputrgbpc' : b"xb 00 60",
    'inputhdmi1' : b"xb 00 90",
    'inputhdmi2' : b"xb 00 91",
    'inputhdmi3' : b"xb 00 92",
    'inputhdmi4' : b"xb 00 93",
    'inputstatus' : b"xb 00 ff"
})
# LC7D family: two HDMI inputs only.
actual_codes['LC7D_etc'] = common_codes.copy()
actual_codes['LC7D_etc'].update({
    'inputdtvantenna' : b"xb 00 00",
    'inputdtvcable' : b"xb 00 01",
    'inputanalogantenna' : b"xb 00 10",
    'inputanalogcable' : b"xb 00 11",
    'inputav1' : b"xb 00 20",
    'inputav2' : b"xb 00 21",
    'inputcomp1' : b"xb 00 40",
    'inputcomp2' : b"xb 00 41",
    'inputrgbpc' : b"xb 00 60",
    'inputhdmi1' : b"xb 00 90",
    'inputhdmi2' : b"xb 00 91",
    'inputstatus' : b"xb 00 ff"
})
# Older 01C/02C monitors: input selection uses 'kb' instead of 'xb'.
actual_codes['01C_etc'] = common_codes.copy()
actual_codes['01C_etc'].update({
    'inputav' : b"kb 00 02",
    'inputcomp1' : b"kb 00 04",
    'inputcomp2' : b"kb 00 05",
    'inputrgbdtv' : b"kb 00 06",
    'inputrgbpc' : b"kb 00 07",
    'inputhdmidtv' : b"kb 00 08",
    'inputhdmipc' : b"kb 00 09",
    'inputstatus' : b"kb 00 ff"
})
actual_codes['02C_etc'] = common_codes.copy()
actual_codes['02C_etc'].update({
    'inputav' : b"kb 00 02",
    'inputcomp1' : b"kb 00 04",
    'inputcomp2' : b"kb 00 05",
    'inputrgbpc' : b"kb 00 07",
    'inputhdmidtv' : b"kb 00 08",
    'inputhdmipc' : b"kb 00 09",
    'inputstatus' : b"kb 00 ff"
})
# Maps each code-table key to every model suffix that uses that table.
reverse_code_map = {
    'LK450_etc': ('LV2500', 'LV2520', 'LV3500', 'LV3520', 'LK330', 'LK430', 'LK450',
                  'LK520', 'PW340', 'PW350', 'PW350U', 'PW350R', 'LH20', 'LH200C',
                  'LH30', 'LF11', 'LF21', 'LU55', 'CL10', 'CL20', 'CL11', 'PZ200'),
    'PJ250_etc': ('PJ250', 'PK250', 'PK280', 'PK290', 'PJ340', 'PJ350', 'PK350',
                  'PKPK340', 'PK540', 'PJ550', 'PK550', 'PJ350C', 'PK550C'),
    'LC7D_etc': ('LC7D', 'LC7DC', 'PC5D', 'PC5DC'),
    'LE5300_etc': ('LE5300', 'LE5500', 'LE7300', 'LE530C', 'LD420', 'LD450', 'LD450C',
                   'LD520', 'LD520C', 'LD630', 'LW5600', 'LW5700', 'LW6500', 'LW9800',
                   'LV3700', 'LV5400', 'LV5500', 'LV9500', 'LK530', 'LK550', 'PZ750',
                   'PZ950', 'PZ950U', 'LW300'),
    '01C_etc': ('01C', '01C-BA'),
    '02C_etc': ('02C', '02C-BA', '02C-BH'),
    'LW300C_etc': ('LW300C', 'LW300C-ZA')
}
all_codes = {}
# populate model suffix lookup hash
for suffix_codes, suffixes in reverse_code_map.items():
    for suffix in suffixes:
        all_codes[suffix] = actual_codes[suffix_codes]
class LGTV:
    """Control an LG TV over its RS-232 serial port.

    *model* is the TV model string, e.g. '42LK450' or 'M2262D'; the
    screen-size digits (and an optional leading 'M') are stripped before
    looking up the command table.  *port* is the serial device path.
    """

    def __init__(self, model, port):
        self.model = model.upper()
        # Ignore digits which indicate the TV's screen size.
        # Bug fix: test the upper-cased self.model (the original tested the
        # raw `model` argument, so lower-case inputs like 'm42lk450' kept
        # their leading letter and failed the all_codes lookup).
        if self.model.startswith('M'):
            self.codes = all_codes[self.model[3:]]  # Ignore the leading 'M' too
        else:
            self.codes = all_codes[self.model[2:]]
        self.port = port
        self.connection = None  # serial.Serial, opened lazily by send()
        self.toggles = {
            'togglepower': ('poweron', 'poweroff'),
            'togglemute': ('mute', 'unmute'),
        }
        self.debounces = {}  # command -> settle delay in seconds

    # The next two methods set up the serial port for communication and
    # open it; the device path (ttyS0/S1/S2, ...) comes from the
    # constructor.  The rest of the parameters shouldn't need to change.
    def get_port(self):
        """Open and return the serial port (9600 baud, 8N1, 1s timeout)."""
        return serial.Serial(self.port, 9600, 8, serial.PARITY_NONE,
                             serial.STOPBITS_ONE, xonxoff=0, rtscts=0, timeout=1)

    def get_port_ensured(self):
        """Like get_port(), but retry until the port opens successfully."""
        ser = None
        while ser is None:
            try:
                ser = self.get_port()
            except serial.serialutil.SerialException:
                time.sleep(0.07)
        return ser

    def status_code(self, code):
        """Turn a command code into its status-query variant (data 'ff')."""
        return code[:-2] + b'ff'

    def lookup(self, command):
        """Translate a symbolic command name into the raw serial code."""
        levelPattern = re.compile("^(.*level)([0-9]{1,3})$")
        matchLevel = levelPattern.match(command)
        if command.startswith('toggle'):
            states = self.toggles.get(command)
            state_codes = (self.codes[states[0]], self.codes[states[1]])
            return self.toggle(self.status_code(state_codes[0]), state_codes)
        elif matchLevel:
            # e.g. 'volumelevel16' -> set volume to decimal 16
            return self.generate_hex_code(matchLevel.group(1), matchLevel.group(2))
        elif command.endswith('up'):
            key = command[:-2] + 'level'
            return self.increment(self.status_code(self.codes[key]))
        elif command.endswith('down'):
            key = command[:-4] + 'level'
            return self.decrement(self.status_code(self.codes[key]))
        else:
            return self.codes[command]

    def generate_hex_code(self, command, decimalValue):
        """Build a set-level code, e.g. ('volumelevel', '16') -> b'kf 00 10'.

        NOTE(review): values below 16 produce a single hex digit (no zero
        padding) — confirm the TV accepts a one-character data field.
        """
        hexValue = hex(int(decimalValue))[2:]
        return self.codes[command][:-2] + str(hexValue).encode()

    # Returns None on error, full response otherwise
    def query_full(self, code):
        self.connection.write(code + b'\r')
        response = self.connection.read(10)
        if self.is_success(response):
            return response

    def query_data(self, code):
        """Return the 2-byte data field of a successful response, else None."""
        response = self.query_full(code)
        return response and response[-3:-1]

    # returns None on error, 2-char status for status commands, and True otherwise
    def query(self, command):
        if self.is_status(command):
            return self.query_data(self.lookup(command))
        elif self.is_level(command):
            hexValue = self.query_data(self.lookup(command))
            return int(hexValue, 16)
        else:
            return self.query_full(self.lookup(command)) and True

    def is_status(self, command):
        return command.endswith('status')

    def is_level(self, command):
        return command.endswith('level')

    def is_success(self, response):
        # A successful response carries 'OK' just before the 2-byte data field.
        return response[-5:-3] == b'OK'

    def hex_bytes_delta(self, hex_bytes, delta):
        """Add *delta* to a 2-char hex byte string; returns a bytearray."""
        return bytearray(hex(int(hex_bytes, 16) + delta)[2:4], 'ascii')

    def delta(self, code, delta):
        """Query the current level for *code* and return a code at level+delta."""
        level = self.query_data(code)
        return code[0:6] + self.hex_bytes_delta(level, delta)

    def increment(self, code):
        return self.delta(code, +1)

    def decrement(self, code):
        return self.delta(code, -1)

    def toggle(self, code, togglecommands):
        """Return the code that flips the current state of a 2-state command."""
        level = self.query_data(code)
        toggledata = (togglecommands[0][-2:], togglecommands[1][-2:])
        data = toggledata[0]
        if level == toggledata[0]:
            data = toggledata[1]
        return code[0:6] + data

    # ======= These are the methods you'll most probably want to use ==========
    def send(self, command):
        """Send *command* to the TV and return the query() result.

        Debounced commands are serialized through a temp-dir lock file and
        followed by their configured settle delay.
        """
        if command in self.debounces:
            wait_secs = self.debounces[command]
            if self.connection is None:
                self.connection = self.get_port()
            lock_path = os.path.join(tempfile.gettempdir(), '.' + command + '_lock')
            with FileLock(lock_path, timeout=0):
                response = self.query(command)
                time.sleep(wait_secs)
        else:
            if self.connection is None:
                self.connection = self.get_port_ensured()
            response = self.query(command)
        self.connection.close()
        return response

    def available_commands(self):
        """Print every command supported by this model's code table."""
        print("Some features (such as a 4th HDMI port) might not be available for your TV model")
        commands = self.codes.copy()
        commands.update(self.toggles)
        for command in commands.keys():
            code = commands[command]
            if command.endswith('level'):
                print("%s : %s" % (command[:-5] + 'up', code[:-2] + b'??'))
                print("%s : %s" % (command[:-5] + 'down', code[:-2] + b'??'))
            else:
                print("{0} : {1}".format(command, code))

    def add_toggle(self, command, state0, state1):
        """Register a new 'toggle<command>' flipping between two named states."""
        self.toggles['toggle' + command] = (state0, state1)

    def debounce(self, command, wait_secs=0.5):
        """Mark *command* as debounced with the given settle time."""
        self.debounces[command] = wait_secs
# end class LGTV
|
[
"pauloricardocb@gmail.com"
] |
pauloricardocb@gmail.com
|
a3406a2a4f8cfa622e3e1140e51ff930c9882ce6
|
a5d4c312df89e8c62b6717ce82c3f8c85c02b799
|
/ben_coomes_hw2.py
|
43d5ea944fb3a184028985c88b68cc760a659253
|
[] |
no_license
|
benCoomes/cycledgram
|
8d4c0ff081af63783d60d7b0724ade11c5632ade
|
0fcb21ad12da29c7e7820fd4e981f26d749a8f43
|
refs/heads/master
| 2021-01-19T21:59:13.167117 | 2016-04-19T17:51:07 | 2016-04-19T17:51:07 | 53,806,421 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,958 |
py
|
import fileinput
#### Register.py ####
class Reg(object):
    """One architectural register: its index plus an in-use flag."""

    def __init__(self, num, isBusy=False):
        self.num = num          # register index
        self.isBusy = isBusy    # True while a pending write targets it

    def __str__(self):
        return "r%d" % self.num
class RegisterSet(object):
    """A fixed-size bank of Reg objects, indexed 0..regCount-1."""

    def __init__(self, regCount):
        self.regCount = regCount
        self.__registers = []
        for i in range(0, regCount):
            self.__registers.append(Reg(i, False))

    def get(self, index):
        """Return the Reg at *index*, or None (with a message) if out of range."""
        if(index < 0 or index >= self.regCount):
            # Single-argument print() behaves identically on Python 2 and 3;
            # the original py2-only print statement was a syntax error on py3.
            print("Error: index of %d out of range in RegisterSet.get(index)" % index)
            return None
        return self.__registers[index]
#### Instruction.py ####
class Inst(object):
    """Base instruction: holds the source text; subclasses model the pipeline."""

    def __init__(self, string):
        self.string = string  # original assembly text, used for trace output

    def cycle(self):
        """Advance one clock tick; overridden by subclasses."""
        pass

    def canProceed(self):
        """Hazard check; overridden by subclasses."""
        pass

    def __str__(self):
        return self.string
class Add_inst(Inst):
    # 5-stage pipeline model of an `add`; reg_list is [dest, src1, src2].
    def __init__(self, reg_list, string):
        super(Add_inst, self).__init__(string)
        self.source_regs = reg_list[1:3]
        self.product_reg = reg_list[0]
        # Maps stage index to the trace glyph printed for that cycle.
        self.stages = {
        -1: ' ', # wait or unstarted
        0 : 'IF',
        1 : 'ID',
        2 : 'EX',
        3 : 'MD',
        4 : 'WB',
        5 : '' # finished
        }
        self.currentStage = -1
    def cycle(self):
        # Advance one clock tick and return this cycle's trace glyph.
        # While sitting in IF (stage 0) the instruction stalls until its
        # source registers are free; on leaving IF it claims its
        # destination register, releasing it when MD (stage 3) completes.
        if self.currentStage == 0:
            if self.canProceed():
                self.currentStage += 1
                self.product_reg.isBusy = True
            else:
                return self.stages[-1]
        elif self.currentStage == 3:
            self.currentStage += 1
            self.product_reg.isBusy = False
        elif self.currentStage == 5:
            # Already finished: emit the empty glyph forever.
            pass
        else:
            self.currentStage += 1
        return self.stages[self.currentStage]
    def canProceed(self):
        # RAW hazard check: proceed only if no source register is busy.
        idle_sregs = True
        for r in self.source_regs:
            if r.isBusy:
                idle_sregs = False
        return idle_sregs
class Sub_inst(Inst):
    # 5-stage pipeline model of a `sub`; identical state machine to Add_inst.
    def __init__(self, reg_list, string):
        super(Sub_inst, self).__init__(string)
        self.source_regs = reg_list[1:3]
        self.product_reg = reg_list[0]
        self.stages = {
        -1: ' ', # wait or unstarted
        0 : 'IF',
        1 : 'ID',
        2 : 'EX',
        3 : 'MD',
        4 : 'WB',
        5 : '' # finished
        }
        self.currentStage = -1
    def cycle(self):
        # Stall in IF on a source-register hazard; claim the destination
        # register when leaving IF, release it after MD (stage 3).
        if self.currentStage == 0:
            if self.canProceed():
                self.currentStage += 1
                self.product_reg.isBusy = True
            else:
                return self.stages[-1]
        elif self.currentStage == 3:
            self.currentStage += 1
            self.product_reg.isBusy = False
        elif self.currentStage == 5:
            pass
        else:
            self.currentStage += 1
        return self.stages[self.currentStage]
    def canProceed(self):
        # Proceed only when every source register is idle.
        idle_sregs = True
        for r in self.source_regs:
            if r.isBusy:
                idle_sregs = False
        return idle_sregs
class Load_inst(Inst):
    # 5-stage pipeline model of `lw`.
    # NOTE(review): unlike Add/Sub, source_regs here is a single Reg
    # (reg_list[1]), not a list — canProceed below relies on that.
    def __init__(self, reg_list, string):
        super(Load_inst, self).__init__(string)
        self.source_regs = reg_list[1]
        self.product_reg = reg_list[0]
        self.stages = {
        -1: ' ', # wait or unstarted
        0 : 'IF',
        1 : 'ID',
        2 : 'EX',
        3 : 'MD',
        4 : 'WB',
        5 : '' # finished
        }
        self.currentStage = -1
    def cycle(self):
        # Same stall/claim/release protocol as the ALU instructions.
        if self.currentStage == 0:
            if self.canProceed():
                self.currentStage += 1
                self.product_reg.isBusy = True
            else:
                return self.stages[-1]
        elif self.currentStage == 3:
            self.currentStage += 1
            self.product_reg.isBusy = False
        elif self.currentStage == 5:
            pass
        else:
            self.currentStage += 1
        return self.stages[self.currentStage]
    def canProceed(self):
        # Loads also wait on their own destination register, unlike Add/Sub.
        return not self.source_regs.isBusy and not self.product_reg.isBusy
class Store_inst(Inst):
    # 5-stage pipeline model of `sw`: reads two registers, writes none,
    # so it never sets a busy flag.
    def __init__(self, reg_list, string):
        super(Store_inst, self).__init__(string)
        self.source_regs = reg_list[0:2]
        self.product_reg = None
        self.stages = {
        -1: ' ', # wait or unstarted
        0 : 'IF',
        1 : 'ID',
        2 : 'EX',
        3 : 'MD',
        4 : 'WB',
        5 : '' # finished
        }
        self.currentStage = -1
    def cycle(self):
        # Stalls in IF until both sources are free; no register claiming
        # since stores produce no register result.
        if self.currentStage == 0:
            if self.canProceed():
                self.currentStage += 1
            else:
                return self.stages[-1]
        elif self.currentStage == 5:
            pass
        else:
            self.currentStage += 1
        return self.stages[self.currentStage]
    def canProceed(self):
        # Proceed only when every source register is idle.
        idle_sregs = True
        for r in self.source_regs:
            if r.isBusy:
                idle_sregs = False
        return idle_sregs
#### Processor.py ####
class CPU(object):
    """Drives instructions through a sliding issue window and records a trace."""

    def __init__(self):
        self.instructions = []  # all instructions, in program order
        self.output = []        # one accumulated trace line per instruction
        # lists should only ever hold a single instruction object
        self.stages = {
            0 : 'IF',
            1 : 'ID',
            2 : 'EX',
            3 : 'MD',
            4 : 'WB'
        }
        # instructions[win_start:win_end] are in flight; at most win_max wide
        self.win_start = 0
        self.win_end = 1
        self.win_max = 5

    def addInst(self, instruction_list):
        """Append instructions and seed each trace line with its source text."""
        for i in instruction_list:
            self.instructions.append(i)
            self.output.append(str(i) + " ")

    def tick(self):
        """Advance every in-flight instruction one cycle.

        Returns True while any instruction remains unfinished.
        """
        # go through all inst
        inst_finished = False
        if_open = True
        for inst in self.instructions:
            # default value for inst before window
            inst_out = ""
            # if inst is in window, call cycle on inst and get the output
            if(inst in self.instructions[self.win_start:self.win_end]):
                if if_open:
                    inst_out = inst.cycle()
                else:
                    inst_out = ' '
                if(inst_out == 'WB'):
                    inst_finished = True
                if(inst_out == ' '):
                    # a stalled instruction blocks everything behind it
                    if_open = False
            # if inst is after window, set output to appropriate spacing
            elif(inst in self.instructions[self.win_end:]):
                inst_out = " "
            # write output for inst to corresponding entry in self.output
            self.output[self.instructions.index(inst)] += inst_out + " "
        # update window values for next tick
        if inst_finished:
            self.win_start += 1
        if (self.win_end - self.win_start) < self.win_max and if_open:
            self.win_end += 1
        return self.win_start != len(self.instructions)

    def printPipeline(self):
        """Print the recorded trace, one line per instruction."""
        for line in self.output:
            # Single-argument print() is py2/py3 compatible; the original
            # py2-only print statement was a syntax error under Python 3.
            print(line)
# Driver: read assembly from stdin/argv files, build instruction objects,
# run them through the CPU, and print the pipeline trace.
regs = RegisterSet(16)
# read in lines, parse as instructions
raw_inst = []
for line in fileinput.input():
    raw_inst.append(line[:-1])  # drop the trailing newline
inst_list = []
for s in raw_inst:
    parts = s.split()
    if(len(parts) == 2):
        parts[1] = parts[1].split(',')
    else:
        print "Error: bad string format for instruction input"
    if(parts[0] == 'add'):
        #make add
        inst_regs = []
        # uncheked assumption
        # NOTE(review): register digits parsed base-16 here but base-10 for
        # 'sub' below — looks inconsistent; confirm intended input format.
        for r in parts[1]:
            r_num = int(r[1], 16)
            inst_regs.append(regs.get(r_num))
        inst_list.append(Add_inst(inst_regs, s))
        #cpu.addInst(Add_inst(inst_regs))
    elif(parts[0] == 'sub'):
        #make sub
        inst_regs = []
        # uncheked assumption
        for r in parts[1]:
            r_num = int(r[1])
            inst_regs.append(regs.get(r_num))
        inst_list.append(Sub_inst(inst_regs, s))
        #cpu.addInst(Sub_inst(inst_regs))
    elif(parts[0] == 'lw'):
        #make lw
        inst_regs = []
        r = parts[1]
        inst_regs.append(regs.get(int(r[0][1])))
        #assumption about how offset works:
        # offset + regnum = actual reg num
        # probably wrong, CHECK THIS
        inst_regs.append(regs.get(int(r[1][0]) + int(r[1][3])))
        inst_list.append(Load_inst(inst_regs, s))
        #cpu.addInst(Load_inst(inst_regs))
    elif(parts[0] == 'sw'):
        #make sw
        inst_regs = []
        r = parts[1]
        inst_regs.append(regs.get(int(r[0][1])))
        #assumption about how offset works:
        # offset + regnum = actual reg num
        # probably wrong, CHECK THIS
        inst_regs.append(regs.get(int(r[1][0]) + int(r[1][3])))
        inst_list.append(Store_inst(inst_regs, s))
        #cpu.addInst(Store_inst(inst_regs))
    else:
        print "ERROR! Unrecognized instruction"
# Simulate until every instruction has retired, then dump the trace.
cpu = CPU()
cpu.addInst(inst_list)
while cpu.tick():
    pass
cpu.printPipeline()
|
[
"bcoomes@g.clemson.edu"
] |
bcoomes@g.clemson.edu
|
f519dd7ba24d14d8045d97653c1567470bc4a3e1
|
7439933b3a41ccd2dccb5c65fbe5dcd048dcccd5
|
/ursina_test/cheers.py
|
3da8be03c0d6ccaf8293fde3c3d7d6fe3f0df082
|
[] |
no_license
|
carlsonmark/ursina_test
|
8b67f2fc922fb9e608179aaa2ffbadf21fa0b847
|
e1b992b103fa81bcef29c1271ddc557e858c0212
|
refs/heads/master
| 2022-07-13T13:17:44.146288 | 2020-05-16T23:27:06 | 2020-05-16T23:27:06 | 261,594,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,659 |
py
|
import json
from pathlib import Path
from shutil import copy2
from ursina import *
from typing import Dict, Any, Optional, List
# Where cheer data is stored
cheer_data_filename = 'cheer.json'
# All of the "cheers"
cheers = []
def _cheers_textures() -> List[str]:
globbed = Path('textures/cheers').glob('*')
textures = [p.stem for p in globbed]
return textures
cheers_textures = _cheers_textures()
def load_cheer_data() -> Dict[str, Any]:
    """
    Load cheer data from the file.

    Uses a context manager so the handle is closed promptly (the original
    left the open file object for the garbage collector to reclaim).
    """
    with open(cheer_data_filename, 'r') as fp:
        return json.load(fp)
def save_cheer_data(cheer_data: Dict):
    """Write cheer data to disk, backing up the previous file first."""
    # Only back up when a previous file exists — the original copy2 call
    # raised FileNotFoundError on the very first save.
    if Path(cheer_data_filename).exists():
        copy2(cheer_data_filename, f'{cheer_data_filename}.bak')
    # Context manager so the handle is closed (the original leaked it).
    with open(cheer_data_filename, 'w+') as fp:
        json.dump(cheer_data, fp, indent=2)
    return
class Cheer(Entity):
    """A textured cube that spins, jitters, shrinks, and self-destructs."""
    def __init__(self, position, scale, texture, duration=10):
        # duration: lifetime in seconds; the cube scales down to zero over it.
        self._duration = duration
        self._initial_scale = Vec3(scale)
        self._expiry_time = time.monotonic() + duration
        rotation_scale = 100
        # Random per-axis rotation speed (units per second — presumably
        # degrees, per ursina convention; confirm).
        self._rotation_speed = Vec3(random.random() * rotation_scale,
                                    random.random() * rotation_scale,
                                    random.random() * rotation_scale)
        super().__init__(
            model='cube',
            position=position,
            scale=scale,
            texture=texture
        )
        # Schedule removal from the scene after *duration* seconds.
        destroy(self, duration)
        return
    def update(self):
        # Per-frame hook invoked by the ursina engine.
        # Slowly fade the entity out
        self.set_scale_for_remaining_time()
        # Jitter around a bit
        self.move_jittery()
        # Rotate
        self.rotate()
        return
    def set_scale_for_remaining_time(self):
        # Scale shrinks linearly with the fraction of lifetime remaining.
        remaining = self._expiry_time - time.monotonic()
        scale = self._initial_scale * (remaining / self._duration)
        self.scale = scale
        return
    def move_jittery(self):
        # Random walk of up to ±0.5 units/second on each axis.
        move_per_second = 1
        x = random.random() * move_per_second - move_per_second / 2
        y = random.random() * move_per_second - move_per_second / 2
        z = random.random() * move_per_second - move_per_second / 2
        self.position = self.position + Vec3(x, y, z) * time.dt
        return
    def rotate(self):
        # time.dt is ursina's per-frame delta time.
        self.rotation += self._rotation_speed * time.dt
        return
class CheerScoreboard(Text):
    """On-screen text panel tracking per-attendee cheer points, persisted to JSON."""
    def __init__(self, attendees: Dict[str,str]):
        # attendees: presumably display-name -> username; verify against caller.
        self._sort_key = 'cheer_available'
        self._cheer_data = load_cheer_data()
        for name in attendees.keys():
            username = attendees[name]
            self.add_attendee(username, name)
        super().__init__(text='',
                         name='cheer',
                         position=window.bottom_right,
                         z=-999,
                         eternal=True,
                         origin=(.5, -.5),
                         # scale = (.05, .025),
                         color=color.white.tint(-.2),
                         font='VeraMono.ttf'
                         )
        self.update_cheer_text()
        return
    def set_sort_key(self, sort_key: str):
        # Which cheer_data field orders the scoreboard.
        self._sort_key = sort_key
        self.update_cheer_text()
        return
    def add_attendee(self, username, name: str):
        # Seed a zeroed record for attendees not already in the data file.
        if not username in self._cheer_data:
            self._cheer_data[username] = {
                "cheer_available": 0,
                "cheer_given": 0,
                "name": name
            }
        return
    def give_everyone_points(self, points):
        """
        Give everyone points, without updating the "Given" stat
        :param points: The number of points to give
        """
        for attendee in self._cheer_data:
            self._cheer_data[attendee]['cheer_available'] += points
        self.update_cheer_text()
        save_cheer_data(self._cheer_data)
        return
    def transfer_points(self, name_from: str, name_to: str, points: int, texture: Optional[str]):
        """
        Transfer points from one attendee to another
        :param name_from: Who to transfer the points from
        :param name_to: Who to transfer the points to
        :param points: The number of points to transfer
        :param texture: The texture to use
        """
        attendee_from = self._cheer_data[name_from]
        attendee_to = self._cheer_data[name_to]
        # Clamp so nobody goes below zero; transfer only what was available.
        from_points = attendee_from['cheer_available']
        from_points -= points
        from_points = max(0, from_points)
        transferred = attendee_from['cheer_available'] - from_points
        attendee_from['cheer_available'] = from_points
        # Self-transfers burn points without crediting 'given' or returning them.
        if not name_from == name_to:
            attendee_from['cheer_given'] += transferred
            attendee_to['cheer_available'] += transferred
        self.update_cheer_text(name_from=attendee_from['name'], name_to=attendee_to['name'])
        save_cheer_data(self._cheer_data)
        # Fall back to the 'star' texture if the requested one is unknown.
        if not texture or texture not in cheers_textures:
            texture = 'star'
        self.add_cheers(count=points, texture=f'textures/cheers/{texture}')
        return
    def add_cheers(self, count: int, texture: str):
        # Spawn at most 50 Cheer cubes at random positions/scales.
        for i in range(min(50, count)):
            max_pos = 2
            position = (random.random() * max_pos - max_pos / 2,
                        random.random() * max_pos - max_pos / 2,
                        -2)
            max_scale = .2
            # NOTE(review): this expression is always <= 0, so cubes get a
            # non-positive scale — looks unintended; confirm.
            random_scale = random.random() * max_scale - max_scale
            scale = Vec3(random_scale, random_scale, random_scale, )
            e = Cheer(position=position, scale=scale, texture=texture, duration=10)
            cheers.append(e)
        return
    def update_cheer_text(self, name_from=None, name_to=None):
        """
        Update the cheer text
        :param sort_key: Which cheer_data key to sort by
        """
        # Highest value of the current sort key first.
        order = reversed(sorted(self._cheer_data,
                                key=lambda item: self._cheer_data[item][self._sort_key]))
        text = 'Name  Points   Given\n'
        text += '-----+--------+------\n'
        for username in order:
            participant = self._cheer_data[username]
            participant_name = participant['name']
            # Color-code rows involved in the latest transfer.
            if name_from == name_to and name_from == participant_name:
                color = 'yellow'
            elif name_from == participant_name:
                color = 'red'
            elif name_to == participant_name:
                color = 'green'
            else:
                color = 'white'
            text += f'<{color}>{participant["name"]}<white> | {participant["cheer_available"]:6} | {participant["cheer_given"]:5}\n'
        self.text = text
        self.create_background()
        return
|
[
"carlsonmark@gmail.com"
] |
carlsonmark@gmail.com
|
4b6593f23436dfb27afd3d7fb6288cef6f61058e
|
002a4ef4ff4867fab3d4ee1593c54e54617b0728
|
/lib/deform_psroi_pooling/ps_roi.py
|
3e8c444c03ac03690545b62f1369a088fa942d09
|
[] |
no_license
|
jdd803/OCR
|
ed1f7d603886807619325b040473c0a2bf1235cf
|
90cf24d1f7deffae95ea502479cb00f071dbbeb5
|
refs/heads/master
| 2020-05-04T14:21:04.975383 | 2019-04-08T13:02:39 | 2019-04-08T13:02:39 | 179,193,042 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,659 |
py
|
import tensorflow as tf
import numpy as np
def tf_flatten(inputs):
    """Collapse a tensor to rank 1."""
    flat_shape = [-1]
    return tf.reshape(inputs, flat_shape)
def tf_repeat(inputs, repeats, axis=0):
    """Repeat each element of a rank-1 tensor *repeats* times (like np.repeat).

    NOTE(review): *axis* is accepted but unused, matching the original.
    """
    assert len(inputs.get_shape()) == 1
    expanded = tf.expand_dims(inputs, -1)
    tiled = tf.tile(expanded, [1, repeats])
    return tf_flatten(tiled)
def np_flatten(inputs):
    """Return *inputs* reshaped to a 1-D numpy array."""
    return np.reshape(inputs, (-1,))
def np_repeat(inputs, repeats, axis=0):
    """Repeat each element of a 1-D array *repeats* times.

    NOTE(review): *axis* is accepted but unused, matching the original.
    """
    assert len(np.shape(inputs)) == 1
    # Element-wise repetition: identical to expand_dims + tile + flatten.
    return np.repeat(np.asarray(inputs), repeats)
def ps_roi(features, boxes, pool = True, offsets = [], k = 3, feat_stride = 8):
    """Graph-mode wrapper around _ps_roi via TF1's tf.py_func.

    NOTE(review): mutable default `offsets=[]` is shared across calls —
    harmless as long as it is never mutated, but worth confirming.
    tf.py_func is TF1-only (removed in TF2 eager; tf.numpy_function is
    the successor).
    """
    pooled_response = tf.py_func(_ps_roi,[features, boxes, pool, offsets, k, feat_stride],tf.float32)
    # convert_to_tensor on a py_func output is effectively a no-op passthrough.
    pooled_fea = tf.convert_to_tensor(pooled_response)
    return pooled_fea
def _ps_roi(features, boxes, pool, offsets, k, feat_stride):
    '''
    Implement the PSROI pooling
    :param features: (1,h,w,2*k^2*(c+1) or (1,h,w,2*K^2*4)
    :param boxes: (5)->(0,x1,y1,x2,y2)
    :param pool: control whether ave_pool the features
    :param offsets: (k*k*(c+1),2)
    :param k: output size,(x,y)
    :return:(b,k,k,c+1)
    '''
    fea_shape = np.shape(features)
    # True division: num_classes/depth are floats here and are re-cast with
    # int(...) at each use below.
    num_classes = fea_shape[-1] / (k * k) #channels
    depth = num_classes #(c+1)
    # Map the image-space box onto the downsampled feature grid.
    feature_boxes = np.round(boxes / feat_stride)
    feature_boxes[-2:] -= 1 #not include right and bottom edge
    top_left_point = np.hstack((feature_boxes[1:3],feature_boxes[1:3])).reshape((1,4))
    boxes_part = np.zeros(( k * k, 4)) #(k^2,4)
    boxes_part += top_left_point #(k*k,4)
    width = (feature_boxes[3] - feature_boxes[1]) / k # (n,1)
    height = (feature_boxes[4] - feature_boxes[2]) / k # (n,1)
    # split boxes: shift a k-by-k grid of sub-boxes across the ROI
    shift_x = np.arange(0, k) * width
    shift_y = np.arange(0, k) * height
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()
    boxes_part += shifts
    boxes_part[:, 2] = boxes_part[:, 0] + width - 1
    boxes_part[:, 3] = boxes_part[:, 1] + height - 1
    boxes_part = np.reshape(np.floor(boxes_part),(k,k,-1,4)) #(k,k,1,4)
    # Snap the last row/column of sub-boxes to the ROI's far edges.
    boxes_part[:, -1, 0, 2] = feature_boxes[-2]
    boxes_part[-1, :, 0, 3] = feature_boxes[-1]
    boxes_part = np.reshape(boxes_part, (1, int(k*k), 4)) #(1,k*k,4)
    # add offsets to splitted boxes (deformable PSROI); zeros when absent
    if len(offsets) == 0:
        offsets0 = np.zeros((depth * k * k, 2)) # (k*k*2*(c+1),2)
    else:
        offsets0 = offsets # (k*k*c,2)
    offsets0 = np.reshape(offsets0, (int(k * k), int(depth), 2)) #(x,y,x,y,x,y)(k*k,c,2)
    # offsets1 = tf.stack((offsets0, offsets0),axis=3)
    # offsets1 = tf.reshape(offsets1,(boxes_num, k * k, depth, 4))
    offsets1 = np.tile(offsets0, (1, 1, 2)) #(k*k,c,4)
    offsets1 = np.transpose(offsets1,(1,0,2)) #(c,k*k,4)
    boxes_part = np.repeat(boxes_part,depth,axis=0)
    boxes_part += offsets1 #(c,k*k,4)
    boxes_part = np.reshape(boxes_part,(int(k*k*depth),4)) #(c*k*k,4)
    # clip split boxes by feature' size
    temp00 = np.clip(boxes_part[..., 0], 0, fea_shape[2] - 1)
    temp11 = np.clip(boxes_part[..., 1], 0, fea_shape[1] - 1)
    temp22 = np.clip(boxes_part[..., 2], 0, fea_shape[2] - 1)
    temp33 = np.clip(boxes_part[..., 3], 0, fea_shape[1] - 1)
    boxes_k_offset = np.stack([temp00,temp11,temp22,temp33],axis=-1) #(c*k*k,4)
    boxes_k_offset = np.reshape(boxes_k_offset,(int(depth), int(k*k), 4)) #(c,k*k,4)
    boxes_k_offset = np.transpose(boxes_k_offset,(1,0,2)) #(k*k,c,4)
    # num of classes
    # Assemble the k*k pooled cells row by row.
    # NOTE(review): when k == 1, only the first branch ever runs, so
    # `pooled_response` is never assigned and the return raises NameError;
    # confirm k >= 2 is an invariant of all callers.
    all_boxes_num = k * k
    for i in range(all_boxes_num):
        part_k = i % (k * k)
        pooled_fea = map_coordinates(features[0],boxes_k_offset[i],part_k,num_classes,pool) #(depth,1)/(depth,h,w)
        if (part_k % k) == 0:
            pooled_row = pooled_fea
        elif (part_k % k) == (k - 1) and part_k != (k - 1):
            pooled_row = np.concatenate((pooled_row, pooled_fea), axis=2)
            pooled_response = np.concatenate((pooled_response, pooled_row), axis=1)
        elif (part_k % k) == (k - 1) and part_k == (k - 1):
            pooled_row = np.concatenate((pooled_row,pooled_fea), axis=2)
            pooled_response = pooled_row
        else:
            pooled_row = np.concatenate((pooled_row,pooled_fea), axis=2)
    # try:
    #     pooled_response = np.concatenate((pooled_response, pooled_fea), 0)
    # except UnboundLocalError:
    #     pooled_response = pooled_fea
    return pooled_response #(depth,k,k)/(depth,height,width)
def map_coordinates(inputs,boxes,k,num_classes,pool):
    '''
    Bilinearly sample feature values inside (possibly fractional) boxes.

    For part index ``k`` this slices channels ``[k*num_classes, (k+1)*num_classes)``
    out of ``inputs``, samples every integer grid point of each box with
    bilinear interpolation, and either average-pools the result per box or
    returns the full sampled patch.

    :param inputs: feature map of shape (h, w, channels); channels are grouped
        per part, ``num_classes`` channels per part
    :param boxes: (depth, 4) boxes as (x1, y1, x2, y2); coordinates may be
        fractional. NOTE: width/height are taken from boxes[0] only, so all
        boxes are assumed to share the same size — confirm with callers.
    :param k: part index selecting which channel group to sample
    :param num_classes: number of channels per part group
    :param pool: if truthy, average-pool each sampled patch to a single value
    :return: (depth, 1, 1) when ``pool`` else (depth, height, width)
    '''
    # compute box's width and height, both are integer
    width = boxes[0][2] - boxes[0][0] + 1
    height = boxes[0][3] - boxes[0][1] + 1
    depth = np.shape(boxes)[0]
    tp_lf = np.reshape(boxes[:,0:2],(-1,1,2)) #(depth,1,2) top-left corner of each box
    # integer sampling grid relative to the box origin
    grid = np.meshgrid(np.array(range(int(height))), np.array(range(int(width))))
    grid = np.stack(grid, axis=-1) #(h,w,2)
    #grid = np.expand_dims(grid,axis=0) #(1,h,w,2)
    grid = np.reshape(grid, (1,-1, 2)) #(1,n_points,2)
    # absolute (possibly fractional) sampling coordinates per box
    coords = grid + tp_lf #(depth,n,2)
    n_coords = np.shape(coords)[1]
    # coords_lt = tf.cast(tf.floor(coords), 'int32') #(depth,n_points,2)
    # coords_rb = tf.cast(tf.ceil(coords), 'int32')
    # coords_lb = tf.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1)
    # coords_rt = tf.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1)
    # the four integer neighbours of every fractional coordinate
    # (lt=left-top, rb=right-bottom, lb=left-bottom, rt=right-top)
    coords_lt = np.floor(coords)
    coords_rb = np.ceil(coords)
    coords_lt = coords_lt.astype(np.int32)
    coords_rb = coords_rb.astype(np.int32)
    coords_lb = np.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1)
    coords_rt = np.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1) #(depth,n_points,2)
    # box index repeated once per sampling point; np_repeat is a sibling
    # helper defined elsewhere in this file
    idx = np_repeat(range(depth), n_coords)
    def _get_vals_by_coords(input, coords):
        # Gather feature values at integer coordinates for part k.
        # np_flatten is a sibling helper defined elsewhere in this file.
        inputs1 = input[:,:,int(k*num_classes):int((k+1)*num_classes)] #(h,w,depth)
        inputs2 = np.transpose(inputs1,(2,0,1)) #(depth,h,w)
        indices = np.stack([
            idx,np_flatten(coords[..., 0]), np_flatten(coords[..., 1])
        ], axis=-1)
        inputs_shape = np.shape(inputs2)
        temp1 = inputs_shape[1]*inputs_shape[2]
        temp2 = inputs_shape[2]
        # flatten (box, row, col) triples into linear offsets for np.take
        indices1 = [i[0]*temp1+i[1]*temp2+i[2] for i in indices]
        vals = np.take(inputs2,indices1)
        vals = np.reshape(vals, (int(depth),int(n_coords)))
        return vals #(depth,n_points)
    vals_lt = _get_vals_by_coords(inputs, coords_lt)
    vals_rb = _get_vals_by_coords(inputs, coords_rb)
    vals_lb = _get_vals_by_coords(inputs, coords_lb)
    vals_rt = _get_vals_by_coords(inputs, coords_rt) #(depth,n_points)
    # coords_offset_lt = coords - tf.cast(coords_lt, 'float32') #(depth,n_points,2)
    # fractional offsets inside each unit cell drive the bilinear weights
    coords_offset_lt = coords - coords_lt # (depth,n_points,2)
    vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[..., 0]
    vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[..., 0]
    mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[..., 1] # (depth,n_points)
    if pool:
        pooled_box = np.mean(mapped_vals, axis=1) #(depth,1)
        pooled_box = np.reshape(pooled_box, (depth, 1, 1)) #(depth,1,1)
    else:
        pooled_box = np.reshape(mapped_vals, (depth, int(height), int(width))) #(depth,h,w)
    return pooled_box #(depth,1)/(depth,h,w)
|
[
"MingdongJu@163.com"
] |
MingdongJu@163.com
|
3b917e2dc41a7b3904aaba73dfd6da76170247de
|
e3add5b68d6a9d607bb03602be7bb65f7aa751e1
|
/bin/ldap3/operation/modifyDn.py
|
400f56992180df2746e8077c0802fd50ca1a8cf3
|
[] |
no_license
|
M-u-S/TA-LDAP
|
ac41379ce18b2153e0a269944ea443bfb81b7a61
|
e2c79d4fcff716836138d89f4468b40503213d9b
|
refs/heads/master
| 2021-01-06T23:35:00.269684 | 2020-02-19T02:30:28 | 2020-02-19T02:30:28 | 241,514,384 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,313 |
py
|
"""
"""
# Created on 2013.05.31
#
# Author: Giovanni Cannata
#
# Copyright 2013, 2014, 2015, 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from ..protocol.rfc4511 import ModifyDNRequest, LDAPDN, RelativeLDAPDN, DeleteOldRDN, NewSuperior, ResultCode
from ..operation.bind import referrals_to_list
# ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
# entry LDAPDN,
# newrdn RelativeLDAPDN,
# deleteoldrdn BOOLEAN,
# newSuperior [0] LDAPDN OPTIONAL }
def modify_dn_operation(dn,
                        new_relative_dn,
                        delete_old_rdn=True,
                        new_superior=None):
    """Build an RFC 4511 ModifyDNRequest.

    dn: entry to rename/move; new_relative_dn: the new RDN;
    delete_old_rdn: whether the old RDN attribute is removed;
    new_superior: optional new parent DN (only set when truthy).
    """
    request = ModifyDNRequest()
    mandatory_components = (
        ('entry', LDAPDN(dn)),
        ('newrdn', RelativeLDAPDN(new_relative_dn)),
        ('deleteoldrdn', DeleteOldRDN(delete_old_rdn)),
    )
    for component_name, component_value in mandatory_components:
        request[component_name] = component_value
    if new_superior:
        request['newSuperior'] = NewSuperior(new_superior)
    return request
def modify_dn_request_to_dict(request):
    """Convert a ModifyDNRequest into a plain dict for logging/inspection."""
    new_superior = request['newSuperior']
    result = {
        'entry': str(request['entry']),
        'newRdn': str(request['newrdn']),
        'deleteOldRdn': bool(request['deleteoldrdn']),
        'newSuperior': None,
    }
    # newSuperior is OPTIONAL in the protocol; only stringify a real value
    if new_superior is not None and new_superior.hasValue():
        result['newSuperior'] = str(new_superior)
    return result
def modify_dn_response_to_dict(response):
    """Convert a ModifyDNResponse into a plain dict for logging/inspection."""
    result_code = response['resultCode']
    # Human-readable name of the LDAP result code
    description = ResultCode().getNamedValues().getName(result_code)
    return {
        'result': int(result_code),
        'description': description,
        'dn': str(response['matchedDN']),
        'referrals': referrals_to_list(response['referral']),
        'message': str(response['diagnosticMessage']),
    }
|
[
"michael.uschmann@gkc.co.nz"
] |
michael.uschmann@gkc.co.nz
|
7252b484dfdf32010667b6f1203dd62e8102ecfa
|
e7013e7bec4941a2ea762a843e16dc3d5dce0145
|
/task2/simple_test.py
|
7c182b1dbc87d8455ab6f5055295c01442bbb93e
|
[] |
no_license
|
Siyan-Wang/exp_task2
|
9ad9e77ab96ad974e4eee02a4d988eb8b2fe710c
|
19feb1bddda558e68e231a824b3596e0fe533597
|
refs/heads/master
| 2022-04-25T20:26:10.886921 | 2020-04-14T14:56:53 | 2020-04-14T14:56:53 | 119,312,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
import numpy as np
import pytest
import io
def test_built_in_functiton():
    """Division with plain Python floats: 2/8 must be 0.25."""
    quotient = 2.0 / 8.0
    np.testing.assert_allclose(quotient, 0.25, rtol=1e-5)
def test_array():
    """Division through numpy: np.divide(2, 8) must be 0.25."""
    quotient = np.divide(2.0, 8.0)
    np.testing.assert_allclose(quotient, 0.25, rtol=1e-5)
def test_input_size():
    """First line of task2/input.txt must contain exactly 6 characters.

    Fix: the original opened the file without ever closing it; `with`
    guarantees the handle is released even if the assertion fails.
    """
    with open("task2/input.txt", "rb") as f:
        char = f.readline().strip().decode("utf-8")
    assert len(char) == 6
|
[
"sw2936@columbia.edu"
] |
sw2936@columbia.edu
|
3921885438264c0a9638d2a6932b6f694cd538d2
|
2b60e31d61762c0175ff375a8ebbc29ee9857c5f
|
/env/lib/python3.7/heapq.py
|
fc72e5d20ba9d0972329920f45c782ee2969df22
|
[] |
no_license
|
iamsushanth/babynames
|
255e933bfe51c8571af2aed10f7ec14c5cd53f97
|
49ec0ba0bda61d7ec3a9645fb19f71f8c618d87a
|
refs/heads/master
| 2021-07-10T19:20:23.218651 | 2020-02-03T18:25:08 | 2020-02-03T18:25:08 | 238,031,841 | 0 | 0 | null | 2021-03-20T02:48:35 | 2020-02-03T18:22:13 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 56 |
py
|
/Users/iamsushanth/opt/miniconda3/lib/python3.7/heapq.py
|
[
"sushanth.uk@gmail.com"
] |
sushanth.uk@gmail.com
|
621ddd31b9732ba50ce92e1aff9829dfe5d71467
|
34bfbbbeb4157bca8eafa40e4fc46aa2b42d212e
|
/guide_core/scripts/core_controller.py
|
40375c1027a74484a6cf31b0360fd912e33d83fb
|
[] |
no_license
|
tony92151/ros_DQN_guide_dog
|
82eefe1f8e578b48736e362320c2f12627cc8f2a
|
5f164bdb16a4bf416b7a2ffab48410bc0cbef379
|
refs/heads/master
| 2020-04-16T23:27:01.710864 | 2019-11-29T13:28:24 | 2019-11-29T13:28:24 | 166,012,084 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,175 |
py
|
#!/usr/bin/python
#reference http://wiki.ros.org/roslaunch/API%20Usage
import roslaunch
import rospy
import rospkg
# Resolve ROS package install paths once at import time; they are used to
# locate launch files below. NOTE(review): only dirpath_core is referenced
# in this file — dirpath_ar / dirpath_rviz / dirpath_nav look unused here;
# confirm against related scripts before removing.
rospack = rospkg.RosPack()
dirpath_ar = rospack.get_path('lemon_minibot_detect')
dirpath_core = rospack.get_path('lemon_minibot_core')
dirpath_rviz = rospack.get_path('minibot_simulation')
dirpath_nav = rospack.get_path('turtlebot3_copy')
class core_control:
    """Starts/stops the gmapping (SLAM) and navigation launch files based on
    the ROS parameters ~/smach/slam and ~/smach/navigation.

    State flags:
      mapping_launch_already: 0 = not running, 1 = running
      nav_launch_already: 0 = never started, 1 = running, -1 = shut down
    NOTE(review): once nav_launch_already is set to -1 on shutdown it never
    returns to 0, so navigation cannot be relaunched by update() — confirm
    whether that one-shot behavior is intended.
    """
    def __init__(self):
        # A ROSLaunchParent can only be started once, so fresh instances are
        # created here and again in init_again() after each shutdown.
        uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
        roslaunch.configure_logging(uuid)
        self.mapping_launch = roslaunch.parent.ROSLaunchParent(uuid, [dirpath_core+"/launch/lemon_minibot_gmapping.launch"])
        self.mapping_launch_already = 0
        self.nav_launch = roslaunch.parent.ROSLaunchParent(uuid, [dirpath_core+"/launch/lemon_minibot_navigation.launch"])
        self.nav_launch_already = 0
    def init_again(self):
        """Recreate both launch parents so they can be started again later."""
        uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
        roslaunch.configure_logging(uuid)
        self.mapping_launch = roslaunch.parent.ROSLaunchParent(uuid, [dirpath_core+"/launch/lemon_minibot_gmapping.launch"])
        self.nav_launch = roslaunch.parent.ROSLaunchParent(uuid, [dirpath_core+"/launch/lemon_minibot_navigation.launch"])
    def update(self):
        """Poll the smach parameters once and reconcile launch state.

        Called in a loop from the main guard; the rospy.sleep() calls pace
        the polling and give roslaunch time to settle.
        """
        # --- SLAM: param == 1 requests gmapping running, anything else stops it
        if rospy.get_param('~/smach/slam')==1:
            #rospy.sleep(0.5)
            if self.mapping_launch_already == 0:
                rospy.sleep(1)
                self.mapping_launch.start()
                self.mapping_launch_already = 1
                rospy.loginfo("slam started")
            else:
                rospy.loginfo("slam have already launched")
                rospy.sleep(0.3)
        else:
            if self.mapping_launch_already == 1:
                rospy.sleep(0.3)
                self.mapping_launch.shutdown()
                self.mapping_launch_already = 0
                self.init_again()
                rospy.loginfo("slam closed")
            else:
                rospy.loginfo("slam have already closed")
                rospy.sleep(1)
        #######################################
        # --- Navigation: param != -1 requests navigation, -1 stops it
        if rospy.get_param('~/smach/navigation')!=-1:
            #rospy.sleep(0.5)
            if self.nav_launch_already == 0:
                rospy.sleep(1)
                self.nav_launch.start()
                self.nav_launch_already = 1
                rospy.loginfo("navigation started")
            else:
                rospy.loginfo("navigation have already launched")
                rospy.sleep(0.3)
        else :
            if self.nav_launch_already == 1:
                rospy.sleep(0.3)
                self.nav_launch.shutdown()
                # -1 (not 0) marks "shut down"; see class docstring note
                self.nav_launch_already = -1
                self.init_again()
                rospy.loginfo("navigation closed")
            else:
                rospy.loginfo("navigation have already closed")
                rospy.sleep(1)
rospy.sleep(1)
if __name__ == '__main__':
    # Entry point: spin the controller until ROS signals shutdown.
    # Fixes: `sys` was used below without being imported (NameError at
    # shutdown), and the Python-2-only `print` statement is replaced by a
    # call that works on both Python 2 and 3.
    import sys
    rospy.init_node('Core_controller', anonymous=False)
    core = core_control()
    while not rospy.is_shutdown():
        core.update()
    print("Shutting down")
    sys.exit(0)
|
[
"tony92151@yahoo.com.tw"
] |
tony92151@yahoo.com.tw
|
ad6dad21628cc635db005d138eccf7792e89f1ee
|
9f459e7183590313e8b7be35c40e3b3311efeca0
|
/projects/DensePose/densepose/data/build.py
|
eeb231de0fce675b5cc92b7d78f32fe56a67a912
|
[
"Apache-2.0"
] |
permissive
|
TalhaUsuf/RetinaNet_W9_form
|
95e129f2611d240eb4f524640ffdc7384a27fca3
|
2a66bba1de96bebd679775b841d95ac7dcfcbbbe
|
refs/heads/master
| 2023-08-14T02:04:11.668370 | 2021-09-15T15:53:01 | 2021-09-15T15:53:01 | 405,922,850 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29,753 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
import numpy as np
from collections import UserDict, defaultdict
from dataclasses import dataclass
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple
import torch
from torch.utils.data.dataset import Dataset
from detectron.config import CfgNode
from detectron.data.build import build_detection_test_loader as d2_build_detection_test_loader
from detectron.data.build import build_detection_train_loader as d2_build_detection_train_loader
from detectron.data.build import (
load_proposals_into_dataset,
print_instances_class_histogram,
trivial_batch_collator,
worker_init_reset_seed,
)
from detectron.data.catalog import DatasetCatalog, Metadata, MetadataCatalog
from detectron.data.samplers import TrainingSampler
from detectron.utils.comm import get_world_size
from densepose.config import get_bootstrap_dataset_config
from densepose.modeling import build_densepose_embedder
from .combined_loader import CombinedDataLoader, Loader
from .dataset_mapper import DatasetMapper
from .datasets.coco import DENSEPOSE_CSE_KEYS_WITHOUT_MASK, DENSEPOSE_IUV_KEYS_WITHOUT_MASK
from .datasets.dataset_type import DatasetType
from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
from .samplers import (
DensePoseConfidenceBasedSampler,
DensePoseCSEConfidenceBasedSampler,
DensePoseCSEUniformSampler,
DensePoseUniformSampler,
MaskFromDensePoseSampler,
PredictionToGroundTruthSampler,
)
from .transform import ImageResizeTransform
from .utils import get_category_to_class_mapping, get_class_to_mesh_name_mapping
from .video import (
FirstKFramesSelector,
FrameSelectionStrategy,
LastKFramesSelector,
RandomKFramesSelector,
VideoKeyframeDataset,
video_list_from_file,
)
__all__ = ["build_detection_train_loader", "build_detection_test_loader"]
# A dataset entry as produced by DatasetCatalog: a plain dict of fields
# (file_name, annotations, ...).
Instance = Dict[str, Any]
# Predicate deciding whether a dataset entry is kept for training/testing.
InstancePredicate = Callable[[Instance], bool]
def _compute_num_images_per_worker(cfg: "CfgNode"):
    """Return SOLVER.IMS_PER_BATCH divided evenly across distributed workers.

    Asserts that the global batch size is divisible by, and at least as
    large as, the number of workers.
    """
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    divisible_msg = "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert images_per_batch % num_workers == 0, divisible_msg
    large_enough_msg = "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert images_per_batch >= num_workers, large_enough_msg
    return images_per_batch // num_workers
def _map_category_id_to_contiguous_id(dataset_name: str, dataset_dicts: "Iterable[Instance]"):
    """Rewrite each annotation's dataset category ID to the contiguous ID
    stored in the dataset metadata, mutating the dicts in place."""
    id_map = MetadataCatalog.get(dataset_name).thing_dataset_id_to_contiguous_id
    for entry in dataset_dicts:
        for annotation in entry["annotations"]:
            annotation["category_id"] = id_map[annotation["category_id"]]
@dataclass
class _DatasetCategory:
    """
    Class representing category data in a dataset:
     - id: category ID, as specified in the dataset annotations file
     - name: category name, as specified in the dataset annotations file
     - mapped_id: category ID after applying category maps (DATASETS.CATEGORY_MAPS config option)
     - mapped_name: category name after applying category maps
     - dataset_name: dataset in which the category is defined

    For example, when training models in a class-agnostic manner, one could take LVIS 1.0
    dataset and map the animal categories to the same category as human data from COCO:
     id = 225
     name = "cat"
     mapped_id = 1
     mapped_name = "person"
     dataset_name = "lvis_v1_animals_dp_train"
    """

    id: int  # category ID as it appears in the dataset annotations file
    name: str  # category name as it appears in the dataset annotations file
    mapped_id: int  # ID after applying DATASETS.CATEGORY_MAPS
    mapped_name: str  # name after applying DATASETS.CATEGORY_MAPS
    dataset_name: str  # dataset that defines this category
# Mapping from a merged (post-category-map) ID to all dataset categories
# that were folded into it.
_MergedCategoriesT = Dict[int, List[_DatasetCategory]]
def _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories: _MergedCategoriesT):
    """For every dataset referenced by the merged category table, (re)build
    the metadata fields `thing_classes`, `thing_dataset_id_to_contiguous_id`
    and `thing_dataset_id_to_merged_id`, and log the resulting mapping.

    Contiguous IDs are assigned by enumerating merged category IDs in sorted
    order, so they are consistent across all participating datasets.
    """
    merged_categories_per_dataset = {}
    # First pass: regroup (contiguous_id, category) pairs by dataset.
    for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())):
        for cat in merged_categories[cat_id]:
            if cat.dataset_name not in merged_categories_per_dataset:
                merged_categories_per_dataset[cat.dataset_name] = defaultdict(list)
            merged_categories_per_dataset[cat.dataset_name][cat_id].append(
                (
                    contiguous_cat_id,
                    cat,
                )
            )
    logger = logging.getLogger(__name__)
    # NOTE: the loop variable below shadows the `merged_categories` argument.
    for dataset_name, merged_categories in merged_categories_per_dataset.items():
        meta = MetadataCatalog.get(dataset_name)
        # Reset any previously stored maps so repeated calls stay consistent.
        if not hasattr(meta, "thing_classes"):
            meta.thing_classes = []
            meta.thing_dataset_id_to_contiguous_id = {}
            meta.thing_dataset_id_to_merged_id = {}
        else:
            meta.thing_classes.clear()
            meta.thing_dataset_id_to_contiguous_id.clear()
            meta.thing_dataset_id_to_merged_id.clear()
        logger.info(f"Dataset {dataset_name}: category ID to contiguous ID mapping:")
        for _cat_id, categories in sorted(merged_categories.items()):
            added_to_thing_classes = False
            for contiguous_cat_id, cat in categories:
                # Each merged group contributes exactly one thing_classes entry.
                if not added_to_thing_classes:
                    meta.thing_classes.append(cat.mapped_name)
                    added_to_thing_classes = True
                meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
                meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
                logger.info(f"{cat.id} ({cat.name}) -> {contiguous_cat_id}")
def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
def has_annotations(instance: Instance) -> bool:
return "annotations" in instance
def has_only_crowd_anotations(instance: Instance) -> bool:
for ann in instance["annotations"]:
if ann.get("is_crowd", 0) == 0:
return False
return True
def general_keep_instance_predicate(instance: Instance) -> bool:
return has_annotations(instance) and not has_only_crowd_anotations(instance)
if not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS:
return None
return general_keep_instance_predicate
def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
def has_sufficient_num_keypoints(instance: Instance) -> bool:
num_kpts = sum(
(np.array(ann["keypoints"][2::3]) > 0).sum()
for ann in instance["annotations"]
if "keypoints" in ann
)
return num_kpts >= min_num_keypoints
if cfg.MODEL.KEYPOINT_ON and (min_num_keypoints > 0):
return has_sufficient_num_keypoints
return None
def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
if not cfg.MODEL.MASK_ON:
return None
def has_mask_annotations(instance: Instance) -> bool:
return any("segmentation" in ann for ann in instance["annotations"])
return has_mask_annotations
def _maybe_create_densepose_keep_instance_predicate(cfg: "CfgNode") -> "Optional[InstancePredicate]":
    """Return a predicate keeping instances with DensePose annotations (IUV
    or CSE form; segmentation masks also qualify when the coarse segmentation
    head is trained from masks). None when DensePose training is disabled."""
    if not cfg.MODEL.DENSEPOSE_ON:
        return None
    use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS

    def has_densepose_annotations(instance: "Instance") -> bool:
        for ann in instance["annotations"]:
            has_iuv_keys = all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK)
            has_cse_keys = all(key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK)
            if has_iuv_keys or has_cse_keys:
                return True
            if use_masks and "segmentation" in ann:
                return True
        return False

    return has_densepose_annotations
def _maybe_create_specific_keep_instance_predicate(cfg: "CfgNode") -> "Optional[InstancePredicate]":
    """Combine the keypoint, mask and DensePose predicates with OR semantics;
    None when no task-specific filtering applies."""
    creators = (
        _maybe_create_keypoints_keep_instance_predicate,
        _maybe_create_mask_keep_instance_predicate,
        _maybe_create_densepose_keep_instance_predicate,
    )
    predicates = []
    for create_predicate in creators:
        predicate = create_predicate(cfg)
        if predicate is not None:
            predicates.append(predicate)
    if not predicates:
        return None

    def combined_predicate(instance: "Instance") -> bool:
        # Keep the instance if ANY active task can use it.
        return any(predicate(instance) for predicate in predicates)

    return combined_predicate
def _get_train_keep_instance_predicate(cfg: "CfgNode"):
    """Compose the general (non-crowd/non-empty) and task-specific keep
    predicates for training; returns None when neither applies."""
    general = _maybe_create_general_keep_instance_predicate(cfg)
    specific = _maybe_create_specific_keep_instance_predicate(cfg)
    if general is None:
        return specific  # may itself be None
    if specific is None:
        return general

    def combined_general_specific_keep_predicate(instance: "Instance") -> bool:
        return general(instance) and specific(instance)

    return combined_general_specific_keep_predicate
def _get_test_keep_instance_predicate(cfg: "CfgNode"):
    """At test time only the general (non-crowd / non-empty) filter applies."""
    return _maybe_create_general_keep_instance_predicate(cfg)
def _maybe_filter_and_map_categories(
    dataset_name: str, dataset_dicts: "List[Instance]"
) -> "List[Instance]":
    """Drop annotations whose category is not in the dataset's contiguous-ID
    map (i.e. not whitelisted) and rewrite surviving category IDs to their
    contiguous IDs. Annotation dicts are mutated in place."""
    category_id_map = MetadataCatalog.get(dataset_name).thing_dataset_id_to_contiguous_id
    filtered = []
    for entry in dataset_dicts:
        kept_annotations = []
        for ann in entry["annotations"]:
            original_id = ann["category_id"]
            if original_id not in category_id_map:
                continue
            ann["category_id"] = category_id_map[original_id]
            kept_annotations.append(ann)
        entry["annotations"] = kept_annotations
        filtered.append(entry)
    return filtered
def _add_category_whitelists_to_metadata(cfg: "CfgNode"):
    """Copy DATASETS.WHITELISTED_CATEGORIES from the config onto each
    dataset's metadata and log the result."""
    logger = logging.getLogger(__name__)
    for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items():
        meta = MetadataCatalog.get(dataset_name)
        meta.whitelisted_categories = whitelisted_cat_ids
        logger.info(
            "Whitelisted categories for dataset {}: {}".format(
                dataset_name, meta.whitelisted_categories
            )
        )
def _add_category_maps_to_metadata(cfg: "CfgNode"):
    """Store per-dataset category remapping tables (source ID -> destination
    ID, both coerced to int) on the dataset metadata and log them."""
    logger = logging.getLogger(__name__)
    for dataset_name, category_map in cfg.DATASETS.CATEGORY_MAPS.items():
        # Config keys/values may arrive as strings; normalize to ints.
        int_category_map = {
            int(cat_id_src): int(cat_id_dst) for cat_id_src, cat_id_dst in category_map.items()
        }
        MetadataCatalog.get(dataset_name).category_map = int_category_map
        logger.info("Category maps for dataset {}: {}".format(dataset_name, int_category_map))
def _add_category_info_to_bootstrapping_metadata(dataset_name: str, dataset_cfg: "CfgNode"):
    """Attach category/class bookkeeping from a bootstrap dataset config to
    the dataset metadata and log the category-to-class mapping."""
    meta = MetadataCatalog.get(dataset_name)
    mapping = get_category_to_class_mapping(dataset_cfg)
    meta.category_to_class_mapping = mapping
    meta.categories = dataset_cfg.CATEGORIES
    meta.max_count_per_category = dataset_cfg.MAX_COUNT_PER_CATEGORY
    logging.getLogger(__name__).info(
        "Category to class mapping for dataset {}: {}".format(dataset_name, mapping)
    )
def _maybe_add_class_to_mesh_name_map_to_metadata(dataset_names: "List[str]", cfg: "CfgNode"):
    """Ensure every listed dataset's metadata carries a class -> mesh-name
    map; existing maps are left untouched."""
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        if hasattr(meta, "class_to_mesh_name"):
            continue
        meta.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
def _merge_categories(dataset_names: Collection[str]) -> _MergedCategoriesT:
    """Build the merged category table for several datasets.

    Honors each dataset's optional whitelist and category map (both stored on
    its metadata by `_add_category_whitelists_to_metadata` /
    `_add_category_maps_to_metadata`) and groups categories by their mapped
    (merged) ID.

    Returns: mapped category ID -> list of `_DatasetCategory` merged into it.
    """
    merged_categories = defaultdict(list)
    # cat_id -> final display name for that ID (used in the second pass)
    category_names = {}
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        whitelisted_categories = meta.get("whitelisted_categories")
        category_map = meta.get("category_map", {})
        cat_ids = (
            whitelisted_categories if whitelisted_categories is not None else meta.categories.keys()
        )
        for cat_id in cat_ids:
            cat_name = meta.categories[cat_id]
            cat_id_mapped = category_map.get(cat_id, cat_id)
            if cat_id_mapped == cat_id or cat_id_mapped in cat_ids:
                category_names[cat_id] = cat_name
            else:
                # Mapped ID points outside this dataset's visible categories;
                # fall back to the numeric ID as a placeholder name.
                category_names[cat_id] = str(cat_id_mapped)
            # assign temporary mapped category name, this name can be changed
            # during the second pass, since mapped ID can correspond to a category
            # from a different dataset
            cat_name_mapped = meta.categories[cat_id_mapped]
            merged_categories[cat_id_mapped].append(
                _DatasetCategory(
                    id=cat_id,
                    name=cat_name,
                    mapped_id=cat_id_mapped,
                    mapped_name=cat_name_mapped,
                    dataset_name=dataset_name,
                )
            )
    # second pass to assign proper mapped category names
    for cat_id, categories in merged_categories.items():
        for cat in categories:
            if cat_id in category_names and cat.mapped_name != category_names[cat_id]:
                cat.mapped_name = category_names[cat_id]
    return merged_categories
def _warn_if_merged_different_categories(merged_categories: _MergedCategoriesT):
logger = logging.getLogger(__name__)
for cat_id in merged_categories:
merged_categories_i = merged_categories[cat_id]
first_cat_name = merged_categories_i[0].name
if len(merged_categories_i) > 1 and not all(
cat.name == first_cat_name for cat in merged_categories_i[1:]
):
cat_summary_str = ", ".join(
[f"{cat.id} ({cat.name}) from {cat.dataset_name}" for cat in merged_categories_i]
)
logger.warning(
f"Merged category {cat_id} corresponds to the following categories: "
f"{cat_summary_str}"
)
def combine_detection_dataset_dicts(
    dataset_names: Collection[str],
    keep_instance_predicate: Optional[InstancePredicate] = None,
    proposal_files: Optional[Collection[str]] = None,
) -> List[Instance]:
    """
    Load and prepare dataset dicts for training / testing

    Merges the category sets of all datasets (applying whitelists and
    category maps stored on their metadata), rewrites category IDs to
    contiguous IDs, optionally loads proposals, and finally flattens all
    datasets into one list, filtered by `keep_instance_predicate`.

    Args:
        dataset_names (Collection[str]): a list of dataset names
        keep_instance_predicate (Callable: Dict[str, Any] -> bool): predicate
            applied to instance dicts which defines whether to keep the instance
        proposal_files (Collection[str]): if given, a list of object proposal files
            that match each dataset in `dataset_names`.

    Returns:
        List[Instance]: the combined, filtered dataset dicts.
    """
    assert len(dataset_names)
    if proposal_files is None:
        proposal_files = [None] * len(dataset_names)
    assert len(dataset_names) == len(proposal_files)
    # load datasets and metadata
    dataset_name_to_dicts = {}
    for dataset_name in dataset_names:
        dataset_name_to_dicts[dataset_name] = DatasetCatalog.get(dataset_name)
        # NOTE(review): this checks the accumulator dict, which is always
        # non-empty here; it likely meant
        # len(dataset_name_to_dicts[dataset_name]) — confirm before changing.
        assert len(dataset_name_to_dicts), f"Dataset '{dataset_name}' is empty!"
    # merge categories, requires category metadata to be loaded
    # cat_id -> [(orig_cat_id, cat_name, dataset_name)]
    merged_categories = _merge_categories(dataset_names)
    _warn_if_merged_different_categories(merged_categories)
    merged_category_names = [
        merged_categories[cat_id][0].mapped_name for cat_id in sorted(merged_categories)
    ]
    # map to contiguous category IDs
    _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories)
    # load annotations and dataset metadata
    for dataset_name, proposal_file in zip(dataset_names, proposal_files):
        dataset_dicts = dataset_name_to_dicts[dataset_name]
        assert len(dataset_dicts), f"Dataset '{dataset_name}' is empty!"
        if proposal_file is not None:
            dataset_dicts = load_proposals_into_dataset(dataset_dicts, proposal_file)
        dataset_dicts = _maybe_filter_and_map_categories(dataset_name, dataset_dicts)
        print_instances_class_histogram(dataset_dicts, merged_category_names)
        dataset_name_to_dicts[dataset_name] = dataset_dicts
    if keep_instance_predicate is not None:
        all_datasets_dicts_plain = [
            d
            for d in itertools.chain.from_iterable(dataset_name_to_dicts.values())
            if keep_instance_predicate(d)
        ]
    else:
        all_datasets_dicts_plain = list(
            itertools.chain.from_iterable(dataset_name_to_dicts.values())
        )
    return all_datasets_dicts_plain
def build_detection_train_loader(cfg: "CfgNode", mapper=None):
    """Build the training data loader, Detectron2-style, but allowing
    several datasets with different-but-compatible category sets.

    Steps:
      1. Query :class:`DatasetCatalog` for every name in cfg.DATASETS.TRAIN
         and merge the results (categories unified, IDs made contiguous).
      2. Hand the combined dicts to Detectron2's train-loader builder, which
         maps each dict through `mapper` and batches the results.

    Args:
        cfg (CfgNode): the config
        mapper (callable): maps a raw dataset dict to the model's input
            format; defaults to `DatasetMapper(cfg, True)`.
    Returns:
        an infinite iterator of training data
    """
    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    _maybe_add_class_to_mesh_name_map_to_metadata(cfg.DATASETS.TRAIN, cfg)
    proposal_files = cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None
    dataset_dicts = combine_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
        proposal_files=proposal_files,
    )
    if mapper is None:
        mapper = DatasetMapper(cfg, True)
    return d2_build_detection_train_loader(cfg, dataset=dataset_dicts, mapper=mapper)
def build_detection_test_loader(cfg, dataset_name, mapper=None):
    """Build a test data loader for a single dataset (batch size 1).

    Similar to `build_detection_train_loader` but uses the given
    `dataset_name` instead of the names in cfg.

    Args:
        cfg: a detectron2 CfgNode
        dataset_name (str): a dataset name registered in the DatasetCatalog
        mapper (callable): maps a raw dataset dict to the model's input
            format; defaults to `DatasetMapper(cfg, False)`.
    Returns:
        DataLoader: a torch DataLoader over the dataset with test-time
        transformation and batching.
    """
    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    _maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg)
    if cfg.MODEL.LOAD_PROPOSALS:
        proposal_index = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[proposal_index]]
    else:
        proposal_files = None
    dataset_dicts = combine_detection_dataset_dicts(
        [dataset_name],
        keep_instance_predicate=_get_test_keep_instance_predicate(cfg),
        proposal_files=proposal_files,
    )
    sampler = None
    if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE:
        # Without distributed inference, iterate the dataset in order.
        sampler = torch.utils.data.SequentialSampler(dataset_dicts)
    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    return d2_build_detection_test_loader(
        dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler
    )
def build_frame_selector(cfg: "CfgNode"):
    """Create a video frame selector from cfg.STRATEGY / cfg.NUM_IMAGES.

    Returns None for the ALL strategy (keep every frame).

    Fix: the original left `frame_selector` unbound for any strategy outside
    the four handled ones (the pyre-fixme flagged this), which would surface
    as a confusing NameError; an explicit ValueError is raised instead.
    """
    strategy = FrameSelectionStrategy(cfg.STRATEGY)
    if strategy == FrameSelectionStrategy.RANDOM_K:
        return RandomKFramesSelector(cfg.NUM_IMAGES)
    if strategy == FrameSelectionStrategy.FIRST_K:
        return FirstKFramesSelector(cfg.NUM_IMAGES)
    if strategy == FrameSelectionStrategy.LAST_K:
        return LastKFramesSelector(cfg.NUM_IMAGES)
    if strategy == FrameSelectionStrategy.ALL:
        return None
    raise ValueError(f"Unhandled frame selection strategy {strategy}")
def build_transform(cfg: "CfgNode", data_type: str):
    """Build a data transform from its config.

    Only image resizing (cfg.TYPE == "resize", data_type == "image") is
    currently supported; anything else raises ValueError.
    """
    if cfg.TYPE == "resize" and data_type == "image":
        return ImageResizeTransform(cfg.MIN_SIZE, cfg.MAX_SIZE)
    raise ValueError(f"Unknown transform {cfg.TYPE} for data type {data_type}")
def build_combined_loader(cfg: "CfgNode", loaders: "Collection[Loader]", ratios: "Sequence[float]"):
    """Wrap several loaders into one that samples them at the given ratios,
    emitting batches sized for a single worker."""
    batch_size = _compute_num_images_per_worker(cfg)
    return CombinedDataLoader(loaders, batch_size, ratios)
def build_bootstrap_dataset(dataset_name: str, cfg: "CfgNode") -> "Sequence[torch.Tensor]":
    """
    Build dataset that provides data to bootstrap on

    Args:
        dataset_name (str): Name of the dataset, needs to have associated metadata
            to load the data
        cfg (CfgNode): bootstrapping config
    Returns:
        Sequence[Tensor] - dataset that provides image batches, Tensors of size
        [N, C, H, W] of type float32; None (with a warning logged) when no
        factory is registered for the dataset type or the factory fails.
    """
    _add_category_info_to_bootstrapping_metadata(dataset_name, cfg)
    meta = MetadataCatalog.get(dataset_name)
    factory = BootstrapDatasetFactoryCatalog.get(meta.dataset_type)
    dataset = factory(meta, cfg) if factory is not None else None
    if dataset is None:
        logging.getLogger(__name__).warning(
            f"Failed to create dataset {dataset_name} of type {meta.dataset_type}"
        )
    return dataset
def build_data_sampler(cfg: "CfgNode", sampler_cfg: "CfgNode", embedder: "Optional[torch.nn.Module]"):
    """Create a prediction -> ground-truth sampler according to sampler_cfg.TYPE.

    Every supported type yields a `PredictionToGroundTruthSampler` that turns
    predicted DensePose results into "gt_densepose" annotations and derives
    "gt_masks" from them; the types differ only in how DensePose points are
    sampled. The original had six near-identical branches; the three
    confidence-based IUV types are now table-driven (behavior unchanged).

    Args:
        cfg: global config (consumed by the CSE samplers)
        sampler_cfg: sampler config (TYPE, COUNT_PER_CLASS, ...)
        embedder: CSE embedder module; required for "densepose_cse_*" types
    Raises:
        ValueError: for an unknown sampler_cfg.TYPE
    """
    # The confidence-based IUV types differ only by the channel used to rank
    # sampling locations.
    confidence_channel_by_type = {
        "densepose_UV_confidence": "sigma_2",
        "densepose_fine_segm_confidence": "fine_segm_confidence",
        "densepose_coarse_segm_confidence": "coarse_segm_confidence",
    }
    sampler_type = sampler_cfg.TYPE
    if sampler_type == "densepose_uniform":
        densepose_sampler = DensePoseUniformSampler(count_per_class=sampler_cfg.COUNT_PER_CLASS)
    elif sampler_type in confidence_channel_by_type:
        densepose_sampler = DensePoseConfidenceBasedSampler(
            confidence_channel=confidence_channel_by_type[sampler_type],
            count_per_class=sampler_cfg.COUNT_PER_CLASS,
            search_proportion=0.5,
        )
    elif sampler_type == "densepose_cse_uniform":
        assert embedder is not None
        densepose_sampler = DensePoseCSEUniformSampler(
            cfg=cfg,
            use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
            embedder=embedder,
            count_per_class=sampler_cfg.COUNT_PER_CLASS,
        )
    elif sampler_type == "densepose_cse_coarse_segm_confidence":
        assert embedder is not None
        densepose_sampler = DensePoseCSEConfidenceBasedSampler(
            cfg=cfg,
            use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
            embedder=embedder,
            confidence_channel="coarse_segm_confidence",
            count_per_class=sampler_cfg.COUNT_PER_CLASS,
            search_proportion=0.5,
        )
    else:
        raise ValueError(f"Unknown data sampler type {sampler_cfg.TYPE}")
    data_sampler = PredictionToGroundTruthSampler()
    # transform densepose pred -> gt
    data_sampler.register_sampler("pred_densepose", "gt_densepose", densepose_sampler)
    data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
    return data_sampler
def build_data_filter(cfg: CfgNode):
    """Instantiate the data filter selected by ``cfg.TYPE``.

    Args:
        cfg (CfgNode): filter configuration node; ``TYPE`` selects the filter
            and ``MIN_VALUE`` supplies its threshold.

    Returns:
        ScoreBasedFilter keeping only detections above ``cfg.MIN_VALUE``.

    Raises:
        ValueError: if ``cfg.TYPE`` names an unknown filter type.
    """
    if cfg.TYPE != "detection_score":
        raise ValueError(f"Unknown data filter type {cfg.TYPE}")
    return ScoreBasedFilter(min_score=cfg.MIN_VALUE)
def build_inference_based_loader(
    cfg: CfgNode,
    dataset_cfg: CfgNode,
    model: torch.nn.Module,
    embedder: Optional[torch.nn.Module] = None,
) -> InferenceBasedLoader:
    """
    Build a loader that bootstraps ground truth from the predictions of
    ``model`` on images drawn from the dataset described by ``dataset_cfg``.
    """
    # Plain image dataset + infinite training sampler feeding the model.
    bootstrap_dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER)
    metadata = MetadataCatalog.get(dataset_cfg.DATASET)
    image_loader = torch.utils.data.DataLoader(
        bootstrap_dataset,  # pyre-ignore[6]
        batch_size=dataset_cfg.IMAGE_LOADER.BATCH_SIZE,
        sampler=TrainingSampler(len(bootstrap_dataset)),
        num_workers=dataset_cfg.IMAGE_LOADER.NUM_WORKERS,
        collate_fn=trivial_batch_collator,
        worker_init_fn=worker_init_reset_seed,
    )
    # Wrap the image loader with prediction -> ground-truth sampling/filtering.
    return InferenceBasedLoader(
        model,
        data_loader=image_loader,
        data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder),
        data_filter=build_data_filter(dataset_cfg.FILTER),
        shuffle=True,
        batch_size=dataset_cfg.INFERENCE.OUTPUT_BATCH_SIZE,
        inference_batch_size=dataset_cfg.INFERENCE.INPUT_BATCH_SIZE,
        category_to_class_mapping=metadata.category_to_class_mapping,
    )
def has_inference_based_loaders(cfg: CfgNode) -> bool:
    """
    Return True if training requires at least one inference-based loader,
    i.e. if any bootstrap dataset is configured.
    """
    return bool(cfg.BOOTSTRAP_DATASETS)
def build_inference_based_loaders(
    cfg: CfgNode, model: torch.nn.Module
) -> Tuple[List[InferenceBasedLoader], List[float]]:
    """
    Build one inference-based loader per entry of ``cfg.BOOTSTRAP_DATASETS``.

    Returns:
        (loaders, ratios): the loaders and their per-dataset sampling ratios.
    """
    loaders: List[InferenceBasedLoader] = []
    ratios: List[float] = []
    # Single shared embedder, moved to the model's device.
    embedder = build_densepose_embedder(cfg).to(device=model.device)  # pyre-ignore[16]
    for dataset_spec in cfg.BOOTSTRAP_DATASETS:
        spec_cfg = get_bootstrap_dataset_config().clone()
        spec_cfg.merge_from_other_cfg(CfgNode(dataset_spec))
        loaders.append(build_inference_based_loader(cfg, spec_cfg, model, embedder))
        ratios.append(spec_cfg.RATIO)
    return loaders, ratios
def build_video_list_dataset(meta: Metadata, cfg: CfgNode):
    """Create a dataset over the video files listed in ``meta``.

    Args:
        meta (Metadata): catalog metadata providing ``video_list_fpath``,
            ``video_base_path`` and ``category``.
        cfg (CfgNode): dataset options; ``cfg.TYPE`` selects the dataset kind.

    Returns:
        VideoKeyframeDataset when ``cfg.TYPE == "video_keyframe"``.

    Raises:
        ValueError: for an unknown ``cfg.TYPE``. FIX: previously the function
            fell through and silently returned None; raising matches the
            sibling builders (build_data_sampler / build_data_filter).
    """
    video_list_fpath = meta.video_list_fpath
    video_base_path = meta.video_base_path
    category = meta.category
    if cfg.TYPE == "video_keyframe":
        frame_selector = build_frame_selector(cfg.SELECT)
        transform = build_transform(cfg.TRANSFORM, data_type="image")
        video_list = video_list_from_file(video_list_fpath, video_base_path)
        # Optional helper file with precomputed keyframe indices.
        keyframe_helper_fpath = getattr(cfg, "KEYFRAME_HELPER", None)
        return VideoKeyframeDataset(
            video_list, category, frame_selector, transform, keyframe_helper_fpath
        )
    raise ValueError(f"Unknown video list dataset type {cfg.TYPE}")
class _BootstrapDatasetFactoryCatalog(UserDict):
"""
A global dictionary that stores information about bootstrapped datasets creation functions
from metadata and config, for diverse DatasetType
"""
def register(self, dataset_type: DatasetType, factory: Callable[[Metadata, CfgNode], Dataset]):
"""
Args:
dataset_type (DatasetType): a DatasetType e.g. DatasetType.VIDEO_LIST
factory (Callable[Metadata, CfgNode]): a callable which takes Metadata and cfg
arguments and returns a dataset object.
"""
assert dataset_type not in self, "Dataset '{}' is already registered!".format(dataset_type)
self[dataset_type] = factory
# Module-level singleton catalog of bootstrap dataset factories.
BootstrapDatasetFactoryCatalog = _BootstrapDatasetFactoryCatalog()
# Datasets described by a list of video files are built via build_video_list_dataset.
BootstrapDatasetFactoryCatalog.register(DatasetType.VIDEO_LIST, build_video_list_dataset)
|
[
"talha.yousuf@stech.ai"
] |
talha.yousuf@stech.ai
|
6027836b1b5d3cb8b842b1a1b77f5c9777269896
|
2cc29c65c1c9e8ff6999f67ee01009ac1ee4fb43
|
/Lesson3/Task-3_3.py
|
2507a8b3f84203079da4a773bdb15e68573936f7
|
[] |
no_license
|
ekspertas/Python_Algoritms
|
2620820a723bdf972e00520d9792594a11d34904
|
49f9193bf5a67a5b61fca9f266e6ad9cffa0af1a
|
refs/heads/main
| 2023-04-29T19:13:56.440489 | 2021-05-18T14:31:14 | 2021-05-18T14:31:14 | 360,904,082 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 980 |
py
|
"""Swap the minimum and maximum elements of a list of random integers."""
import random

SIZE = 10
MIN_ITEM = -100
MAX_ITEM = 100

array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print('Массив случайных чисел:\n', array)

# Find the positions of the first occurrences of the smallest / largest values.
min_el_inx = max_el_inx = 0
for idx, value in enumerate(array):
    if value > array[max_el_inx]:
        max_el_inx = idx
    if value < array[min_el_inx]:
        min_el_inx = idx

min_el = array[min_el_inx]
max_el = array[max_el_inx]
print('Минимальный и максимальный элементы массива:\n', min_el, 'и', max_el)

# Swap them in place with a tuple assignment.
array[min_el_inx], array[max_el_inx] = max_el, min_el
print('Массив, в котром поменяны местами минимальный и максимальный элементы:\n', array)
|
[
"ekspertasd@gmail.com"
] |
ekspertasd@gmail.com
|
33a33cfd3f32dd9321b486aeb4d948593d5c76b2
|
b15178f2ec828894c3b2d31b3ff6164be37ab875
|
/setup.py
|
a511bad7d960a83c9af9d54df61c11eb837181ee
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biomodels/BIOMD0000000007
|
08e9de5d8d6745cde85d337c385e0f41f53906d3
|
1c03559e6e807621fa757386ea03dfae2c0ca312
|
refs/heads/master
| 2021-01-25T06:05:51.198922 | 2014-10-16T05:13:44 | 2014-10-16T05:13:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
# Packaging script for the BioModels entry BIOMD0000000007.
from setuptools import setup, find_packages

setup(name='BIOMD0000000007',
      version=20140916,  # release date used as the version (YYYYMMDD)
      description='BIOMD0000000007 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000007',
      maintainer='Stanley Gu',
      maintainer_url='stanleygu@gmail.com',
      packages=find_packages(),
      # Ship the SBML model files and README alongside the package code.
      package_data={'': ['*.xml', 'README.md']},
      )
|
[
"stanleygu@gmail.com"
] |
stanleygu@gmail.com
|
0b21b4b1295f5b13869a1b1ac8f2ecc8a5e67b11
|
61a82db8fec18699385d1f765925bc0cabda9059
|
/py/lista_sklepow.py
|
b876b0c828d95039fd02e90156a03e8e2471a02d
|
[] |
no_license
|
Gordwick/transport_company_db_project
|
9a7afa045d01a69adc2d5f21a2758a1323a44691
|
1eb70eaf389f9277bc94201aff0121d9ff30bae1
|
refs/heads/master
| 2021-02-13T15:40:39.165517 | 2020-03-03T18:28:58 | 2020-03-03T18:28:58 | 244,710,443 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,048 |
py
|
#!/usr/bin/env python
import cgi
import cgitb
import codecs
import psycopg2
from config import config
cgitb.enable()
form = cgi.FieldStorage()
def lista_sklepow():
    """Print an HTML <select> element listing every shop (number, name, address).

    Python 2 CGI handler: queries the try1.sklep table over psycopg2 and writes
    the <option> markup directly to stdout. Connection parameters come from
    config(); errors are printed into the response.
    """
    conn = None
    try:
        # Connect to the database using parameters from the config module.
        params = config()
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        cur.execute('set search_path to try1;')
        # One row per shop: number, name and formatted address (via SQL helper).
        cur.execute('SELECT s.nr_sklep,s.nazwa,pobierz_adres_sklep(s.nr_sklep) from try1.sklep s;')
        db_version = cur.fetchall()
        print"<select id=\"dostawca_select\" class=\"select_bar\" name=\"wybrany_sklep\" >"
        for nr,nazwa,adres in db_version:
            # adres[6:-1] strips the composite-type wrapper around the address text.
            print "<option value=\"%s\">%s : %s</option>" % (nr,nazwa,adres[6:-1])
        print"</select>"
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Always release the connection, even after an error.
        if conn is not None:
            conn.close()
if __name__ == '__main__':
    # CGI entry point: emit the HTTP header, then the <select> markup.
    print "Content-type: text/html\n"
    lista_sklepow()
|
[
"sitkosebastian97@gmail.com"
] |
sitkosebastian97@gmail.com
|
994aaf530c445b31b300665fa6c47320e70099c0
|
032ce9681437fcbfe538a54e69a385b1252c10c2
|
/usuarios/usuario (1).py
|
e8cdc2fe374f69d3378a2f73267b8bd34553707e
|
[] |
no_license
|
pmalbani22/20-Proyecto-Python
|
c8945f2e11556b66263e2d6af40a419fab46a9dd
|
5e6dda6cb8d24dcc0df273243237d60f477b04f3
|
refs/heads/master
| 2023-06-16T20:14:11.802130 | 2021-07-19T02:09:05 | 2021-07-19T02:09:05 | 387,312,565 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,062 |
py
|
import datetime
import hashlib
# llamo al módulo conexion y le pongo un alias conexion
import usuarios.conexion as conexion
# Guardo en la variable connect el método conectar()
connect = conexion.conectar()
# Creo dos variables y guardo los valores que vienen del módulo conexion
database = connect[0]
cursor = connect[1]
class Usuario:
    """An application user persisted in the ``usuarios`` table.

    Uses the module-level ``cursor`` / ``database`` handles created from
    usuarios.conexion at import time.
    """

    def __init__(self, nombre, apellido, email, password):
        self.nombre = nombre
        self.apellido = apellido
        self.email = email
        self.password = password

    def registrar(self):
        """Insert this user into the database.

        Returns:
            list: ``[rows_affected, self]``; ``rows_affected`` is 0 on failure.
        """
        fecha = datetime.datetime.now()
        # Hash the password before storing it.
        # NOTE(review): unsalted SHA-256 is weak for password storage —
        # consider bcrypt/scrypt/argon2.
        cifrado = hashlib.sha256()
        # update() needs bytes, not text, hence the encode.
        cifrado.update(self.password.encode("utf8"))
        sql = "INSERT INTO usuarios VALUES (null, %s, %s, %s, %s, %s)"
        # hexdigest() stores the hash as a hexadecimal string.
        usuario = (self.nombre, self.apellido,
                   self.email, cifrado.hexdigest(), fecha)
        try:
            cursor.execute(sql, usuario)
            database.commit()
            result = [cursor.rowcount, self]
        # FIX: was a bare `except:`, which also trapped SystemExit and
        # KeyboardInterrupt. Keep the best-effort behaviour but only for
        # ordinary exceptions.
        except Exception:
            result = [0, self]
        return result

    def identificar(self):
        """Log the user in: look them up by email and hashed password.

        Returns:
            tuple or None: the matching row, or None if credentials don't match.
        """
        sql = "SELECT * FROM usuarios WHERE email = %s AND password = %s"
        # Hash the candidate password the same way registrar() does.
        cifrado = hashlib.sha256()
        cifrado.update(self.password.encode("utf8"))
        usuario = (self.email, cifrado.hexdigest())
        cursor.execute(sql, usuario)
        result = cursor.fetchone()
        return result
|
[
"69486559+pmalbani22@users.noreply.github.com"
] |
69486559+pmalbani22@users.noreply.github.com
|
babc7901205f8acb01cd72c9fe245666b9304558
|
c3b49af4a5bfda091f8c59c809003ae5a217eed2
|
/oldScripts/doMergingFromList.py
|
f5bc1e436c80c0b657bf7bf053622fac1b4cf6c8
|
[] |
no_license
|
cmsdaq/merger
|
a902e9fcaae1815948e02ba4c07ee0c1812e4732
|
76c3ee07a78ba5d8975a9d3863f08fb5fe3eb125
|
refs/heads/master
| 2020-04-06T03:48:13.033219 | 2018-11-29T11:01:56 | 2018-11-29T11:01:56 | 17,595,433 | 0 | 2 | null | 2017-01-26T13:00:18 | 2014-03-10T14:25:39 |
Python
|
UTF-8
|
Python
| false | false | 9,170 |
py
|
#!/usr/bin/env python
import os, time, sys, getopt
import shutil
import json
import glob
import multiprocessing
import datetime
import fileinput
import socket
# program to merge (cat) files given a list
"""
Do actual merging
"""
def mergeFiles(outputMergedFolder, outMergedFile, inputDataFolder, files, inputJsonFile):
    """Concatenate the listed .dat files into one merged output file (Python 2).

    Args:
        outputMergedFolder: "open" staging folder the merged file is written to;
            on success the result is moved one level up to its final location.
        outMergedFile: name of the merged output file.
        inputDataFolder: folder containing the input files.
        files: names (relative to inputDataFolder) of the files to concatenate.
        inputJsonFile: the *_TEMP.json bookkeeping file claimed by the caller;
            renamed to *_MERGED.json on success, or back to *.json on a missing
            input or when debug == 99 (test mode).
    """
    print "mergeFiles:", outMergedFile, inputDataFolder, files, inputJsonFile
    outMergedFileFullPath = os.path.join(outputMergedFolder, outMergedFile)
    initMergingTime = time.time()
    now = datetime.datetime.now()
    print now.strftime("%H:%M:%S"), ": Start merge of ", inputJsonFile
    # Remove any leftover from a previous partial merge.
    if os.path.exists(outMergedFileFullPath):
        os.remove(outMergedFileFullPath)
    # cat all files in one
    for nfile in range(0, len(files)):
        inputFile = os.path.join(inputDataFolder, files[nfile])
        if not os.path.exists(inputFile):
            # putting back the JSON file to the system so another worker can retry
            inputJsonRenameFile = inputJsonFile.replace("_TEMP.json",".json")
            shutil.move(inputJsonFile,inputJsonRenameFile)
            msg = "inputFile File Not Found: %s --> %s (json) %s (merged)" % (inputFile,inputJsonFile,outMergedFileFullPath)
            raise RuntimeError, msg
    filenames = [inputDataFolder + "/" + word_in_list for word_in_list in files]
    # Line-wise concatenation of all inputs into the merged output.
    with open(outMergedFileFullPath, 'w') as fout:
        for line in fileinput.input(filenames):
            fout.write(line)
    #for nfile in range(0, len(files)):
    #    inputFile = os.path.join(inputDataFolder, files[nfile])
    #    msg = "cat %s >> %s;" % (inputFile,outMergedFileFullPath)
    #    if(float(debug) > 0): print msg
    #    os.system(msg)
    #    try:
    #        os.remove(inputFile)
    #    except OSError:
    #        print "I tried to remove", inputFile, ", but somebody else did it before I got a chance to"
    # Last thing to do is to move the file to its final location "merged/runXXXXXX/open/../."
    outMergedFileFullPathStable = outputMergedFolder + "/../" + outMergedFile
    shutil.move(outMergedFileFullPath,outMergedFileFullPathStable)
    # remove already merged files, if wished (never in test mode debug == 99)
    if(doRemoveFiles == True and float(debug) != 99):
        for nfile in range(0, len(files)):
            inputFile = os.path.join(inputDataFolder, files[nfile])
            os.remove(inputFile)
    endMergingTime = time.time()
    inputJsonRenameFile = inputJsonFile.replace("_TEMP.json","_MERGED.json")
    # back to the initial state if this is just for testing
    if(float(debug) == 99): inputJsonRenameFile = inputJsonFile.replace("_TEMP.json",".json")
    shutil.move(inputJsonFile,inputJsonRenameFile)
    now = datetime.datetime.now()
    if(debug >= 0): print now.strftime("%H:%M:%S"), ": Time for merging(%s): %f" % (inputJsonFile,endMergingTime-initMergingTime)
"""
Do loops
"""
def doTheMerging():
    """Daemon loop (Python 2): watch the unmerged folders, claim the JSON
    bookkeeping files addressed to this host and spawn one merge process per
    claimed file.

    Relies on the module-level globals set by the option-parsing section:
    paths_to_watch, outputMerge, debug, doRemoveFiles.
    """
    # get hostname, important: only JSON files whose name contains this
    # hostname are claimed by this worker
    theHost = socket.gethostname()
    while 1:
        #print paths_to_watch
        inputDataFolders = glob.glob(paths_to_watch)
        if(float(debug) > 0): print "***************NEW LOOP***************"
        if(float(debug) > 0): print inputDataFolders
        for nf in range(0, len(inputDataFolders)):
            inputDataFolder = inputDataFolders[nf]
            # making output folders: merged/<run folder>/open
            inputDataFolderString = inputDataFolder.split('/')
            outputMergedFolder = os.path.join(outputMerge, inputDataFolderString[len(inputDataFolderString)-1], "open")
            if not os.path.exists(outputMergedFolder):
                try:
                    os.makedirs(outputMergedFolder)
                except OSError, e:
                    # another worker may create the directory concurrently
                    print "Looks like the directory " + outputMergedFolder + " has just been created by someone else..."
            # reading the list of files in the given folder
            before = dict ([(f, None) for f in os.listdir (inputDataFolder)])
            if(float(debug) > 0): time.sleep (1)
            if(float(debug) > 0): print "Begin folder iteration"
            after = dict ([(f, None) for f in os.listdir (inputDataFolder)])
            afterString = [f for f in after]
            added = [f for f in after if not f in before]
            if(float(debug) > 0): print afterString
            removed = [f for f in before if not f in after]
            if added:
                if(float(debug) > 0): print "Added: ", ", ".join (added)
            if removed:
                if(float(debug) > 0): print "Removed: ", ", ".join (removed)
            # loop over JSON files, which will give the list of files to be merged
            processs = []
            for i in range(0, len(afterString)):
                # skip everything that is not an unclaimed JSON file for this host
                if ".dat" in afterString[i]: continue
                if ".ini" in afterString[i]: continue
                if ".eof" in afterString[i]: continue
                if "STS" in afterString[i]: continue
                if "TEMP" in afterString[i]: continue
                if "MERGED" in afterString[i]: continue
                if ".json" not in afterString[i]: continue
                if theHost not in afterString[i]: continue
                if(float(debug) > 0): print "FILE:", afterString[i]
                inputJsonFile = os.path.join(inputDataFolder, afterString[i])
                if(float(debug) > 0): print "inputJsonFile:",inputJsonFile
                settings_textI = open(inputJsonFile, "r").read()
                settings = json.loads(settings_textI)
                # moving the file to avoid issues: renaming to *_TEMP.json claims it
                inputJsonRenameFile = inputJsonFile.replace(".json","_TEMP.json")
                shutil.move(inputJsonFile,inputJsonRenameFile)
                # making a dictionary with dat files only
                # NOTE(review): this inner loop shadows the outer loop variable
                # `i`; afterString[i] used after it therefore refers to the LAST
                # directory entry, not the JSON file being processed — confirm
                # this is the intended behaviour.
                filesDict = dict()
                for i in range(0, len(afterString)):
                    if not "dat" in afterString[i]: continue
                    fileNameString = afterString[i].split('_')
                    key = (fileNameString[0],fileNameString[1],fileNameString[2])
                    tempFileName = afterString[i]
                    if key in filesDict.keys():
                        filesDict[key].append(tempFileName)
                    else:
                        filesDict.update({key:[tempFileName]})
                if(float(debug) > 0): print "filesDict: ", filesDict
                # this is the actual list of files, something critical
                files = map(str,settings['data'][len(settings['data'])-1])
                # making comparison between the files from the JSON and from the dictionary made 'by hand'
                fileNameString = afterString[i].split('_')
                key = (fileNameString[0],fileNameString[1],fileNameString[2])
                if key in filesDict.keys():
                    if(float(debug) > 0): print "comparison1: ", filesDict[key]
                    filesDictJSON = dict()
                    for nfile in range(0, len(files)):
                        if key in filesDictJSON.keys():
                            filesDictJSON[key].append(files[nfile])
                        else:
                            filesDictJSON.update({key:[files[nfile]]})
                    if(float(debug) > 0): print "comparison2: ", filesDictJSON[key]
                else:
                    print "Oh boy, the key " + key + " does not exist!!!"
                    # we don't do anything for now, just a warning message
                # NOTE(review): list.sort() returns None, so this comparison is
                # always None != None (False) — the mismatch branch never fires.
                if filesDict[key].sort() != filesDictJSON[key].sort():
                    print "Both JSON files are different for: " + fileNameString
                    print filesDict[key]
                    print filesDictJSON[key]
                print "***********"
                # Write the *_STS.json summary listing the files being merged.
                theSTSJSONfileName = inputJsonFile.replace(".json","_STS.json")
                if os.path.exists(theSTSJSONfileName):
                    os.remove(theSTSJSONfileName)
                theSTSJSONfile = open(theSTSJSONfileName, 'w')
                theSTSJSONfile.write(json.dumps({'filelist': str(files)}, sort_keys=True, indent=4, separators=(',', ': ')))
                theSTSJSONfile.close()
                # merged file name: derived from the claimed JSON file name
                outMergedFileOldFolder = inputJsonFile.replace("_TEMP.json",".dat").split('/')
                outMergedFile = outMergedFileOldFolder[len(outMergedFileOldFolder)-1]
                if(float(debug) > 0): print "outMergedFile:", outMergedFile
                # Spawn the actual merge in a separate process.
                process = multiprocessing.Process(target = mergeFiles, args = [outputMergedFolder, outMergedFile, inputDataFolder, files, inputJsonRenameFile])
                process.start()
            #for process in processs: # then kill them all off
            #    process.terminate()
            before = after
"""
Main
"""
valid = ['paths_to_watch=', 'typeMerging=', 'outputMerge=', 'debug=', 'help']
usage = "Usage: listdir.py --paths_to_watch=<paths_to_watch>\n"
usage += " --outputMerge=<merged>\n"
usage += " --typeMerging=<macro-no_in_used_for_now>\n"
usage += " --debug=<0>\n"
try:
opts, args = getopt.getopt(sys.argv[1:], "", valid)
except getopt.GetoptError, ex:
print usage
print str(ex)
sys.exit(1)
paths_to_watch = "unmerged"
outputMerge = "merged"
typeMerging = "macro"
debug = 0
doRemoveFiles = False
for opt, arg in opts:
if opt == "--help":
print usage
sys.exit(1)
if opt == "--paths_to_watch":
paths_to_watch = arg
if opt == "--outputMerge":
outputMerge = arg
if opt == "--typeMerging":
typeMerging = arg
if opt == "--debug":
debug = arg
if opt == "--doRemoveFiles":
doRemoveFiles = arg
if not os.path.exists(outputMerge):
try:
os.makedirs(outputMerge)
except OSError, e:
print "Looks like the directory " + outputMerge + " has just been created by someone else..."
doTheMerging()
|
[
"ceballos@cern.ch"
] |
ceballos@cern.ch
|
670a85cfaf02e0415b66738b9d92ee70cff91bb2
|
6f1deafab85d360c822f079d04611d3120f9504e
|
/single_cell/config/generate_pipeline_config.py
|
3a6e99a4f265e078b34a116046ff61c5da478dc4
|
[] |
no_license
|
arfathpasha/single_cell_pipeline
|
e9927ee6cd5d29d62e8d3b3129437da52224d37e
|
42574e52ffaf75f56bd8b22b54033b31c9cf6ff3
|
refs/heads/master
| 2020-04-02T17:52:59.149922 | 2018-11-21T19:27:21 | 2018-11-21T19:27:21 | 154,676,569 | 0 | 0 | null | 2018-10-25T13:32:06 | 2018-10-25T13:32:06 | null |
UTF-8
|
Python
| false | false | 1,170 |
py
|
import os
import warnings
import pipeline_config
from single_cell.utils import helpers
def generate_pipeline_config_in_temp(args):
    """Generate a pipeline config YAML and record its path in ``args``.

    Skips generation for sub-commands that manage configs themselves and when
    a config file is already supplied. Returns the (possibly updated) args.
    """
    # These sub-commands handle configuration on their own.
    if args['which'] in ['clean_sentinels', 'generate_config']:
        return args
    # An explicitly supplied config wins.
    if args.get("config_file", None):
        return args

    yaml_name = "config.yaml"
    pipeline_dir = args.get("pipelinedir", None)
    tmp_dir = args.get("tmpdir", None)

    # Prefer the pypeliner dir, then the tmp dir, else fall back to the cwd.
    if pipeline_dir:
        yaml_path = os.path.join(pipeline_dir, yaml_name)
    elif tmp_dir:
        yaml_path = os.path.join(tmp_dir, yaml_name)
    else:
        warnings.warn("no tmpdir specified, generating configs in working dir")
        yaml_path = os.path.join(os.getcwd(), yaml_name)

    # Never overwrite an existing config: pick the next free suffix.
    yaml_path = helpers.get_incrementing_filename(yaml_path)

    params_override = args["config_override"]
    helpers.makedirs(yaml_path, isfile=True)

    config_params = pipeline_config.get_config_params(override=params_override)
    config = pipeline_config.get_singlecell_pipeline_config(config_params)
    pipeline_config.write_config(config, yaml_path)

    args["config_file"] = yaml_path
    return args
|
[
"dgrewal@momac30.bccrc.ca"
] |
dgrewal@momac30.bccrc.ca
|
c7ce371055e0e28656f0d0b0eeaae05a92a47485
|
f5e43fa92d3bb3b242cff3e4adf807bc0e9fa747
|
/venv/Lib/site-packages/ffmpy3.py
|
c9732252ab03f7bcbcaa761408215246ad550e09
|
[] |
no_license
|
Neytrinoo/ordis
|
6567f160307f4616b0543e7bd00f20a6587bf125
|
64d8951de032530614fe446cc23c889bf56e8c6f
|
refs/heads/master
| 2020-04-23T16:17:56.842008 | 2019-05-07T15:18:09 | 2019-05-07T15:18:09 | 171,293,959 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,597 |
py
|
import errno
import shlex
import asyncio
import subprocess
__version__ = '0.2.3'
__license__ = 'MIT'
"""
ffmpy3
ffmpy3 is a fork of the `ffmpy <https://github.com/Ch00k/ffmpy>`_ project and subject to the terms of the MIT license.
"""
class FFmpeg(object):
    """Wrapper for various `FFmpeg <https://www.ffmpeg.org/>`_ related applications (ffmpeg,
    ffprobe).

    Compiles an FFmpeg command line from the passed arguments (executable path,
    global options, inputs and outputs). ``inputs`` and ``outputs`` are
    dictionaries mapping each input/output argument to its options. A set of
    options must be either a single space-separated string or a list of strings
    without spaces (quoted parts of complex command lines stay one string).

    Parameters
    -----------
    executable : str
        path to ffmpeg executable; by default the ``ffmpeg`` command is searched
        for in ``PATH``, but can be overridden with an absolute path
    global_options : iterable
        global options passed to the executable (e.g. ``-y``, ``-v``); either a
        list/tuple/set of strings or one space-separated string
    inputs : dict
        one or more input arguments as keys with their corresponding options as values
    outputs : dict
        one or more output arguments as keys with their corresponding options as values
    """

    def __init__(self, executable='ffmpeg', global_options=None, inputs=None, outputs=None):
        self.executable = executable
        self._cmd = [executable]

        # Normalize global options to a flat list of shlex-split tokens.
        global_options = global_options or []
        if _is_sequence(global_options):
            normalized_global_options = []
            for opt in global_options:
                normalized_global_options += shlex.split(opt)
        else:
            normalized_global_options = shlex.split(global_options)

        self._cmd += normalized_global_options
        self._cmd += _merge_args_opts(inputs, add_input_option=True)
        self._cmd += _merge_args_opts(outputs)

        # Printable form of the full command line.
        self.cmd = subprocess.list2cmdline(self._cmd)
        self.process = None

    def __repr__(self):
        return '<{0!r} {1!r}>'.format(self.__class__.__name__, self.cmd)

    def run(self, input_data=None, stdout=None, stderr=None):
        """Execute the FFmpeg command line synchronously.

        ``input_data`` feeds FFmpeg's stdin when the ``pipe`` protocol is used
        for input. ``stdout``/``stderr`` control redirection (default: none);
        pass ``subprocess.PIPE`` to capture.

        Raises
        -------
        FFExecutableNotFoundError
            The executable path passed was not valid.
        FFRuntimeError
            The process exited with a non-zero code.

        Returns
        --------
        tuple
            A 2-tuple with the ``stdout`` and ``stderr`` of the process
            (both ``None`` when not redirected to a pipe).
        """
        try:
            self.process = subprocess.Popen(
                self._cmd,
                stdin=subprocess.PIPE,
                stdout=stdout,
                stderr=stderr
            )
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise FFExecutableNotFoundError("Executable '{0}' not found".format(self.executable))
            else:
                raise

        out = self.process.communicate(input=input_data)
        if self.process.returncode != 0:
            raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1])
        return out

    async def run_async(self, input_data=None, stdout=None, stderr=None):
        """Asynchronously execute the FFmpeg command line.

        FIX: rewritten from ``@asyncio.coroutine``/``yield from`` (removed in
        Python 3.11) to a native ``async def``; callers that ``await`` this
        coroutine are unaffected.

        The parent process is responsible for reading stdout/stderr (e.g. by
        awaiting ``process.communicate()``) to avoid deadlocks.

        Raises
        -------
        FFExecutableNotFoundError
            The executable path passed was not valid.

        Returns
        --------
        :class:`asyncio.subprocess.Process`
            The child process created.
        """
        try:
            # Only open a stdin pipe when there is data to feed.
            stdin = asyncio.subprocess.PIPE if input_data else None
            self.process = await asyncio.create_subprocess_exec(
                *self._cmd,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr
            )
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise FFExecutableNotFoundError("Executable '{0}' not found".format(self.executable))
            else:
                raise

        if input_data:
            self.process.stdin.write(input_data)
        return self.process

    async def wait(self):
        """Asynchronously wait for the process to complete execution.

        FIX: converted to a native coroutine (see ``run_async``).

        Raises
        -------
        FFRuntimeError
            The process exited with a non-zero code.

        Returns
        --------
        int or None
            0 if the process finished successfully, or None if it was never started.
        """
        if not self.process:
            return None

        exitcode = await self.process.wait()
        if exitcode != 0:
            raise FFRuntimeError(self.cmd, exitcode)
        return exitcode
class FFprobe(FFmpeg):
    """Wrapper for `ffprobe <https://www.ffmpeg.org/ffprobe.html>`_.

    Builds an ffprobe command line from the executable path, global options and
    inputs. The executable is resolved via ``PATH`` unless an absolute path is
    given.

    Parameters
    -----------
    executable : str
        absolute path to ffprobe executable
    global_options : iterable
        global options passed to ffprobe executable; either a list/tuple of
        strings or one space-separated string
    inputs : dict
        one or more inputs as keys with their corresponding options as values
    """

    def __init__(self, executable='ffprobe', global_options='', inputs=None):
        # ffprobe takes no outputs; delegate everything else to FFmpeg.
        super().__init__(
            executable=executable,
            global_options=global_options,
            inputs=inputs,
        )
class FFExecutableNotFoundError(Exception):
    """Raised when the FFmpeg/FFprobe executable was not found."""
class FFRuntimeError(Exception):
    """Raised when FFmpeg/FFprobe command line execution returns a non-zero exit code.

    Attributes
    -----------
    cmd : str
        The command used to launch the executable, with all command line options.
    exit_code : int
        The resulting exit code from the executable.
    stdout : bytes
        The contents of stdout (only if executed synchronously).
    stderr : bytes
        The contents of stderr (only if executed synchronously).
    """

    def __init__(self, cmd, exit_code, stdout=b'', stderr=b''):
        self.cmd = cmd
        self.exit_code = exit_code
        self.stdout = stdout
        self.stderr = stderr

        details = (
            f"`{cmd}` exited with status {exit_code}\n\n"
            f"STDOUT:\n{stdout.decode()}\n\n"
            f"STDERR:\n{stderr.decode()}"
        )
        super().__init__(details)
def _is_sequence(obj):
"""Check if the object is a sequence (list, tuple etc.).
Parameters
-----------
object
an object to be checked
Returns
--------
bool
True if the object is iterable but is not a string, False otherwise
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def _merge_args_opts(args_opts_dict, **kwargs):
    """Flatten an {argument: options} mapping into one command-line list.

    Each options value may be a string (split with shlex) or an already-split
    sequence; the options always precede their argument. A falsy argument
    contributes only its options.

    Parameters
    -----------
    args_opts_dict : dict
        a dictionary of arguments and options
    kwargs : dict
        *add_input_option* - if present, prepends ``-i`` to each input argument

    Returns
    --------
    list
        a merged list of strings with arguments and their corresponding options
    """
    merged = []
    if not args_opts_dict:
        return merged

    for arg, opt in args_opts_dict.items():
        options = opt if _is_sequence(opt) else shlex.split(opt or '')
        merged.extend(options)

        if not arg:
            continue

        if 'add_input_option' in kwargs:
            merged.append('-i')
        merged.append(arg)

    return merged
|
[
"neytrino070902@yandex.ru"
] |
neytrino070902@yandex.ru
|
2a69eb33a3e58deaffbcbc4c7aeaea38e732e355
|
86993cd1168115e7e9b2a7f60253f7c02a5330a7
|
/project_euler/p025.py
|
9193f228d06807cdc845e935d6933c54ed66b6f8
|
[
"MIT"
] |
permissive
|
yhlam/project-euler
|
4ec3ebf4719605e390d5ff7f68b0e7a5e08efcc1
|
503c90c039380eee9e6935676d0cdc3588804018
|
refs/heads/master
| 2020-04-08T08:03:24.240902 | 2014-04-05T15:09:02 | 2014-04-05T15:09:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 731 |
py
|
"""1000-digit Fibonacci number
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the first term in the Fibonacci sequence to contain 1000 digits?
Answer: 4782
"""
def solve():
    """Return the index of the first Fibonacci term with 1000 digits."""
    LIMIT = 1000
    # Smallest integer having LIMIT digits.
    threshold = 10 ** (LIMIT - 1)
    prev, curr = 1, 1  # F1, F2
    index = 2
    while curr < threshold:
        prev, curr = curr, prev + curr
        index += 1
    return index


if __name__ == '__main__':
    print(solve())
|
[
"lamyuenhei@gmail.com"
] |
lamyuenhei@gmail.com
|
0eee35d85860baeff05901fcea1b5fff96bad305
|
f7ddf11d729824188c92b1a90d8d376f435eadb9
|
/Menu.py
|
a0f08702d73f5479379146d839d9f2c2a6fbddef
|
[] |
no_license
|
ChiuHsiung/TkinterTest
|
d7de99575851b183e120f1e0258cd0f9c3013636
|
28622ae555db8f437ad43650d518fc15f8302d56
|
refs/heads/master
| 2021-05-31T05:39:50.251045 | 2016-02-23T11:30:31 | 2016-02-23T11:30:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,066 |
py
|
# -*- coding : UTF-8 -*-
__author__ = 'admin'
from tkinter import *
def new_file():
    # Menu callback stub: would create a new file.
    print("Open new file")


def open_file():
    # Menu callback stub: would open an existing file.
    print("Open existing file")


def stub_action():
    # Generic placeholder callback shared by several menu entries.
    print("Menu select")
def makeCommandMenu():
    # Build the "Button Commands" menubutton on the module-level menu bar
    # `mBar` (defined elsewhere in this file) and return it.
    CmdBtn = Menubutton(mBar, text='Button Commands', underline=0)
    CmdBtn.pack(side=LEFT, padx="2m")
    CmdBtn.menu = Menu(CmdBtn)

    # First entry (index 0) is immediately disabled to demo entryconfig.
    CmdBtn.menu.add_command(label="Undo")
    CmdBtn.menu.entryconfig(0, state=DISABLED)

    CmdBtn.menu.add_command(label='New...', underline=0, command=new_file)
    CmdBtn.menu.add_command(label='Open...', underline=0, command=open_file)
    CmdBtn.menu.add_command(label='Wild Font', underline=0,
                            font=('Tempus Sans ITC', 14), command=stub_action)
    # Bitmap entry; the "@" prefix loads the bitmap from a file path.
    # NOTE(review): assumes bitmaps/RotateLeft exists relative to the working
    # directory at runtime — confirm.
    CmdBtn.menu.add_command(bitmap="@bitmaps/RotateLeft")
    CmdBtn.menu.add('separator')
    CmdBtn.menu.add_command(label='Quit', underline=0,
                            background='white', activebackground='green',
                            command=CmdBtn.quit)

    # Attach the menu to the menubutton.
    CmdBtn['menu'] = CmdBtn.menu
    return CmdBtn
def makeCascadeMenu():
CasBtn = Menubutton(mBar, text='Cascading Menus', underline=0)
CasBtn.pack(side=LEFT, padx="2m")
CasBtn.menu = Menu(CasBtn)
CasBtn.menu.randomname1 = Menu(CasBtn.menu)
CasBtn.menu.randomname1.randomname2 = Menu(CasBtn.menu.randomname1)
CasBtn.menu.randomname1.randomname2.add_command(label='Stockbroker')
CasBtn.menu.randomname1.randomname2.add_command(label='Quantity Surveyor')
CasBtn.menu.randomname1.randomname2.add_command(label='Church Warden')
CasBtn.menu.randomname1.randomname2.add_command(label='BRM')
CasBtn.menu.randomname1.add_command(label='Wooden Leg')
CasBtn.menu.randomname1.add_command(label='Hire Purchase')
CasBtn.menu.randomname1.add_command(label='Dead Crab')
CasBtn.menu.randomname1.add_command(label='Tree Surgeon')
CasBtn.menu.randomname1.add_command(label='Filing Cabinet')
CasBtn.menu.randomname1.add_command(label='Goldfish')
CasBtn.menu.randomname1.add_cascade(label='Is it a...',
menu=CasBtn.menu.randomname1.randomname2)
CasBtn.menu.add_cascade(label='Scipts', menu=CasBtn.menu.randomname1)
CasBtn['menu'] = CasBtn.menu
return CasBtn
def makeCheckbuttonMenu():
ChkBtn = Menubutton(mBar, text='Checkbutton Menus', underline=0)
ChkBtn.pack(side=LEFT, padx='2m')
ChkBtn.menu = Menu(ChkBtn)
ChkBtn.menu.add_checkbutton(label='Doug')
ChkBtn.menu.add_checkbutton(label='Dinsdale')
ChkBtn.menu.add_checkbutton(label="Stig O'Tracy")
ChkBtn.menu.add_checkbutton(label='Vince')
ChkBtn.menu.add_checkbutton(label='Gloria Pules')
ChkBtn.menu.invoke(ChkBtn.menu.index('Dinsdale'))
ChkBtn['menu'] = ChkBtn.menu
return ChkBtn
def makeRadiobuttonMenu():
RadBtn = Menubutton(mBar, text='Radiobutton Menus', underline=0)
RadBtn.pack(side=LEFT, padx='2m')
RadBtn.menu = Menu(RadBtn)
RadBtn.menu.add_radiobutton(label='metonymy')
RadBtn.menu.add_radiobutton(label='zeugmatists')
RadBtn.menu.add_radiobutton(label='synechdotists')
RadBtn.menu.add_radiobutton(label='axiomists')
RadBtn.menu.add_radiobutton(label='anagogists')
RadBtn.menu.add_radiobutton(label='catachresis')
RadBtn.menu.add_radiobutton(label='periphrastic')
RadBtn.menu.add_radiobutton(label='litotes')
RadBtn.menu.add_radiobutton(label='circumlocutors')
RadBtn['menu'] = RadBtn.menu
return RadBtn
def makeDisabledMenu():
Dummy_button = Menubutton(mBar, text='Disabled Menu', underline=0)
Dummy_button.pack(side=LEFT, padx='2m')
Dummy_button["state"] = DISABLED
return Dummy_button
root = Tk()
mBar = Frame(root, relief=RAISED, borderwidth=2)
mBar.pack(fill=X)
CmdBtn = makeCommandMenu()
CasBtn = makeCascadeMenu()
ChkBtn = makeCheckbuttonMenu()
RadBtn = makeRadiobuttonMenu()
NoMenu = makeDisabledMenu()
# mBar.tk_menuBar(CmdBtn, CasBtn, ChkBtn, RadBtn, NoMenu)
root.title('Menus')
root.mainloop()
|
[
"zhuangqiuxiong@icloud.com"
] |
zhuangqiuxiong@icloud.com
|
e6e3bcd4498af24df0ecab7eb76eec6eb245bcc2
|
1cf3ddf43bd2e13a3b6eadf4beea702101bc3f67
|
/tela-azul.py
|
30967dab30f9dbffd99720d47623cb2640d463fa
|
[] |
no_license
|
Matthew2812/Eletro-Hack
|
d385c0686cb6bf3f3a2e0d1f603763dc3394b418
|
e81e07a045add665798368402e59d7893124b8f7
|
refs/heads/master
| 2020-04-05T13:30:27.233374 | 2018-11-20T20:46:32 | 2018-11-20T20:46:32 | 156,905,627 | 0 | 0 | null | 2018-11-09T22:52:47 | 2018-11-09T18:59:45 |
Python
|
UTF-8
|
Python
| false | false | 346 |
py
|
import pygame,time,os, sys
pygame.init()
tela_azul = pygame.image.load(os.path.join('Imagens','tela-azul.jpg'))
tela_azul = pygame.transform.smoothscale(tela_azul,(1366,768))
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
screen.blit(tela_azul,(0,0))
pygame.display.flip()
time.sleep(5)
pygame.quit()
sys.exit()
|
[
"noreply@github.com"
] |
Matthew2812.noreply@github.com
|
4c0de3be67bd8370f7c6e7b1379788a5fcabcd0f
|
1195255c33d6f590807bbe52490b7686f9731a73
|
/menu.py
|
113dcf32aec0d30f6403d0634a575d7412ebd717
|
[] |
no_license
|
antonioalme20/conversor_bits
|
6b2dc70971a3cd822f5ff0ae9591aea5b10b4b18
|
8769dba98e7ccb5874dc1fe3cd9f2addb9bbf493
|
refs/heads/master
| 2020-09-02T09:26:42.473967 | 2019-11-25T21:26:28 | 2019-11-25T21:26:28 | 219,190,114 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,413 |
py
|
#****EN ESTE ARCHIVO RECOJO LAS FUNCIONES PARA EJECUTAR LOS MENUS DE DESPLAZAMIENTO*****
#Menu Principal
def menu1():
print("\n1)Bit->...\n2)Byte->...\n3)Kilobyte->...\n4)Megabyte->...\n5)Gigabyte->...\n6)Terabyte->...\n7)Petabyte->...")
#Menu de Bits
def menu_bits():
print("\n1)Bit->Byte\n2)Bit->Kilobyte\n3)Bit->Megabyte\n4)Bit->Gigabyte\n5)Bit->Terabyte\n6)Bit->Petabyte")
#Menu de Bytes
def menu_bytes():
print("\n1)Byte->Bit\n2)Byte->Kilobyte\n3)Byte->Megabyte\n4)Byte->Gigabyte\n5)Byte->Terabyte\n6)Byte->Petabyte")
#Menu de Kilobytes
def menu_kilobytes():
print("\n1)Kilobyte->Bit\n2)Kilobyte->Byte\n3)Kilobyte->Megabyte\n4)Kilobyte->Gigabyte\n5)Kilobyte->Terabyte\n6)Kilobyte->Petabyte")
#Menu de Megabytes
def menu_megabytes():
print("\n1)Megabyte->Bit\n2)Megabyte->Byte\n3)Megabyte->Kilobyte\n4)Megabyte->Gigabyte\n5)Megabyte->Terabyte\n6)Megabyte->Petabyte")
#Menu de Gigabytes
def menu_gigabytes():
print("\n1)Gigabyte->Bit\n2)Gigabyte->Byte\n3)Gigabyte->Kilobyte\n4)Gigabyte->Megabyte\n5)Gigabyte->Terabyte\n6)Gigabyte->Petabyte")
#Menu de Terabytes
def menu_terabytes():
print("\n1)Terabyte->Bit\n2)Terabyte->Byte\n3)Terabyte->Kilobyte\n4)Terabyte->Megabyte\n5)Terabyte->Gigabyte\n6)Terabyte->Petabyte")
#Menu de Petabyte
def menu_petabytes():
print("\n1)Petabyte->Bit\n2)Petabyte->Byte\n3)Petabyte->Kilobyte\n4)Petabyte->Megabyte\n5)Petabyte->Gigabyte\n6)Petabyte->Terabyte")
|
[
"noreply@github.com"
] |
antonioalme20.noreply@github.com
|
8860ffa3c141389a4e72108c4df8d5b3dc15d333
|
378f84d623d0c0ffb40fd0ed6d6d6a9aea112671
|
/microblogSC/settings_example.py
|
2c771edf009de1deb0954e07a1b7d869807fb932
|
[] |
no_license
|
camilleganzin/microblog
|
db02d0614b6ff4b66c456e71224eabaafb030cd5
|
c5d06b1fce5e6a5e6a34ba5af918ddddcd39f5a7
|
refs/heads/master
| 2021-01-22T22:28:23.733855 | 2017-11-04T16:13:58 | 2017-11-04T16:13:58 | 92,778,345 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,380 |
py
|
"""
Django settings for microblogSC project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from tornado.web import StaticFileHandler
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '***'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'microblog.apps.MicroblogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'tornado_websockets',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'microblogSC.urls'
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '****'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '***'
SOCIAL_AUTH_USER_MODEL = 'auth.User'
SOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email', # <- this line not included by default
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
LOGIN_URL = '/login/google-oauth2/'
LOGIN_REDIRECT_URL = '/microblog/'
LOGIN_ERROR_URL = '/login-error/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'microblogSC.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"camilleganzin@gmail.com"
] |
camilleganzin@gmail.com
|
0e16d44ab43d0f6d037b3d5da22cc580bb562e28
|
2cbd1a7fc113b51e463f15556788463b40351db1
|
/day16.py
|
93c70ef76989a16ad2c9cd4d029ad7c4af8f1988
|
[] |
no_license
|
Gyaha/AOC2015
|
8ca286b65860c10b76eace994988ecfdae29c939
|
5ccd3a2a67254cc7ad30904b0c4d0eb7292c1462
|
refs/heads/main
| 2023-07-04T11:59:07.205938 | 2021-07-27T20:15:05 | 2021-07-27T20:15:05 | 384,755,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,818 |
py
|
def read_rules(s: str) -> list:
rules = []
for l in s.splitlines():
ll = l.split()
rules.append([int(ll[1][:-1]), {
ll[2][:-1]:int(ll[3][:-1]),
ll[4][:-1]:int(ll[5][:-1]),
ll[6][:-1]:int(ll[7])}])
return rules
keys = {"children": 3,
"cats": 7,
"samoyeds": 2,
"pomeranians": 3,
"akitas": 0,
"vizslas": 0,
"goldfish": 5,
"trees": 3,
"cars": 2,
"perfumes": 1}
keys2 = {"children": ["=", 3],
"cats": [">", 7],
"samoyeds": ["=", 2],
"pomeranians": ["<", 3],
"akitas": ["=", 0],
"vizslas": ["=", 0],
"goldfish": ["<", 5],
"trees": [">", 3],
"cars": ["=", 2],
"perfumes": ["=", 1]}
def find_the_right_sue(s: str, keys: dict) -> int:
rules = read_rules(s)
for sue in rules:
if check_sue(sue, keys):
return sue[0]
def find_the_real_sue(s: str, keys: dict) -> int:
rules = read_rules(s)
for sue in rules:
if real_check_sue(sue, keys):
return sue[0]
def real_check_sue(sue: list, keys) -> bool:
for ru in sue[1].keys():
b = sue[1][ru]
a = keys[ru]
if a[0] == "=":
if not a[1] == b:
return False
elif a[0] == "<":
if not a[1] > b:
return False
else:
if not a[1] < b:
return False
return True
def check_sue(sue: list, keys: dict) -> bool:
for ru in sue[1].keys():
a = keys[ru]
b = sue[1][ru]
if not a == b:
return False
return True
with open("input16") as file:
data = file.read()
print(find_the_right_sue(data, keys))
print(find_the_real_sue(data, keys2))
|
[
"gyaha@users.noreply.github.com"
] |
gyaha@users.noreply.github.com
|
07f43fea0722634c5025b721083c1c83f189d56b
|
48c1b26b00f01c745ff2b979da019e21d52e3710
|
/Week 4/solution.py
|
0c4aa7b5ad45fe17400f08679cba4b27121e9fdb
|
[
"MIT"
] |
permissive
|
popey0/2020-Tutorials
|
1dd413e2a6473dd76c451f6f85b2402b6c250d13
|
e97f248fb5f30304fd36ca5b29445c8fa7b63b15
|
refs/heads/master
| 2022-12-31T06:30:41.401423 | 2020-10-21T00:52:17 | 2020-10-21T00:52:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,143 |
py
|
# -*- coding: utf-8 -*-
"""
Week 4 ICT Session
"""
import numpy as np
from scipy import fft # Import the module, NOT the function
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MultipleLocator
def mapping(x, xp, fp):
"""One dimensional linear mapping"""
xmin, xmax = xp
fmin, fmax = fp
slope = (fmax - fmin) / (xmax - xmin)
return (x - xmin) * slope + fmin
def approx(func, fit=(0, np.pi), num=50, plot=[5, 21], parity='odd'):
"""Approximate a function with a sum of sines.
The given function is approximated using a discrete sine transform over the
`fit` domain. The result is then shown over a domain of [-2pi, +2pi], using
the `plot` number of terms.
Parameters
----------
func : callable
A single parameter function to be fit
fit : tuple, optional
The domain to approximate the function over, by default (0, np.pi)
num : int, optional
The number of points to approximate over, by default 50
plot : list, optional
The number of terms to plot, by default [5, 21]
"""
domain = np.linspace(*fit, num=num)
display = np.linspace(-2*np.pi, 2*np.pi, num=500)
fourier = fft.dct if parity == 'even' else fft.dst
trig = np.cos if parity == 'even' else np.sin
coeffs = fourier(func(domain))
fig, ax = plt.subplots()
ax.plot(display, func(display), 'k--')
summation = 0.0
for n, a in enumerate(coeffs, 0 if parity == 'even' else 1):
if n == 0:
a /= 2
summation += trig(n*display) * a / num
if n in plot:
ax.plot(display, summation, label=f'N={n} terms')
ax.legend()
ax.xaxis.set_major_formatter(FuncFormatter(
lambda val, _: f'{val/np.pi:.0g}$\pi$' if val else '0' # noqa: W605
))
ax.xaxis.set_minor_locator(MultipleLocator(base=np.pi/4))
ax.xaxis.set_major_locator(MultipleLocator(base=np.pi))
ax.set_xlim([-2*np.pi, 2*np.pi])
plt.show()
if __name__ == '__main__':
approx(lambda x: np.ones_like(x))
approx(lambda x: x**2, parity='even')
approx(lambda x: x, parity='odd')
|
[
"alexandermuirhead@hotmail.com"
] |
alexandermuirhead@hotmail.com
|
b6f4dc379cf9bd32506bd7ba21385484479be112
|
5e710f6379fe656cf366a5bfd069adc46e54801c
|
/Novice/07-03/kelompok/olshop/migrations/0007_remove_penjualan1m_saldo_awal.py
|
902a033c6efdf2422e7d4ded3fbfed43ccebc17f
|
[] |
no_license
|
atko45/Praxis-Academy
|
c45e067a3a54648106f969e1433e3c4f60e22a10
|
05c2069b698d0a7d5e0e0e5c6d3e97ed74d2eef7
|
refs/heads/master
| 2023-01-03T14:11:18.076966 | 2020-11-04T04:03:52 | 2020-11-04T04:03:52 | 292,163,250 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
# Generated by Django 2.2 on 2020-09-30 03:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('olshop', '0006_auto_20200929_1700'),
]
operations = [
migrations.RemoveField(
model_name='penjualan1m',
name='saldo_awal',
),
]
|
[
"okta.brahvila45@gmail.com"
] |
okta.brahvila45@gmail.com
|
d797f5f38448e40d8ac1b2f35f26c2494a906e41
|
44e6cac3ee0e23b69541ebca85680a543276b47c
|
/absence/models.py
|
dcbd977b82c18f10d1d1c27aa47394dfb1e95eb3
|
[] |
no_license
|
it21546/attendancesystemdos
|
5b8b1a50dd18ae2226e6fb87a1f3c7091347acd0
|
8c7fee44c7c90a8feff2057dac53ecace862b63a
|
refs/heads/master
| 2023-08-15T03:55:34.641898 | 2021-10-14T10:04:08 | 2021-10-14T10:04:08 | 406,307,144 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,367 |
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
POSTUND_CHOICES = [
("Προπτυχιακό", "Προπτυχιακό"),
("Μεταπτυχιακό", "Μεταπτυχιακό"),
]
DEPARTMENT_CHOICES = [
("Πληροφορικής και Τηλεματικής", "Πληροφορικής και Τηλεματικής"),
("Διαιτολογίας-Διατροφής", "Διαιτολογίας-Διατροφής"),
("Γεωγραφίας", "Γεωγραφίας"),
("Οικονομίας και Βιώσιμης Ανάπτυξης", "Οικονομίας και Βιώσιμης Ανάπτυξης"),
]
class Course(models.Model):
course_name = models.CharField(max_length = 100)
users_available = models.ManyToManyField(User)
post_und = models.CharField(max_length = 100, choices = POSTUND_CHOICES, default = '')
department = models.CharField(max_length = 100, choices = DEPARTMENT_CHOICES, default = 'Πληροφορικής και Τηλεματικής')
current_user = models.ForeignKey(User, related_name='owner', null=True, on_delete=models.CASCADE)
def __str__(self):
return self.course_name
@classmethod
def pick_course(cls, current_user, new_course):
course, created = cls.objects.get_or_create(current_user=current_user)
course.users_available.add(current_user)
@classmethod
def drop_course(cls, current_user, new_course):
course, created = cls.objects.get_or_create(current_user=current_user)
course.users_available.remove(current_user)
class Absence(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE)
username = models.ForeignKey(User, on_delete=models.CASCADE) #CharField(max_length = 100, null=True, blank=True)
professor_name = models.CharField(max_length = 100)
student_name = models.CharField(max_length = 100)
it_number = models.CharField(max_length = 100)
date_of_absence = models.DateTimeField(default=timezone.now)
absences_left = models.IntegerField(default=0)
is_failed = models.BooleanField(default=False)
def __str__(self):
return self.it_number
def get_absolute_url(self):
return reverse('absence-detail', kwargs={'pk': self.pk})
|
[
"it21546@hua.gr"
] |
it21546@hua.gr
|
d827e99e9bfe24739b29b9efd7b67641f05c3576
|
ff3e0d75fda9a1a94fd8ba7618c0aab499b8393d
|
/musicians/migrations/0004_auto_20200813_0055.py
|
255088a50889f0134f21340a8b9558fc20ab73a7
|
[
"MIT"
] |
permissive
|
victorsemenov1980/DjangoFullStack
|
bbe2897c20633b3eba8db807442eb0921668e6f1
|
655a3a9980057913c1aeeb1cd54683ccf12ad901
|
refs/heads/master
| 2023-04-05T23:34:13.836215 | 2021-04-22T18:08:51 | 2021-04-22T18:08:51 | 289,705,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,423 |
py
|
# Generated by Django 3.1 on 2020-08-13 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('musicians', '0003_info'),
]
operations = [
migrations.RemoveField(
model_name='service',
name='Featured',
),
migrations.RemoveField(
model_name='service',
name='Featured_Price',
),
migrations.RemoveField(
model_name='service',
name='Price_hour',
),
migrations.RemoveField(
model_name='service',
name='Price_service',
),
migrations.AddField(
model_name='main',
name='Bio',
field=models.TextField(default='none'),
preserve_default=False,
),
migrations.AddField(
model_name='main',
name='Instrument',
field=models.CharField(default='none', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='main',
name='Organization',
field=models.CharField(default='none', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='service',
name='Description',
field=models.CharField(blank=True, max_length=255),
),
]
|
[
"vs1378009@icloud.com"
] |
vs1378009@icloud.com
|
d13566a33b190ce7d3ce241b992ad4b6da0d2c42
|
ef57b38bcfeb6e1e63497fc84632ae509578d6c2
|
/dv-var.py
|
f75b0bdfbb96a24784da41ead102988f4638fc32
|
[] |
no_license
|
league/Radio-Transient-Search
|
d31fcccf3b2e192e91563cb9050087b91c6f1815
|
0c2883f0b335db8f0785fa004234167edaba8d8f
|
refs/heads/master
| 2021-07-13T08:03:23.639950 | 2021-03-20T02:39:33 | 2021-03-20T02:39:33 | 100,059,536 | 1 | 0 | null | 2017-08-11T18:19:30 | 2017-08-11T18:19:30 | null |
UTF-8
|
Python
| false | false | 15,112 |
py
|
import disper
import sys
import numpy as np
import glob
import os
import time
import sys
import humanize
from datetime import datetime, timedelta
from mpi4py import MPI
from mpisetup import totalrank, rank, log, comm
def DMs(DMstart,DMend,dDM):
"""
Calculate the number of DMs searched between DMstart and DMend, with spacing dDM * DM.
Required:
DMstart - Starting Dispersion Measure in pc cm-3
DMend - Ending Dispersion Measure in pc cm-3
dDM - DM spacing in pc cm-3
"""
#NDMs = np.log10(float(DMend)/float(DMstart))/np.log10(1.0+dDM)
NDMs = (DMend-DMstart)/dDM
return int(np.round(NDMs))
def delay2(freq, dm):
"""
Calculate the relative delay due to dispersion over a given frequency
range in Hz for a particular dispersion measure in pc cm^-3. Return
the dispersive delay in seconds. Same as delay, but w.r.t to highest frequency.
***Used to simulate a dispersed pulse.***
Required:
freq - 1-D array of frequencies in MHz
dm - Dispersion Measure in pc cm-3
"""
# Dispersion constant in MHz^2 s / pc cm^-3
_D = 4.148808e3
# Delay in s
tDelay = dm*_D*((1/freq)**2 - (1/freq.max())**2)
return tDelay
def Threshold(ts, thresh, clip=3, niter=1):
"""
Wrapper to scipy threshold a given time series using Scipy's threshold function (in
scipy.stats.stats). First it calculates the mean and rms of the given time series. It then
makes the time series in terms of SNR. If a given SNR value is less than the threshold, it is
set to "-1". Returns a SNR array with values less than thresh = -1, all other values = SNR.
Also returns the mean and rms of the timeseries.
Required:
ts - input time series.
Options:
thresh - Time series signal-to-noise ratio threshold. default = 5.
clip - Clipping SNR threshold for values to leave out of a mean/rms calculation. default = 3.
niter - Number of iterations in mean/rms calculation. default = 1.
Usage:
>>sn, mean, rms = Threshold(ts, *options*)
"""
# Calculate, robustly, the mean and rms of the time series. Any values greater than 3sigma are left
# out of the calculation. This keeps the mean and rms free from sturation due to large deviations.
mean = np.mean(ts)
std = np.std(ts)
#print mean,std
if niter > 0:
for i in range(niter):
ones = np.where((ts-mean)/std < clip)[0] # only keep the values less than 3sigma
mean = np.mean(ts[ones])
std = np.std(ts[ones])
SNR = (ts-mean)/std
# Often getting "invalid value encountered in less" here:
with np.errstate(invalid='raise'):
SNR[SNR<thresh]=-1
#SNR = st.threshold((ts-mean)/std, threshmin=thresh, newval=-1)
return SNR, mean, std
def Decimate_ts(ts, ndown=2):
"""
Takes a 1-D timeseries and decimates it by a factore of ndown, default = 2.
Code adapted from analysis.binarray module:
http://www.astro.ucla.edu/~ianc/python/_modules/analysis.html#binarray
from Ian's Python Code (http://www.astro.ucla.edu/~ianc/python/index.html)
Optimized for time series' with length = multiple of 2. Will handle others, though.
Required:
ts - input time series
Options:
ndown - Factor by which to decimate time series. Default = 2.
if ndown = 1, returns ts
"""
if ndown==1:
return ts
ncols = len(ts)
n_rep = ncols / ndown
ts_ds = np.array([ts[i::ndown][0:n_rep] for i in range(ndown)]).mean(0)
return ts_ds
class OutputSource():
pulse = None # Pulse Number
SNR = None # SNR of pulse
DM = None # DM (pc/cm3) of pulse
time = None # Time at which pulse ocurred
dtau = None # Temporal resolution of time series
dnu = None # Spectral resolution
nu = None # Central Observing Frequency
mean = None # Mean in the time series
rms = None # RMS in the time series
formatter = "{0.pulse:07d} {0.SNR:10.6f} {0.DM:10.4f} {0.time:10.6f} "+\
" {0.dtau:10.6f} {0.dnu:.4f} {0.nu:.4f} {0.mean:.5f}"+\
" {0.rms:0.5f}\n "
def __str__(self):
return self.formatter.format(self)
def savitzky_golay(y, window_size, order, deriv=0):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter
This implementation is based on [1]_.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techhniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute
(default = 0 means only smoothing)
Returns
-------
y_smooth : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
>>> t = np.linspace(-4, 4, 500)
>>> y = np.exp(-t ** 2)
>>> np.random.seed(0)
>>> y_noisy = y + np.random.normal(0, 0.05, t.shape)
>>> y_smooth = savitzky_golay(y, window_size=31, order=4)
>>> print np.rms(y_noisy - y)
>>> print np.rms(y_smooth - y)
References
----------
.. [1] http://www.scipy.org/Cookbook/SavitzkyGolay
.. [2] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [3] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
#raise TypeError("window_size size must be a positive odd number")
window_size += 1
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range]
for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv]
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(y, m, mode='valid')
def snr(x):return (x-x.mean())/x.std()
def bpf(x, windows = 40):
bp = savitzky_golay(x,windows,1)
x2 = x / bp
mask = np.where(snr(x2)>1)[0]
mask2= np.zeros(x.shape[0])
mask2[mask] = 1.
y = np.ma.array(x, mask = mask2)
bp = savitzky_golay(y,windows,1)
fit = np.ma.polyfit(np.arange(len(y)),y,4)
p = np.poly1d(fit)(np.arange(len(y)))[mask]
bp = x
bp[mask] = np.poly1d(fit)(np.arange(len(y)))[mask]
bp = savitzky_golay(bp,windows,2)
return bp
def fold(t,period,T0=0):
time = np.arange(len(t))
epoch = np.floor( 1.*(time - T0)/period )
phase = 1.*(time - T0)/period - epoch
foldt = t[np.argsort(phase)]
return Decimate_ts(foldt, 1.*len(t)/period )
def RFImask(spr):#sp has shape[:,:]
x = np.where(abs(spr.mean(1))>np.sort(spr.mean(1))[spr.shape[0]/2]+np.sort(spr.mean(1))[spr.shape[0]/2]-np.sort(spr.mean(1))[1])
y = np.where(abs(spr.mean(0))>np.sort(spr.mean(0))[spr.shape[1]/2]+np.sort(spr.mean(0))[spr.shape[1]/2]-np.sort(spr.mean(0))[1])
return [x[0],y[0]]
def massagesp(spectrometer, windows_x=43,windows_y=100):
bp = bpf(spectrometer.mean(0),windows_x)
spectrometer /= bp
bl = bpf(spectrometer.mean(1),windows_y)
spectrometer = (spectrometer.T - bl).T
mask = np.array ( RFImask(spectrometer) )
mask2 = np.zeros((spectrometer.shape))
mask2[mask[0],:] = 1.
mask2[:,mask[1]] = 1.
temp_spec = np.ma.array(spectrometer, mask = mask2 )
mean = temp_spec.mean()
spectrometer[mask[0],:] = mean
spectrometer[:,mask[1]] = mean
spectrometer -= mean
return spectrometer
def progress(t_start, fraction, message):
if fraction > 0:
elapsed = (datetime.now() - t_start).total_seconds()
remaining = timedelta(seconds = (elapsed / fraction) - elapsed)
log("%s (%0.1f%% complete, %s remaining)" %
(message, 100*fraction, humanize.naturaldelta(remaining)))
if __name__ == '__main__':
# TODO: make selecting the freq range more robust by auto-scaling
# it to the number of bins in the file.
fcl = 200 #/4
fch = 3700 #/4
# TODO: make selecting tuning more robust by detecting which is
# actually lower. The HDF file (like the DRX as well, apparently)
# can contain them in either order.
pol = 1
DMstart = 300 # Initial DM trial
DMend = 400 # Final DM trial
maxpw = 5 # Maximum pulse width to search (seconds)
thresh= 5.0 #SNR cut off
import h5py
import humanize
h5f = h5py.File(sys.argv[1], 'r')['Observation1']
num_time_bins = h5f['time'].shape[0]
tInt = h5f['time'][1] - h5f['time'][0] # Temporal resolution
log("%s time bins, %.9f sec each" % (humanize.intcomma(num_time_bins), tInt))
# These are time offsets rather than independent filenames
time_bins_per_file = min(3000, num_time_bins / totalrank)
fn = range(0, num_time_bins, time_bins_per_file)
fpp = len(fn) / totalrank # Files per process
numberofFiles = fpp * totalrank
npws = int(np.round(np.log2(maxpw/tInt)))+1
spectarray = np.zeros((fpp, time_bins_per_file, fch-fcl))
h5t = h5f['Tuning%d' % (pol+1)]
freq = h5t['freq'][fcl:fch]
freq /= 10**6
cent_freq = np.median(freq)
BW = freq.max()-freq.min()
# Announce the setup for the computation
if rank == 0:
log("Using frequency buckets %i:%i" % (fcl, fch))
log("Tuning %d, central freq %f, BW %f, %d buckets" % (pol, cent_freq, BW, freq.shape[0]))
log("%i files, %i per processor * %i processors = %i" %
(len(fn), fpp, totalrank, numberofFiles))
log("Max pulse width %0.3fs" % maxpw)
log("spectarray will have dimensions %s" % str(spectarray.shape))
log("PHASE1: Process spectrograms")
outname = 'spectarray%02i.npy' % rank
# --- Phase 1: build this rank's slice of the background-removed spectrogram.
# Each MPI rank loads `fpp` files; file `findex` maps to a starting time bin
# in the HDF5 spectrometer data (XX / YY polarisation products).
#combine spectrogram and remove background
for i in range(fpp):
    findex = rank*fpp + i
    time_bin_start = fn[findex]
    log("Loading #%i (%i of %i): %d" % (findex, i, fpp, time_bin_start))
    # Slice this file's time window over the selected channels [fcl:fch]
    # for both polarisations, then average them to total intensity.
    spx = h5t['XX'][time_bin_start : (time_bin_start + time_bins_per_file), fcl:fch]
    spy = h5t['YY'][time_bin_start : (time_bin_start + time_bins_per_file), fcl:fch]
    sp = (spx + spy) / 2
    # massagesp presumably flattens the bandpass / removes RFI; 10 and 50
    # look like smoothing window sizes -- TODO confirm against its definition.
    spectarray[i,:sp.shape[0],:] = massagesp( sp, 10, 50 )
log("Writing %s" % outname)
np.save(outname, spectarray)
#sys.exit()
# Or we can pick up the saved spectarrays
"""
log("Loading %s" % outname)
spectarray = np.load(outname)
"""
# Per-decimation output bookkeeping:
#   txtsize[ranki,0] = current output file number (starts at 1),
#   txtsize[ranki,1] = cumulative pulse count for that decimation level.
txtsize=np.zeros((npws,2),dtype=np.int32) #fileno = txtsize[ranki,0], pulse number = txtsize[ranki,1],ranki is the decimated order of 2
txtsize[:,0]=1 #fileno start from 1
# Calculate which DMs to test
DM = DMstart
DMtrials = DMstart
if rank == 0:
    log("Approaching phase 2, calculating which DMs to try...")
    # DM step: 0.1 pc/cm^3 below DM=1000, 1.0 above.
    while DM < DMend:
        if DM < 1000:
            dDM = 0.1
        elif DM >= 1000:
            dDM = 1.
        DM += dDM
        DMtrials = np.append(DMtrials,DM)
    log("Will test %i DMs in range %.1f:%.1f" % (len(DMtrials), DMstart, DMend))
# Share the rank-0 DM trial list with every rank.
DMtrials = comm.bcast(DMtrials, root=0)
if rank == 0:
    log("PHASE 2: DM search")
t_start = datetime.now()
for DM in DMtrials:
    if rank == 0:
        progress(t_start, (DM-DMstart)/(DMend-DMstart), "DM trial %f" % DM)
    # Per-channel dispersion delay in time bins for this DM.
    tb=np.round((delay2(freq,DM)/tInt)).astype(np.int32)
    ts=np.zeros((tb.max()+numberofFiles*time_bins_per_file))
    # De-disperse: shift each channel by its delay and sum into this
    # rank's partial time series.
    for freqbin in range(len(freq)):
        for i in range(fpp):
            ts[tb.max()-tb[freqbin] + (rank*fpp+i)*time_bins_per_file :tb.max()-tb[freqbin] + (rank*fpp+i+1)*time_bins_per_file ] += spectarray[i,:,freqbin]
    tstotal=ts*0#initiate a 4 hour blank time series
    comm.Allreduce(ts,tstotal,op=MPI.SUM)#merge the 4 hour timeseries from all processor
    tstotal = tstotal[tb.max():len(tstotal)-tb.max()]#cut the dispersed time lag
    '''
    # save the time series around the Pulsar's DM
    if rank == 0:
        if np.abs(DM - 10.922) <= dDM:
            print 'DM=',DM
            np.save('ts_pol%.1i_DMx100_%.6i' % (pol,DM*100),tstotal)
            sys.exit()
    '''
    #search for signal with decimated timeseries
    # Ranks 0..npws-1 each search one decimation level (2**rank) of the
    # merged series and append detections to their own text file.
    if rank<npws:#timeseries is ready for signal search
        ranki=rank
        filename = "ppc_SNR_pol_%.1i_td_%.2i_no_%.05i.txt" % (pol,ranki,txtsize[ranki,0])
        outfile = open(filename,'a')
        ndown = 2**ranki #decimate the time series
        sn,mean,rms = Threshold(Decimate_ts(tstotal,ndown),thresh,niter=0)
        ones = np.where(sn!=-1)[0]
        for one in ones:# Now record all pulses above threshold
            pulse = OutputSource()
            txtsize[ranki,1] += 1
            if txtsize[ranki,1] % 100 == 0:
                log("Reached %d pulses" % txtsize[ranki,1])
            pulse.pulse = txtsize[ranki,1]
            pulse.SNR = sn[one]
            pulse.DM = DM
            # Convert decimated sample index back to seconds.
            pulse.time = one*tInt*ndown
            pulse.dtau = tInt*ndown
            pulse.dnu = freq[1]-freq[0]
            pulse.nu = cent_freq
            pulse.mean = mean
            pulse.rms = rms
            outfile.write(pulse.formatter.format(pulse)[:-1])
            # Roll over to a new output file every 200000 pulses.
            if txtsize[ranki,1] >200000*txtsize[ranki,0]:
                outfile.close()
                txtsize[ranki,0]+=1
                filename = "ppc_SNR_pol_%.1i_td_%.2i_no_%.05d.txt" % (pol,ranki,txtsize[ranki,0])
                log("Previous pulse file reached limit, recording to %s" % filename)
                outfile = open(filename,'a')
|
[
"league@contrapunctus.net"
] |
league@contrapunctus.net
|
8c506c55fcae581ec67d2c09b843a380a00d0741
|
d81d615c2e6ef0d9d4244c3e6cce96ddcad7230b
|
/task2/optimization.py
|
b5b90436df152e4fb18db570a8358c683cbb44e7
|
[] |
no_license
|
Sorrow321/prac_ml_msu
|
857aa1d4714c86c2c4ad7014077ee6afc940d234
|
390bcea9baa66766a381f7a61ddecdb6258973b7
|
refs/heads/master
| 2020-12-14T06:03:09.738249 | 2020-01-18T01:35:35 | 2020-01-18T01:35:35 | 234,665,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,019 |
py
|
import numpy as np
import time
import scipy
from oracles import BinaryLogistic
from scipy.special import expit
class GDClassifier:
    """
    Full gradient descent for an arbitrary oracle conforming to the
    specification of the oracles in module oracles.py.

    The step size on iteration k is step_alpha / k ** step_beta.
    """

    def __init__(self, loss_function, step_alpha=1, step_beta=0,
                 tolerance=1e-5, max_iter=1000, **kwargs):
        """
        loss_function - str, the classifier's loss function; supported:
            - 'binary_logistic' - binary logistic regression
        step_alpha - float, step-size schedule numerator
        step_beta - float, step-size decay exponent
        tolerance - stop once |f(x_{k+1}) - f(x_k)| < tolerance
        max_iter - maximum number of iterations
        **kwargs - forwarded to the oracle constructor
        """
        self.step_alpha = step_alpha
        self.step_beta = step_beta
        self.tolerance = tolerance
        self.max_iter = max_iter
        if loss_function == 'binary_logistic':
            self.oracle = BinaryLogistic(**kwargs)

    def fit(self, X, y, w_0=None, trace=False):
        """
        Fit the model on X with answers y.

        X - scipy.sparse.csr_matrix or 2-D numpy.array
        y - 1-D numpy array of +1/-1 labels (NOT 1/0)
        w_0 - optional initial weight vector (defaults to zeros)
        trace - when True, return a history dict with:
            history['time']: seconds between consecutive iterations
            history['func']: loss at each iteration (starting point included)
            history['accuracy']: training accuracy at each iteration
        """
        # Starting point: caller-supplied w_0 or the zero vector.
        # (PEP 8: `is not None`, not `not ... is None`.)
        self.w = w_0 if w_0 is not None else np.zeros(X.shape[1])
        loss_value = self.oracle.func(X, y, self.w)
        history = {'time': [], 'func': [loss_value],
                   'accuracy': [np.mean(y == self.predict(X))]}
        prev_time = time.time()
        for i in range(1, self.max_iter + 1):
            # One gradient step with the decaying learning rate.
            step = self.step_alpha / (i ** self.step_beta)
            new_w = self.w - step * self.oracle.grad(X, y, self.w)
            new_loss_value = self.oracle.func(X, y, new_w)
            if trace:
                history['func'].append(new_loss_value)
                history['time'].append(time.time() - prev_time)
                # NOTE(review): accuracy is measured with the weights from
                # BEFORE this step (self.w is updated below); kept as-is to
                # preserve the recorded history semantics.
                history['accuracy'].append(np.mean(y == self.predict(X)))
                prev_time = time.time()
            if abs(loss_value - new_loss_value) < self.tolerance:
                self.w = new_w
                break
            loss_value = new_loss_value
            self.w = new_w
        if trace:
            return history

    def predict(self, X, threshold=0.5):
        """
        Predict +1/-1 labels for X.

        X - scipy.sparse.csr_matrix or 2-D numpy.array
        threshold - positive-class probability cut-off
        return: 1-D numpy array of +1/-1 predictions
        """
        result = (self.predict_proba(X) > threshold).astype('int')
        result[result == 0] = -1
        return result

    def predict_proba(self, X):
        """
        Return the probability of the positive class for each row of X.

        X - scipy.sparse.csr_matrix or 2-D numpy.array
        return: 1-D numpy array of sigmoid(X @ w) values
        """
        return expit(X.dot(self.w))

    def get_objective(self, X, y):
        """
        Return the objective (loss) value on (X, y) at the current weights.

        return: float
        """
        return self.oracle.func(X, y, self.w)

    def get_gradient(self, X, y):
        """
        Return the gradient of the objective on (X, y) at the current weights.

        return: numpy array (shape depends on the task)
        """
        return self.oracle.grad(X, y, self.w)

    def get_weights(self):
        """Return the current weight vector."""
        return self.w
class SGDClassifier(GDClassifier):
    """
    Stochastic (mini-batch) gradient descent for an arbitrary oracle
    conforming to the specification of the oracles in module oracles.py.

    Prediction helpers (predict, predict_proba, get_weights, ...) are
    inherited unchanged from GDClassifier.
    """

    def __init__(self, loss_function, batch_size, step_alpha=1, step_beta=0,
                 tolerance=1e-5, max_iter=1000, random_seed=153, **kwargs):
        """
        loss_function - str, the classifier's loss function; supported:
            - 'binary_logistic' - binary logistic regression
        batch_size - mini-batch size used for each gradient estimate
        step_alpha - float, step-size schedule numerator
        step_beta - float, step-size decay exponent
        tolerance - stop once the per-epoch loss change is below this value
        max_iter - maximum number of epochs
        random_seed - seeds numpy's RNG for reproducible shuffling
        **kwargs - forwarded to the oracle constructor
        """
        self.step_alpha = step_alpha
        self.step_beta = step_beta
        self.tolerance = tolerance
        self.max_epoch = max_iter
        self.batch_size = batch_size
        np.random.seed(random_seed)
        if loss_function == 'binary_logistic':
            self.oracle = BinaryLogistic(**kwargs)

    def fit(self, X, y, w_0=None, trace=False, log_freq=1):
        """
        Fit the model on X with answers y (y must contain +1/-1 labels).

        X - scipy.sparse matrix or 2-D numpy.array
        y - 1-D numpy array
        w_0 - optional initial weight vector (defaults to zeros)
        trace - when True, return a history dict with per-epoch entries:
            'epoch_num', 'time', 'func', 'weights_diff' (squared norm of the
            weight change over the epoch) and 'accuracy'.
        log_freq - accepted for API compatibility; this implementation
            records history once per epoch regardless of its value.
        """
        self.w = w_0 if w_0 is not None else np.zeros(X.shape[1])
        # Row slicing below needs CSR; convert any sparse input up front.
        # (The original only recognised COO via the private scipy.sparse.coo
        # path, so CSC etc. would have failed on row indexing.)
        if hasattr(X, 'tocsr'):
            X = X.tocsr()
        loss_value = self.oracle.func(X, y, self.w)
        history = {'epoch_num': [0], 'time': [], 'func': [loss_value],
                   'weights_diff': [0],
                   'accuracy': [np.mean(y == self.predict(X))]}
        prev_time = time.time()
        iter_id = 1
        for epoch_i in range(1, self.max_epoch + 1):
            # Reshuffle once per epoch, then sweep over mini-batches.
            permutation = np.random.permutation(X.shape[0])
            X_shuffled = X[permutation]
            y_shuffled = y[permutation]
            self.w_prev = np.copy(self.w)
            for batch_i in range(int(np.ceil(X.shape[0] / self.batch_size))):
                X_batch = X_shuffled[batch_i * self.batch_size: (batch_i + 1) * self.batch_size]
                y_batch = y_shuffled[batch_i * self.batch_size: (batch_i + 1) * self.batch_size]
                # Step size decays with the global iteration counter,
                # not the epoch counter.
                step = self.step_alpha / (iter_id ** self.step_beta)
                self.w = self.w - step * self.oracle.grad(X_batch, y_batch, self.w)
                iter_id += 1
            new_loss_value = self.oracle.func(X, y, self.w)
            if trace:
                history['epoch_num'].append(epoch_i)
                history['time'].append(time.time() - prev_time)
                history['func'].append(new_loss_value)
                history['accuracy'].append(np.mean(y == self.predict(X)))
                diff = self.w - self.w_prev
                history['weights_diff'].append(np.dot(diff, diff))
                prev_time = time.time()
            if abs(loss_value - new_loss_value) < self.tolerance:
                break
            loss_value = new_loss_value
        if trace:
            return history
|
[
"noreply@github.com"
] |
Sorrow321.noreply@github.com
|
608639397198677dd8f08304ee0a78ad5fe88050
|
b961403c4d63b0a2f6471a2226d46a53756f8a2c
|
/walk/app.py
|
19293622dc1dc2be3021446c3f1fcff49911f21b
|
[] |
no_license
|
yeony-syny/sparta8
|
6072c0d0806439aca773f22eb5f8323c07ac1817
|
e3262c2dd4c358cbec558821fd0d320bd386762a
|
refs/heads/master
| 2022-10-30T17:48:51.646464 | 2020-06-20T08:04:51 | 2020-06-20T08:04:51 | 264,628,999 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 202 |
py
|
# Launch a local Chrome session via Selenium WebDriver and open a page.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # NOTE(review): imported but unused here
# Absolute path to the chromedriver binary (machine-specific).
path = "C:/Users/bumky/Desktop/develop/chromedriver"
# Passing the driver path positionally is the legacy Selenium 3 API
# (Selenium 4 uses Service objects) -- confirm installed library version.
driver = webdriver.Chrome(path)
driver.get("http://www.facebook.org")
[
"synoout@gmail.com"
] |
synoout@gmail.com
|
5ee9b76c8b45fb89102e0d6b56f0078da0fd77d7
|
f3091f0b1c841729bff285d01f52a1db06b71513
|
/tests/daemon/test_walletd.py
|
171036f2abb251dd121cc66f0fada3ff79064991
|
[
"MIT"
] |
permissive
|
Taffsigg/QRL
|
285174df0dcb9f704611ed28bb60a9b3cf235fd6
|
130044cb5015ad21db49a3b6867f1327a071b680
|
refs/heads/master
| 2020-11-25T12:38:51.000437 | 2019-11-07T16:19:59 | 2019-11-07T16:19:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 58,067 |
py
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from mock import Mock
from pyqrllib.pyqrllib import bin2hstr, hstr2bin
from qrl.daemon.walletd import WalletD
from qrl.generated import qrl_pb2
from qrl.core.AddressState import AddressState
from qrl.core.txs.TransferTransaction import TransferTransaction
from qrl.core.txs.MessageTransaction import MessageTransaction
from qrl.daemon.helper.DaemonHelper import WalletDecryptionError
from qrl.core.misc import logger
from tests.misc.helper import set_qrl_dir, get_alice_xmss, get_bob_xmss
from tests.misc.MockHelper.mock_function import MockFunction
# Configure the project's default logging once, at import time.
logger.initialize_default()
class TestWalletD(TestCase):
    def __init__(self, *args, **kwargs):
        """Set up fixture constants shared by every test.

        The hex seed and the mnemonic below both regenerate ``self.qaddress``.
        """
        # Deliberately non-ASCII to exercise UTF-8 passphrase handling.
        self.passphrase = '你好'
        self.qaddress = "Q010400ff39df1ba4d1d5b8753e6d04c51c34b95b01fc3650c10ca7b296a18bdc105412c59d0b3b"
        self.hex_seed = "0104008441d43524996f76236141d16b7b324323abf796e77ad" \
                        "7c874622a82f5744bb803f9b404d25733d0db82be7ac6f3c4cf"
        self.mnemonic = "absorb drank lute brick cure evil inept group grey " \
                        "breed hood reefy eager depict weed image law legacy " \
                        "jockey calm lover freeze fact lively wide dread spiral " \
                        "jaguar span rinse salty pulsar violet fare"
        super(TestWalletD, self).__init__(*args, **kwargs)

    def test_init(self):
        """WalletD constructs successfully against a version-1 wallet dir."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            self.assertIsNotNone(walletd)

    def test_qaddress_to_address(self):
        """qaddress_to_address strips the leading 'Q' and hex-decodes the rest."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            qaddress = "Q010600968c3408cba5192d75c11cec909e803fc590e82463216b5a04ce8e447f76b4e02c0d3d81"
            address = walletd.qaddress_to_address(qaddress)
            self.assertEqual(qaddress[1:], bin2hstr(address))

    def test_authenticate(self):
        """authenticate passes on an unencrypted wallet, raises once encrypted."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd.authenticate()
            # Pretend the wallet is encrypted: authenticate must then refuse
            # to proceed without a passphrase.
            walletd._wallet = Mock()
            walletd._wallet.encrypted = Mock(return_value=True)
            with self.assertRaises(ValueError):
                walletd.authenticate()
    def test_get_slave(self):
        """Exercise slave-key rotation across three rounds.

        Each round marks the current slave group's OTS keys as (almost)
        exhausted; get_slave must then provision a fresh group of 3 slaves,
        growing get_slave_list by one group per round.
        """
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            # MockFunction stands in for the node's per-address state lookup.
            m = MockFunction()
            walletd.get_address_state = m.get
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=10)
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            master_addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            m.put(qaddress, master_addr_state)
            slaves = walletd.get_slave_list(qaddress)
            self.assertEqual(len(slaves), 1)
            self.assertEqual(len(slaves[0]), 3)
            # Register group 0's slave public keys on the master address.
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][1].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][2].pk)), 0)
            slave00_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[0][0].qaddress))
            slave01_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[0][1].qaddress))
            slave02_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[0][2].qaddress))
            # Exhaust group 0's OTS keys so the next get_slave call rotates.
            self.assertEqual(slaves[0][0].index, 0)
            for i in range(0, 1024):
                slave00_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 0, 0, 1020)
            m.put(slaves[0][0].qaddress, slave00_addr_state)
            self.assertEqual(slaves[0][1].index, 0)
            for i in range(0, 1024):
                slave01_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 0, 1, 1020)
            m.put(slaves[0][1].qaddress, slave01_addr_state)
            self.assertEqual(slaves[0][2].index, 5)
            for i in range(5, 1000):
                slave02_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 0, 2, 1018)
            m.put(slaves[0][2].qaddress, slave02_addr_state)
            walletd.get_slave(qaddress)
            slaves = walletd.get_slave_list(qaddress)
            self.assertEqual(len(slaves), 2)
            walletd._wallet.set_slave_ots_index(0, 0, 2, 1019)
            # Round 2: same drill with the newly provisioned group 1.
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[1][0].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[1][1].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[1][2].pk)), 0)
            slave10_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[1][0].qaddress))
            slave11_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[1][1].qaddress))
            slave12_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[1][2].qaddress))
            self.assertEqual(slaves[1][0].index, 0)
            for i in range(0, 1024):
                slave10_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 1, 0, 1020)
            m.put(slaves[1][0].qaddress, slave10_addr_state)
            self.assertEqual(slaves[1][1].index, 0)
            for i in range(0, 1024):
                slave11_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 1, 1, 1020)
            m.put(slaves[1][1].qaddress, slave11_addr_state)
            self.assertEqual(slaves[1][2].index, 5)
            for i in range(5, 1000):
                slave12_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 1, 2, 1018)
            m.put(slaves[1][2].qaddress, slave12_addr_state)
            walletd.get_slave(qaddress)
            slaves = walletd.get_slave_list(qaddress)
            self.assertEqual(len(slaves), 3)
            walletd._wallet.set_slave_ots_index(0, 1, 2, 1019)
            # Round 3: exhaust group 2, expect a fourth group.
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[2][0].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[2][1].pk)), 0)
            master_addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[2][2].pk)), 0)
            slave20_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[2][0].qaddress))
            slave21_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[2][1].qaddress))
            slave22_addr_state = AddressState.get_default(walletd.qaddress_to_address(slaves[2][2].qaddress))
            self.assertEqual(slaves[2][0].index, 0)
            for i in range(0, 1024):
                slave20_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 2, 0, 1020)
            m.put(slaves[2][0].qaddress, slave20_addr_state)
            self.assertEqual(slaves[2][1].index, 0)
            for i in range(0, 1024):
                slave21_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 2, 1, 1020)
            m.put(slaves[2][1].qaddress, slave21_addr_state)
            self.assertEqual(slaves[2][2].index, 5)
            for i in range(5, 1000):
                slave22_addr_state.set_ots_key(i)
            walletd._wallet.set_slave_ots_index(0, 2, 2, 1018)
            m.put(slaves[2][2].qaddress, slave22_addr_state)
            walletd.get_slave(qaddress)
            slaves = walletd.get_slave_list(qaddress)
            self.assertEqual(len(slaves), 4)
            walletd._wallet.set_slave_ots_index(0, 0, 2, 1019)
    def test_encrypt_last_item(self):
        """_encrypt_last_item encrypts the newest wallet entry in place."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            walletd.authenticate()
            walletd.add_new_address(height=8)
            # get_wallet_info()[2] is the 'encrypted' flag.
            self.assertFalse(walletd.get_wallet_info()[2])
            walletd._passphrase = self.passphrase
            walletd._encrypt_last_item()
            self.assertTrue(walletd.get_wallet_info()[2])

    def test_get_wallet_index_xmss(self):
        """_get_wallet_index_xmss returns the index and XMSS tree of an address."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            index, xmss = walletd._get_wallet_index_xmss(qaddress, 0)
            self.assertEqual(index, 0)
            self.assertEqual(xmss.qaddress, qaddress)

    def test_add_new_address(self):
        """add_new_address yields a 'Q...' address and stores it in the wallet."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 1)

    def test_add_new_address2(self):
        """Adding a second address grows the wallet to two entries."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 1)
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 2)
    def test_add_address_from_seed(self):
        """A hex seed regenerates the expected fixture address."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            qaddress1 = walletd.add_address_from_seed(seed=self.hex_seed)  # Using hexseed
            self.assertEqual(self.qaddress, qaddress1)
            self.assertEqual(len(walletd.list_address()), 1)

    def test_add_address_from_seed2(self):
        """Hex seed and mnemonic both regenerate the same fixture address."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            qaddress1 = walletd.add_address_from_seed(seed=self.hex_seed)  # Using hexseed
            self.assertEqual(self.qaddress, qaddress1)
            self.assertEqual(len(walletd.list_address()), 1)
            walletd.remove_address(self.qaddress)
            self.assertEqual(len(walletd.list_address()), 0)
            qaddress2 = walletd.add_address_from_seed(seed=self.mnemonic)  # Using mnemonic
            self.assertEqual(self.qaddress, qaddress2)
            self.assertEqual(len(walletd.list_address()), 1)

    def test_list_address(self):
        """list_address returns the addresses currently held in the wallet."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 1)
            list_address = walletd.list_address()
            self.assertEqual(list_address[0], qaddress)

    def test_remove_address(self):
        """remove_address deletes a known address and reports success."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 1)
            result = walletd.remove_address(qaddress)
            self.assertTrue(result)
            self.assertEqual(len(walletd.list_address()), 0)

    def test_remove_address2(self):
        """remove_address returns False for an address not in the wallet."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            self.assertEqual(len(walletd.list_address()), 1)
            result = walletd.remove_address(qaddress)
            self.assertTrue(result)
            self.assertEqual(len(walletd.list_address()), 0)
            result = walletd.remove_address("Q123")
            self.assertFalse(result)
            self.assertEqual(len(walletd.list_address()), 0)

    def test_validate_address(self):
        """validate_address accepts a well-formed address, rejects a bad checksum."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            qaddress = "Q010400ff39df1ba4d1d5b8753e6d04c51c34b95b01fc3650c10ca7b296a18bdc105412c59d0b3b"
            self.assertTrue(walletd.validate_address(qaddress))
            qaddress = "Q010400ff39df1ba4d1d5b8753e6d04c51c34b95b01fc3650c10ca7b296a18bdc105412c59d0b00"
            self.assertFalse(walletd.validate_address(qaddress))
    def test_get_recovery_seeds(self):
        """Recovery seeds (hex seed, mnemonic) each regenerate the address."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            self.assertEqual(qaddress[0], 'Q')
            seeds = walletd.get_recovery_seeds(qaddress)
            self.assertIsInstance(seeds, tuple)
            walletd.remove_address(qaddress)
            self.assertEqual(len(walletd.list_address()), 0)
            qaddress2 = walletd.add_address_from_seed(seeds[0])  # Using Hex Seed
            self.assertEqual(qaddress, qaddress2)
            walletd.remove_address(qaddress2)
            self.assertEqual(len(walletd.list_address()), 0)
            qaddress2 = walletd.add_address_from_seed(seeds[1])  # Using Mnemonic
            self.assertEqual(qaddress, qaddress2)
            walletd.remove_address(qaddress2)
            self.assertEqual(len(walletd.list_address()), 0)

    def test_get_wallet_info(self):
        """A fresh version-1 wallet is empty and unencrypted."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            version, len_address_items, encrypted = walletd.get_wallet_info()
            self.assertEqual(version, 1)
            self.assertEqual(len_address_items, 0)
            self.assertFalse(encrypted)

    def test_sign_and_push_transaction(self):
        """sign_and_push_transaction succeeds on SUBMITTED and raises otherwise."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            alice_xmss = get_alice_xmss()
            bob_xmss = get_bob_xmss()
            tx = TransferTransaction.create(addrs_to=[bob_xmss.address],
                                            amounts=[1],
                                            fee=1,
                                            xmss_pk=alice_xmss.pk)
            walletd.sign_and_push_transaction(tx, alice_xmss, 0, enable_save=False)
            # Any node response other than SUBMITTED must surface as an error.
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.UNKNOWN))
            with self.assertRaises(Exception):
                walletd.sign_and_push_transaction(tx, alice_xmss, 0, enable_save=False)
    def test_relay_transfer_txn(self):
        """relay_transfer_txn relays a two-recipient transfer from a wallet key."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            tx = walletd.relay_transfer_txn(qaddresses_to=qaddresses_to,
                                            amounts=amounts,
                                            fee=100000000,
                                            master_qaddress=None,
                                            signer_address=qaddress,
                                            ots_index=0)
            self.assertIsNotNone(tx)

    def test_relay_transfer_txn2(self):
        """Relaying works on an unlocked encrypted wallet and fails once locked."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            tx = walletd.relay_transfer_txn(qaddresses_to=qaddresses_to,
                                            amounts=amounts,
                                            fee=100000000,
                                            master_qaddress=None,
                                            signer_address=qaddress,
                                            ots_index=0)
            self.assertIsNotNone(tx)
            walletd.lock_wallet()
            with self.assertRaises(ValueError):
                walletd.relay_transfer_txn(qaddresses_to=qaddresses_to,
                                           amounts=amounts,
                                           fee=100000000,
                                           master_qaddress=None,
                                           signer_address=qaddress,
                                           ots_index=0)

    def test_relay_transfer_txn3(self):
        """Signing with an address not present in the wallet raises."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            with self.assertRaises(Exception):
                walletd.relay_transfer_txn(qaddresses_to=qaddresses_to,
                                           amounts=amounts,
                                           fee=100000000,
                                           master_qaddress=None,
                                           signer_address=alice_xmss.qaddress,
                                           ots_index=0)

    def test_relay_transfer_txn_by_slave(self):
        """relay_transfer_txn_by_slave signs with a registered slave key."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            slaves = walletd.get_slave_list(qaddress)
            addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            tx = walletd.relay_transfer_txn_by_slave(qaddresses_to=qaddresses_to,
                                                     amounts=amounts,
                                                     fee=100000000,
                                                     master_qaddress=qaddress)
            self.assertIsNotNone(tx)

    def test_relay_transfer_txn2_by_slave(self):
        """Slave-signed relay works while unlocked and fails once locked."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            slaves = walletd.get_slave_list(qaddress)
            addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            tx = walletd.relay_transfer_txn_by_slave(qaddresses_to=qaddresses_to,
                                                     amounts=amounts,
                                                     fee=100000000,
                                                     master_qaddress=qaddress)
            self.assertIsNotNone(tx)
            walletd.lock_wallet()
            with self.assertRaises(ValueError):
                walletd.relay_transfer_txn_by_slave(qaddresses_to=qaddresses_to,
                                                    amounts=amounts,
                                                    fee=100000000,
                                                    master_qaddress=qaddress)

    def test_relay_transfer_txn3_by_slave(self):
        """Slave relay rejects an unknown master address, and a locked wallet."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            slaves = walletd.get_slave_list(qaddress)
            addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            alice_xmss = get_alice_xmss(4)
            bob_xmss = get_bob_xmss(4)
            qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
            amounts = [1000000000, 1000000000]
            with self.assertRaises(Exception):
                walletd.relay_transfer_txn_by_slave(qaddresses_to=qaddresses_to,
                                                    amounts=amounts,
                                                    fee=100000000,
                                                    master_qaddress=alice_xmss.qaddress)
            walletd.lock_wallet()
            with self.assertRaises(ValueError):
                walletd.relay_transfer_txn_by_slave(qaddresses_to=qaddresses_to,
                                                    amounts=amounts,
                                                    fee=100000000,
                                                    master_qaddress=qaddress)
    def test_relay_message_txn(self):
        """relay_message_txn relays a message signed with a wallet key."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            tx = walletd.relay_message_txn(message='Hello QRL!',
                                           fee=100000000,
                                           master_qaddress=None,
                                           signer_address=qaddress,
                                           ots_index=0)
            self.assertIsNotNone(tx)

    def test_relay_message_txn_by_slave(self):
        """relay_message_txn_by_slave signs with a registered slave key."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            slaves = walletd.get_slave_list(qaddress)
            addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            tx = walletd.relay_message_txn_by_slave(message='Hello QRL!',
                                                    fee=100000000,
                                                    master_qaddress=qaddress)
            self.assertIsNotNone(tx)

    def test_relay_message_txn2(self):
        """Message relay works while unlocked and fails once the wallet is locked."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            tx = walletd.relay_message_txn(message='Hello QRL!',
                                           fee=100000000,
                                           master_qaddress=None,
                                           signer_address=qaddress,
                                           ots_index=0)
            self.assertIsNotNone(tx)
            walletd.lock_wallet()
            with self.assertRaises(ValueError):
                walletd.relay_message_txn(message='Hello QRL!',
                                          fee=100000000,
                                          master_qaddress=None,
                                          signer_address=qaddress,
                                          ots_index=0)

    def test_relay_message_txn2_by_slave(self):
        """Slave-signed message relay works unlocked and fails once locked."""
        with set_qrl_dir("wallet_ver1"):
            walletd = WalletD()
            walletd._public_stub.PushTransaction = Mock(
                return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
            qaddress = walletd.add_new_address_with_slaves(height=8)
            addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
            slaves = walletd.get_slave_list(qaddress)
            addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
            walletd._public_stub.GetAddressState = Mock(
                return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
            walletd.encrypt_wallet(self.passphrase)
            walletd.unlock_wallet(self.passphrase)
            tx = walletd.relay_message_txn_by_slave(message='Hello QRL!',
                                                    fee=100000000,
                                                    master_qaddress=qaddress)
            self.assertIsNotNone(tx)
            walletd.lock_wallet()
            with self.assertRaises(ValueError):
                walletd.relay_message_txn_by_slave(message='Hello QRL!',
                                                   fee=100000000,
                                                   master_qaddress=qaddress)
def test_relay_token_txn(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
tx = walletd.relay_token_txn(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
self.assertIsNotNone(tx)
def test_relay_token_txn_by_slave(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address_with_slaves(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
slaves = walletd.get_slave_list(qaddress)
addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
tx = walletd.relay_token_txn_by_slave(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=qaddress)
self.assertIsNotNone(tx)
def test_relay_token_txn2(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
tx = walletd.relay_token_txn(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_token_txn(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
def test_relay_token_txn2_by_slave(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address_with_slaves(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
slaves = walletd.get_slave_list(qaddress)
addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
tx = walletd.relay_token_txn_by_slave(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=qaddress)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_token_txn_by_slave(symbol='QRL',
name='Quantum Resistant Ledger',
owner_qaddress=alice_xmss.qaddress,
decimals=5,
qaddresses=qaddresses,
amounts=amounts,
fee=100000000,
master_qaddress=qaddress)
def test_relay_transfer_token_txn(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
tx = walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
def test_relay_transfer_token_txn2(self):
"""
Relaying transfer token transaction from an address not listed in wallet daemon
:return:
"""
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
walletd.add_new_address(height=8)
qaddress = walletd.add_new_address_with_slaves(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
with self.assertRaises(Exception):
walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=None,
signer_address=alice_xmss.qaddress,
ots_index=0)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
def test_relay_transfer_token_txn_by_slave(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address_with_slaves(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
slaves = walletd.get_slave_list(qaddress)
addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
bob_xmss = get_bob_xmss(4)
qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress]
amounts = [1000000000, 1000000000]
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
tx = walletd.relay_transfer_token_txn_by_slave(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=qaddress)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_transfer_token_txn_by_slave(qaddresses_to=qaddresses_to,
amounts=amounts,
token_txhash='',
fee=100000000,
master_qaddress=qaddress)
def test_relay_slave_txn(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
slave_pks = [alice_xmss.pk]
access_types = [0]
tx = walletd.relay_slave_txn(slave_pks=slave_pks,
access_types=access_types,
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_slave_txn(slave_pks=slave_pks,
access_types=access_types,
fee=100000000,
master_qaddress=None,
signer_address=qaddress,
ots_index=0)
def test_relay_slave_txn_by_slave(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address_with_slaves(height=8)
addr_state = AddressState.get_default(walletd.qaddress_to_address(qaddress))
slaves = walletd.get_slave_list(qaddress)
addr_state.add_slave_pks_access_type(bytes(hstr2bin(slaves[0][0].pk)), 0)
walletd._public_stub.GetAddressState = Mock(
return_value=qrl_pb2.GetAddressStateResp(state=addr_state.pbdata))
walletd.encrypt_wallet(self.passphrase)
walletd.unlock_wallet(self.passphrase)
alice_xmss = get_alice_xmss(4)
slave_pks = [alice_xmss.pk]
access_types = [0]
tx = walletd.relay_slave_txn_by_slave(slave_pks=slave_pks,
access_types=access_types,
fee=100000000,
master_qaddress=qaddress)
self.assertIsNotNone(tx)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.relay_slave_txn_by_slave(slave_pks=slave_pks,
access_types=access_types,
fee=100000000,
master_qaddress=qaddress)
def test_encrypt_wallet(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
with self.assertRaises(ValueError):
walletd.encrypt_wallet(passphrase=self.passphrase)
walletd.add_new_address()
walletd.encrypt_wallet(passphrase=self.passphrase)
with self.assertRaises(Exception):
walletd.encrypt_wallet(passphrase=self.passphrase)
def test_lock_wallet(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
walletd.add_new_address()
walletd.encrypt_wallet(passphrase=self.passphrase)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.add_new_address()
def test_unlock_wallet(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
walletd.add_new_address()
walletd.encrypt_wallet(passphrase=self.passphrase)
walletd.lock_wallet()
with self.assertRaises(ValueError):
walletd.add_new_address()
with self.assertRaises(WalletDecryptionError):
walletd.unlock_wallet(passphrase='pass123')
walletd.unlock_wallet(passphrase=self.passphrase)
walletd.add_new_address()
self.assertEqual(len(walletd.list_address()), 2)
def test_change_passphrase(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.PushTransaction = Mock(
return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED))
qaddress = walletd.add_new_address()
walletd.encrypt_wallet(passphrase=self.passphrase)
walletd.lock_wallet()
passphrase2 = 'pass000'
with self.assertRaises(ValueError):
walletd.change_passphrase(old_passphrase='pass123', new_passphrase='pass234')
walletd.change_passphrase(old_passphrase=self.passphrase, new_passphrase=passphrase2)
with self.assertRaises(WalletDecryptionError):
walletd.unlock_wallet(passphrase=self.passphrase)
walletd.unlock_wallet(passphrase=passphrase2)
qaddresses = walletd.list_address()
self.assertEqual(len(qaddresses), 1)
self.assertEqual(qaddresses[0], qaddress)
def test_get_transactions_by_address(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.GetTransactionsByAddress = Mock(
return_value=qrl_pb2.GetTransactionsByAddressResp(mini_transactions=[],
balance=0))
mini_transactions, balance = walletd.get_transactions_by_address(qaddress=get_alice_xmss(4).qaddress)
self.assertEqual(len(mini_transactions), 0)
self.assertEqual(balance, 0)
def test_get_transaction(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
tx = qrl_pb2.Transaction()
tx.fee = 10
tx.transaction_hash = b'1234'
tx.message.message_hash = b'hello'
pk = '01020016ecb9f39b9f4275d5a49e232346a15ae2fa8c50a2927daeac189b8c5f2d1' \
'8bc4e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e'
tx.public_key = bytes(hstr2bin(pk))
header_hash = 'ab'
walletd._public_stub.GetTransaction = Mock(
return_value=qrl_pb2.GetTransactionResp(tx=tx,
confirmations=10,
block_number=5,
block_header_hash=bytes(hstr2bin(header_hash))))
tx, confirmations, block_number, block_header_hash = walletd.get_transaction(tx_hash='1234')
self.assertIsNotNone(tx)
self.assertEqual(tx.transaction_hash, bin2hstr(b'1234'))
self.assertEqual(confirmations, "10")
self.assertEqual(block_number, 5)
self.assertEqual(block_header_hash, header_hash)
def test_get_balance(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.GetBalance = Mock(
return_value=qrl_pb2.GetBalanceResp(balance=1000))
balance = walletd.get_balance(self.qaddress)
self.assertEqual(balance, 1000)
def test_get_total_balance(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.GetTotalBalance = Mock(
return_value=qrl_pb2.GetTotalBalanceResp(balance=6000))
balance = walletd.get_total_balance()
self.assertEqual(balance, 6000)
def test_get_ots(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.GetOTS = Mock(
return_value=qrl_pb2.GetOTSResp(ots_bitfield=[b'\x00'] * 10, next_unused_ots_index=1))
ots_bitfield, next_unused_ots_index = walletd.get_ots(self.qaddress)
self.assertEqual(ots_bitfield, [b'\x00'] * 10)
self.assertEqual(next_unused_ots_index, 1)
def test_get_height(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
walletd._public_stub.GetHeight = Mock(
return_value=qrl_pb2.GetHeightResp(height=1001))
height = walletd.get_height()
self.assertEqual(height, 1001)
def test_get_block(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
block = qrl_pb2.Block()
block.header.hash_header = b'001122'
block.header.block_number = 1
walletd._public_stub.GetBlock = Mock(
return_value=qrl_pb2.GetBlockResp(block=block))
b = walletd.get_block('001122')
self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header))
self.assertEqual(b.header.block_number, block.header.block_number)
def test_get_block_by_number(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
block = qrl_pb2.Block()
block.header.hash_header = b'001122'
block.header.block_number = 1
walletd._public_stub.GetBlockByNumber = Mock(
return_value=qrl_pb2.GetBlockResp(block=block))
b = walletd.get_block_by_number(1)
self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header))
self.assertEqual(b.header.block_number, block.header.block_number)
def test_get_block_by_number2(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
alice = get_alice_xmss()
message = b'\xaf\xaf\xa2\xe4\xfc\xabv\xdb\xe5\xbf\xe9(\x9a\xe5\xf5\xfb' \
b'\xe5\x9a\x13\xde+\xe5{D_\x05m\x06\x1c\x8f\nG?\xed\xd6qip3'
tx = MessageTransaction.create(message_hash=message,
fee=1,
xmss_pk=alice.pk)
tx.sign(alice)
block = qrl_pb2.Block()
block.header.hash_header = b'001122'
block.header.block_number = 1
block.transactions.extend([tx.pbdata])
walletd._public_stub.GetBlockByNumber = Mock(
return_value=qrl_pb2.GetBlockResp(block=block))
b = walletd.get_block_by_number(1)
self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header))
self.assertEqual(b.header.block_number, block.header.block_number)
def test_get_block_by_number3(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
alice = get_alice_xmss()
message = b'\xaf\xaf\xa2B\x1f\xc7_\x1f\xfc;\xf5D^Hg\xb7R\x14\xa4Q\x82' \
b'\x1c \x9c\x861\x81\xa5\xdd\xe3\x81\x90\x89\xd6\xd4'
tx = MessageTransaction.create(message_hash=message,
fee=1,
xmss_pk=alice.pk)
tx.sign(alice)
block = qrl_pb2.Block()
block.header.hash_header = b'001122'
block.header.block_number = 1
block.transactions.extend([tx.pbdata])
walletd._public_stub.GetBlockByNumber = Mock(
return_value=qrl_pb2.GetBlockResp(block=block))
b = walletd.get_block_by_number(1)
self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header))
self.assertEqual(b.header.block_number, block.header.block_number)
def test_get_address_from_pk(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
pk = '01020016ecb9f39b9f4275d5a49e232346a15ae2fa8c50a2927daeac189b8c5f2d1' \
'8bc4e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e'
address = walletd.get_address_from_pk(pk)
self.assertEqual(address, 'Q010200670246b0026436b717f199e3ec5320ba6ab61d5eddff811ac199a9e9b871d3280178b343')
def test_get_node_info(self):
with set_qrl_dir("wallet_ver1"):
walletd = WalletD()
block_last_hash_str = 'c23f47a10a8c53cc5ded096369255a32c4a218682a961d0ee7db22c500000000'
version = "1.0.0"
num_connections = 10
num_known_peers = 200
uptime = 10000
block_height = 102345
block_last_hash = bytes(hstr2bin(block_last_hash_str))
network_id = "network id"
node_info = qrl_pb2.NodeInfo(version=version,
num_connections=num_connections,
num_known_peers=num_known_peers,
uptime=uptime,
block_height=block_height,
block_last_hash=block_last_hash,
network_id=network_id)
walletd._public_stub.GetNodeState = Mock(
return_value=qrl_pb2.GetNodeStateResp(info=node_info))
b = walletd.get_node_info()
self.assertEqual(b.info.version, version)
self.assertEqual(b.info.num_connections, num_connections)
self.assertEqual(b.info.num_known_peers, num_known_peers)
self.assertEqual(b.info.uptime, uptime)
self.assertEqual(b.info.block_height, block_height)
self.assertEqual(b.info.block_last_hash, block_last_hash)
self.assertEqual(b.info.network_id, network_id)
|
[
"kaushal.forex@gmail.com"
] |
kaushal.forex@gmail.com
|
0ab52593e61a8c030d9e303a4c84011ce9f94f21
|
75e24fc71cf0833bb6040fa5037a0523c67d4581
|
/nlplingo/active_learning/metrics.py
|
5c880ba632dbf6cfbae101db65920c9732147a90
|
[
"Apache-2.0"
] |
permissive
|
BBN-E/nlplingo
|
53d5ff2aa17d03a1c6db8afc8ed2b0cf683b1c55
|
32ff17b1320937faa3d3ebe727032f4b3e7a353d
|
refs/heads/main
| 2022-12-19T19:28:11.666850 | 2020-10-09T01:16:32 | 2020-10-09T01:16:32 | 302,090,268 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
import numpy as np
def best_vs_second_best(predictions):
"""Computes best vs second best metric
:type predictions: numpy.nparray
:rtype: numpy.nparray
"""
pred_sorted_arg = np.argsort(-predictions, axis=1)
best_vs_second_best_score = 1 - abs(
predictions[range(predictions.shape[0]), pred_sorted_arg[:, 0]] -
predictions[range(predictions.shape[0]), pred_sorted_arg[:, 1]]
)
return best_vs_second_best_score
|
[
"hqiu@bbn.com"
] |
hqiu@bbn.com
|
ef93815b785e5d1137f4ca132da2fb68ee16ca95
|
738fde3d5f8b38b0a087fffdb62db7007cad170c
|
/Metodos.py
|
56febda60ff5755fbf8b0df6a7a7a040ed489d1c
|
[] |
no_license
|
rubenAlbuquerque/Gestao-de-Dados-Clinicos
|
b8b4636937d61cc3ce4fe4ac5d9d47f6ab9a6292
|
d899423a420d9952b7a669e9fb85a88da7b8ce08
|
refs/heads/master
| 2020-08-05T07:49:40.639355 | 2019-10-02T22:35:00 | 2019-10-02T22:35:00 | 212,452,941 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,860 |
py
|
def verificar_sequencia(seq):
for i in range(len(seq)):
if seq[i] not in ["A","T","C","G"]:
return False
return True
class Pessoa:
def __init__(self):
self.contatos = []
def novo_cliente(self, dicionario, dados):
nome = input("Insira o nome do Cliente:")
contato = input("Insira o contacto do Cliente:")
self.contatos.append(contato)
dados['contatos'] = self.contatos
while True:
outro_contato = input("Insira outro contacto (ou digite 0 para sair):")
if outro_contato == '0':
break
else:
dados['contatos'].append(outro_contato)
dicionario[nome] = dados
return dicionario
def novo_contato_cliente(self, dicionario):
nome = input("Insira o nome do Cliente:")
if nome in dicionario.keys():
print("O",nome,"tem os seguintes contatos:",dicionario[nome]['contatos'])
novo_contato = input("Insira o novo contato de " + nome + ":")
dicionario[nome]['contatos'].append(novo_contato)
else:
print("Erro- Cliente não existe. Crie primeiro uma entrada...")
return dicionario
def associar_DNA(self, dicionario):
nome = input("Insira o nome do Cliente:")
if nome in dicionario.keys():
while True:
sequencia = input("Insira a sequencia de DNA de " + nome + ":").upper()
if verificar_sequencia(sequencia):
dicionario[nome]['DNA'] = sequencia
break
else:
print("Erro- A sequência de DNA não é valida...")
else:
print("Erro- Cliente não existe. Crie primeiro uma entrada...")
return dicionario
def apagar_contato(self, dicionario):
nome = input("Insira o nome do Cliente:")
print("Contatos de",nome,"são", dicionario[nome]['contatos'])
contato = input("Insira o contato que deseja excluir:")
if contato in dicionario[nome]['contatos']:
dicionario[nome]['contatos'].remove(contato)
else:
print("Erro- O contacto",contato,"não está associado ao Cliente", nome)
return dicionario
def agenda(self, dicionario):
nome = input("Insira o nome do Cliente:")
print("Cliente: ", nome)
if len(dicionario[nome]['contatos']) != 0:
for i in range(len(dicionario[nome]['contatos'])):
print("\tcontato " + str(i+1) + " : " + dicionario[nome]['contatos'][i])
if dicionario[nome]['DNA'] is None:
print('ola')
# if len(dicionario[nome]['DNA']) != 0:
# print("\tDNA: ", dicionario[nome]['DNA'])
else:
print("\tDNA: ", dicionario[nome]['DNA'])
|
[
"noreply@github.com"
] |
rubenAlbuquerque.noreply@github.com
|
e0335552143c00779abca57ca8f089e94c29776d
|
1ccc9ae90f86d6c57eb0c9348b908d652d7f46be
|
/manage.py
|
896a20a43039899698c3918c7245208086aae5f0
|
[] |
no_license
|
rizwans-cuelogic/flask-blog
|
03294f1c0df47368c23e0104a5960801b5c7c0d9
|
3c38125ed6146f5af6ced9f6fafca2e554e850e2
|
refs/heads/master
| 2021-08-19T22:07:33.821682 | 2017-11-27T14:51:04 | 2017-11-27T14:51:04 | 110,678,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 237 |
py
|
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
from app import app,db
migrate =Migrate(app,db)
manager = Manager(app)
manager.add_command('db',MigrateCommand)
if __name__=='__main__':
manager.run()
|
[
"rizwan.shaikh@cuelogic.com"
] |
rizwan.shaikh@cuelogic.com
|
859f1690e91ca3f518f15255794a51adc1e3c7c7
|
e457ec8103562439d153403ad9106d42664976af
|
/set4/Code7.py
|
48aecd35c48cc462aa9a2eed2e3b7c058d3f057b
|
[] |
no_license
|
ishaniMadhuwanthi/Python-Codes
|
548a5206bdcd50013abc91b3a0560f442bc910a7
|
8959b85bfa80a38f6111945e815499e39590233f
|
refs/heads/master
| 2022-08-12T03:59:05.770534 | 2022-07-26T04:55:18 | 2022-07-26T04:55:18 | 226,508,882 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 267 |
py
|
# Calculate mutiplication of two random float numbers
import random
num1 = random.random()
print("First Random float is ", num1)
num2 = random.uniform(9.5, 99.5)
print("Second Random float is ", num1)
num3 = num1 * num2
print("Multiplication is ", num3)
|
[
"noreply@github.com"
] |
ishaniMadhuwanthi.noreply@github.com
|
a2ae33df39f4c18bf1122e51783c1b3641f8a71b
|
0a004fc3fe8e36fd7ce0ed2cc7e8140982315e03
|
/unsupervised_learning/0x00-dimensionality_reduction/0-pca.py
|
96f2f628a740e86a328e4e2a17f3fdae39d1650a
|
[] |
no_license
|
pafuentess/holbertonschool-machine_learning
|
266ed4f05e106e194cdafe39544e48904f6538f4
|
3bffd1391b3fc790f0137d0afbe90eb8e2f7d713
|
refs/heads/master
| 2023-03-26T15:12:14.721409 | 2021-03-20T20:28:15 | 2021-03-20T20:28:15 | 279,388,813 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 284 |
py
|
#!/usr/bin/env python3
""" doc """
import numpy as np
def pca(X, var=0.95):
""" doc """
U, sigma, V = np.linalg.svd(X)
a_sum = np.cumsum(sigma)
dim = [i for i in range(len(sigma)) if((a_sum[i]) / a_sum[-1]) >= var]
ndim = dim[0] + 1
return V.T[:, :ndim]
|
[
"pafuentess@unal.edu.co"
] |
pafuentess@unal.edu.co
|
6c13a2bb9c012badbf065b7117c98cf2344d8b14
|
f7f834e68ce816011ae30be0883deef090fbeeed
|
/camp/Z_Template_2018/Day 5 - Space Invaders/space_invaders.py
|
be8cc7bd451a55706eed78c51f0099e5ac7b5db7
|
[] |
no_license
|
Rosebotics/PythonGameDesign2019
|
97b568cf999dea8642e254a22e528539946118e3
|
2f03476df940257adc2928f0c985c01daa5166f4
|
refs/heads/master
| 2020-06-04T04:42:35.656392 | 2019-06-22T16:21:57 | 2019-06-22T16:21:57 | 191,875,778 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,301 |
py
|
import pygame, sys, random, time
from pygame.locals import *
class Missile:
def __init__(self, screen, x):
# TODO: Save the screen into a field
# TODO: Save the x into a field
# TODO: Set the y to 591 as a field (which is just above the fighter)
# TODO: Set a field called exploded to False
pass
def move(self):
# TODO: Move the missile up 5
pass
def draw(self):
# TODO: Draw a red line from x, y that is 8 pixels in height
pass
class Fighter:
def __init__(self, screen, x, y):
self.screen = screen
self.image = pygame.image.load("fighter.png").convert()
self.image.set_colorkey((255, 255, 255))
self.x = x
self.y = y
self.missiles = []
def draw(self):
self.screen.blit(self.image, (self.x, self.y))
def fire(self):
self.missiles.append(Missile(self.screen, self.x + 50))
def remove_exploded_missles(self):
for k in range(len(self.missiles) - 1, -1, -1):
if self.missiles[k].exploded or self.missiles[k].y < 0:
del self.missiles[k]
class Badguy:
def __init__(self, screen, x, y):
self.dead = False
self.screen = screen
self.x = x
self.y = y
self.image = pygame.image.load("badguy.png").convert()
self.image.set_colorkey((0, 0, 0))
self.original_x = x
self.moving_right = True
def move(self):
if self.moving_right:
self.x = self.x + 2
if self.x > self.original_x + 100:
self.moving_right = False
else:
self.x = self.x - 2
if self.x < self.original_x - 100:
self.moving_right = True
def draw(self):
self.screen.blit(self.image, (self.x, self.y))
def hit_by(self, missile):
return pygame.Rect(self.x, self.y, 70, 45).collidepoint(missile.x, missile.y)
class EnemyFleet:
def __init__(self, screen, enemy_rows):
self.badguys = []
for j in range(enemy_rows):
for k in range(8):
self.badguys.append(Badguy(screen, 80 * k, 50 * j + 20))
@property
def is_defeated(self):
return len(self.badguys) == 0
def move(self):
for badguy in self.badguys:
badguy.move()
def draw(self):
for badguy in self.badguys:
badguy.draw()
def remove_dead_badguys(self):
for k in range(len(self.badguys) - 1, -1, -1):
if self.badguys[k].dead:
del self.badguys[k]
def main():
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption("Space Invaders")
screen = pygame.display.set_mode((640, 650))
# TODO: Set enemy_rows to an initial value of 3.
# TODO: Create an EnemyFleet object (called enemy) with the screen and enemy_rows
# TODO: Create a Fighter (called fighter) at location 320, 590
while True:
clock.tick(60)
for event in pygame.event.get():
pressed_keys = pygame.key.get_pressed()
# TODO: If the event type is KEYDOWN and pressed_keys[K_SPACE} is True, then fire a missile
if event.type == QUIT:
sys.exit()
screen.fill((0, 0, 0))
pressed_keys = pygame.key.get_pressed()
# TODO: If K_LEFT is pressed move the fighter left 3
# TODO: If K_RIGHT is pressed move the fighter right 3
# TODO: Draw the fighter
# TODO: Move the enemy
# TODO: Draw the enemy
# TODO: For each missle in the fighter missiles
# TODO: Move the missle
# TODO: Draw the missle
# TODO: For each badguy in the enemy badguys
# TODO: For each missle in the fighter missiles
# TODO: If the badguy is hit by the missle
# TODO: Mark the badguy as dead = True
# TODO: Mark the missile as exploded = True
# TODO: Use the fighter to remove exploded missiles
# TODO: Use the enemy to remove dead badguys
# TODO: If the enemy id_defeated
# TODO: Increment the enemy_rows
# TODO: Create a new enemy with the screen and enemy_rows
pygame.display.update()
main()
|
[
"fisherds@rose-hulman.edu"
] |
fisherds@rose-hulman.edu
|
3b30c30a366cc29bba2b5cedcd49a312819055e8
|
db0c012043f9eabe13ee3e7d830b061ce6c4aa24
|
/rvn_rpc.py
|
90fac21575b17d0d7fb9cbf28b5de1cda0a01dc1
|
[] |
no_license
|
jeroz1/raven-trader-pro
|
80ebd433ef452b430f725441f8da772cae115680
|
44a1b1c6162a83f472d4b0425b34e73b4b55ac6b
|
refs/heads/main
| 2023-05-03T00:40:36.054661 | 2021-05-16T18:58:04 | 2021-05-16T18:58:04 | 367,949,711 | 0 | 0 | null | 2021-05-16T17:50:17 | 2021-05-16T17:50:16 | null |
UTF-8
|
Python
| false | false | 2,390 |
py
|
from jsonrpcclient.requests import Request
from requests import post, get
from decimal import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
import sys, getopt, argparse, json, time, getpass, os.path
from util import *
from config import *
def do_rpc(method, log_error=True, **kwargs):
req = Request(method, **kwargs)
try:
resp = post(RPC_URL, json=req)
if resp.status_code != 200:
print("==>", end="")
print(req)
print("<== ERR:", end="")
print(resp.text)
return json.loads(resp.text)["result"]
except:
print("RPC Error")
return None
def decode_full(txid):
resp = get(TX_QRY.format(txid))
if resp.status_code != 200:
print("Error fetching raw transaction")
result = json.loads(resp.text)
return result
def check_unlock(timeout = 10):
print("Unlocking Wallet for {}s".format(timeout))
phrase_test = do_rpc("help", command="walletpassphrase")
#returns None if no password set
if(phrase_test.startswith("walletpassphrase")):
do_rpc("walletpassphrase", passphrase=RPC_UNLOCK_PHRASE, timeout=timeout)
def dup_transaction(tx):
new_vin = []
new_vout = {}
for old_vin in tx["vin"]:
new_vin.append({"txid": old_vin["txid"], "vout": old_vin["vout"], "sequence": old_vin["sequence"]})
for old_vout in tx["vout"]:
vout_script = old_vout["scriptPubKey"]
vout_addr = vout_script["addresses"][0]
if(vout_script["type"] == "transfer_asset"):
new_vout[vout_addr] = make_transfer(vout_script["asset"]["name"], vout_script["asset"]["amount"])
else:
new_vout[vout_addr] = old_vout["value"]
return new_vin, new_vout
def search_swap_tx(utxo):
    """Search the last 10 blocks for the transaction matching a swap UTXO.

    *utxo* is a "txid|vout" string. Returns the matching txid, or None when
    nothing is found within the 10-block window.
    """
    utxo_parts = utxo.split("|")
    height = do_rpc("getblockcount")
    check_height = height
    # Walk backwards from the chain tip, at most 10 blocks.
    while check_height >= height - 10:
        # NOTE(review): `hash` shadows the builtin of the same name.
        hash = do_rpc("getblockhash", height=check_height)
        details = do_rpc("getblock", blockhash=hash, verbosity=2)
        for block_tx in details["tx"]:
            for tx_vin in block_tx["vin"]:
                # Some inputs (e.g. coinbase) carry no "vout" key, hence the guard.
                if "vout" in tx_vin and block_tx["txid"] == utxo_parts[0] and tx_vin["vout"] == int(utxo_parts[1]):
                    return block_tx["txid"]
        check_height -= 1
    print("Unable to find transaction for completed swap")
    return None #If we don't find it 10 blocks back, who KNOWS what happened to it
|
[
"ben.d.abraham@gmail.com"
] |
ben.d.abraham@gmail.com
|
7c7ec50d29b03c3642ab2ceba8b96c4be5487afb
|
669e9241b02bdaa303fbc2fd4023b90d4d179a59
|
/Basketball Scoreboard/challenge1.py
|
72070c13f348ee839784ae72678555d7d2e7e973
|
[] |
no_license
|
benjaminpotter/HatchProjects
|
0854cf46ae7c3781468116a5d63b703dd54ae68c
|
7f6a948d3474c755d071751b725c059e6c7f3553
|
refs/heads/master
| 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 997 |
py
|
def setup():
    """Initialize the sketch window and reset all score counters."""
    # Without this declaration the three counters would only be locals of
    # setup(), and drawScoreboard()/addPoints() — which declare them with
    # `global` — would hit a NameError on first read.
    global threePoint, fieldGoal, freeThrow
    size(400, 400)
    threePoint = 0
    fieldGoal = 0
    freeThrow = 0
def drawScoreboard():
    """Repaint the scoreboard: three buttons plus the running total."""
    global threePoint, fieldGoal, freeThrow
    background(0, 0, 0)
    noFill()
    stroke(255, 0, 0)
    # Outlines for the three scoring buttons along the bottom edge.
    rect(30, 337, 110, 34)
    rect(155, 337, 110, 34)
    rect(278, 337, 116, 34)
    fill(255)
    textSize(22)
    text("3-Point", 50, 361)
    text("Field Goal", 160, 361)
    text("Free Throw", 279, 361)
    textSize(150)
    # Tint the total score by how the points were accumulated.
    fill(threePoint * 1.2 + 30, fieldGoal * 1.3 + 30, freeThrow * 1.8 + 30)
    # Total: 3 points per three-pointer, 2 per field goal, 1 per free throw.
    text(threePoint * 3 + fieldGoal * 2 + freeThrow, 116, 200)
def addPoints():
    """Increment the counter for whichever scoreboard button is under the mouse."""
    global threePoint, fieldGoal, freeThrow
    # mouseX/mouseY are Processing sketch globals; the ranges below mirror
    # the button rectangles drawn in drawScoreboard().
    if mouseX > 30 and mouseX < 140 and mouseY > 337 and mouseY < 371:
        threePoint += 1
    elif mouseX > 155 and mouseX < 265 and mouseY > 337 and mouseY < 371:
        fieldGoal += 1
    elif mouseX > 278 and mouseX < 388 and mouseY > 337 and mouseY < 371:
        freeThrow += 1
def draw():
    # Processing draw loop: repaint the scoreboard every frame.
    drawScoreboard()
def mousePressed():
    # Processing mouse handler: route clicks to the score buttons.
    addPoints()
|
[
"reactiveneon@gmail.com"
] |
reactiveneon@gmail.com
|
f2da7757695997c51c9de4f489b7d3c00f52f014
|
a6a7ade8086f7137bc30001fb3a882dcda4d84d5
|
/121. Best Time to Buy and Sell Stock 2.py
|
517c1559084c900dfba259feca8a1d94d8821493
|
[] |
no_license
|
kartikb7/LeetCode
|
389ea67409beb2a3bbd990d89e9d96b150b6ad0d
|
62ea0c940a5b36ca58b3e8a5a5a39be97088269f
|
refs/heads/master
| 2020-11-25T03:09:03.988856 | 2020-07-10T17:00:47 | 2020-07-10T17:00:47 | 228,472,777 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Return the best profit from one buy followed by one later sell.

        Tracks the lowest price seen so far; any later, higher price is a
        candidate sell. Returns 0 when no profitable trade exists (including
        for an empty price list).
        """
        best = 0
        # float('inf') replaces the old 999999 sentinel, which silently
        # produced wrong answers for prices at or above that value.
        lowest = float('inf')
        for price in prices:
            if price > lowest:
                best = max(best, price - lowest)
            else:
                lowest = price
        return best
|
[
"kartikbansal007@gmail.com"
] |
kartikbansal007@gmail.com
|
42d8299937567087b6cb064aff12ef0812305445
|
e0a3050ba6c87f3f180505cc1d8e1b2bd69d0f62
|
/Notes/Lecture 5 - Tuples and Lists/oddTuples.py
|
3271e891eeef34657fb413e1c87931611bfc6c5b
|
[] |
no_license
|
P450/MIT-6.0001x
|
7ddab872de5da67551bcc9f3c59227cf9e0ed317
|
8985289a047dcc42bce96d9b8a4535bf71032b51
|
refs/heads/master
| 2020-03-28T16:32:45.044744 | 2018-09-13T23:05:50 | 2018-09-13T23:05:50 | 148,706,202 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,401 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 15:28:57 2017
@author: Jae You
Write a procedure called oddTuples, which takes a tuple as input, and returns
a new tuple as output, where every other element of the input tuple is copied,
starting with the first one. So if test is the tuple ('I', 'am', 'a', 'test', 'tuple'),
then evaluating oddTuples on this input would return the tuple ('I', 'a', 'tuple').
"""
def oddTuples(aTup):
    """Return a tuple of every other element of aTup, starting at index 0."""
    result = ()
    for index, element in enumerate(aTup):
        # Keep only the even-indexed positions: 0, 2, 4, ...
        if index % 2 == 0:
            result += (element,)
    return result
print(oddTuples(()))
# Solutions:
def oddTuples(aTup):
    """Return every other element of aTup (indices 0, 2, 4, ...) as a tuple."""
    gathered = ()
    # Step through the even indices directly rather than counting by hand
    # in a while loop.
    for position in range(0, len(aTup), 2):
        gathered += (aTup[position],)
    return gathered
def oddTuples2(aTup):
    """Slicing-based variant: every other element of aTup, starting at index 0."""
    # An extended slice with a step of 2 selects indices 0, 2, 4, ...
    every_other = slice(None, None, 2)
    return aTup[every_other]
|
[
"27456372+P450@users.noreply.github.com"
] |
27456372+P450@users.noreply.github.com
|
8d0a04ca9094ab279081e20ae900e1d10d2bfe8a
|
34fffaa3bc670abb8700da0c521bafcfdce15ea4
|
/float_func/steps.py
|
f839de125bf95533641fab71c70669ab74cc8345
|
[] |
no_license
|
julialhk/leds-seq-creator
|
ac111bac63dc131719816e09df907bcd251e1515
|
35d588c74f826628708ffcb2f159e346e31b5e27
|
refs/heads/master
| 2020-11-24T14:34:33.593417 | 2019-12-22T16:27:01 | 2019-12-22T16:27:01 | 228,195,512 | 0 | 0 | null | 2019-12-15T14:14:34 | 2019-12-15T14:14:33 | null |
UTF-8
|
Python
| false | false | 559 |
py
|
class StepsFloatFunc:
    """A stepwise float function: *num_of_steps* increments of *value_diff*
    each, starting from *initial_value*."""

    def __init__(self, num_of_steps, value_diff, initial_value):
        self.num_of_steps = num_of_steps
        self.value_diff = value_diff
        self.initial_value = initial_value

    @classmethod
    def from_timing(cls, timing, diff=0.25, initial_value=0.0):
        """Build a step function whose step count follows a timing's repeats."""
        return cls(timing.number_of_repeats(), diff, initial_value)

    def to_json_obj(self):
        """Serialize to the JSON wire format ("steps" function type)."""
        return {
            "t": "steps",
            "num": self.num_of_steps,
            "diff": self.value_diff,
            "init": self.initial_value,
        }
|
[
"amirgiraffe@gmail.com"
] |
amirgiraffe@gmail.com
|
d2d896e33f071613f54f7316003d94779c5ad272
|
201238828ce40894d0523d6a25f6b5516ac0cd1d
|
/lab3/test.py
|
c273afacdef3a9ad40a0b2e2225c2a1471abecbc
|
[] |
no_license
|
Kaskere/zuters-lab
|
992d4510ff8d39fe10f6203660d33d16f04f569a
|
e75a7f5d9dab7f9f94c204703f548d0911f02428
|
refs/heads/master
| 2022-03-07T11:26:55.457878 | 2013-04-08T21:39:19 | 2013-04-08T21:39:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,385 |
py
|
import os
import numpy
def test():
    # Smoke-test lab3's MLP trainer on the boolean and multi-class datasets.
    # NOTE: Python 2 code (print statements); runs each dataset through
    # train_mlp and echoes input : expected : actual triples.
    import lab3
    lab_root = os.path.dirname(__file__)
    numpy.set_printoptions(precision=3, suppress=True)
    print "--------------------------------------------------------------------------------"
    print "----------------------------------LAB3------------------------------------------"
    print "--------------------------------------------------------------------------------"
    # g is forwarded to train_mlp/run — presumably an activation/gain
    # parameter; confirm against lab3's signature.
    g = 0.2
    samples = lab3.read_table(lab_root + "/examples_boolean.txt")
    answers = lab3.read_table(lab_root + "/d_notand.txt")
    epoch, err, weights = lab3.train_mlp(samples, answers, [2, 1], 500, g, 0.1, 0.01)
    print "Trained on d_notand.txt in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(samples, weights, g)
    for inp, expected, out in zip(samples, answers, results[-1][:, :-1]):
        print "%s : %s : %.3f" % (inp, expected, out)
    answers = lab3.read_table(lab_root + "/d_xor.txt")
    epoch, err, weights = lab3.train_mlp(samples, answers, [2, 1], 500, g, 0.1, 0.01)
    print "Trained on d_xor.txt in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(samples, weights, g)
    for inp, expected, out in zip(samples, answers, results[-1][:, :-1]):
        print "%s : %s : %.3f" % (inp, expected, out)
    samples = lab3.read_table(lab_root + "/examples.txt")
    answers = lab3.read_table(lab_root + "/d.txt")
    # Two normalization schemes are compared: linear scaling to [0, 1] and
    # mapping the three class labels onto {0.1, 0.5, 0.9}.
    norm_samples = (samples - 1) / (samples.max() - 1)
    norm_answers = (answers - 1) / (answers.max() - 1)
    epoch, err, weights = lab3.train_mlp(norm_samples, norm_answers, [4, 1], 500, g, 0.1, 0.01)
    print "Trained on d.txt (0, 0.5, 1) in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(norm_samples, weights, g)
    for inp, expected, out in zip(norm_samples, norm_answers, results[-1][:, :-1]):
        print "%s : %s : %.3f" % (inp, expected, out)
    norm_samples = (samples == 3) * 0.9 + (samples == 2) * 0.5 + (samples == 1) * 0.1
    norm_answers = (answers == 3) * 0.9 + (answers == 2) * 0.5 + (answers == 1) * 0.1
    epoch, err, weights = lab3.train_mlp(norm_samples, norm_answers, [4, 1], 500, g, 0.1, 0.01)
    print "Trained on d.txt (0.1, 0.5, 0.9) in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(norm_samples, weights, g)
    for inp, expected, out in zip(norm_samples, norm_answers, results[-1][:, :-1]):
        print "%s : %s : %.3f" % (inp, expected, out)
    g = 0.3
    answers = lab3.read_table(lab_root + "/d2.txt")
    norm_samples = (samples - 1) / (samples.max() - 1)
    epoch, err, weights = lab3.train_mlp(norm_samples, answers, [4, 3], 500, g, 0.01, 0.01)
    print "Trained on d2.txt (0, 1) in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(norm_samples, weights, g)
    for inp, expected, out in zip(norm_samples, answers, results[-1][:, :-1]):
        print "%s : %s : %s" % (inp, expected, out)
    norm_samples = (samples == 3) * 0.9 + (samples == 2) * 0.5 + (samples == 1) * 0.1
    norm_answers = (answers == 1) * 0.9 + (answers == 0) * 0.1
    epoch, err, weights = lab3.train_mlp(norm_samples, norm_answers, [4, 3], 500, g, 0.1, 0.01)
    print "Trained on d2.txt (0.1, 0.9) in %d epochs, MSE %.3f" % (epoch, err)
    results = lab3.run(norm_samples, weights, g)
    for inp, expected, out in zip(norm_samples, norm_answers, results[-1][:, :-1]):
        print "%s : %s : %s" % (inp, expected, out)
|
[
"emils.solmanis@gmail.com"
] |
emils.solmanis@gmail.com
|
08b01af01392cb5b5e0ab0605c707494fea4e10e
|
05c9f1af21a698e09f7ec37a075624250e907262
|
/samples/cloud_loadbalancers/session_persistence.py
|
65361528513dff78dabf813b885ccaf5a90b79a5
|
[
"Apache-2.0"
] |
permissive
|
pycontribs/pyrax
|
5f5a1d6816f5a831b1ae4b74ffaf438a1c0269a6
|
2397136b75e6fcc906ee406e9c1bc7aaef94387a
|
refs/heads/master
| 2023-08-28T16:43:21.037208 | 2022-09-21T15:14:38 | 2022-09-21T15:14:38 | 5,975,139 | 10 | 27 |
Apache-2.0
| 2021-07-12T21:23:11 | 2012-09-27T01:05:57 |
Python
|
UTF-8
|
Python
| false | false | 1,492 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import pyrax
# Authenticate against Rackspace with the standard credentials file.
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
clb = pyrax.cloud_loadbalancers
try:
    lb = clb.list()[0]
except IndexError:
    # No load balancers on the account: nothing to demonstrate.
    print("You do not have any load balancers yet.")
    print("Please create one and then re-run this script.")
    sys.exit()
print("Load Balancer:", lb)
orig = lb.session_persistence
print("Current setting of session persistence:", orig or '""')
print()
# Toggle: clear persistence if it is set, otherwise enable HTTP_COOKIE.
if orig:
    print("Clearing...")
    lb.session_persistence = ""
else:
    print("Setting persistence to HTTP_COOKIE...")
    lb.session_persistence = "HTTP_COOKIE"
print("New setting of session persistence:", lb.session_persistence or '""')
|
[
"ed@leafe.com"
] |
ed@leafe.com
|
e16744594394b55d918f30bbdab8881dc469c519
|
b359bff916008fac8581175ff3848e4fbed45179
|
/frequency.py
|
5399d1e73bd489952dce333a5ab2bb8cf1622ec8
|
[] |
no_license
|
hongcho7/Python-Snippets
|
ace08dbf029bd3fc7db81c90f383a7f24faca65c
|
ea7e8e486fd13ceea2a583e996d2289f77ef01b7
|
refs/heads/main
| 2023-06-04T19:44:22.676268 | 2021-06-22T14:57:45 | 2021-06-22T14:57:45 | 378,991,622 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
# Finding the frequency of each element in a list with collections.Counter.
from collections import Counter

my_list = ['a', 'a', 'b', 'b', 'c', 'd', 'd', 'd', 'd', 'd']
count = Counter(my_list)
print(count)
# Counter({'d': 5, 'a': 2, 'b': 2, 'c': 1})
print(count['b'])
# 2   (the original note claimed 3, but 'b' appears twice)
print(count.most_common(1))
# [('d', 5)]
# Fixes: the expected-output lines above were bare (syntax errors) and are
# now comments, and the misspelled `pirnt` is corrected to `print`.
|
[
"alex0114@naver.com"
] |
alex0114@naver.com
|
54428d50317cd7680a740ae8e5227d8a8980beea
|
9f9076ef4e6c41b327d78bc6dc0d5bbc0d194a57
|
/OOPs In Python/Operator Overloading.py
|
16a2376459c8e44452f11863d6ca87150503907d
|
[] |
no_license
|
bnitish101/Python-Practice
|
d6ca0103438159c389d2e605c357275998b344dc
|
b864d54e47396cfdcf87270146309c3cd6a918c8
|
refs/heads/master
| 2021-02-15T20:34:44.904051 | 2020-04-05T19:33:15 | 2020-04-05T19:33:15 | 244,200,321 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 951 |
py
|
class Student:
    """Two exam marks with operator overloading: +, >, and str()."""

    def __init__(self, m1, m2):
        self.m1 = m1
        self.m2 = m2

    def __add__(self, other):
        """Return a new Student whose marks are the element-wise sums."""
        return Student(self.m1 + other.m1, self.m2 + other.m2)

    def __gt__(self, other):
        """Compare students by total marks (was a verbose if/else returning
        True/False; the comparison already yields a bool)."""
        return self.m1 + self.m2 > other.m1 + other.m2

    def __str__(self):
        """Render as "m1 m2" — called implicitly by print()."""
        return '{} {}'.format(self.m1, self.m2)
# Demo: exercise the overloaded operators defined on Student.
s1 = Student(2, 4)
s2 = Student(3, 4)
s3 = s1 + s2  # invokes Student.__add__
print(s3.m1)
print(Student.__add__(s1, s2).m2)  # same call, written explicitly
if s1 > s2:  # invokes Student.__gt__
    print('S1 wins\n')
else:
    print('S2 wins\n')
print(type(s1))
print(s1) # if print anything it'll automatically call __str__() magic method,
# class Student returns value of m1 and m2 in __str__() method which is overloading by inbuilt __str__() method
x = 2
y = '2'
print('\n', type(x))
print(type(y))
# Calling the dunder directly on the type — equivalent to str(x)/str(y).
print(int.__str__(x))
print(str.__str__(y))
|
[
"35606236+bnitish101@users.noreply.github.com"
] |
35606236+bnitish101@users.noreply.github.com
|
38f215aeb9d06b4cd61b0ff7ebf69ff0eac92ac6
|
228dd278c875b9539908afffefcfe5b130a28a62
|
/v2/src/code/verification/measure_service/measure_verify.py
|
4ee6c5907c23cf9f2d2318a7f5dfc98dd2d27642
|
[] |
no_license
|
sheriefvt/MARS-services
|
57d9ca81f1f606ca8a8938e75b41fb806a36f8b9
|
a5f6b6140b0c8a30cd083a41a465f0bc999017a1
|
refs/heads/master
| 2021-04-30T12:50:53.156082 | 2018-02-13T02:06:11 | 2018-02-13T02:06:11 | 121,283,295 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,431 |
py
|
# Verification script for the MARS network-measure service (Python 2).
# Requests each measure for two sample graphs, then diffs the generated
# output files against stored .valid reference files.
import filecmp,requests,time
import ConfigParser,io
print 'Start verification testing for network measure service'
with open ('mars.config', "r") as myfile:
    data=myfile.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(data))
# Pull every deployment setting; only host/port3/output_path are used below —
# the rest are read for parity with the other verification scripts.
server = config.get("MARS_configuration", "server")
host = config.get("MARS_configuration", "host")
port = config.get("MARS_configuration", "port")
port2 = config.get("MARS_configuration", "port2")
port3 = config.get("MARS_configuration", "port3")
database_path = config.get("MARS_configuration", "database")
index_path1 = config.get("MARS_configuration", "index1")
index_path2 = config.get("MARS_configuration", "index2")
file_path = config.get("MARS_configuration", "uploadfile")
qsub_path = config.get("MARS_configuration", "qsub")
graph_path = config.get("MARS_configuration", "graph")
code_path = config.get("MARS_configuration", "code")
output_path = config.get("MARS_configuration", "output")
notify = config.get("MARS_configuration", "storage_notify")
print 'Testing measure service with network sample'
# Measures are numbered 1..8 (range upper bound is exclusive).
num_measure=9
url11 ='http://'+host+":"+port3+"/graphservice/measure/compute"
for l in range(1,num_measure):
    print 'requesting measure {i} for network sample'.format(i=l)
    data11={'graph':'sample','measure':l}
    r = requests.get(url11,params=data11)
    time.sleep(3)
print 'Waiting for files to be generated..'
time.sleep(5)
print 'Diffing files..'
for l in range(1 , num_measure):
    if filecmp.cmp(output_path+'sample_{i}.out'.format(i=l), output_path+'sample_{i}.out.valid'.format(i=l)):
        print 'Measure {i} valid'.format(i=l)
    else:
        print 'Measure {i} invalid'.format(i=l)
print 'Testing measure service with network sample2'
for l in range(1,num_measure):
    print 'requesting measure {i} for network sample2'.format(i=l)
    data11={'graph':'sample2','measure':l}
    r = requests.get(url11,params=data11)
    time.sleep(3)
print 'Waiting for files to be generated..'
time.sleep(5)
print 'Diffing files..'
for l in range(1 , num_measure):
    if filecmp.cmp(output_path+'sample2_{i}.out'.format(i=l), output_path+'sample2_{i}.out.valid'.format(i=l)):
        print 'Measure {i} valid'.format(i=l)
    else:
        print 'Measure {i} invalid'.format(i=l)
print 'network measure service validation test complete..'
|
[
"sherif@cos.io"
] |
sherif@cos.io
|
61da79106d0b6d352e14ce5514b9aec1b7a2251c
|
1201c529057e3e69072d2fbc3ceb59be54dcc2ff
|
/Assignment_6.5.py
|
a0f5e96dfcdbbee42799e433fe419aeda6b86c5c
|
[] |
no_license
|
TheChandaChen/github-upload
|
d02b54f190bd8cdb5adc4f14abecb3429e29a0e5
|
d5a463ce2cc47d1a27870afc528b6f61d2b97ce1
|
refs/heads/master
| 2022-11-12T02:45:36.221822 | 2020-06-28T03:58:38 | 2020-06-28T03:58:38 | 275,484,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 180 |
py
|
# Extract the numeric confidence value from a mail-header style line.
text = "X-DSPAM-Confidence: 0.8475"
cpos = text.find(':')          # position of the separator
numtext = len(text)
num = text[cpos + 1:numtext]   # everything after the colon
numfinal = num.strip()
finalfloat = float(numfinal)
print(numfinal)
|
[
"TheChandaChen@gmail.com"
] |
TheChandaChen@gmail.com
|
73463d8bd35a06a80126bfb0fd095daeac78fe2a
|
a13fa33ed25f8556722eaa543d4db55350282764
|
/database/database.py
|
5d49827330d15ca4bb1c88c5ef62ef319d3897e8
|
[] |
no_license
|
Vytautasragaisis07/database_one_to_one
|
7b7407fb96d423ef817faaa3a43bfb1ae3c29a0c
|
bfd08e78f6cb32a40e49309044eb770c13f62813
|
refs/heads/master
| 2020-11-24T23:18:22.196318 | 2019-12-16T12:39:35 | 2019-12-16T12:39:35 | 228,383,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,949 |
py
|
import sqlite3
from customers.customer import customer
from customers.transaction import transaction
def open_connection():
    """Open the customers.db SQLite database; return (connection, cursor)."""
    conn = sqlite3.connect("customers.db")
    return conn, conn.cursor()
def close_connection(connection, cursor):
    """Release a cursor/connection pair obtained from open_connection()."""
    # Close the cursor first, then its connection.
    for resource in (cursor, connection):
        resource.close()
def create_customers_table():
    """Create the customers table (FK to transactions) if it does not exist."""
    try:
        connection, cursor = open_connection()
        query = """CREATE TABLE IF NOT EXISTS customers (
        customer_id INTEGER PRIMARY KEY AUTOINCREMENT,
        customer_name TEXT,
        customer_lastName TEXT,
        transaction_id int,
        FOREIGN KEY(transaction_id) REFERENCES transactions(transaction_id))
        """
        cursor.execute(query)
    except sqlite3.DatabaseError as error:
        print(error)
    finally:
        # NOTE(review): if open_connection() itself raises, `connection` and
        # `cursor` are unbound here and this line raises NameError — confirm.
        close_connection(connection, cursor)
def create_transactions_table():
    """Create the transactions table (FK back to customers) if absent."""
    try:
        connection, cursor = open_connection()
        query = """CREATE TABLE IF NOT EXISTS transactions (
        transaction_id INTEGER PRIMARY KEY AUTOINCREMENT,
        transaction_number REAL,
        customer_id int,
        FOREIGN KEY(customer_id) REFERENCES customers(customer_id))
        """
        cursor.execute(query)
    except sqlite3.DatabaseError as error:
        print(error)
    finally:
        # NOTE(review): same unbound-name hazard as create_customers_table
        # when open_connection() fails.
        close_connection(connection, cursor)
def query_database(query, params=None):
    """Run *query* against customers.db.

    With *params* the statement is executed and committed (writes); without,
    it is treated as a SELECT and each row is printed. Errors are printed,
    never raised, matching the module's other helpers.
    """
    connection = None
    try:
        connection, cursor = open_connection()
        if params:
            cursor.execute(query, params)
            connection.commit()
        else:
            for row in cursor.execute(query):
                print(row)
    except sqlite3.DatabaseError as error:
        # DatabaseError (not the narrower DataError the original caught)
        # keeps error handling consistent with the table-creation helpers
        # and also covers operational errors.
        print(error)
    finally:
        # Guard: the original unconditionally referenced `connection`,
        # raising NameError whenever open_connection() itself failed.
        if connection is not None:
            connection.close()
def create_customer(customer):
    """Insert *customer* as a new row in the customers table."""
    row = (customer.customer_id, customer.customer_name,
           customer.customer_lastName, customer.transaction_id)
    query_database("""INSERT INTO customers VALUES (?, ?, ?, ?)""", row)
def create_transaction(transaction, customer_id):
    """Insert *transaction* linked to *customer_id* into transactions."""
    row = (transaction.transaction_id, transaction.transaction_number, customer_id)
    query_database("""INSERT INTO transactions VALUES (?, ?, ?)""", row)
def get_customer():
    """Print every row of the customers table."""
    query_database("""SELECT * FROM customers""")
def get_transaction():
    """Print every row of the transactions table."""
    query_database("""SELECT * FROM transactions""")
def delete_customer(customer):
    """Delete the given customer's row from the customers table.

    The original body was a copy-paste of get_customer() and only ran a
    SELECT; this version actually removes the row identified by customer_id.
    """
    query = """DELETE FROM customers WHERE customer_id = ?"""
    query_database(query, (customer.customer_id,))
def update_customer(customer_id, transaction_id):
    """Point an existing customer row at *transaction_id*."""
    query_database(
        """UPDATE customers SET transaction_id = ? WHERE customer_id = ?""",
        (transaction_id, customer_id),
    )
def insert_record(customer, transaction):
    """Insert a customer and its transaction, then link the two rows both ways."""
    create_customer(customer)
    connect, cursor = open_connection()
    # NOTE(review): the lookup is hard-coded to customer_name = 'Vytautas',
    # so this only works for the demo record below — confirm before reuse.
    customer_id_for_transaction = cursor.execute("SELECT customer_id FROM customers WHERE customer_name = 'Vytautas'").fetchone()
    close_connection(connect, cursor)
    # fetchone() returns a 1-tuple, e.g. (1,)
    customer.customer_id = customer_id_for_transaction[0]
    # e.g. 1
    create_transaction(transaction, customer.customer_id)
    connect, cursor = open_connection()
    # Grab the id of the transaction just inserted (highest autoincrement).
    transaction_id_for_customer = cursor.execute("SELECT transaction_id FROM transactions ORDER BY transaction_id DESC LIMIT 1").fetchone()
    close_connection(connect, cursor)
    transaction.transaction_id = transaction_id_for_customer[0]
    update_customer(customer.customer_id, transaction.transaction_id)
# Demo: create both tables, insert one linked customer/transaction pair,
# then dump the contents of each table.
customer1 = customer(None, "Vytautas", "Ragaisis", None)
transaction1 = transaction(None, 123456, None)
create_customers_table()
create_transactions_table()
insert_record(customer1, transaction1)
#create_customer(customer1)
#create_transaction(transaction1)
get_customer()
get_transaction()
|
[
"moksleivis@KITM.local"
] |
moksleivis@KITM.local
|
e87991ceb39fddf4a9b31d6375b7faf965e740ce
|
881f9d77497bfe7d079fce75f313e2f81ea845fe
|
/stockweb/Lib/site-packages/streamlit/elements/text.py
|
72d1a59a905eb260e2d10ffa913f06ebf31a7948
|
[] |
no_license
|
CollatMK/stockwebapp
|
5674b56434bfe081edb2db9a047a890535aeaf1d
|
294828c40ee45fc1a0694e39c5b79acfa3335a92
|
refs/heads/master
| 2023-06-07T01:54:08.760408 | 2021-06-18T05:10:07 | 2021-06-18T05:10:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,437 |
py
|
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast
import streamlit
from streamlit.proto.Text_pb2 import Text as TextProto
from .utils import clean_text
class TextMixin:
    def text(self, body):
        """Write fixed-width and preformatted text.

        Parameters
        ----------
        body : str
            The string to display.

        Example
        -------
        >>> st.text('This is some text.')
        """
        proto = TextProto()
        proto.body = clean_text(body)
        return self.dg._enqueue("text", proto)

    @property
    def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
        """The DeltaGenerator this mixin is mounted on (i.e. self)."""
        return cast("streamlit.delta_generator.DeltaGenerator", self)
|
[
"yyc95317@outlook.com"
] |
yyc95317@outlook.com
|
fae959446735dbc58472cf7a01d309d96b121fec
|
0779982f27f2bc1b5481c96449e59cf64dd2cdc3
|
/DSRevision/Anagrams.py
|
78d4656163e1639593633bcab639d0650df6953b
|
[] |
no_license
|
meghaggarwal/Data-Structures-Algorithms
|
2c06839a8f492120080898cef29b9ceb12caf008
|
368009eb7c38ae0e29a70fa360ee84841d1273b3
|
refs/heads/master
| 2023-03-09T23:51:55.838135 | 2021-02-24T09:40:25 | 2021-02-24T09:40:25 | 265,654,231 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
# Given two strings s1 and s2, check whether s1 and s2 are anagrams.
# Input : s1 = "abcd", s2 = "adcb"
# Output: yes
# Approaches: XOR of character codes, hashing/counting, or sorting (O(n log n)).
|
[
"meghaa.2703@gmail.com"
] |
meghaa.2703@gmail.com
|
93d6b00bdbbb4ab3e4740d1dab84fae78dfa36fd
|
0bf6b634267f02c04c6fb4ab276b26498cde8ca4
|
/venv/bin/django-admin
|
1d2f555d8d22bb879427dbf4aa6bb2d7af98a7d5
|
[] |
no_license
|
danielmcv/Paynom
|
dde328fbdf818b35b1004372304d40f3663b10a5
|
5b489ab8b66bd8d0693af0ad0ad69df05a6993d0
|
refs/heads/master
| 2021-01-10T12:47:57.607374 | 2015-10-04T16:55:24 | 2015-10-04T16:55:24 | 43,643,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 295 |
#!/home/daniel/Documents/Paynom/venv/bin/python
# -*- coding: utf-8 -*-
# Console entry point for Django's command-line utility (django-admin).
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Normalize argv[0] by stripping setuptools' -script.pyw / .exe suffixes.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
|
[
"daneil625@outlook.com"
] |
daneil625@outlook.com
|
|
58543a9fb220ac7f1e3df6aa78a0e6d2c948efa0
|
9f675f1d305385bbe81eb337b1126624f26fcca0
|
/hey.py
|
9f5969bdefdb8027944b1e8c4587e36d60fb1caf
|
[] |
no_license
|
Kelta-King/QR_Code_Scanner_Python
|
ca058e748a03d6de6190531a9aef9f20226368e1
|
9ecc91894ba14890da651ee264b465a4fd61276d
|
refs/heads/main
| 2023-08-28T10:27:34.435586 | 2021-10-25T12:36:57 | 2021-10-25T12:36:57 | 419,175,556 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14 |
py
|
# Minimal placeholder script: prints a greeting.
print("Yoman")
|
[
"kushangshah@Kushangs-MacBook-Pro.local"
] |
kushangshah@Kushangs-MacBook-Pro.local
|
f7bbe2a81a48f0b05b5f743828f9bd981e81ca9f
|
572e171127f3e25a2a919ac5e3cf1cdb18746ead
|
/billingdashboard/dashboards/project/invoices/tables.py
|
f1859ec36bf3db587ee80551eeb9e29de4fd9095
|
[] |
no_license
|
sreenathmenon/mbtest
|
d01a1b15d2b0d7795b8976028591af5e762fc95e
|
9b78c9b851c8e3b65dcf0f7450330fdbdac89a7c
|
refs/heads/master
| 2021-05-16T04:26:31.039756 | 2017-10-06T16:32:33 | 2017-10-06T16:32:33 | 106,026,837 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,351 |
py
|
#
# Copyright 2017 NephoScale
#
from django.utils.translation import ugettext_lazy as _
from horizon import tables
class UserInvoiceListingTable(tables.DataTable):
    """Horizon table listing a customer's invoices, one row per invoice."""
    id = tables.Column('id', verbose_name=_('ID'))
    # The invoice code links through to the invoice-details view.
    inv_code = tables.Column('inv_code', verbose_name=_('Code'), \
               link="horizon:project:customer_invoices:customer_invoice_details")
    user = tables.Column('user', verbose_name=_('Account'))
    inv_date = tables.Column('inv_date', verbose_name=_('Invoice Date'))
    inv_from = tables.Column('inv_from', verbose_name=_('From Date'))
    inv_to = tables.Column('inv_to', verbose_name=_('To Date'))
    total_amt = tables.Column('total_amt', verbose_name=_('Total'))
    balance_amt = tables.Column('balance_amt', verbose_name=_('Balance'))
    amt_paid = tables.Column('amt_paid', verbose_name=_('Paid'))
    last_updated = tables.Column('last_updated', verbose_name=_('Last Updated'))
    notes = tables.Column('notes', verbose_name=_('Notes'))
    status = tables.Column('status', verbose_name=_('Status'))
    def get_object_id(self, datum):
        # Rows are dict-like, so Horizon needs the id extracted explicitly.
        return datum['id']
    class Meta(object):
        name = 'customer_invoices'
        verbose_name = _('Invoices')
        # No per-row or table-level actions: this listing is read-only.
        row_actions = ()
        table_actions = ()
|
[
"sreenath.mm@poornam.com"
] |
sreenath.mm@poornam.com
|
67e621ecfca50542026a0bc3eba12f59122ad3b5
|
efd3564def48ae6e5fff6068da21fc61f88486ee
|
/iam/models.py
|
fe44011c6227b49f36f6ae826fa39489099e4904
|
[
"MIT"
] |
permissive
|
druuu/IAM-Manager
|
0c4e4f75879d44f4519e3c4655778f532e4455cb
|
5ed542ed52ff6e18ea70122510fc9d5e6998159d
|
refs/heads/master
| 2021-01-16T19:18:10.412258 | 2016-05-12T10:02:14 | 2016-05-12T10:02:14 | 58,738,368 | 0 | 0 | null | 2016-05-13T12:29:36 | 2016-05-13T12:29:36 | null |
UTF-8
|
Python
| false | false | 115 |
py
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
|
[
"ashwin@micropyramid.com"
] |
ashwin@micropyramid.com
|
615ec213d76bfcbfaceae7f75787b8ba491393cd
|
9806e10f82b0bfbf39fa7661461298062174e23d
|
/wsgi.py
|
19d4ebcab5e427631151d52e4e3a8ae93ba7583d
|
[] |
no_license
|
rongshengqin/loveweb
|
2e64efe09d71a56521ff569413c6516f093db108
|
09331c6794c084fe02f4ff1c5e8417edb7fea5b2
|
refs/heads/master
| 2021-07-16T14:12:26.802548 | 2017-10-16T12:34:35 | 2017-10-16T12:34:35 | 107,124,285 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
#!/usr/bin/python
# WSGI entry point: exposes the Flask app as `application` for a WSGI server.
import os
from flask_app import app as application
#
# Below for testing only
#
if __name__ == '__main__':
    # Smoke test with the stdlib reference server: serve exactly one
    # request on localhost:8051, then exit.
    from wsgiref.simple_server import make_server
    httpd = make_server('localhost', 8051, application)
    # Wait for a single request, serve it and quit.
    httpd.handle_request()
|
[
"basicqrs@163.com"
] |
basicqrs@163.com
|
7ff4f342c296f14581f59bf952c57db0709b0254
|
0cc4eb3cb54f8394c127ace62d3108fdb5230c85
|
/.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/Crypto/PublicKey/__init__.pyi
|
26410a457f1a06184022094247938b2894f9cbe2
|
[] |
no_license
|
jacobmerson/spack-develop-env
|
5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8
|
5fca20ca343b1a76f05fc635c87f94ed25417d94
|
refs/heads/master
| 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 213 |
pyi
|
/lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/Crypto/PublicKey/__init__.pyi
|
[
"mersoj@rpi.edu"
] |
mersoj@rpi.edu
|
b7534815b8c7ad1a7cabc452db9d215c18d77d0b
|
6fff01f73a579426442dd404c2f60b02228a1c2c
|
/0314/위장.py
|
93193f624dab6832f0182d7226529ced2b9273f2
|
[] |
no_license
|
ohhhhmy/OhGongCo
|
2e841e0b93d9ff467b1ba49c93f53ef4735e3188
|
61be96ca66b241db380798e7d942c943dd49fa33
|
refs/heads/main
| 2023-04-27T20:16:45.840964 | 2021-05-13T12:40:48 | 2021-05-13T12:40:48 | 345,608,098 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,242 |
py
|
def solution(c):
    """Count the distinct outfits for a list of [name, category] pairs.

    A category with n items offers n + 1 choices (wear one of its items or
    none); multiply the per-category choices and subtract the single
    all-"none" combination.
    """
    per_category = {}
    for wear in c:
        category = wear[1]
        per_category[category] = per_category.get(category, 0) + 1
    combinations = 1
    for item_count in per_category.values():
        combinations *= item_count + 1
    return combinations - 1
# Previous solution
# def solution(c):
#     from itertools import combinations
#     from functools import reduce
#     dic = {}
#     for wear in c:
#         stuff = wear[1]
#         if stuff not in dic:
#             dic[stuff] = 1
#         else:
#             dic[stuff] += 1
#     count = [i for i in dic.values()]
#     key_num = len(count)
#     answer = 0
#     for i in range(1, key_num + 1):
#         combi = combinations(count, i)
#         for j in combi:
#             answer +=reduce(lambda x, y : x * y, j)
#     return answer
# Problem with the previous solution: enumerating every combination caused a timeout.
# test cases
print(solution([["yellowhat", "headgear"], ["bluesunglasses", "eyewear"], ["glasses", "eyewear"], ["green_turban", "headgear"], ["black_cap", "headgear"], ["short", "pants"]]))
print(solution([["crowmask", "face"], ["bluesunglasses", "face"], ["smoky_makeup", "face"]]))
|
[
"dmsdh316@naver.com"
] |
dmsdh316@naver.com
|
570cc838272c8d6af88062cc6f7e249fd0b36979
|
ea57ef44636ce151b3ef5322466cdfcb02482515
|
/pendulum/constants.py
|
abc6ec06eacd7553dcf6ee58a8d094672a79966c
|
[
"MIT"
] |
permissive
|
Sn3akyP3t3/pendulum
|
acb3dc5067576c4569a08b1d8a8ecfce918b4724
|
7ce170bdc64199d74e09e347402983f1bb015f63
|
refs/heads/master
| 2020-03-22T01:15:01.160870 | 2018-07-01T15:49:09 | 2018-07-01T15:49:09 | 139,292,657 | 0 | 0 |
MIT
| 2018-07-01T01:46:00 | 2018-07-01T01:46:00 | null |
UTF-8
|
Python
| false | false | 2,836 |
py
|
# The day constants
SUNDAY = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
# Number of X in Y.
YEARS_PER_CENTURY = 100
YEARS_PER_DECADE = 10
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
DAYS_PER_WEEK = 7
HOURS_PER_DAY = 24
MINUTES_PER_HOUR = 60
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = MINUTES_PER_HOUR * SECONDS_PER_MINUTE
SECONDS_PER_DAY = HOURS_PER_DAY * SECONDS_PER_HOUR
US_PER_SECOND = 1000000
# Formats
ATOM = 'YYYY-MM-DDTHH:mm:ssZ'
COOKIE = 'dddd, DD-MMM-YYYY HH:mm:ss zz'
ISO8601 = 'YYYY-MM-DDTHH:mm:ssZ'
ISO8601_EXTENDED = 'YYYY-MM-DDTHH:mm:ss.SSSSSSZ'
RFC822 = 'ddd, DD MMM YY HH:mm:ss ZZ'
RFC850 = 'dddd, DD-MMM-YY HH:mm:ss zz'
RFC1036 = 'ddd, DD MMM YY HH:mm:ss ZZ'
RFC1123 = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
RFC2822 = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
RFC3339 = ISO8601
RFC3339_EXTENDED = ISO8601_EXTENDED
RSS = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
W3C = ISO8601
EPOCH_YEAR = 1970
DAYS_PER_N_YEAR = 365
DAYS_PER_L_YEAR = 366
USECS_PER_SEC = 1000000
SECS_PER_MIN = 60
SECS_PER_HOUR = 60 * SECS_PER_MIN
SECS_PER_DAY = SECS_PER_HOUR * 24
# 400-year chunks always have 146097 days (20871 weeks).
SECS_PER_400_YEARS = 146097 * SECS_PER_DAY
# The number of seconds in an aligned 100-year chunk, for those that
# do not begin with a leap year and those that do respectively.
SECS_PER_100_YEARS = (
(76 * DAYS_PER_N_YEAR + 24 * DAYS_PER_L_YEAR) * SECS_PER_DAY,
(75 * DAYS_PER_N_YEAR + 25 * DAYS_PER_L_YEAR) * SECS_PER_DAY
)
# The number of seconds in an aligned 4-year chunk, for those that
# do not begin with a leap year and those that do respectively.
SECS_PER_4_YEARS = (
(4 * DAYS_PER_N_YEAR + 0 * DAYS_PER_L_YEAR) * SECS_PER_DAY,
(3 * DAYS_PER_N_YEAR + 1 * DAYS_PER_L_YEAR) * SECS_PER_DAY
)
# The number of seconds in non-leap and leap years respectively.
SECS_PER_YEAR = (
DAYS_PER_N_YEAR * SECS_PER_DAY,
DAYS_PER_L_YEAR * SECS_PER_DAY
)
DAYS_PER_YEAR = (
DAYS_PER_N_YEAR,
DAYS_PER_L_YEAR
)
# The month lengths in non-leap and leap years respectively.
DAYS_PER_MONTHS = (
(-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31),
(-1, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
)
# The day offsets of the beginning of each (1-based) month in non-leap
# and leap years respectively.
# For example, in a leap year there are 335 days before December.
MONTHS_OFFSETS = (
(-1, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365),
(-1, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
)
DAY_OF_WEEK_TABLE = (
0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4
)
TM_SUNDAY = 0
TM_MONDAY = 1
TM_TUESDAY = 2
TM_WEDNESDAY = 3
TM_THURSDAY = 4
TM_FRIDAY = 5
TM_SATURDAY = 6
TM_JANUARY = 0
TM_FEBRUARY = 1
TM_MARCH = 2
TM_APRIL = 3
TM_MAY = 4
TM_JUNE = 5
TM_JULY = 6
TM_AUGUST = 7
TM_SEPTEMBER = 8
TM_OCTOBER = 9
TM_NOVEMBER = 10
TM_DECEMBER = 11
|
[
"sebastien@eustace.io"
] |
sebastien@eustace.io
|
1829819e1f2f2797abdc8b88dd33133975b014e7
|
762faf17ae39ac7121c9ebc8964265052f2f724f
|
/summativeAPIproject.py
|
7332d94eaa18ace5b42bfb75df45716a990d8318
|
[] |
no_license
|
arodrrigues/Y10Design-PythonAR
|
d00921c239010d511e9dd9e703e50183a71db3f1
|
4701bea0c5ef47269c2d390356d2b6f529edda9f
|
refs/heads/master
| 2020-07-23T14:21:07.248989 | 2020-05-30T13:46:25 | 2020-05-30T13:46:25 | 207,589,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,345 |
py
|
# Eli Preston
# Upper Canada College
# This program displays the stock closing prices of APPLE from 2014 to 2019 in a bar graph using Chart js.
import requests
import json
import pprint
def writeHTML(data, closingPrices, dates):
myfile = open("copy.html","w")
myfile.write("""
<!DOCTYPE html>
<html>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Montserrat">
<head>
<title>API Home Page</title>
<link rel="stylesheet" href="../HTML/homestyle.css">
<link rel='icon' href='favicon (1).ico' type='image/x-icon'/ >
<ul>
<li><a href="../HTML/eli.html" id="special"class="left" style="float:left;">Home</a></li>
<meta name="viewport" content="width=device-width, initial-scale=1">
</ul>
""")
bgColors = []
for date in dates:
if date[:4] == "2000":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2001":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2002":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2003":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2004":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2005":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2006":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2007":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2008":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2009":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2010":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2011":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2012":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2013":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2014":
bgColors.append("'rgba(0, 255, 0, 1)'")
elif date[:4] == "2015":
bgColors.append("'rgba(255, 255, 0, 1)'")
elif date[:4] == "2016":
bgColors.append("'rgba(255, 0, 0, 1)'")
elif date[:4] == "2017":
bgColors.append("'rgba(0, 0, 255, 1)'")
elif date[:4] == "2018":
bgColors.append("'rgba(255, 0, 255, 1)'")
elif date[:4] == "2019":
bgColors.append("'rgba(0, 255, 255, 1)'")
twothousand = []
twothousandone = []
twothousandtwo = []
twothousandthree = []
twothousandfour = []
twothousandfive = []
twothousandsix = []
twothousandseven = []
twothousandeight = []
twothousandnine = []
twothousandten = []
twothousandeleven = []
twothousandtwelve = []
twothousandthirteen = []
twothousandfourteen = []
twothousandfifteen = []
twothousandsixteen = []
twothousandseventeen = []
twothousandeighteen = []
twothousandnineteen = []
index = 0
for yr in dates:
if yr[:4] == "2000":
print('2000')
twothousand.append(closingPrices[index])
elif yr[:4] == "2001":
print('2001')
twothousandone.append(closingPrices[index])
elif yr[:4] == "2002":
print('2002')
twothousandtwo.append(closingPrices[index])
elif yr[:4] == "2003":
print('2003')
twothousandthree.append(closingPrices[index])
elif yr[:4] == "2004":
print('2004')
twothousandfour.append(closingPrices[index])
elif yr[:4] == "2005":
print('2005')
twothousandfive.append(closingPrices[index])
elif yr[:4] == "2006":
print('2006')
twothousandsix.append(closingPrices[index])
elif yr[:4] == "2007":
print('2007')
twothousandseven.append(closingPrices[index])
elif yr[:4] == "2008":
print('2008')
twothousandeight.append(closingPrices[index])
elif yr[:4] == "2009":
print('2009')
twothousandnine.append(closingPrices[index])
elif yr[:4] == "2010":
print('2010')
twothousandten.append(closingPrices[index])
elif yr[:4] == "2011":
print('2011')
twothousandeleven.append(closingPrices[index])
elif yr[:4] == "2012":
print('2012')
twothousandtwelve.append(closingPrices[index])
elif yr[:4] == "2013":
print('2013')
twothousandthirteen.append(closingPrices[index])
elif yr[:4] == "2014":
print('2014')
twothousandfourteen.append(closingPrices[index])
elif yr[:4] == "2015":
print('2015')
twothousandfifteen.append(closingPrices[index])
elif yr[:4] == "2016":
print('2016')
twothousandsixteen.append(closingPrices[index])
elif yr[:4] == "2017":
print('2017')
twothousandseventeen.append(closingPrices[index])
elif yr[:4] == "2018":
print('2018')
twothousandeighteen.append(closingPrices[index])
elif yr[:4] == "2019":
print('2019')
twothousandnineteen.append(closingPrices[index])
index += 1
print(twothousandone)
print(twothousandtwo)
print(twothousandthree)
print(twothousandfour)
print(twothousandfive)
print(twothousandsix)
print(twothousandseven)
print(twothousandeight)
print(twothousandnine)
print(twothousandten)
print(twothousandeleven)
print(twothousandtwelve)
print(twothousandthirteen)
print(twothousandfourteen)
print(twothousandfifteen)
print(twothousandsixteen)
print(twothousandseventeen)
print(twothousandeighteen)
print(twothousandnineteen)
myfile.write("""
<div class="headerAPI" id="home">
<p id="big"><b>Apple API Display</b></p>
<p id="small">This website is displaying data from an Apple API.</p>
<p></p>
</div>
<canvas id="myChart" width="300" height="200"></canvas>
<script src="https://cdn.jsdelivr.net/npm/chart.js@2.8.0"></script>
<script>
var ctx = document.getElementById('myChart').getContext('2d');
var myChart = new Chart(ctx, {
type: 'bar',
data: {
labels: """ + ",".join(str(dates)) + """,
datasets: [{
label: 'Closing Prices of Apple From 2014 to Present Day (In $$$)',
data: """ + "".join(str(closingPrices)) + """,
backgroundColor: [""" + ",".join(bgColors) + """],
borderColor: [
],
borderWidth: 1
}]
},
options: {
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
},
options: {
layout: {
padding: {
left: 50,
right: 50,
top: 50,
bottom: 50
}
}
}
});
</script>
</head>
<body>
<div class="rowapi" id="rowapi">
<div class="column">
<a href="https://financialmodelingprep.com/api/v3/historical-price-full/AAPL" style="text-decoration:none;" target="_blank">
<img src="../HTML/ImagesHTML/apiuseddoc.png" style="width:100%" onclick="" class="hover-shadow cursor">
<div class="overlay">
<div class="text">API Used</div>
</div>
</div>
<div class="column">
<a href="https://www.chartjs.org/docs/latest/" style="text-decoration:none;" target="_blank">
<img src="../HTML/ImagesHTML/gettingstarted.png" style="width:100%" onclick="" class="hover-shadow cursor">
<div class="overlay">
<div class="text">Chart JS; Used For Graph</div>
</div>
</div>
<div class="column">
<a href="https://github.com/EliPreston" style="text-decoration:none;">
<img src="../HTML/ImagesHTML/github-logo.jpeg" style="width:100%" onclick="" class="hover-shadow cursor">
<div class="overlay">
<div class="text">GitHub</div>
</div>
</div>
<div class="column">
<a href="https://sites.google.com/ucc.on.ca/y10design-epres" style="text-decoration:none;">
<img src="../HTML/ImagesHTML/googlesitesnew99.png" style="width:100%" onclick="" class="hover-shadow cursor">
<div class="overlay">
<div class="text">Google Site</div>
</div>
</div>
</div>
</body>
</html>
""")
myfile.close()
def main():
response = requests.get("https://financialmodelingprep.com/api/v3/historical-price-full/AAPL?serietype=line")
if (response.status_code == 200):
data = response.content
data_as_str = data.decode()
datajson = response.json()
dataSymbol = datajson['symbol']
dataPoints = datajson['historical']
print(dataSymbol)
closingPrices = []
dates = []
for point in dataPoints:
print(f"Date: {point['date']} \n\tClosing Price: {point['close']}")
closingPrices.append(point["close"])
dates.append(point["date"])
writeHTML(data_as_str, closingPrices, dates)
else:
data = "Error has occured"
writeHTML(data, [], [])
main()
|
[
"andrew.rodrigues@GCCYJTB8J1WK.local"
] |
andrew.rodrigues@GCCYJTB8J1WK.local
|
ad2c09304c77568abc578168dc811c48b5904970
|
439f2d5a345b1714391b333848b7266a0ea2059d
|
/mediacrush/processing/invocation.py
|
ed15a6a29f19fc2222484dd8c222b4d452e020d3
|
[
"MIT"
] |
permissive
|
SuperTux88/MediaCrush
|
a7c9c9097af1a2f1822cf59bc9fa80affadff256
|
79ae752462febbc031f7a179371970846ccf46a7
|
refs/heads/c4c
| 2021-01-17T16:16:13.312596 | 2016-09-19T16:45:50 | 2016-09-19T16:45:50 | 51,722,135 | 1 | 2 | null | 2016-06-16T02:47:26 | 2016-02-15T00:58:53 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,097 |
py
|
from mediacrush.config import _cfgi
import os
import threading
import subprocess
class Invocation(object):
crashed = False
exited = False
stdout = None
process = None
args = []
def __init__(self, command):
self.command = command
def __call__(self, *args, **kw):
self.args = self.command.format(*args, **kw).split()
return self
def _target(self):
try:
self.process = subprocess.Popen(self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.stdout = self.process.communicate()
except:
self.crashed = True
return
def run(self, timeout=_cfgi("max_processing_time")):
if not self.args:
self.args = self.command.split()
thread = threading.Thread(target=self._target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Terminating process")
self.process.terminate()
thread.join()
self.exited = True
self.returncode = self.process.returncode
|
[
"jose.manuel.diez@gmail.com"
] |
jose.manuel.diez@gmail.com
|
f0d3c7b7002e07ed6121cc88556e16022241444a
|
9444ba23799124a73570df4359673887be390649
|
/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py
|
dfecbaff8ef5dabd0fca1a9c58d1849bc518bcb3
|
[
"Apache-2.0"
] |
permissive
|
snabbco/neutron
|
fe400b57591c0f9c835494b80425d67e00668340
|
a657c06d10f2171149c6b1863df36522bdc11cd7
|
refs/heads/master
| 2021-01-17T11:12:53.555048 | 2014-05-27T09:05:00 | 2014-05-27T09:05:00 | 20,214,598 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,168 |
py
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
class MeteringAgentNotifyAPI(proxy.RpcProxy):
"""API for plugin to notify L3 metering agent."""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=topics.METERING_AGENT):
super(MeteringAgentNotifyAPI, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
adminContext = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
l3_routers = {}
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
adminContext, [router['id']],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify metering agent at %(topic)s.%(host)s '
'the message %(method)s'),
{'topic': self.topic,
'host': l3_agent.host,
'method': method})
l3_router = l3_routers.get(l3_agent.host, [])
l3_router.append(router)
l3_routers[l3_agent.host] = l3_router
for host, routers in l3_routers.iteritems():
self.cast(context, self.make_msg(method, routers=routers),
topic='%s.%s' % (self.topic, host))
def _notification_fanout(self, context, method, router_id):
LOG.debug(_('Fanout notify metering agent at %(topic)s the message '
'%(method)s on router %(router_id)s'),
{'topic': self.topic,
'method': method,
'router_id': router_id})
self.fanout_cast(
context, self.make_msg(method,
router_id=router_id),
topic=self.topic)
def _notification(self, context, method, routers):
"""Notify all the agents that are hosting the routers."""
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if utils.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
self._agent_notification(context, method, routers)
else:
self.fanout_cast(context, self.make_msg(method, routers=routers),
topic=self.topic)
def router_deleted(self, context, router_id):
self._notification_fanout(context, 'router_deleted', router_id)
def routers_updated(self, context, routers):
if routers:
self._notification(context, 'routers_updated', routers)
def update_metering_label_rules(self, context, routers):
self._notification(context, 'update_metering_label_rules', routers)
def add_metering_label(self, context, routers):
self._notification(context, 'add_metering_label', routers)
def remove_metering_label(self, context, routers):
self._notification(context, 'remove_metering_label', routers)
|
[
"sylvain.afchain@enovance.com"
] |
sylvain.afchain@enovance.com
|
b158359e453a4747612cafc96d190ba0bf906382
|
b3436d9bd89f38e1e540cb5d9e2770126292f63d
|
/apps/users_app/migrations/0001_initial.py
|
c9aac3e9a50780a12d336963b180483bd86cb1ef
|
[] |
no_license
|
stereotypestudio/python_belt
|
bc46189698aff3876fd082082aed19e0da5e3cdb
|
176813adf6a6b55ed54c7e4e9e2886c2f13a1b86
|
refs/heads/master
| 2020-03-21T08:28:57.540146 | 2018-06-26T21:56:07 | 2018-06-26T21:56:07 | 138,346,508 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 685 |
py
|
# Generated by Django 2.0.6 on 2018-06-21 17:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('password_hash', models.CharField(max_length=255)),
],
),
]
|
[
"chris@stereotype.studio"
] |
chris@stereotype.studio
|
1e1d7ca3bfe15837aaed003514b62088a040f6d2
|
868ac4e558cf5fe945e8b557564f34f79b3ad01e
|
/purity_fb/purity_fb_1dot11/models/snmp_agent_response.py
|
3eb0a329ee36e940b618e7040ff1ee601a4825ff
|
[
"Apache-2.0"
] |
permissive
|
mabdelhafez/purity_fb_python_client
|
f4253ce8497fb3cff648e0a0cd1e567f48129fa7
|
a9856875b3df43b4302a2e4addd1a6b71f51f5ce
|
refs/heads/master
| 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 |
NOASSERTION
| 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null |
UTF-8
|
Python
| false | false | 4,171 |
py
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.11 Python SDK
Pure Storage FlashBlade REST 1.11 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.11
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SnmpAgentResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[SnmpAgent]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
SnmpAgentResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this SnmpAgentResponse.
pagination information, only available in GET requests
:return: The pagination_info of this SnmpAgentResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this SnmpAgentResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this SnmpAgentResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this SnmpAgentResponse.
A list of SNMP agents.
:return: The items of this SnmpAgentResponse.
:rtype: list[SnmpAgent]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this SnmpAgentResponse.
A list of SNMP agents.
:param items: The items of this SnmpAgentResponse.
:type: list[SnmpAgent]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SnmpAgentResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"mabdelhafez@purestorage.com"
] |
mabdelhafez@purestorage.com
|
eed318c659a03a362eea57886adeb8f9edd97292
|
a682419ca9862ff33b18a30a7599ad64f892e24d
|
/modules/selenium/webdriver/chrome/service.py
|
c8d810b5dfe0c7d3d90b4b9bc95b4527e2f234ba
|
[] |
no_license
|
lasanjin/corona-swe
|
1dd3449a973fa9ece0fc39b125235ea0b31a7069
|
ad21a7e717d666fa679aa5896e24138dcddfead4
|
refs/heads/master
| 2021-05-18T17:28:09.292155 | 2020-06-08T21:42:53 | 2020-06-08T21:42:53 | 251,337,604 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,825 |
py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from modules.selenium.webdriver.common import service
class Service(service.Service):
"""
Object that manages the starting and stopping of the ChromeDriver
"""
def __init__(self, executable_path, port=0, service_args=None,
log_path=None, env=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the ChromeDriver
- port : Port the service is running on
- service_args : List of args to pass to the chromedriver service
- log_path : Path for the chromedriver service to log to"""
self.service_args = service_args or []
if log_path:
self.service_args.append('--log-path=%s' % log_path)
service.Service.__init__(self, executable_path, port=port, env=env,
start_error_message="Please see https://sites.google.com/a/chromium.org/chromedriver/home")
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
|
[
"sanjinslavnic@gmail.com"
] |
sanjinslavnic@gmail.com
|
e40b4d81eb1aa444efa9cde6f36a97adeaf6647e
|
2ffd74028fedf428eb8715481bd9d35a74f10a67
|
/mqtt.py
|
c6da79512dafefd45df999813c3d44aa6233a5e3
|
[] |
no_license
|
msgarbossa/micropython-motion
|
2274be15b41be31869811edaa2f90c9b505e4d54
|
b300e7920b5b88a355559a077ab03c6ecae0abae
|
refs/heads/master
| 2023-09-01T13:52:55.429910 | 2021-10-20T07:41:09 | 2021-10-20T07:41:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,563 |
py
|
#!/usr/bin/env python
#
# Copyright (c) 2019, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
# wget https://raw.githubusercontent.com/pycom/pycom-libraries/master/examples/mqtt/mqtt.py
import usocket as socket
import ustruct as struct
from ubinascii import hexlify
class MQTTException(Exception):
pass
class MQTTClient:
def __init__(self, client_id, server, port=0, user=None, password=None, keepalive=0,
ssl=False, ssl_params={}):
if port == 0:
port = 8883 if ssl else 1883
self.client_id = client_id
self.sock = None
self.addr = socket.getaddrinfo(server, port)[0][-1]
self.ssl = ssl
self.ssl_params = ssl_params
self.pid = 0
self.cb = None
self.user = user
self.pswd = password
self.keepalive = keepalive
self.lw_topic = None
self.lw_msg = None
self.lw_qos = 0
self.lw_retain = False
def _send_str(self, s):
self.sock.write(struct.pack("!H", len(s)))
self.sock.write(s)
def _recv_len(self):
n = 0
sh = 0
while 1:
b = self.sock.read(1)[0]
n |= (b & 0x7f) << sh
if not b & 0x80:
return n
sh += 7
def set_callback(self, f):
self.cb = f
def set_last_will(self, topic, msg, retain=False, qos=0):
assert 0 <= qos <= 2
assert topic
self.lw_topic = topic
self.lw_msg = msg
self.lw_qos = qos
self.lw_retain = retain
def connect(self, clean_session=True):
self.sock = socket.socket()
self.sock.connect(self.addr)
if self.ssl:
import ussl
self.sock = ussl.wrap_socket(self.sock, **self.ssl_params)
msg = bytearray(b"\x10\0\0\x04MQTT\x04\x02\0\0")
msg[1] = 10 + 2 + len(self.client_id)
msg[9] = clean_session << 1
if self.user is not None:
msg[1] += 2 + len(self.user) + 2 + len(self.pswd)
msg[9] |= 0xC0
if self.keepalive:
assert self.keepalive < 65536
msg[10] |= self.keepalive >> 8
msg[11] |= self.keepalive & 0x00FF
if self.lw_topic:
msg[1] += 2 + len(self.lw_topic) + 2 + len(self.lw_msg)
msg[9] |= 0x4 | (self.lw_qos & 0x1) << 3 | (self.lw_qos & 0x2) << 3
msg[9] |= self.lw_retain << 5
self.sock.write(msg)
#print(hex(len(msg)), hexlify(msg, ":"))
self._send_str(self.client_id)
if self.lw_topic:
self._send_str(self.lw_topic)
self._send_str(self.lw_msg)
if self.user is not None:
self._send_str(self.user)
self._send_str(self.pswd)
resp = self.sock.read(4)
assert resp[0] == 0x20 and resp[1] == 0x02
if resp[3] != 0:
raise MQTTException(resp[3])
return resp[2] & 1
def disconnect(self):
self.sock.write(b"\xe0\0")
self.sock.close()
def ping(self):
self.sock.write(b"\xc0\0")
def publish(self, topic, msg, retain=False, qos=0):
pkt = bytearray(b"\x30\0\0\0")
pkt[0] |= qos << 1 | retain
sz = 2 + len(topic) + len(msg)
if qos > 0:
sz += 2
assert sz < 2097152
i = 1
while sz > 0x7f:
pkt[i] = (sz & 0x7f) | 0x80
sz >>= 7
i += 1
pkt[i] = sz
#print(hex(len(pkt)), hexlify(pkt, ":"))
self.sock.write(pkt, i + 1)
self._send_str(topic)
if qos > 0:
self.pid += 1
pid = self.pid
struct.pack_into("!H", pkt, 0, pid)
self.sock.write(pkt, 2)
self.sock.write(msg)
if qos == 1:
while 1:
op = self.wait_msg()
if op == 0x40:
sz = self.sock.read(1)
assert sz == b"\x02"
rcv_pid = self.sock.read(2)
rcv_pid = rcv_pid[0] << 8 | rcv_pid[1]
if pid == rcv_pid:
return
elif qos == 2:
assert 0
def subscribe(self, topic, qos=0):
assert self.cb is not None, "Subscribe callback is not set"
pkt = bytearray(b"\x82\0\0\0")
self.pid += 1
struct.pack_into("!BH", pkt, 1, 2 + 2 + len(topic) + 1, self.pid)
#print(hex(len(pkt)), hexlify(pkt, ":"))
self.sock.write(pkt)
self._send_str(topic)
self.sock.write(qos.to_bytes(1, "little"))
while 1:
op = self.wait_msg()
if op == 0x90:
resp = self.sock.read(4)
#print(resp)
assert resp[1] == pkt[2] and resp[2] == pkt[3]
if resp[3] == 0x80:
raise MQTTException(resp[3])
return
# Wait for a single incoming MQTT message and process it.
# Subscribed messages are delivered to a callback previously
# set by .set_callback() method. Other (internal) MQTT
# messages processed internally.
def wait_msg(self):
res = self.sock.read(1)
self.sock.setblocking(True)
if res is None:
return None
if res == b"":
raise OSError(-1)
if res == b"\xd0": # PINGRESP
sz = self.sock.read(1)[0]
assert sz == 0
return None
op = res[0]
if op & 0xf0 != 0x30:
return op
sz = self._recv_len()
topic_len = self.sock.read(2)
topic_len = (topic_len[0] << 8) | topic_len[1]
topic = self.sock.read(topic_len)
sz -= topic_len + 2
if op & 6:
pid = self.sock.read(2)
pid = pid[0] << 8 | pid[1]
sz -= 2
msg = self.sock.read(sz)
self.cb(topic, msg)
if op & 6 == 2:
pkt = bytearray(b"\x40\x02\0\0")
struct.pack_into("!H", pkt, 2, pid)
self.sock.write(pkt)
elif op & 6 == 4:
assert 0
# Checks whether a pending message from server is available.
# If not, returns immediately with None. Otherwise, does
# the same processing as wait_msg.
def check_msg(self):
self.sock.setblocking(False)
return self.wait_msg()
|
[
"mikeysky@gmail.com"
] |
mikeysky@gmail.com
|
f213cfeb3a3b12d830a7ed6f016c30b084eb2a83
|
ae58484571f1b59597f3117290165e1d9567b66f
|
/ciscoise/session.py
|
a448591ddff9be3b0092df3c2f125eac56fdfa2c
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ironbow/ise-guest-migrator
|
2a447611433889bfda83f2d37ff0a924dc158a0f
|
9174256cc6cbe527efa4e791c6cbeae915a6d1da
|
refs/heads/main
| 2023-01-13T18:00:57.698591 | 2020-11-05T20:44:45 | 2020-11-05T20:44:45 | 310,044,624 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,803 |
py
|
import requests
import urllib3
from urllib.parse import urlunsplit, urlsplit, urlparse, urlunparse
import logging
from datetime import datetime
import base64
from .exception import *
import json
urllib3.disable_warnings(category=urllib3.exceptions.InsecureRequestWarning)
class ISESession:
def __init__(self, hostname, username, password, **kwargs):
options = kwargs["options"]
port = options["port"] if "port" in options.keys() else 9060
sslVerify = False if options["verify"] == False else True
debug = options["debug"] if "debug" in options.keys() else False
admin_auth_string = f"{username}:{password}"
sponsor_auth_string = (
f"{options['sponsor_api_user']}:{options['sponsor_api_password']}"
if "sponsor_api_user" in options.keys()
and "sponsor_api_password" in options.keys()
else False
)
self.config = {
"hostname": hostname,
"username": username,
"password": password,
"admin_auth": "Basic " + self._b64e(admin_auth_string),
"sponsor_auth": "Basic " + self._b64e(sponsor_auth_string)
if sponsor_auth_string
else False,
"base_url": f"https://{hostname}:{str(port)}/ers/",
"debug": debug,
}
# Create session using admin credentials
self.admin = requests.Session()
self.admin.headers["Authorization"] = self.config["admin_auth"]
self.admin.headers["Content-Type"] = "application/json"
self.admin.headers["Accept"] = "application/json"
self.admin.verify = sslVerify
if self.config["sponsor_auth"]:
# Create session using sponsor credentials
self.sponsor = requests.Session()
self.sponsor.headers["Authorization"] = self.config["sponsor_auth"]
self.sponsor.headers["Content-Type"] = "application/json"
self.sponsor.headers["Accept"] = "application/json"
self.sponsor.verify = sslVerify
self.resources = {
"guestuserall": {
"target": "guestuser",
"type": "config",
"sponsor_auth": True,
"pagination": True,
},
"guestuser": {
"target": "guestuser",
"type": "config",
"sponsor_auth": True,
"pagination": False,
},
"guesttypeall": {
"target": "guesttype",
"type": "config",
"sponsor_auth": False,
"pagination": True,
},
"guesttype": {
"target": "guesttype",
"type": "config",
"sponsor_auth": False,
"pagination": False,
},
"sponsorportalall": {
"target": "sponsorportal",
"type": "config",
"sponsor_auth": False,
"pagination": True,
},
"sponsorportal": {
"target": "sponsorportal",
"type": "config",
"sponsor_auth": False,
"pagination": False,
},
}
self.logger = self.setup_logging()
def setup_logging(self):
    """Create and return the module logger with a single console handler.

    ``logging.getLogger(__name__)`` returns the *same* logger object on
    every call, so without a guard each new client instance would stack an
    extra StreamHandler and every message would be printed once per
    instance. Handlers are therefore only attached on first configuration.

    Returns:
        logging.Logger: logger set to DEBUG with one console StreamHandler.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Only attach handlers the first time this logger is configured.
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
        logger.addHandler(console_handler)
        # File logging is currently disabled; re-enable by uncommenting:
        # file_handler = logging.FileHandler(
        #     "aciconn." + "{:%Y%m%d_%H-%M-%S}.log".format(datetime.now())
        # )
        # file_handler.setLevel(logging.DEBUG)
        # file_handler.setFormatter(logging.Formatter(
        #     "%(threadName)s: %(asctime)s: %(name)s: %(levelname)s: %(message)s"
        # ))
        # logger.addHandler(file_handler)
    return logger
def url(self, resource: str) -> str:
    """Build the full ERS endpoint URL for a named resource key."""
    endpoint = self.resources[resource]
    return "{}{}/{}".format(
        self.config["base_url"], endpoint["type"], endpoint["target"]
    )
def get(self, resource: str, id: str = None):
    """GET an ERS resource, optionally by object *id*.

    For paginated ("get all") endpoints the pages are fetched by brute
    force until a short page is seen; the flattened list of result dicts
    is returned. For single-object endpoints the parsed response dict is
    returned as-is.

    Raises:
        SponsorAuthMissing: endpoint needs sponsor credentials that were
            not supplied at client construction.
        ISEAPIError: any non-200 response.
    """
    # Find API Endpoint in resources data
    api = self.resources[resource]
    # Because the ISE API is garbo (i.e. does not always return pagination attributes when needed),
    # the most consistent way to handle pagination is by brute force. :)
    current_page = 1
    page_size = 10  # TODO: Set this to 100 for actual packaging.
    next = True  # NOTE: shadows the builtin `next`; acts as the loop flag
    results = []
    while next:
        # Single-object fetch appends "/<id>"; list fetch uses the bare URL.
        url = self.url(resource) + "/" + id if id else self.url(resource)
        if api["pagination"]:
            url = self.paginate_url(url, current_page, page_size)
        # print(urlparse(url))
        # Log if debug flag set
        if self.config["debug"]:
            # TODO: Improve this logging.
            self.logger.debug("GOING TO: " + url)
        # If the API endpoint requires sponsor credentials, check to make sure they were provided. If so, use them instead
        if api["sponsor_auth"]:
            if not self.config["sponsor_auth"]:
                raise SponsorAuthMissing(
                    "Sponsor credentials required for '"
                    + resource
                    + "'. Please initialise the ISEClient with sponsor_api_user and sponsor_api_password"
                )
            response = self.sponsor.get(url)
        else:
            response = self.admin.get(url)
        # If response was OK, return data.
        if response.status_code == 200:
            o = json.loads(response.text)
            # If the result was in the form of SearchResults, strip some of the depth out before returning.
            if "SearchResult" in o.keys():
                # For pagination sake, check if total is 0. If so, jump to return.
                if o["SearchResult"]["total"] == 0:
                    next = False
                    continue
                # The contents of o["SearchResult"]["resources"] should be an array when calling get all style
                # APIs, and so should work with pagination.
                results.extend(o["SearchResult"]["resources"])
                # Again, API is garbo, and some endpoints return page 1 results on every page
                # So, as additional check if results of page 1 are < page size, we've got them all.
                if o["SearchResult"]["total"] < page_size:
                    next = False
            else:
                # Otherwise, return the dict.
                results = o
                next = False
        else:
            raise ISEAPIError(response=response)
        if api["pagination"]:
            current_page += 1
    return results
def paginate_url(self, url: str, page: int = 1, size: int = 100):
    """Append ERS paging query parameters to *url*."""
    query = "?page={}&size={}".format(page, size)
    return url + query
def _b64e(self, s: str) -> str:
"""Helper function to encode a string to base64"""
return base64.b64encode(s.encode("ascii")).decode("ascii")
def post(self, resource: str, payload: object):
    """POST *payload* (JSON-serialised) to create an ERS resource.

    Returns:
        bool: True on HTTP 201 (created).

    Raises:
        SponsorAuthMissing: endpoint needs sponsor credentials that were
            not supplied at client construction.
        ISEAPIError: any non-201 response; carries the first ERS error
            message title plus the raw response.
    """
    # Find API Endpoint in resources data
    api = self.resources[resource]
    url = self.url(resource)
    # Log if debug flag set
    if self.config["debug"]:
        # TODO: Improve this logging.
        self.logger.debug("GOING TO: " + url)
    # If the API endpoint requires sponsor credentials, check to make sure they were provided. If so, use them instead
    if api["sponsor_auth"]:
        if not self.config["sponsor_auth"]:
            raise SponsorAuthMissing(
                "Sponsor credentials required for '"
                + resource
                + "'. Please initialise the ISEClient with sponsor_api_user and sponsor_api_password"
            )
        # Payload is passed as the request body (requests' `data` argument).
        response = self.sponsor.post(url, json.dumps(payload))
    else:
        response = self.admin.post(url, json.dumps(payload))
    if response.status_code == 201:
        return True
    else:
        error = response.json()
        raise ISEAPIError(
            error["ERSResponse"]["messages"][0]["title"], response=response
        )
|
[
"rn.wolfe@gmail.com"
] |
rn.wolfe@gmail.com
|
5f19b9e9eef1e03ce82f8dfb3727ba1dd24319a9
|
9272342e3534b55df42b602d66d3911d68917a30
|
/config/config.py
|
66a01fe6861d9ffc2b57bc11ab211841c9351f73
|
[] |
no_license
|
andwenj/api_test
|
1bbf67e2745df04db676e0aa0765090d03f2b7f8
|
353f25511787323fbd9f3d061806d71c69276f3b
|
refs/heads/master
| 2020-08-28T13:37:03.515573 | 2019-10-26T13:44:54 | 2019-10-26T13:44:54 | 217,714,409 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,709 |
py
|
import logging
import os
import time
from optparse import OptionParser
today = time.strftime('%Y%m%d',time.localtime())
now = time.strftime('%Y%m%d_%H%M%S',time.localtime())
#项目路径
prj_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #当前文件的绝对路径的上一级,__file__指当前文件
#print(prj_path)
data_path = os.path.join(prj_path,'data') #数据目录
test_path = os.path.join(prj_path,'test') #用例目录
test_case_path = os.path.join(prj_path, 'test', 'case') # 用例目录
data_file = os.path.join(prj_path, 'data', 'test_user_data.xlsx')
last_fails_file = os.path.join(prj_path, 'last_failures.pickle')
log_file = os.path.join(prj_path,'log','log_{}.txt'.format(today)) #日志路径,按天保存
report_file = os.path.join(prj_path,'report','report_{}.html'.format(now)) #报告路径,按当前时间保存
#log配置
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(funcName)s:%(filename)s,%(lineno)d] %(message)s',#log对格式
datefmt='%Y-%m-%d %H:%M:%S',#日期格式
filename=log_file,#日志输出文件
filemode='a'#追加模式
)
#数据库配置
db_host = '127.0.0.1'
db_port = 3306
db_user = 'test'
db_passwd = '123456'
db = 'test_db'
#邮件配置
send_email_after_run = True
smtp_server = 'smtp.163.com'
smtp_user = 'andwenj@163.com'
smtp_passwd = 'wj252310' # 授权码,非邮箱登陆密码
sender = 'andwenj@163.com'
receiver = '1184865395@qq.com'
subject = '接口测试报告1212'
# 命令行选项
parser = OptionParser()
parser.add_option('--collect-only', action='store_true', dest='collect_only', help='仅列出所有用例')
parser.add_option('--rerun-fails', action='store_true', dest='rerun_fails', help='运行上次失败的用例')
parser.add_option('--testlist', action='store_true', dest='testlist', help='运行test/testlist.txt列表指定用例')
parser.add_option('--testsuite', action='store', dest='testsuite', help='运行指定的TestSuite')
parser.add_option('--tag', action='store', dest='tag', help='运行指定tag的用例')
(options, args) = parser.parse_args() # 应用选项(使生效)
'''
--conllect-only'是参数名,dest='collect-only'指存储到 options.collect_only变量中,'store_true'指,如果有该参数,options.collect_only=True
'store'指将--testsuite='smoke_suite',参数的值'smoke_suite'存到options.testsuite变量中
作者:韩志超
链接:<a href='https://www.jianshu.com/p/ed82716eef58'>https://www.jianshu.com/p/ed82716eef58</a>
来源:简书
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
#if __name__=="__main__":
# logging.info("hello")
'''
print("1")
"""
Log Level:
CRITICAL: 用于输出严重错误信息
ERROR: 用于输出错误信息
WARNING: 用于输出警示信息
INFO: 用于输出一些提升信息
DEBUG: 用于输出一些调试信息
优先级 CRITICAL > ERROR > WARNING > INFO > DEBUG
指定level = logging.DEBUG 所有等级大于等于DEBUG的信息都会输出
若指定level = logging.ERROR WARNING,INFO,DEBUG小于设置级别的信息不会输出
日志格式:
%(levelno)s: 打印日志级别的数值
%(levelname)s: 打印日志级别名称
%(pathname)s: 打印当前执行程序的路径,其实就是sys.argv[0]
%(filename)s: 打印当前执行程序名
%(funcName)s: 打印日志的当前函数
%(lineno)d: 打印日志的当前行号
%(asctime)s: 打印日志的时间
%(thread)d: 打印线程ID
%(threadName)s: 打印线程名称
%(process)d: 打印进程ID
%(message)s: 打印日志信息
"""
|
[
"andwenj@163.com"
] |
andwenj@163.com
|
20af6159a4e989bb6fb819cb1c6e743c8371ff32
|
38da8ba7915335eb39c59cab24d79f22b2b3b7a3
|
/preProcess.py
|
d580e702413cda1f13d872b24495e8996effa22e
|
[] |
no_license
|
rafiparvez/ReceiptRecognizer
|
25869f2036f95e7c62f29cfa887b85e19dcbfef2
|
bbb9c103194c84081aa0dab2ba7311d62a414573
|
refs/heads/master
| 2021-01-19T20:48:38.000082 | 2017-07-12T14:00:57 | 2017-07-12T14:00:57 | 88,557,419 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,525 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 20:40:40 2017
@author: rafip
"""
import cv2
import numpy as np
from cv2 import boundingRect, countNonZero, cvtColor, drawContours, findContours, getStructuringElement, morphologyEx, rectangle, threshold
def resize(image, width=None, height=None):
    """Return *image* scaled to the given width OR height, keeping aspect ratio.

    If both are None the image is returned untouched; if both are given,
    width takes precedence (matching the original behaviour).
    """
    orig_h, orig_w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is not None:
        scale = width / float(orig_w)
        target = (width, int(orig_h * scale))
    else:
        scale = height / float(orig_h)
        target = (int(orig_w * scale), height)
    return cv2.resize(image, target, interpolation=cv2.INTER_AREA)
def imgShow(caption,image):
    '''
    Debugging helper: display *image* in a window titled *caption*.
    Blocks until any key is pressed, then closes all OpenCV windows.
    '''
    cv2.imshow(caption, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def preProcess(ImgProcPath,imgPath):
    """Pre-process a receipt photo for OCR and save the cleaned image.

    Pipeline: read -> resize to height 2000 -> grayscale -> adaptive
    binarisation with morphological open/close -> find text blocks via
    morphological gradient + contour analysis -> mask out everything
    outside the detected text blocks -> write result to *ImgProcPath*.
    """
    image = cv2.imread(imgPath)
    #origImg = image.copy()
    #ratio = image.shape[0] / 2000.0
    #Resize original images to reduce size
    rgb = resize(image, height=2000)
    #Convert into grayscale
    gray = cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    # Binarize image using adaptive Threshold and perform Morphological Opening and Closing
    # to correct missing pixels from texts. Closed Image will be used to crop text blocks from
    filtered = cv2.adaptiveThreshold(gray.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 29, 8)
    kernel = np.ones((1,1), np.uint8)
    opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    # morphological gradient (highlights stroke edges of text)
    morph_kernel = getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = morphologyEx(gray, cv2.MORPH_GRADIENT, morph_kernel)
    # binarize image. This binary image is used for detecting text blocks
    _, bw = threshold(src=grad, thresh=0, maxval=255, type=cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    morph_kernel = getStructuringElement(cv2.MORPH_RECT, (9, 1))
    # connect horizontally oriented regions (letters of a line merge into one blob)
    connected = morphologyEx(bw, cv2.MORPH_CLOSE, morph_kernel)
    mask = np.zeros(bw.shape, np.uint8)
    # find contours
    im2, contours, hierarchy = findContours(connected, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    # mask2: accumulates rectangles of accepted text blocks, used to crop
    # the cleaned binary image "closing" below
    mask2 = np.zeros(gray.shape, np.uint8)
    # filter contours
    for idx in range(0, len(hierarchy[0])):
        x, y, w, h = boundingRect(contours[idx])
        # fill the contour
        mask = drawContours(mask, contours, idx, (255, 255, 255), cv2.FILLED)
        # ratio of non-zero pixels in the filled region.
        # Conditions exclude very small noise contours and
        # sparse (non-text) contours
        r = float(countNonZero(mask)) / (w * h)
        if r > 0.45 and h > 8 and w > 8:
            # draw accepted block on the preview image (debug only)
            rgb = rectangle(rgb, (x, y+h), (x+w, y), (0,255,0),3)
            #Applying mask2 to extract binary text blocks
            mask2 = rectangle(mask2, (x, y+h), (x+w, y), (255, 255, 255),-1)
    # invert, keep only text-block pixels, invert back to black-on-white
    bwgray = cv2.bitwise_not(closing)
    imgCropped = cv2.bitwise_and(bwgray,bwgray,mask = mask2)
    imgCropped = cv2.bitwise_not(imgCropped)
    #imgShow('asa',resize(imgCropped, height=700))
    #print(ImgProcPath)
    #Save the cropped image as processed image
    cv2.imwrite(ImgProcPath, imgCropped)
|
[
"Parvez Rafi"
] |
Parvez Rafi
|
63d81133a9343fd0e2eae01d936b968703860d0c
|
e0c257a6846ffac4303a733ba2420cfc1dc7526b
|
/src/167. 两数之和 II - 输入有序数组.py
|
e5f89c0b4a7d19ff1deee1bba5619eb77d01c83e
|
[] |
no_license
|
Hanlen520/Leetcode-4
|
1d21112e23d8a782e0bfe82d3b1cc216fc1ef350
|
389e004052ba2c0951ffd66af97ac368f7de84d4
|
refs/heads/master
| 2022-03-31T21:48:49.675426 | 2020-01-06T17:21:45 | 2020-01-06T17:21:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,038 |
py
|
"""
给定一个已按照升序排列 的有序数组,找到两个数使得它们相加之和等于目标数。
函数应该返回这两个下标值 index1 和 index2,其中 index1 必须小于 index2。
说明:
返回的下标值(index1 和 index2)不是从零开始的。
你可以假设每个输入只对应唯一的答案,而且你不可以重复使用相同的元素。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/two-sum-ii-input-array-is-sorted
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# 不要陷入定式思维,这道题就不用二分法了,直接用双指针
class Solution:
    def twoSum(self, numbers: "list[int]", target: int) -> "list[int]":
        """Two-pointer scan of a sorted (ascending) array for a pair
        summing to *target*.

        Returns the 1-based indices [index1, index2] with index1 < index2.
        The problem guarantees exactly one solution; a ValueError is
        raised if none exists (the original looped forever / indexed out
        of range in that case).

        Note: the original annotated with ``List[int]`` without importing
        ``typing.List``, which raises NameError when the class is defined;
        string-quoted builtin generics avoid both the import and the
        runtime evaluation.
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            pair_sum = numbers[lo] + numbers[hi]
            if pair_sum == target:
                return [lo + 1, hi + 1]
            if pair_sum < target:
                lo += 1  # need a larger sum: advance the left pointer
            else:
                hi -= 1  # need a smaller sum: retreat the right pointer
        raise ValueError("no two numbers sum to target")
|
[
"bjwu@zju.edu.cn"
] |
bjwu@zju.edu.cn
|
1f7b6def21e40adfe79bd98cba4af1591bc4eb81
|
e9c497e382d3e499c6391efd4c9a56707e928145
|
/mfti/robot-tasks-master/task_13.py
|
f001f7ca00c1d4293d6615b5e321db4969526051
|
[] |
no_license
|
pavelpianov/python_learning
|
b1520314b9d4c285f1a6c7624103fc52bdaa669e
|
b2f772d1a39dcb0890b831a47b94440417741388
|
refs/heads/master
| 2023-08-05T15:28:08.898758 | 2021-09-18T10:44:53 | 2021-09-18T10:44:53 | 268,367,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 317 |
py
|
#!/usr/bin/python3
from pyrob.api import *
@task
def task_8_10():
    """Sweep the robot's corridor left to right, painting every open cell
    in the rows directly above and below it; stop at the right wall.
    """
    while True:
        # Paint the cell above, if it is open, then return to the corridor.
        if not wall_is_above():
            move_up()
            fill_cell()
            move_down()
        # Paint the cell below, if it is open, then return to the corridor.
        if not wall_is_beneath():
            move_down()
            fill_cell()
            move_up()
        # Stop at the right-hand wall; otherwise keep sweeping right.
        if wall_is_on_the_right():
            break
        move_right()
break
move_right()
if __name__ == '__main__':
run_tasks()
|
[
"x14303@gmail.com"
] |
x14303@gmail.com
|
4bbe3e0c935cc8f119518f4af3c33d8bc99c5ace
|
ff1daac1ebac7a2cb03d717a189fe1edf3f4e4c2
|
/Lecture 16. PsychoPy 5- Getting Responses/posner-master/resaveData.py
|
b2cb2d91992dfb1f8260f6e003284eefa93486f4
|
[
"MIT"
] |
permissive
|
hejibo/Python-for-Psychologist
|
bf17c50c49cced2f6933ab3337d6a0c52eb508ed
|
0ef5a2a618b30b87ecb390757c456681957b313c
|
refs/heads/master
| 2021-01-12T17:06:19.364305 | 2016-12-07T21:54:18 | 2016-12-07T21:54:18 | 71,505,777 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 504 |
py
|
"""This is not a particularly useful example of saving out
csv files from a set of psydat files
"""
from os import path
from psychopy import misc, gui
#select some files to use
filenames = gui.fileOpenDlg(allowed="*.psydat")
#loop through the files
for thisFilename in filenames:
#get a new name
fileNoExt, fileExt = path.splitext(thisFilename)
newName = fileNoExt+"NEW.csv"
#load and save
dat = misc.fromFile(thisFilename)
dat.saveAsWideText(newName)
print 'saved', newName
|
[
"hejibo12"
] |
hejibo12
|
1f92b497581e4a6ccd8b4449b7ac74696b9d2aa2
|
0fd6478325937f0f7ff6d6000e95789d43609c74
|
/Sensors/klasseknop.py
|
f364d24a0edc0cf0d7d7109451593c95e900b107
|
[] |
no_license
|
StijnVandendriessche1/testProject3
|
88069b83571f02c5a16993bb7de8585475012f6b
|
b0e20d63c71ef980716b54901f041dacf6fd3954
|
refs/heads/master
| 2022-10-27T04:45:10.519424 | 2020-06-16T17:01:12 | 2020-06-16T17:01:12 | 272,765,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
class Button:
    """A debounced push button wired to a GPIO pin (BCM numbering)."""

    def __init__(self, pin, bouncetime=200):
        """Configure *pin* as an input with the internal pull-up enabled.

        Args:
            pin: BCM pin number the button is wired to.
            bouncetime: debounce interval in milliseconds for event detection.
        """
        self.pin = pin
        self.bouncetime = bouncetime
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.IN, GPIO.PUD_UP)

    @property
    def pressed(self):
        """True while the button is held down (input pulled low)."""
        return GPIO.input(self.pin) == 0

    def on_action(self, call_method):
        """Register *call_method* for both press and release edges, debounced."""
        GPIO.add_event_detect(
            self.pin, GPIO.BOTH, call_method, bouncetime=self.bouncetime
        )
|
[
"stijnvandendriessche@live.be"
] |
stijnvandendriessche@live.be
|
f58aa90421646a81d8717aae9a15bf1b420faea3
|
de8b648307048bc876e83dbee1e52d506410f91e
|
/cloudferrylib/scheduler/base/end_task.py
|
8bb911eed419eaa20de92a4ee09aa0c3e429a4ac
|
[
"Apache-2.0"
] |
permissive
|
roman-verchikov/CloudFerry
|
2ee4277800bd0a444553dd78b17270a0b4051111
|
2fddb39d92991b23c056dcd1731147a32b0c5190
|
refs/heads/master
| 2021-01-20T15:36:47.523757 | 2015-09-13T20:35:37 | 2015-09-13T20:35:37 | 31,447,965 | 0 | 0 | null | 2015-02-28T02:22:09 | 2015-02-28T02:22:08 | null |
UTF-8
|
Python
| false | false | 782 |
py
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
__author__ = 'mirrorcoder'
from cloudferrylib.scheduler.task import Task
class EndTask(Task):
    """Terminal marker task for the migration scheduler.

    Copies every keyword argument straight onto the instance (done before
    ``Task.__init__`` runs).
    """

    def __init__(self, **kwargs):
        # Expose each kwarg as an instance attribute.
        self.__dict__.update(kwargs)
        super(EndTask, self).__init__()
|
[
"dsozinov@mirantis.com"
] |
dsozinov@mirantis.com
|
ae8bcbd91e18849dee40f6016e0e7a7dfea122e1
|
be977244110a9c9cadb6c782bc4ba60590b3fd33
|
/etl.py
|
cf1a94544b937cddbc10bf4d4d1843d1cbd51804
|
[] |
no_license
|
ra2w/saas_benchmarking
|
3f0a90fe5f1337209d9df18b9150a04b0ecf5309
|
c433c86827375ac264ecdafb4b8e635f548870e5
|
refs/heads/main
| 2023-03-19T10:57:01.559713 | 2021-03-09T22:08:10 | 2021-03-09T22:08:10 | 340,752,221 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,621 |
py
|
import datetime
import numpy as np
import pandas as pd
import streamlit as st
global cols
cols = ['date',
'arr__m',
'subscription', 'services',
'total_revenue', 'current_quarter_revenue_growth', 'growth_persistance', 'cost_of_revenue', 'gross_profit',
'gross_margin', 'research_&_development', 'r&d_%', 'sales_&_marketing', 's&m_%', 'general_&_administrative',
'g&a_%', 'total_operating_expense', 'net_income', 'deprecation_&_amoritization',
'cash_from_operating_activites',
'capex', 'free_cash_flow', 'free_cash_flow_%', 'cash', 'short_term_investments', 'long_term_debt',
'short_term_debt', 'total_debt',
'magic_number', 'ltm_cac_ratio', 'ltm_magic_number', 'current_cac_ratio', 'arr_per_employee__k',
'net_dollar_retention', 'customers', 'other']
filter_cols = ['date', 'arr__m', 'total_revenue', 'growth_persistance',
'gross_profit', 'gross_margin',
'research_&_development', 'sales_&_marketing', 's&m_%', 'general_&_administrative',
'total_operating_expense', 'deprecation_&_amoritization',
'net_income', 'free_cash_flow', 'free_cash_flow_%', 'cash_from_operating_activites',
'ltm_cac_ratio', 'net_dollar_retention', 'customers']
global standard
def convert_currency(val):
    """Parse a currency string like '$1,234' into a float.

    Strips '$' and ',' before conversion; any non-string input (e.g. an
    already-missing NaN cell) yields float NaN.
    """
    try:
        stripped = val.replace(',', '').replace('$', '')
    except AttributeError:
        # Non-string (missing) values map to NaN.
        stripped = np.NaN
    return float(stripped)
def convert_date(val):
    """Convert 'YYYY_Mon' (or 'YYYY_MonthName') into 'MM/YYYY'."""
    try:
        parsed = datetime.datetime.strptime(val, '%Y_%b')
    except ValueError:
        # Fall back to the fully spelled-out month name.
        parsed = datetime.datetime.strptime(val, '%Y_%B')
    return parsed.strftime("%m/%Y")
def convert_currency_k_to_mil(val):
    """Parse a currency string expressed in $k and return the value in $m."""
    thousands = convert_currency(val)
    return float(thousands / 1000)
def convert_float(val):
    """Parse a multiple string like '1.5x' or '2,000' into a float.

    Strips ',' and the trailing 'x'; non-string input yields float NaN.
    """
    try:
        cleaned = val
        for ch in (',', 'x'):
            cleaned = cleaned.replace(ch, '')
    except AttributeError:
        cleaned = np.NaN
    return float(cleaned)
def convert_percent(val):
    """Parse a percent string like '45%' into its numeric value (45.0).

    Non-string input yields NaN; a string that is not numeric after the
    '%' is stripped still raises ValueError (unchanged behaviour).
    """
    try:
        return float(val.replace('%', ''))
    except AttributeError:
        # Missing / non-string values become NaN.
        return np.NaN
"""
#dollar_fmt = '{0:,.0f}'
dollar_fmt = '{0:.0f}'
percent_fmt = '{:.0f}%'
int_fmt = '{:d}'
float_fmt = '{:.2}'
col_formats = {'arr__m': dollar_fmt,
'subscription': dollar_fmt,
'services': dollar_fmt,
'total_revenue': dollar_fmt,
'ltm_rev': dollar_fmt,
'current_quarter_revenue_growth': percent_fmt,
'growth_persistance': percent_fmt,
'cost_of_revenue': dollar_fmt,
'gross_profit': dollar_fmt,
'gross_margin': percent_fmt,
'research_&_development': dollar_fmt,
'r&d_%': percent_fmt,
'sales_&_marketing': dollar_fmt,
's&m_%': percent_fmt,
'general_&_administrative': dollar_fmt,
'g&a_%': convert_percent,
'total_operating_expense': dollar_fmt,
'net_income': dollar_fmt,
'deprecation_&_amoritization': dollar_fmt,
'cash_from_operating_activites': dollar_fmt,
'capex': dollar_fmt,
'free_cash_flow': dollar_fmt,
'free_cash_flow_%': percent_fmt,
'cash': dollar_fmt,
'short_term_investments': dollar_fmt,
'short_term_debt': dollar_fmt,
'long_term_debt': dollar_fmt,
'total_debt': dollar_fmt,
'net_dollar_retention': percent_fmt,
'customers': float_fmt,
'magic_number': float_fmt,
'ltm_magic_number': float_fmt,
'ltm_cac_ratio': float_fmt,
'current_cac_ratio': float_fmt,
'arr_per_employee__k': dollar_fmt}
"""
col_types = {'date': convert_date,
'arr__m': convert_currency,
'subscription': convert_currency_k_to_mil,
'services': convert_currency_k_to_mil,
'total_revenue': convert_currency_k_to_mil,
'current_quarter_revenue_growth': convert_percent,
'growth_persistance': convert_percent,
'cost_of_revenue': convert_currency_k_to_mil,
'gross_profit': convert_currency_k_to_mil,
'gross_margin': convert_percent,
'research_&_development': convert_currency_k_to_mil,
'r&d_%': convert_percent,
'sales_&_marketing': convert_currency_k_to_mil,
's&m_%': convert_percent,
'general_&_administrative': convert_currency_k_to_mil,
'g&a_%': convert_percent,
'total_operating_expense': convert_currency_k_to_mil,
'net_income': convert_currency_k_to_mil,
'deprecation_&_amoritization': convert_currency_k_to_mil,
'cash_from_operating_activites': convert_currency_k_to_mil,
'capex': convert_currency_k_to_mil,
'free_cash_flow': convert_currency_k_to_mil,
'free_cash_flow_%': convert_percent,
'cash': convert_currency_k_to_mil,
'short_term_investments': convert_currency_k_to_mil,
'short_term_debt': convert_currency_k_to_mil,
'long_term_debt': convert_currency_k_to_mil,
'total_debt': convert_currency_k_to_mil,
'net_dollar_retention': convert_percent,
'customers': convert_float,
'magic_number': convert_float,
'ltm_magic_number': convert_float,
'ltm_cac_ratio': convert_float,
'current_cac_ratio': convert_float,
'arr_per_employee__k': convert_float}
def normalize_column_names(df):
    """Normalize DataFrame column names in place and return *df*.

    Strips whitespace, lowercases, replaces spaces and '/' with '_',
    drops parentheses, and maps '$' to '_'.

    ``regex=False`` is required: with the regex default of older pandas a
    bare '(' is an invalid regular expression and raises, and it keeps
    this helper consistent with the identical chain in ``load_financials``.
    """
    df.columns = (
        df.columns.str.strip()
        .str.lower()
        .str.replace(' ', '_', regex=False)
        .str.replace('(', '', regex=False)
        .str.replace(')', '', regex=False)
        .str.replace('$', '_', regex=False)
        .str.replace('/', '_', regex=False)
    )
    return df
def add_ltm_rev(df):
    """Add 'ltm_rev': trailing-four-quarter (LTM) revenue.

    The first three rows have no full trailing year and come out NaN.
    Returns the mutated *df*.
    """
    quarterly = df['total_revenue']
    df['ltm_rev'] = sum(quarterly.shift(lag) for lag in range(4))
    return df
def add_growth_rates(df):
    """Add revenue growth-rate and growth-efficiency columns to *df*.

    Expects quarterly rows ordered oldest-first with 'total_revenue',
    'free_cash_flow_%', 'sales_&_marketing' and 's&m_%' populated.
    All growth figures are expressed as percentages (x100). Returns the
    mutated *df*.
    """
    df['ltm_rev_g'] = np.NaN
    df['ntm_rev_g'] = np.NaN
    df['next_q_total_revenue'] = df['total_revenue'].shift(-1)
    # Year-over-Year quarterly revenue growth [r_q(i)/(r_q(i-4))]-1
    df['ltm_rev_g'] = (df['total_revenue'] / df['total_revenue'].shift(4) - 1) * 100
    # quarter over quarter revenue growth [(r_q(i)/r_q(i-1))]-1
    df['qoq_rev_g'] = (df['total_revenue'] / (df['total_revenue'].shift(1)) - 1) * 100
    # 1 year forward quarterly revenue growth [r_q(i+4)/r_q(i)]-1
    df['ntm_rev_g'] = (df['total_revenue'].shift(-4) / df['total_revenue'] - 1) * 100
    # 2 year forward quarterly revenue growth [r_q(i+8)/r_q(i)]^(1/2)-1
    df['ntm_rev_2yr_g'] = ((df['total_revenue'].shift(-8) / df['total_revenue']) ** (0.5) - 1) * 100
    # Rule of 40: YoY growth plus FCF margin.
    df['rule_of_40'] = df['ltm_rev_g'] + df['free_cash_flow_%'] * 100
    # New ARR booked this quarter and its yield on last quarter's S&M spend.
    df['new_arr'] = df['total_revenue'] - df['total_revenue'].shift(1)
    df['q_sales_ef'] = df['new_arr'] / df['sales_&_marketing'].shift(1)
    # Trailing-four-quarter average of S&M as % of revenue.
    df['rol_s&m_%'] = df['s&m_%']
    for i in range(3):
        df['rol_s&m_%'] += df['s&m_%'].shift(i + 1)
    df['rol_s&m_%'] = df['rol_s&m_%'] / 4
    # Growth efficiency: actual QoQ growth minus the growth implied by S&M
    # spend at the trailing average S&M intensity.
    expected_rev_g = ((df['sales_&_marketing'] / (df['rol_s&m_%'].shift(1) / 100)) / df['total_revenue'].shift(
        1) - 1) * 100
    df['growth_ef'] = df['qoq_rev_g'] - expected_rev_g
    df['rol_growth_ef'] = df['growth_ef']
    df['rol_qoq_rev_g'] = df['qoq_rev_g']
    for i in range(3):
        df['rol_growth_ef'] += df['growth_ef'].shift(i + 1)
        # NOTE(review): this accumulates the partially-built rolling column
        # itself rather than df['qoq_rev_g'].shift(i + 1) (cf. rol_growth_ef
        # on the line above) — looks like a bug; confirm intent before
        # relying on 'rol_qoq_rev_g'.
        df['rol_qoq_rev_g'] += df['rol_qoq_rev_g'].shift(i + 1)
    df['rol_growth_ef'] = df['rol_growth_ef'] / 4
    df['rol_qoq_rev_g'] = df['rol_qoq_rev_g'] / 4
    return df
def add_sales_efficiency(df):
    """No-op placeholder: the sales-efficiency (CAC) metric is disabled.

    The previous implementation smoothed new-revenue over prior-quarter
    S&M spend across the trailing four quarters; kept here for reference:

        df['new_revenue'] = df['total_revenue'] - df['total_revenue'].shift(1)
        acc = 0
        for t in range(0, 4):
            acc += df['new_revenue'].shift(t) / df['sales_&_marketing'].shift(t + 1)
        df['sales_ef'] = acc / 4

    Returns *df* unchanged.
    """
    return df
def set_ipo_timelines(ticker, df, ipo_month, ipo_year):
    """Re-index a company's quarterly rows onto a common IPO timeline.

    Adds column 't' = quarters relative to the IPO quarter (t=0 at IPO,
    negative before, positive after) and shortens 'date' to 'MM/YY'.

    Args:
        ticker: company ticker (kept for interface compatibility).
        df: DataFrame with a 'date' column formatted 'MM/YYYY'.
        ipo_month: calendar month (1-12) of the IPO fiscal-quarter end.
        ipo_year: calendar year of the IPO.

    Returns:
        The mutated DataFrame with the new 't' column.
    """
    df['year'] = df['date'].apply(
        lambda x: float(datetime.datetime.strptime(x, '%m/%Y').strftime("%Y"))
    ) - ipo_year
    df['month'] = df['date'].apply(
        lambda x: float(datetime.datetime.strptime(x, '%m/%Y').strftime("%m"))
    )
    # truncate year (e.g. 2020 --> 20) to reduce the length of the field
    df['date'] = df['date'].apply(
        lambda x: datetime.datetime.strptime(x, '%m/%Y').strftime("%m/%y")
    )
    # Every row must be a whole number of quarters from the IPO month.
    df['error'] = (df['month'] - ipo_month) % 3
    if df['error'].sum() != 0:
        st.write("Error: Invalid quarter month ", df['month'])
    df['quarter'] = (df['month'] - ipo_month) / 3
    # t = quarters since IPO. +x = x quarters after IPO, -x = x quarters before IPO
    df['t'] = df['year'] * 4 + df['quarter']
    # Keyword form: the positional `axis` argument was removed in pandas 2.0.
    df.drop(columns=['error', 'month', 'year'], inplace=True)
    return df
def read_company_csv(filename):
    """Load a company's quarterly financials CSV into a DataFrame."""
    frame = pd.read_csv(filename)
    return frame
def load_financials(ticker, ipo_month, ipo_year):
    """Load, normalise and timeline-index one company's financials CSV.

    Reads data/<ticker>.htm.csv (metrics as rows, fiscal quarters as
    columns), transposes it to one row per quarter, normalises column
    names, pads any columns missing from the master `cols` list with NaN,
    applies the per-column parsers from `col_types`, keeps `filter_cols`
    only, and re-indexes rows onto the IPO-relative timeline 't'.
    """
    filename = 'data/' + ticker + '.htm.csv'
    df = read_company_csv(filename)
    # Normalize columns
    # remove $ and whitespace from column names; add _ between words, add __m for $m
    # (regex=False so '(', '$' etc. are treated literally)
    df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_',regex=False).str.replace('(', '',regex=False).str.replace(')',
        '',regex=False).str.replace(
        '$', '_',regex=False).str.replace('/', '_',regex=False)
    df = df.set_index('fiscal_year')
    # Drop empty rows
    df.dropna(axis='rows', how='all', inplace=True)
    # Drop empty columns
    df.dropna(axis='columns', how='all', inplace=True)
    # Transpose: quarters become rows, metrics become columns
    df = df.T
    # Normalize transpose columns (metric names are now the columns)
    df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_',regex=False).str.replace('(', '',regex=False).str.replace(')',
        '',regex=False).str.replace(
        '$', '_',regex=False).str.replace('/', '_',regex=False)
    # Keep only quarters that actually reported revenue
    df = df[df['total_revenue'].notna()]
    df.reset_index(inplace=True)
    df.rename(columns={'index': 'date'}, inplace=True)
    # Pad columns missing from the master column set with NaN
    missing_cols = [v for v in cols if v not in df.columns]
    for v in missing_cols:
        # print(ticker,":adding ",v," which was not found!")
        df[v] = [np.NaN] * df.index.size
    # Warn about columns present in the file but unknown to the master set
    new_cols = [v for v in df.columns if v not in cols]
    for v in new_cols:
        st.write(ticker, "WARNING!: Found ", v, " missing in master column set!")
    # set column types (apply the per-column string parsers)
    for c in df.columns:
        if c in col_types:
            df[c] = df[c].apply(col_types[c])
    df = df[filter_cols] ## XXX REMOVE!
    df = set_ipo_timelines(ticker, df, ipo_month, ipo_year)
    # Recompute S&M intensity after numeric conversion
    df['s&m_%'] = df['sales_&_marketing'] / df['total_revenue'] * 100
    #df = add_growth_rates(df)
    #df = add_sales_efficiency(df)
    #df = add_ltm_rev(df)
    return df
# Company db is a dict with the following schema:
# [ticker]: {name | sector | gtm | ipo_year | ipo_month}
# Examples:
# ZM: {apps | bottom_up| 2019 |jan}
@st.cache(suppress_st_warning=True)
def load_companies(_meta_df):
_c = {}
dfs_to_concat = []
tickers_to_concat = []
st.write("ETL Hello3")
cnt = 0
my_bar = st.progress(0)
ticker_list = list(_meta_df['ticker'])
for ticker in ticker_list:
_c[ticker] = load_financials(ticker,
int(_meta_df[_meta_df['ticker'] == ticker]['ipo_month']),
float(_meta_df[_meta_df['ticker'] == ticker]['ipo_year']))
_meta_df.loc[_meta_df['ticker'] == ticker, 'earliest'] = _c[ticker]['t'].min()
_meta_df.loc[_meta_df['ticker'] == ticker, 'latest'] = _c[ticker]['t'].max()
_c[ticker].set_index('t', inplace=True)
_c[ticker].columns.names = ['']
# st.table(_c[ticker])
dfs_to_concat.append(_c[ticker])
tickers_to_concat.append(ticker)
cnt = cnt + 1
my_bar.progress(cnt / len(ticker_list))
# _m is the master dataframe with all companies merged indexed to a common ipo timeline
# t=0 is the last quarter before IPO
_m = pd.concat(dfs_to_concat, axis=0, keys=tickers_to_concat)
return _m
@st.cache(suppress_st_warning=True)
def load_companies_refactored(_meta_df):
_c = {}
dfs_to_concat = []
tickers_to_concat = []
cnt = 0
my_bar = st.progress(0)
ticker_list = list(_meta_df['ticker'])
for ticker in ticker_list:
_c[ticker] = load_financials(ticker,
int(_meta_df[_meta_df['ticker'] == ticker]['ipo_month']),
float(_meta_df[_meta_df['ticker'] == ticker]['ipo_year']))
_c[ticker].set_index('t', inplace=True)
_c[ticker].columns.names = ['']
cnt = cnt + 1
my_bar.progress(cnt / len(ticker_list))
return _c
|
[
"rar204@gmail.com"
] |
rar204@gmail.com
|
02d4546366ff27ec3f6a6505edd0f005e0734383
|
24eaa6d8e3bffdf2d91b5b8222754c94866be6ba
|
/asteroids.py
|
0042de77aaa92c125d737865e6c9b0b7b97ab48a
|
[] |
no_license
|
KosPavel/tkinter-asteroid-game
|
b999425d92e6f001403ca119d62de66c8fe8f215
|
73febf4b8b22acdcbc2e29fe95ef378b1ec45012
|
refs/heads/master
| 2020-03-27T05:37:29.018118 | 2018-09-22T15:44:58 | 2018-09-22T15:44:58 | 146,035,804 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,217 |
py
|
import math
import random
import time
import settings
class Asteroids():
    """Pool of asteroids for the game field.

    Five parallel lists (x_coords, y_coords, asteroid_sizes,
    asteroid_speeds, asteroid_direction_angles) describe up to
    MAX_ASTEROIDS asteroids; the sentinel string 'x' in a slot marks it
    as empty / awaiting respawn. Field dimensions and tunables come from
    the `settings` module.
    """

    def __init__(self, root):
        # Tunables from settings; `root` is accepted but unused here.
        self.MIN_SIZE = settings.min_size
        self.MAX_SIZE = settings.max_size
        self.MIN_SPEED = settings.min_speed
        self.MAX_SPEED = settings.max_speed
        self.MAX_ASTEROIDS = settings.max_asteroids
        self.x_coords = []
        self.y_coords = []
        self.asteroid_sizes = []
        self.asteroid_speeds = []
        self.asteroid_direction_angles = []
        self.fill()

    def fill(self):
        # Pad every parallel list with 'x' placeholders up to MAX_ASTEROIDS.
        while len(self.x_coords) != self.MAX_ASTEROIDS:
            self.y_coords.append('x')
            self.x_coords.append('x')
            self.asteroid_sizes.append('x')
            self.asteroid_speeds.append('x')
            self.asteroid_direction_angles.append('x')

    def borders(self):
        # Free the slot of any asteroid that left the play field.
        # NOTE(review): assumes every slot holds numbers (supply_asteroids
        # has run); comparing the 'x' sentinel with a number would raise
        # TypeError on Python 3 — confirm call order keeps slots filled.
        i = 0
        while i < self.MAX_ASTEROIDS:
            if (self.x_coords[i] >= settings.width) or (self.y_coords[i] >= settings.height):
                self.x_coords[i] = 'x'
                self.y_coords[i] = 'x'
                self.asteroid_sizes[i] = 'x'
                self.asteroid_speeds[i] = 'x'
                self.asteroid_direction_angles[i] = 'x'
            elif (self.x_coords[i] <= 0) or (self.y_coords[i] <= 0):
                self.x_coords[i] = 'x'
                self.y_coords[i] = 'x'
                self.asteroid_sizes[i] = 'x'
                self.asteroid_speeds[i] = 'x'
                self.asteroid_direction_angles[i] = 'x'
            i += 1

    def supply_asteroids(self):
        # Respawn every empty slot on a random field edge with random
        # speed, heading and size.
        i = 0
        while i < self.MAX_ASTEROIDS:
            if self.x_coords[i] == 'x':  # check for lack of asteroids
                # Spawn on a horizontal edge ('x') or a vertical edge ('y').
                x_or_y = random.choice(['x', 'y'])
                if x_or_y == 'x':
                    self.x_coords[i] = random.randint(0, settings.width)
                    self.y_coords[i] = random.choice([1, settings.height - 1])
                else:
                    self.y_coords[i] = random.randint(0, settings.height)
                    self.x_coords[i] = random.choice([1, settings.width - 1])
                self.asteroid_speeds[i] = random.randint(self.MIN_SPEED, self.MAX_SPEED)
                self.asteroid_direction_angles[i] = random.randint(0, 360)
                self.asteroid_sizes[i] = random.randint(self.MIN_SIZE, self.MAX_SIZE)
            i += 1

    ''' main methods '''

    def shape(self, i):
        # Bounding box [x0, y0, x1, y1] of asteroid *i* for canvas drawing.
        self.supply_asteroids()
        shape = [
            self.x_coords[i] - self.asteroid_sizes[i],
            self.y_coords[i] - self.asteroid_sizes[i],
            self.x_coords[i] + self.asteroid_sizes[i],
            self.y_coords[i] + self.asteroid_sizes[i]
        ]
        return shape

    def direction(self):
        # Advance every asteroid one step along its heading, then cull
        # off-field asteroids and respawn the freed slots.
        i = 0
        while i < self.MAX_ASTEROIDS:
            self.x_coords[i] = self.x_coords[i] + self.asteroid_speeds[i] \
                * math.cos(math.radians(self.asteroid_direction_angles[i]))
            self.y_coords[i] = self.y_coords[i] + self.asteroid_speeds[i] \
                * math.sin(math.radians(self.asteroid_direction_angles[i]))
            i += 1
        self.borders()
        self.supply_asteroids()
|
[
"kos.pav007@yandex.ru"
] |
kos.pav007@yandex.ru
|
8eb0f921dc96106e3a1a6627b000ea2b25e818ce
|
6bd28b019557ff86e6848af0d0e14bf6feb630c9
|
/tpe6.py
|
f82e20129dc5396e5ee9c5595db5c9ac7b9563e8
|
[] |
no_license
|
dejalar/MOPE_lab6
|
b34b62394cdb10539fa36e7c60c011c9b73c02f7
|
743c74b9441f6279bd050e8429e45c700475cca0
|
refs/heads/main
| 2023-05-02T12:01:40.558807 | 2021-05-22T15:30:48 | 2021-05-22T15:30:48 | 365,840,334 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,496 |
py
|
import math
import random
from _decimal import Decimal
from itertools import compress
from scipy.stats import f, t
import numpy
from functools import reduce
import matplotlib.pyplot as plot
def aboba():
    # One complete second-order (central composite) regression experiment:
    # build the design, simulate noisy responses, then run the classic chain
    # Cochran (variance uniformity) -> coefficient fit -> Student
    # (significance) -> Fisher (adequacy).  All console output is Ukrainian.
    def regression_equation(x1, x2, x3, coeffs, importance=[True] * 11):
        # NOTE(review): mutable default argument; harmless here only because
        # the list is never mutated.
        factors_array = [1, x1, x2, x3, x1 * x2, x1 * x3, x2 * x3, x1 * x2 * x3, x1 ** 2, x2 ** 2, x3 ** 2]
        return sum([el[0] * el[1] for el in compress(zip(coeffs, factors_array), importance)])
    def func(x1, x2, x3):
        # "True" model used to simulate experimental responses.
        coeffs = [2.2, 1.6, 9.2, 9.5, 0.8, 0.7, 6.5, 0.2, 0.9, 8.7, 9.1]
        return regression_equation(x1, x2, x3, coeffs)
    # Normalized (coded) design matrix: 2^3 factorial points plus star points
    # at +-1.73.  Currently unused by the main flow below.
    norm_plan_raw = [[-1, -1, -1],
                     [-1, +1, +1],
                     [+1, -1, +1],
                     [+1, +1, -1],
                     [-1, -1, +1],
                     [-1, +1, -1],
                     [+1, -1, -1],
                     [+1, +1, +1],
                     [-1.73, 0, 0],
                     [+1.73, 0, 0],
                     [0, -1.73, 0],
                     [0, +1.73, 0],
                     [0, 0, -1.73],
                     [0, 0, +1.73]]
    # Naturalized design matrix (15 rows: factorial + star + center point).
    natur_plan_raw = [[-40, -35, 20],
                      [-40, -35, 25],
                      [-40, 15, 20],
                      [-40, 15, 25],
                      [20, -35, 20],
                      [20, -35, 25],
                      [20, 15, 20],
                      [20, 15, 25],
                      [-61.9, 10, 22.5],
                      [-41.9, 10, 22.5],
                      [-10, -41.9, 22.5],
                      [-10, 61.9, 22.5],
                      [-10, 10, -29.4],
                      [-10, 10, 74.4],
                      [-10, 10, 22.5]]
    def generate_factors_table(raw_array):
        # Extend each 3-factor row with interaction and squared terms,
        # rounded to 3 decimals.
        raw_list = [row + [row[0] * row[1], row[0] * row[2], row[1] * row[2], row[0] * row[1] * row[2]] + list(
            map(lambda x: x ** 2, row)) for row in raw_array]
        return list(map(lambda row: list(map(lambda el: round(el, 3), row)), raw_list))
    def generate_y(m, factors_table):
        # m parallel noisy observations per row: true model +- uniform noise.
        return [[round(func(row[0], row[1], row[2]) + random.randint(-5, 5), 3) for _ in range(m)] for row in
                factors_table]
    def print_matrix(m, N, factors, y_vals, additional_text=":"):
        labels_table = list(map(lambda x: x.ljust(10),
                                ["x1", "x2", "x3", "x12", "x13", "x23", "x123", "x1^2", "x2^2", "x3^2"] + [
                                    "y{}".format(i + 1) for i in range(m)]))
        rows_table = [list(factors[i]) + list(y_vals[i]) for i in range(N)]
        print("\nМатриця планування" + additional_text)
        print(" ".join(labels_table))
        print("\n".join([" ".join(map(lambda j: "{:<+10}".format(j), rows_table[i])) for i in range(len(rows_table))]))
        print("\t")
    def print_equation(coeffs, importance=[True] * 11):
        # Print only the terms flagged important; never mutates `importance`.
        x_i_names = list(
            compress(["", "x1", "x2", "x3", "x12", "x13", "x23", "x123", "x1^2", "x2^2", "x3^2"], importance))
        coefficients_to_print = list(compress(coeffs, importance))
        equation = " ".join(
            ["".join(i) for i in zip(list(map(lambda x: "{:+.2f}".format(x), coefficients_to_print)), x_i_names)])
        print("Рівняння регресії: y = " + equation)
    def set_factors_table(factors_table):
        # Returns an accessor x_i(i) -> column i of the design matrix with a
        # leading column of ones (the free term).
        def x_i(i):
            with_null_factor = list(map(lambda x: [1] + x, generate_factors_table(factors_table)))
            res = [row[i] for row in with_null_factor]
            return numpy.array(res)
        return x_i
    def m_ij(*arrays):
        # Mean of the element-wise product of the given arrays.
        return numpy.average(reduce(lambda accum, el: accum * el, list(map(lambda el: numpy.array(el), arrays))))
    def find_coefficients(factors, y_vals):
        # Solve the normal equations for the 11 regression coefficients.
        x_i = set_factors_table(factors)
        coeffs = [[m_ij(x_i(column), x_i(row)) for column in range(11)] for row in range(11)]
        y_numpy = list(map(lambda row: numpy.average(row), y_vals))
        free_values = [m_ij(y_numpy, x_i(i)) for i in range(11)]
        beta_coefficients = numpy.linalg.solve(coeffs, free_values)
        return list(beta_coefficients)
    def cochran_criteria(m, N, y_table):
        # True when the row variances are uniform (Gp < Gt); otherwise the
        # caller should increase m and regenerate observations.
        def get_cochran_value(f1, f2, q):
            partResult1 = q / f2
            params = [partResult1, f1, (f2 - 1) * f1]
            fisher = f.isf(*params)
            result = fisher / (fisher + (f2 - 1))
            return Decimal(result).quantize(Decimal('.0001')).__float__()
        print("Перевірка рівномірності дисперсій за критерієм Кохрена: m = {}, N = {}".format(m, N))
        y_variations = [numpy.var(i) for i in y_table]
        max_y_variation = max(y_variations)
        gp = max_y_variation / sum(y_variations)
        f1 = m - 1
        f2 = N
        p = 0.95
        q = 1 - p
        gt = get_cochran_value(f1, f2, q)
        print("Gp = {}; Gt = {}; f1 = {}; f2 = {}; q = {:.2f}".format(gp, gt, f1, f2, q))
        if gp < gt:
            print("Gp < Gt => дисперсії рівномірні - все правильно")
            return True
        else:
            print("Gp > Gt => дисперсії нерівномірні - треба ще експериментів")
            return False
    def student_criteria(m, N, y_table, beta_coefficients):
        # Returns a list of 11 booleans: True where |t_i| exceeds the table
        # value, i.e. the coefficient is statistically significant.
        def get_student_value(f3, q):
            return Decimal(abs(t.ppf(q / 2, f3))).quantize(Decimal('.0001')).__float__()
        print("\nПеревірка значимості коефіцієнтів регресії за критерієм Стьюдента: m = {}, N = {} ".format(m, N))
        average_variation = numpy.average(list(map(numpy.var, y_table)))
        variation_beta_s = average_variation / N / m
        standard_deviation_beta_s = math.sqrt(variation_beta_s)
        t_i = [abs(beta_coefficients[i]) / standard_deviation_beta_s for i in range(len(beta_coefficients))]
        f3 = (m - 1) * N
        q = 0.05
        t_our = get_student_value(f3, q)
        importance = [True if el > t_our else False for el in list(t_i)]
        # print result data
        print("Оцінки коефіцієнтів βs: " + ", ".join(list(map(lambda x: str(round(float(x), 3)), beta_coefficients))))
        print("Коефіцієнти ts: " + ", ".join(list(map(lambda i: "{:.2f}".format(i), t_i))))
        print("f3 = {}; q = {}; tтабл = {}".format(f3, q, t_our))
        beta_i = ["β0", "β1", "β2", "β3", "β12", "β13", "β23", "β123", "β11", "β22", "β33"]
        # HACK: a module-level global is used to pass the per-run labels back
        # to the top-level counting loop outside aboba().
        global importance_to_print
        importance_to_print = ["важливий" if i else "неважливий" for i in importance]
        to_print = map(lambda x: x[0] + " " + x[1], zip(beta_i, importance_to_print))
        print(*to_print, sep="; ")
        print_equation(beta_coefficients, importance)
        # y = []
        # x = []
        # for i in range(len(list(t_i))):
        #     x.append(i)
        #     if t_i[i] > t_our:
        #         y.append(t_i[i])
        #     else:
        #         y.append(-t_i[i])
        #
        # plot.plot(x, y)
        # plot.grid(True)
        # plot.axis([0, 11, -11, 11])
        # plot.show()
        return importance
    def fisher_criteria(m, N, d, x_table, y_table, b_coefficients, importance):
        # Model adequacy check; d = number of significant coefficients.
        def get_fisher_value(f3, f4, q):
            return Decimal(abs(f.isf(q, f4, f3))).quantize(Decimal('.0001')).__float__()
        f3 = (m - 1) * N
        f4 = N - d
        q = 0.05
        theoretical_y = numpy.array([regression_equation(row[0], row[1], row[2], b_coefficients) for row in x_table])
        average_y = numpy.array(list(map(lambda el: numpy.average(el), y_table)))
        s_ad = m / (N - d) * sum((theoretical_y - average_y) ** 2)
        y_variations = numpy.array(list(map(numpy.var, y_table)))
        s_v = numpy.average(y_variations)
        f_p = float(s_ad / s_v)
        f_t = get_fisher_value(f3, f4, q)
        # NOTE(review): the printout labels use row[1..3] while the theoretical
        # values above use row[0..2] — the displayed factor values look shifted
        # by one column; verify intent.
        theoretical_values_to_print = list(
            zip(map(lambda x: "x1 = {0[1]:<10} x2 = {0[2]:<10} x3 = {0[3]:<10}".format(x), x_table), theoretical_y))
        print("\nПеревірка адекватності моделі за критерієм Фішера: m = {}, N = {} для таблиці y_table".format(m, N))
        print("Теоретичні значення y для різних комбінацій факторів:")
        print("\n".join(["{arr[0]}: y = {arr[1]}".format(arr=el) for el in theoretical_values_to_print]))
        print("Fp = {}, Ft = {}".format(f_p, f_t))
        print("Fp < Ft => модель адекватна" if f_p < f_t else "Fp > Ft => модель неадекватна")
        return True if f_p < f_t else False
    # ---- main experiment flow ----
    m = 3
    N = 15
    natural_plan = generate_factors_table(natur_plan_raw)
    y_arr = generate_y(m, natur_plan_raw)
    # Repeat with more parallel observations until variances are uniform.
    while not cochran_criteria(m, N, y_arr):
        m += 1
        y_arr = generate_y(m, natural_plan)
    print_matrix(m, N, natural_plan, y_arr, " для натуралізованих факторів:")
    coefficients = find_coefficients(natural_plan, y_arr)
    print_equation(coefficients)
    importance = student_criteria(m, N, y_arr, coefficients)
    d = len(list(filter(None, importance)))
    fisher_criteria(m, N, d, natural_plan, y_arr, coefficients, importance)
# Run the experiment 100 times and tally how many regression coefficients
# came out statistically significant across all runs.
k = 0
for i in range(100):
    aboba()
    # Bug fix: `len(importance_to_print)` counted all 11 coefficients on
    # every run (always 1100 total); only the ones actually flagged
    # significant ("важливий") should be tallied.
    k = k + importance_to_print.count("важливий")
print("Загальна кількість значимих коофіцієнтів - ", k)
|
[
"noreply@github.com"
] |
dejalar.noreply@github.com
|
fea971baa87aa9357bfde53b3915b5e65cd81b2d
|
0f2f57a52f35acee03ba288fe5466104e8fc2d8f
|
/newsite/authentication/apis/serializers.py
|
fdf1c6832b2f0773cccb5dc189507bd29f8a3050
|
[] |
no_license
|
PratyushAdhi/NewsSite
|
3c8bc19d156f4e434f351047f25cee08d9ef38ee
|
de7f0771116a7185256c58edcf4fdb424f253926
|
refs/heads/master
| 2023-01-19T11:17:33.201771 | 2020-11-05T17:57:43 | 2020-11-05T17:57:43 | 299,095,711 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,393 |
py
|
from rest_framework import serializers
from rest_framework.relations import HyperlinkedRelatedField
from rest_framework.serializers import HyperlinkedModelSerializer
from rest_framework.exceptions import AuthenticationFailed
from django.contrib import auth
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, smart_bytes, force_bytes, force_str, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import reverse
from authors.models import Author
from .utils import Utils
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer for creating a new Author account."""
    email = serializers.EmailField()
    username = serializers.CharField(max_length=50)
    password = serializers.CharField(max_length=50, write_only=True)

    def validate(self, attrs):
        # No cross-field validation is needed; field-level validators
        # (EmailField, max_length) have already run.  The previous version
        # copied every value into unused locals to no effect.
        return attrs

    def create(self, validated_data):
        # Delegate to the custom manager so the password is hashed, not
        # stored in plain text.
        return Author.objects.create_user(
            validated_data.get("email"),
            validated_data.get("username"),
            validated_data.get("password"),
        )

    class Meta:
        model = Author
        fields = ("username", "email", "password")
class LoginSerializer(serializers.ModelSerializer):
    """Serializer that authenticates a user and returns fresh auth tokens."""
    email = serializers.EmailField()
    password = serializers.CharField(max_length=50)
    username = serializers.CharField(max_length=50, read_only=True)
    tokens = serializers.CharField(max_length=255, read_only=True)

    class Meta:
        model = Author
        fields = ["email", "username", "password", "tokens"]

    def validate(self, attrs):
        """Authenticate the credentials.

        Raises:
            AuthenticationFailed: when email/password do not match a user.
        """
        email = attrs["email"]
        password = attrs["password"]
        user = auth.authenticate(email=email, password=password)
        if not user:
            raise AuthenticationFailed("Invalid Credentials")
        # Bug fix: a trailing `return super().validate(attrs)` after this
        # return was unreachable dead code and has been removed.
        return {
            "email": email,
            "password": password,
            "tokens": user.tokens()
        }
class RequestPasswordResetEmailSerializer(serializers.ModelSerializer):
    """Accepts the e-mail address that the password-reset link is sent to."""
    email = serializers.EmailField()
    class Meta:
        model = Author
        fields = ("email",)
class SetNewPasswordSerializer(serializers.Serializer):
    """Validates a password-reset uid/token pair and sets the new password."""
    password = serializers.CharField(max_length=50, write_only=True)
    token = serializers.CharField(write_only=True)
    uidb64 = serializers.CharField(max_length=255, write_only=True)

    def validate(self, attrs):
        """Decode the uid, verify the reset token and update the password.

        Returns the Author whose password was changed.

        Raises:
            AuthenticationFailed: on a malformed uid, unknown user or a
                stale/invalid reset token.
        """
        password = attrs.get("password", "")
        token = attrs.get("token", "")
        uidb64 = attrs.get("uidb64", "")
        # Fixes vs. previous version: leftover debug print() calls removed;
        # unreachable `return super().validate(attrs)` dead code removed; the
        # except no longer swallows and re-wraps its own AuthenticationFailed.
        try:
            user_id = force_str(urlsafe_base64_decode(uidb64))
            user = Author.objects.get(id=user_id)
        except Exception:
            # uidb64 was not valid base64 / not a valid id / no such user.
            raise AuthenticationFailed("Invalid Reset Parameter", 401)
        if not PasswordResetTokenGenerator().check_token(user, token):
            raise AuthenticationFailed("Invalid Reset Parameter", 401)
        user.set_password(password)
        user.save()
        return user
|
[
"pratyushadhikary@hotmail.com"
] |
pratyushadhikary@hotmail.com
|
5e8638f119913473ddfe3759bb970454693db209
|
fe35240eec60ec1288b8b52ff5d0735573ac1650
|
/app.py
|
4f5c7bebf0112f0ea61ea1009bc4f7560bf59810
|
[] |
no_license
|
tvliveapp/server
|
a679fd68397d0c7a9a64c2ddee1e9e797f9efaf8
|
efdbc6fc4f07c42421bb8cab9d9d443881c349cc
|
refs/heads/master
| 2022-11-23T23:38:48.248451 | 2020-02-11T12:32:43 | 2020-02-11T12:32:43 | 276,706,032 | 0 | 0 | null | 2020-07-02T17:20:47 | 2020-07-02T17:20:47 | null |
UTF-8
|
Python
| false | false | 2,597 |
py
|
#!/usr/bin/python
try:
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
except:
from http.server import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
try:
from urlparse import urlparse
from urlparse import urlparse, parse_qs
except:
from urllib.parse import urlparse, parse_qs
import os
# Heroku-style port configuration: honour $PORT, default to 5000.
port = int(os.environ.get("PORT", 5000))
PORT_NUMBER = port
# TODO: remove (original Spanish note: "de leo borrar")
import citas
citas=citas.citas
# Re-serialize the appointments dict to citas.py so the module file always
# mirrors the in-memory state at startup.
f=open('citas.py','w')
f.write('citas='+str(citas))
f.close()
####
class myHandler(BaseHTTPRequestHandler):
    # Serves static files and records appointments sent via the query string.
    global citas
    #Handler for the GET requests
    def do_GET(self):
        # Split "path?query" -> [path, query].  A query, when present, encodes
        # a new appointment as "key1;key2;key3;value" (spaces as %20).
        self.path=self.path.split('?')
        try:
            arg=self.path[1].replace('%20',' ')
            print(arg)
            arg=arg.split(';')
            # Record the appointment and persist the whole dict to citas.py.
            citas[arg[0]][arg[1]][arg[2]].append(arg[3])
            print(citas[arg[0]])
            f=open('citas.py','w')
            f.write('citas='+str(citas))
            f.close()
        except:
            # NOTE(review): bare except is meant to skip requests without a
            # query string (IndexError), but it also hides real errors.
            pass
        self.path=self.path[0]
        if self.path=="/": #127.0.0.1:5000/
            self.path="/index.html" #127.0.0.1:5000/index.html
        print()
        try:
            #Check the file extension required and
            #set the right mime type
            sendReply = False
            if self.path.endswith(".html"):
                mimetype='text/html'
                sendReply = True
                f = open(curdir + sep + self.path)
                data=f.read()
                f.close()
            if self.path.endswith(".jpg"):
                # NOTE(review): for .jpg/.gif/.css no file is read, so `data`
                # may be unbound below — verify these paths are ever hit.
                mimetype='image/jpg'
                sendReply = True
            if self.path.endswith(".gif"):
                mimetype='image/gif'
                sendReply = True
            if self.path.endswith(".js"):
                mimetype='application/javascript'
                # .js requests are answered with the contents of citas.py
                # (the first assignment is immediately overwritten).
                data='citas='+str(citas)
                f=open('citas.py','r')
                data=f.read()
                f.close()
                sendReply = True
            if self.path.endswith(".css"):
                mimetype='text/css'
                sendReply = True
            if sendReply == True:
                #Open the static file requested and send it
                #f = open(curdir + sep + self.path)
                self.send_response(200)
                self.send_header('Content-type',mimetype)
                self.end_headers()
                #data=f.read()
                try:
                    self.wfile.write(data)
                except:
                    # Python 3: str payloads must be encoded before writing.
                    self.wfile.write(bytes(data, 'UTF-8'))
                return
        except IOError:
            self.send_error(404,'File Not Found: %s' % self.path)
try:
    # Create a web server and define the handler to manage the
    # incoming request
    server = HTTPServer(('0.0.0.0', PORT_NUMBER), myHandler)
    print ('Started httpserver on port ' , PORT_NUMBER)
    # Block forever serving incoming HTTP requests.
    server.serve_forever()
except KeyboardInterrupt:
    # Ctrl-C: close the listening socket cleanly before exiting.
    print ('^C received, shutting down the web server')
    server.socket.close()
|
[
"noreply@github.com"
] |
tvliveapp.noreply@github.com
|
52890da7dbeb50e2962006085b369e2b130e3485
|
f5ffd566166948c4202eb1e66bef44cf55a70033
|
/test/test_array_of_role_no_i_ds.py
|
ce70095d1ed2d0425958c0329fc4b10467e24d33
|
[] |
no_license
|
skyportal/skyportal_client
|
ed025ac6d23589238a9c133d712d4f113bbcb1c9
|
15514e4dfb16313e442d06f69f8477b4f0757eaa
|
refs/heads/master
| 2023-02-10T02:54:20.757570 | 2021-01-05T02:18:03 | 2021-01-05T02:18:03 | 326,860,562 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,321 |
py
|
"""
Fritz: SkyPortal API
SkyPortal provides an API to access most of its underlying functionality. To use it, you will need an API token. This can be generated via the web application from your profile page or, if you are an admin, you may use the system provisioned token stored inside of `.tokens.yaml`. ### Accessing the SkyPortal API Once you have a token, you may access SkyPortal programmatically as follows. #### Python ```python import requests token = 'ea70a5f0-b321-43c6-96a1-b2de225e0339' def api(method, endpoint, data=None): headers = {'Authorization': f'token {token}'} response = requests.request(method, endpoint, json=data, headers=headers) return response response = api('GET', 'http://localhost:5000/api/sysinfo') print(f'HTTP code: {response.status_code}, {response.reason}') if response.status_code in (200, 400): print(f'JSON response: {response.json()}') ``` #### Command line (curl) ```shell curl -s -H 'Authorization: token ea70a5f0-b321-43c6-96a1-b2de225e0339' http://localhost:5000/api/sysinfo ``` ### Response In the above examples, the SkyPortal server is located at `http://localhost:5000`. In case of success, the HTTP response is 200: ``` HTTP code: 200, OK JSON response: {'status': 'success', 'data': {}, 'version': '0.9.dev0+git20200819.84c453a'} ``` On failure, it is 400; the JSON response has `status=\"error\"` with the reason for the failure given in `message`: ```js { \"status\": \"error\", \"message\": \"Invalid API endpoint\", \"data\": {}, \"version\": \"0.9.1\" } ``` # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 0.9.dev0+git20201221.76627dd
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.array_of_role_no_i_ds import ArrayOfRoleNoIDs
class TestArrayOfRoleNoIDs(unittest.TestCase):
    """Generated unit-test stubs for the ArrayOfRoleNoIDs model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testArrayOfRoleNoIDs(self):
        """Placeholder test for ArrayOfRoleNoIDs construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = ArrayOfRoleNoIDs()  # noqa: E501
        pass
pass
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
[
"profjsb@gmail.com"
] |
profjsb@gmail.com
|
0686e32a82548111de0af645b2185393ee7e0d2f
|
cdbd21cbd4e63ba09c584afa84d8076eba71bf22
|
/bin/pip3
|
f3a55f041643e4eaf7fbd0a221c0cbac52e933e2
|
[] |
no_license
|
kmohab71/sorting-techniques-Python-
|
030573a1ca04501f1aafafc05b3ca87153b44d9f
|
91bd98b957f5cc929c4e12639b690d7ddad6c8ea
|
refs/heads/master
| 2020-03-12T07:33:15.845341 | 2018-04-21T20:21:25 | 2018-04-21T20:21:25 | 130,508,631 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
#!/Users/khaledmohab/PycharmProjects/heap_lab/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper for `pip3`.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
|
[
"noreply@github.com"
] |
kmohab71.noreply@github.com
|
|
293ff497bc9c02162313472b028ec2ddb6e186bc
|
dd7dc458691dcff1b2493c927acd62695c2187c4
|
/lib/python2.7/site-packages/envisage/ui/workbench/workbench_plugin.py
|
224c2068f00fc03f60552f917b2f9ce3c91fd991
|
[] |
no_license
|
stephenosullivan/science
|
16e0c7fb441af29810cad630e6187961ad57398e
|
164e82df0655337ac4966273d9cc489d002d8987
|
refs/heads/master
| 2021-03-27T09:52:05.330679 | 2015-07-25T04:51:25 | 2015-07-25T04:51:25 | 39,672,995 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,048 |
py
|
""" The Envisage workbench plugin. """
# Enthought library imports.
from envisage.api import ExtensionPoint, Plugin, ServiceOffer
from traits.api import Callable, List
# This module's package.
PKG = '.'.join(__name__.split('.')[:-1])
class WorkbenchPlugin(Plugin):
    """ The Envisage workbench plugin.

    The workbench plugin uses the PyFace workbench to provide the basis of an
    IDE-like user interface. The interface is made up of perspectives, views
    and editors.

    Note that this is not intended to be a 'general-purpose' plugin for user
    interfaces - it provides an IDE-like style and that is all. If your
    application requires another style of interface then write another plugin
    (you can still re-use all the menu, group and action contribution stuff!).

    """
    # The Ids of the extension points that this plugin offers.
    ACTION_SETS              = PKG + '.action_sets'
    PERSPECTIVES             = PKG + '.perspectives'
    PREFERENCES_PAGES        = PKG + '.preferences_pages'
    WORKBENCH_SERVICE_OFFERS = PKG + '.service_offers'
    VIEWS                    = PKG + '.views'
    # The Ids of the extension points that this plugin contributes to.
    PREFERENCES = 'envisage.preferences'
    SERVICE_OFFERS = 'envisage.service_offers'
    #### 'IPlugin' interface ##################################################
    # The plugin's unique identifier.
    id = 'envisage.ui.workbench'
    # The plugin's name (suitable for displaying to the user).
    name = 'Workbench'
    #### Extension points offered by this plugin ##############################
    action_sets = ExtensionPoint(
        List(Callable), id=ACTION_SETS, desc="""

        An action set contains the toobars, menus, groups and actions that you
        would like to add to top-level workbench windows (i.e. the main
        application window). You can create new toolbars, menus and groups
        and/or add to existing ones.

        Each contribution to this extension point must be a factory that
        creates an action set, where 'factory' means any callable with the
        following signature::

          callable(**traits) -> IActionSet

        The easiest way to contribute such a factory is to create a class
        that derives from 'envisage.ui.action.api.ActionSet'.

        """
    )
    perspectives = ExtensionPoint(
        List(Callable), id=PERSPECTIVES, desc="""

        A perspective is simply an arrangment of views around the (optionally
        hidden) editor area.

        Each contribution to this extension point must be a factory that
        creates a perspective, where 'factory' means any callable with the
        following signature::

          callable(**traits) -> IPerspective

        The easiest way to contribute such a factory is to create a class
        that derives from 'pyface.workbench.api.IPerspective'.

        """
    )
    preferences_pages = ExtensionPoint(
        List(Callable), id=PREFERENCES_PAGES, desc="""

        A preferences page appears in the preferences dialog to allow the user
        to manipulate some preference values.

        Each contribution to this extension point must be a factory that
        creates a preferences page, where 'factory' means any callable with the
        following signature::

          callable(**traits) -> IPreferencesPage

        The easiest way to contribute such a factory is to create a class
        that derives from 'apptools.preferences.ui.api.IPreferencesPage'.

        """
    )
    service_offers = ExtensionPoint(
        List(ServiceOffer),
        id   = WORKBENCH_SERVICE_OFFERS,
        desc = """

        Services are simply objects that a plugin wants to make available to
        other plugins. This extension point allows you to offer 'per
        window' services that are created 'on-demand' (where 'on demand' means
        the first time somebody looks up a service of the appropriate
        protocol).
        .

        e.g.

        my_service_offer = ServiceOffer(
            protocol   = 'acme.IMyService',
            factory    = an_object_or_a_callable_that_creates_one,
            properties = {'a dictionary' : 'that is passed to the factory'}
        )

        Any properties specified are passed as keywrod arguments to the
        factory, i.e. the factory signature is::

          callable(**properties)

        """
    )
    views = ExtensionPoint(
        List(Callable), id=VIEWS, desc="""

        A view provides information to the user to support their current
        task. Views can contain anything you like(!) and are arranged around
        the (optionally hidden) editor area. The user can re-arrange views as
        he/she sees fit.

        Each contribution to this extension point must be a factory that
        creates a view, where 'factory' means any callable with the following
        signature::

          callable(**traits) -> IView

        The easiest way to contribute such a factory is to create a class
        that derives from 'pyface.workbench.api.View'.

        It is also common to use a simple function (especially when a view
        is a representation of a service) e.g::

            def foo_view_factory(**traits):
                ' Create a view that is a representation of a service. '
                foo = self.application.get_service('IFoo')

                return FooView(foo=foo, **traits)

        """
    )
    #### Contributions to extension points made by this plugin ################
    my_action_sets = List(contributes_to=ACTION_SETS)
    def _my_action_sets_default(self):
        """ Trait initializer. """
        # NOTE(review): implicit relative import (Python 2 style); on
        # Python 3 this would need to be `from .default_action_set import ...`.
        from default_action_set import DefaultActionSet
        return [DefaultActionSet]
    my_preferences = List(contributes_to=PREFERENCES)
    def _my_preferences_default(self):
        """ Trait initializer. """
        # Default preference values shipped inside this package.
        return ['pkgfile://envisage.ui.workbench/preferences.ini']
    my_preferences_pages = List(contributes_to=PREFERENCES_PAGES)
    def _my_preferences_pages_default(self):
        """ Trait initializer. """
        from workbench_preferences_page import WorkbenchPreferencesPage
        return [WorkbenchPreferencesPage]
    my_service_offers = List(contributes_to=SERVICE_OFFERS)
    def _my_service_offers_default(self):
        """ Trait initializer. """
        preferences_manager_service_offer = ServiceOffer(
            protocol = 'apptools.preferences.ui.preferences_manager'
                       '.PreferencesManager',
            factory  = self._create_preferences_manager_service
        )
        workbench_service_offer = ServiceOffer(
            protocol = 'envisage.ui.workbench.workbench.Workbench',
            factory  = self._create_workbench_service
        )
        return [preferences_manager_service_offer, workbench_service_offer]
    ###########################################################################
    # Private interface.
    ###########################################################################
    def _create_preferences_manager_service(self, **properties):
        """ Factory method for the preferences manager service. """
        from apptools.preferences.ui.api import PreferencesManager
        preferences_manager = PreferencesManager(
            pages=[factory() for factory in self.preferences_pages]
        )
        return preferences_manager
    def _create_workbench_service(self, **properties):
        """ Factory method for the workbench service. """
        # We don't actually create the workbench here, we just return a
        # reference to it.
        #
        # fixme: This guard is really just for testing when we have the
        # workbench plugin as a source egg (i.e. if the egg is on our path
        # then we get the plugin for any egg-based application, even if it is
        # not a workbench application!).
        return getattr(self.application, 'workbench', None)
### EOF ######################################################################
|
[
"osullisg@gmail.com"
] |
osullisg@gmail.com
|
ec8a01cbff8acf166dab0e78c9e78998b27dfe0f
|
cdea5224f020551fa355859fcc2fb897452ea0a9
|
/utils/get_config_params.py
|
1551d30218b7511648dc02dc4c15d9400f338508
|
[
"Apache-2.0"
] |
permissive
|
Delerik/canvas_drawer
|
87855d3a489cf926131930fd335c997e8e045da8
|
93849d5f5086b5c90f36152ca42667208a526ed9
|
refs/heads/main
| 2022-12-27T03:51:02.094042 | 2020-10-12T00:26:44 | 2020-10-12T00:26:44 | 303,217,455 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
from configparser import ConfigParser
from .project_utils import get_root_project_path
def config(section: str) -> dict:
    """Read one section of configuration/params.ini as an option->value dict.

    Args:
        section: name of the ini section to load.

    Returns:
        Mapping of every option in the section to its raw string value.

    Raises:
        Exception: if the section does not exist in the ini file.
    """
    filename = get_root_project_path() + '/configuration/params.ini'
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('Error: Section {0}, in the file {1}, does not exist'.format(section, filename))
    # parser.items(section) already yields (option, value) pairs; the previous
    # index-based loop re-read the list by position for the same result.
    return dict(parser.items(section))
|
[
"julian.cisneros@appgate.com"
] |
julian.cisneros@appgate.com
|
3ced9c0df05456c26131ad411b619344290879f9
|
ce7694243f5cc8485627f828ef8b1fdd2868f1ff
|
/students/drovdahl/lesson06/mailroom_4.py
|
2af00633d4d72da9dcaa35e8217bc11ae99c75d8
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/Sp2018-Accelerated
|
81c1759ab3313aa2f2ddff84f6f49d08e4b3fe29
|
9eb74ab8dbf1f9075afc1037be32c218eb55a811
|
refs/heads/master
| 2021-04-15T14:07:37.943693 | 2018-05-23T21:48:34 | 2018-05-23T21:48:34 | 126,766,533 | 0 | 21 | null | 2018-05-27T03:21:19 | 2018-03-26T03:08:31 |
Python
|
UTF-8
|
Python
| false | false | 7,035 |
py
|
#! /usr/local/bin/python3
'''
simple mail program for non profit
'''
import sys
import os
import pathlib
import datetime
# Seed data: (donor name, list of past donation amounts in dollars).
donors = [('Iron Man', [100000, 50000, 1000]),
          ('Thor', [50, 25, 100]),
          ('Hulk', [500]),
          ('Captain America', [30, 40]),
          ('Nick Fury', [100000, 545, 1000]),
          ('Hawkeye', [75, 50, 20]),
          ('Black Panther', [1000, 900, 50]),
          ('War Machine', [10, 10])
          ]
# Primary lookup structure used throughout the module: name -> donation list.
donors_dict = dict(donors)
def thankyou_menu():
    '''submenu for the THANK YOU function'''
    # Dispatch table: callables run a submenu action; the integer 1 is a
    # sentinel meaning "return to the main menu".
    switch_func_dict = {'1': add_donation_user_input, '2': print_donors_list,
                        'list': print_donors_list, '3': 1, 'quit': 1}
    while True:
        os.system('clear')
        print('''THANK YOU Menu
    Let's record that new donation and draft a THANK YOU message.\n\n''')
        print('\nChoose an action:\n')
        print('1 - Enter the name of a donor\n'
              '2 - Enter "list" to see a list of existing donors\n'
              '3 - Enter "quit" to return to the main menu\n')
        response = input(' >> ')
        if switch_func_dict.get(response) is None:
            os.system('clear')
            print('not a valid repsonse, try again\n')
        elif switch_func_dict.get(response) == 1:
            # Bug fix: was `is 1` — identity comparison with an int literal is
            # implementation-defined and a SyntaxWarning on Python 3.8+.
            os.system('clear')
            print('MAIN Menu')
            return
        else:
            switch_func_dict.get(response)()
def print_donors_list():
    '''Clear the screen, show every donor (one per line) and wait for Enter.'''
    os.system('clear')
    print('Here\'s a list of our generous donors:\n')
    for name in donors_dict:
        print(name)
    input('\n press "Enter" to return to the THANK YOU Menu ')
    return
def add_donation_user_input():
    '''Function to collect user input for donor and donation_amount values'''
    print('\nType the name of an existing or new donor.\n'
          '(type "quit" at any time to return to the main menu)')
    donor = input(' >> ')
    if donor.lower() == 'quit':
        return
    donor_donation = input('What is the donation amount? \n >> ')
    # normalize the donation amount: strip currency symbols and separators
    donor_donation = donor_donation.replace('$', '').replace(',', '')
    try:
        donor_donation = float(donor_donation)
    except ValueError:
        # non-numeric entry: tell the user and bail back to the submenu
        print('\nNot a valid entry. Need to enter a numerical value for the '
              'donation amount\n')
        input(' press "Enter" to return to the THANK YOU Menu ')
        return
    # record the gift, then show the drafted thank-you letter
    add_donation(donor, donor_donation)
    print_thankyou_letter(donor, donor_donation)
    return
def add_donation(donor, donor_donation):
    '''Record a donation, creating the donor's history list on first gift.'''
    if donor not in donors_dict:
        donors_dict[donor] = []
    donors_dict[donor].append(donor_donation)
    return
def retrieve_donation(donor):
    '''Return the donor's donation list, or None for an unknown donor.'''
    return donors_dict.get(donor)
def delete_donor(donor):
    '''removes donor from dictionary'''
    # Returns the removed donor's donation list; KeyError if donor is unknown.
    return donors_dict.pop(donor)
def print_thankyou_letter(donor, donor_donation):
    '''Display the drafted thank-you letter for this donation, then wait.'''
    os.system('clear')
    print('Below is an email tailored to this donor...\n\n')
    print(thankyou_letter(donor, donor_donation))
    input('\n press "Enter" to return to the THANK YOU Menu ')
    return
def thankyou_letter(donor, donor_donation):
    '''Build and return the personalized THANK YOU letter text.'''
    # Each kitten costs $5.00 and a matching partner doubles every donation.
    kittens = donor_donation / 5 * 2
    return (f'''Dear {donor},
    We at the Avengers Fund-a-Kitten Initiative would like to thank you for
    your generous donation of ${donor_donation:,.2f}.\n
    Taking advantage of our kitten matching partner, with these added funds we
    will be able to provide {kittens:,.2f} kitten(s) to well deserving little girls
    all over the world including hard to reach places like Antarctica and
    Tacoma, WA!\n\n
    Sincerely,
    Your Friends at AFAK
    ''')
def report():
    '''
    This report will print a list of donors, total donated, number of
    donations, and average donation amounts as values in each row.
    Using string formatting, the output rows will line up as nice as possible.
    The end result will be tabular (values in each column should align with
    those above and below)
    '''
    os.system('clear')
    # print top row (column headers)
    a = 'Donor Name'
    b = 'Total Given'
    c = 'Num Gifts'
    d = 'Average Gift'
    print(f'{a: <20}|{b:^13}|{c:^13}|{d:^15}')
    # print bar (separator of dashes, reusing the same format widths)
    a = b = c = d = '-'
    print(f'{a:-<20}-{b:-<13}-{c:-<13}-{d:-<15}')
    # print donor specific rows
    for a in donors_dict:
        b = round(sum(donors_dict.get(a)), 2)
        c = len(donors_dict.get(a))
        d = round(b / c, 2)
        print(f'{a: <20} ${b: >12,.2f} {c:^13} ${d: >14,.2f}')
    input('\n\n\n press "Enter" to return to the THANK YOU Menu ')
    os.system('clear')
    print('MAIN Menu')
    return
def letters_to_all(TEST=False):
    '''
    Function to go through all the donors and generate a thank you letter that
    will be written to disk as a text file in the 'letters' directory.
    The letter files will all have unique names and include time/date stamps in
    the file name.

    TEST=True skips the interactive "press Enter" prompt (used by tests).
    '''
    os.system('clear')
    # create 'letters' directory if one does not exist
    pathlib.Path('letters').mkdir(exist_ok=True)
    # set the datetime format variable (appended to each file name)
    dt_format = '.%m-%d-%Y'
    # iterate over donors data and create files for each donor
    for donor in donors_dict:
        # letter is based on the donor's lifetime total
        donor_donations = round(sum(donors_dict.get(donor)), 2)
        letter_text = thankyou_letter(donor, donor_donations)
        # set the file path using pathlib: letters/<name>.<MM-DD-YYYY>.txt
        p = pathlib.Path('letters/' + donor +
                         datetime.datetime.now().strftime(dt_format) + '.txt')
        with open(p, 'w') as outfile:
            outfile.write(letter_text)
    print('All the letters have been composed and can be found in the '
          '\'letters\' directory\n\n')
    if TEST:
        # added to avoid user input IO complications in the testing script
        return
    input('\n\n\n press "Enter" to return to the THANK YOU Menu ')
    os.system('clear')
    print('MAIN Menu')
    return
def quit():
    # Terminate the program.  Note: intentionally shadows the `quit` builtin;
    # the dispatch tables in the menus reference this name.
    sys.exit()
def mainloop():
    '''main menu function'''
    os.system('clear')
    print('''Avengers: Fund-a-Kitten Initiative
    Because every little girl
    Everywhere in the world
    ...deserves a kitten
    Welcome to Mailroom\n\n''')
    # Dispatch table mapping menu responses to their handler functions.
    switch_func_dict = {'1': thankyou_menu, '2': report, '4': quit,
                        'quit': quit, '3': letters_to_all}
    while True:
        print('\nChoose an action:\n')
        print('1 - Send a THANK YOU\n'
              '2 - Create a report\n'
              '3 - Send letters to everyone\n'
              '4 - Quit')
        response = input('\n >> ')
        if switch_func_dict.get(response) is None:
            print('not a valid repsonse, type "1, 2, 3, or 4"\n')
        else:
            switch_func_dict.get(response)()
if __name__ == "__main__":
    # Start the interactive main menu when run as a script.
    mainloop()
|
[
"rdrovdahl@Ryans-MacBook-Pro.local"
] |
rdrovdahl@Ryans-MacBook-Pro.local
|
b5c47ad20845fee240868b9493629d647740c66c
|
f8d129c6a135b0df2ebb63a51f0a0319a0f51110
|
/sandbox/language-translation-demo/utils.py
|
0fa14ff4f68c22fb9b3b85c776c01a4b56ff4197
|
[] |
no_license
|
andressamarcal/Deep-Learning-Nanodegree
|
a82d377756089c6a931a7f156a4576cb4afbd07a
|
a3f1857d29fe65c1ce47cb07119582aad277918f
|
refs/heads/master
| 2020-05-24T01:13:51.311174 | 2017-08-01T15:29:56 | 2017-08-01T15:29:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,838 |
py
|
# -*- coding: utf-8 -*-
import lxml
import lxml.etree
import requests
class PermanentSMSError(Exception):
    """
    Permanent SMS Error

    Raised by AlpaSMSClient for non-retryable API failures (response codes
    listed in AlpaSMSClient.perm_error: invalid key, insufficient credits,
    blocked account).
    """
class TransientSMSError(Exception):
    """
    Transient SMS Error

    Raised by AlpaSMSClient for API error codes that are not listed as
    permanent; the caller may retry the request.
    """
class AlpaSMSClient(object):
    """Minimal HTTP client for the Alpa SMS API (painel.smsalfa.com.br)."""

    # Response codes that signal a non-retryable failure:
    # 403 - Invalid API key
    # 408 - Insufficient credits
    # 414 - Bloqued account
    perm_error = ('403', '408', '414')

    def __init__(self, key):
        self.key = key
        self.url = 'http://painel.smsalfa.com.br/%s'

    def send(self, tel, message, type_send=9):
        """
        Send sms with Alpa SMS Api.

        :param tel: int: national format phone number
        :param message: str: unicode base text
        :return: str: message sent id
        """
        payload = {
            'number': tel,
            'msg': message,
            'type': type_send,
            'key': self.key
        }
        reply = requests.post(self.url % 'send', payload)
        retorno = lxml.etree.fromstring(reply.content).find('retorno')
        sent_id = retorno.attrib.get('id')
        if sent_id is None:
            # No "id" attribute means the API answered with an error code.
            code = retorno.attrib.get('codigo')
            if code in self.perm_error:
                raise PermanentSMSError('Response error code: %s' % code)
            raise TransientSMSError('Response error code: %s' % code)
        return sent_id

    def status(self, sms_id):
        """
        Return sms status by ID.

        :param sms_id: int: SMS Sent ID
        :return: str: Status sent
        """
        query = {
            'id': sms_id,
            'key': self.key,
            'action': 'status'
        }
        reply = requests.post(self.url % 'get', query)
        return lxml.etree.fromstring(reply.content).find('retorno').text
|
[
"rafa.reis.novello@gmail.com"
] |
rafa.reis.novello@gmail.com
|
28d959137d64d0820dbdfa75e9a7fb1599ddf172
|
9aa6a9b601c7e42276b24c04cd23e7ef47a1a796
|
/5.py
|
6ad2364409906fcad99cce5f055015ef530d15d1
|
[] |
no_license
|
davidombosco/EP1
|
536272637a574fc312aa7fca4cc5352cb8bbc115
|
50f604805da416b5ae254e5c7d4ae8a1bcf5ccb7
|
refs/heads/master
| 2020-03-12T18:16:51.869303 | 2018-04-26T19:46:15 | 2018-04-26T19:46:15 | 130,756,421 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,061 |
py
|
# -*- coding: utf-8 -*-
# Inventory-control script.  Persisted layout (dados5.json):
#   estoque = {store: {product: {"quantidade": int, "valor": float}}}
import json

# Load the previously saved stock from disk at start-up.
with open ("dados5.json", "r") as dadosabertos:
    dadosprontos=dadosabertos.read()
estoque=json.loads(dadosprontos)

while True:  # main menu loop; runs until the user picks option 0
    print('\nControle de Estoque:')
    print('\n0 = sair e salvar\n1 = adicionar item\n2 = remover item\n3 = variação de estoque\n4 = imprime estoque\n5 = alterar valor de um produto\n6 = produtos com estoque negativo\n7 = valor em estoque')
    escolha = int(input('Faça sua escolha: '))

    if escolha == 0:
        # Exit the loop; the stock is saved back to disk below.
        print('Até mais')
        break

    elif escolha == 1:
        # Add a product, registering the store first when it is unknown.
        loja = input('Qual sua loja:')
        if loja not in estoque:
            print('Loja não cadastrada')
            cadastrar = int(input('Deseja cadastrar a loja?\n1 = Sim\n2 = Não\n'))
            if cadastrar == 1:  # register the store AND its first product
                produto = input('Nome do produto: ')
                quantidade_inicial = int(input('Quantidade inicial: '))
                if quantidade_inicial < 0:
                    print('A quantidade inicial não pode ser negativa.')
                else:
                    valor_produto = float(input('Valor do produto: '))
                    if valor_produto < 0:
                        print('O valor deve ser positivo')
                    else:
                        estoque[loja]={produto:{"quantidade":quantidade_inicial,"valor":valor_produto}}
        else:  # store already exists: only add a new product
            produto = input('Nome do produto: ')
            if produto not in estoque[loja]:
                quantidade_inicial = int(input('Quantidade inicial: '))
                if quantidade_inicial < 0:
                    print('A quantidade inicial não pode ser negativa.')
                else:
                    valor_produto = float(input('Valor do produto: '))
                    if valor_produto < 0:
                        print('O valor deve ser positivo')
                    else:
                        estoque[loja][produto] = {"quantidade":quantidade_inicial,"valor":valor_produto}
            else:
                print('Produto já cadastrado.')

    elif escolha == 2:
        # Remove a product from a store.
        produto = input('Nome do produto: ')
        loja = input('De qual loja deseja remover: ')
        if loja not in estoque:
            print('Loja não cadastrada')
        else:
            if produto in estoque[loja]:
                del(estoque[loja][produto])
            else:
                print('Elemento não encontrado')

    elif escolha == 3:
        # Apply a (positive or negative) delta to a product's quantity.
        produto = input('Nome do produto: ')
        loja = input('De qual loja deseja alterar estoque? ')
        if loja not in estoque:
            print('Loja não cadastrada')
        else:
            if produto in estoque[loja]:
                alteracao = int(input('Quantidade que irá variar: '))
                estoque[loja][produto]['quantidade'] = (estoque[loja][produto]['quantidade'] + alteracao)
                print('Novo estoque de {0} na {1}: {2}'.format(produto,loja,estoque[loja][produto]["quantidade"]))
            else:
                print('Elemento não encontrado')

    elif escolha == 4:
        # Print one store's stock, or every store when 'todas' is typed.
        loja = input("Deseja imprimir o estoque de qual loja? ")
        if loja == 'todas':
            for loja in estoque:
                print('Estoque {0}:'.format(loja))
                for produto in estoque[loja]:
                    print("{0}:{1}".format(produto,estoque[loja][produto]['quantidade']))
        elif loja in estoque:
            print('Estoque {0}:'.format(loja))
            for produto in estoque[loja]:
                print("{0}: {1}".format(produto,estoque[loja][produto]['quantidade']))
        else:
            print('Loja não cadastrada')

    elif escolha == 5:
        # Change a product's unit price.
        loja = input('Qual a loja que deve ter o valor de um item alterado? ')
        if loja not in estoque:
            print('Loja não cadastrada')
        else:
            produto = input('Nome do produto: ')
            if produto in estoque[loja]:
                print('O atual valor é: {0}'.format(estoque[loja][produto]['valor']))
                novo_valor = float(input('Digite o novo valor do produto: '))
                estoque[loja][produto]['valor'] = novo_valor
                print('{0} agora na {1} custa:R${2:.2f}'.format(produto,loja,novo_valor))
            else:
                print('Produto não encontrado')

    elif escolha == 6:
        # List products with a negative quantity, per store.
        # Bug fix: the dict used to be reset (and the report printed) once per
        # product; accumulate over the whole store and print a single report.
        for loja in estoque:
            produtos_negativos = {}
            for i in estoque[loja]:
                if estoque[loja][i]['quantidade'] < 0:
                    quantidade_negativa=estoque[loja][i]["quantidade"]
                    produtos_negativos[i]= quantidade_negativa
            print('Os produtos com quantidade em estoque negativa da loja {0} são:'.format(loja))
            for produto in produtos_negativos:
                print('{0}:{1}'.format(produto,produtos_negativos[produto]))

    elif escolha == 7:
        # Report the monetary value (quantity * price) held by each store.
        # Bug fix: the accumulator used to be reset inside the product loop,
        # so only the last product was counted; sum the whole store instead.
        for loja in estoque:
            lista_valores = []
            for i in estoque[loja]:
                lista_valores.append(estoque[loja][i]['quantidade'] * estoque[loja][i]['valor'])
            valor_monetario = sum(lista_valores)
            print('O valor em estoque da loja {0}: R${1:.2f}'.format(loja,valor_monetario))

# Persist the (possibly modified) stock back to disk on exit.
with open ("dados5.json","w") as importacao:
    estoque=json.dumps(estoque,sort_keys=True,indent=4)
    importacao.write(estoque)
|
[
"noreply@github.com"
] |
davidombosco.noreply@github.com
|
8c2a06a3a2a23f85178b92df7725bf9d9963c94a
|
880ac811c71a70ab311d8b6ec044b6c70c061bb7
|
/8.py
|
a22e83247b8229d7a061f8527ba353788e6261d2
|
[] |
no_license
|
RaczeQ/AdventOfCode2018
|
e9e6315fb8ef25be866e681ce35f0a12721a2c4d
|
2a2629228a03d0caf3f687d27d684485faee320d
|
refs/heads/master
| 2020-04-08T23:56:27.656881 | 2018-12-08T10:04:09 | 2018-12-08T10:04:09 | 159,846,217 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 347 |
py
|
from utils import read_input, save_result
from _8.logic import one, two
# Input and output paths for Advent of Code day 8.
FILE_NAME = '_8/input.txt'
RESULT_ONE = '_8/one.txt'
RESULT_TWO = '_8/two.txt'
if __name__ == '__main__':
    # Solve both puzzle parts against the same input, saving each answer
    # right after it is computed (part one first, then part two).
    for solver, target in ((one, RESULT_ONE), (two, RESULT_TWO)):
        save_result(target, solver(read_input(FILE_NAME)))
|
[
"kar@dsr.com.pl"
] |
kar@dsr.com.pl
|
a3b8634442e8642b73086964ccbe59c019b77874
|
128812e2b89d06f8da5cf654aaaa9e78a875ced7
|
/model/discriminator.py
|
65478b539e3d7c68428ab526b5e24bdf90bcc3e0
|
[] |
no_license
|
Chenge321/3D-Face-model-reconstruction-combine-with-cycle-GAN
|
929fff69d18da08d087bfd39de75f8692ccb2c1d
|
accf0a85f73c21d41034c0abbb01ddf66d277e31
|
refs/heads/master
| 2023-02-08T14:48:03.096533 | 2021-01-03T03:24:11 | 2021-01-03T03:24:11 | 318,552,223 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
import torch.nn as nn
import torchvision.models as models
class Discriminator(nn.Module):
    """Real/fake discriminator: a pretrained ResNet-18 feature extractor
    followed by a single-logit linear head.

    The ResNet's final fully-connected classification layer is dropped;
    every other stage (up to and including global average pooling) is reused.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        backbone = models.resnet18(pretrained=True)
        stages = (backbone.conv1,
                  backbone.bn1,
                  backbone.relu,
                  backbone.maxpool,
                  backbone.layer1,
                  backbone.layer2,
                  backbone.layer3,
                  backbone.layer4,
                  backbone.avgpool)
        self.resnet = nn.Sequential(*stages)
        # ResNet-18 yields 512 features after pooling -> one logit per sample.
        self.classifier = nn.Linear(512, 1)

    def forward(self, x):
        features = self.resnet(x)
        flattened = features.view(features.shape[0], -1)
        return self.classifier(flattened)
|
[
"noreply@github.com"
] |
Chenge321.noreply@github.com
|
974a21e90468d97356ad39f4202efe6840d95a64
|
a31fafd1110f31a2ba8f3bd6c6ead0b71245f795
|
/boolop/tree/tree.py
|
98eb402dcd8be2fa85ed7bb66e44a024a3b9eae1
|
[] |
no_license
|
shengtanmao/Boolean-Operations
|
0f56d13642e9732be89d9b0c4b3e04b05d25bc5e
|
1ff0e8a4f101516e1b7608e10c4f1c0eb453e0f5
|
refs/heads/master
| 2022-12-14T00:16:30.398804 | 2020-09-14T14:55:12 | 2020-09-14T14:55:12 | 181,906,744 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,492 |
py
|
from .node import Node
from ..basic.segment import Segment
class Tree:
    """Splay tree of comparable values, used as the sweep-line status
    structure for the boolean-operation code.

    Nodes are ``Node`` objects linked via ``val``/``left``/``right``/
    ``parent``.  Every access (find/insert/split) splays the touched node
    to the root; the ``assert self.root==node`` checks verify that
    invariant.  The Bundle/Segment helpers at the bottom adapt the tree to
    the project's geometric types.
    """
    def __init__(self,node):
        # Wrap an existing Node (or None) as the root of this tree.
        self.root=node
    #input: value
    #output: node containing the value (splayed to the root as a side effect)
    def find(self,val):
        if self.root==None:
            raise NameError("empty tree")
        node=self.__findHelper(self.root,val)
        self.__splay(node)
        assert self.root==node
        return node
    #input: root node it is searching in, value
    #output: node in tree containing the value
    def __findHelper(self,node,val):
        # Standard BST descent; raises NameError when the value is absent.
        if val<node.val:
            if node.left==None:
                raise NameError("value not in tree")
            return self.__findHelper(node.left,val)
        elif val>node.val:
            if node.right==None:
                raise NameError("value not in tree")
            return self.__findHelper(node.right,val)
        else:
            return node
    # this is a tree operation, because it may change root, as well as a node.
    # Since rotation is used only in splay, however, it is not required to maintain the root here; splay could reset it.
    def __left_rotate(self, x):
        y = x.right
        x.right = y.left
        if y.left != None:
            y.left.parent = x
        y.parent = x.parent
        if x.parent == None:
            self.root = y
        elif x == x.parent.left:
            x.parent.left = y
        else:
            x.parent.right = y
        y.left = x
        x.parent = y
    # rotate right at node x
    def __right_rotate(self, x):
        y = x.left
        x.left = y.right
        if y.right != None:
            y.right.parent = x
        y.parent = x.parent
        if x.parent == None:
            self.root = y
        elif x == x.parent.right:
            x.parent.right = y
        else:
            x.parent.left = y
        y.right = x
        x.parent = y
    # operations on a tree node
    # Splaying node x move x to the root of the tree
    def __splay(self, x):
        # Classic zig / zig-zig / zig-zag case analysis; loops until x is
        # the root.  Rotations update self.root when x's parent is the root.
        while x.parent != None:
            if x.parent.parent == None:
                if x == x.parent.left:
                    # zig rotation
                    self.__right_rotate(x.parent)
                else:
                    # zag rotation
                    self.__left_rotate(x.parent)
            elif x == x.parent.left and x.parent == x.parent.parent.left:
                # zig-zig rotation
                self.__right_rotate(x.parent.parent)
                self.__right_rotate(x.parent)
            elif x == x.parent.right and x.parent == x.parent.parent.right:
                # zag-zag rotation
                self.__left_rotate(x.parent.parent)
                self.__left_rotate(x.parent)
            elif x == x.parent.right and x.parent == x.parent.parent.left:
                # zig-zag rotation
                self.__left_rotate(x.parent)
                self.__right_rotate(x.parent)
            else:
                # zag-zig rotation
                self.__right_rotate(x.parent)
                self.__left_rotate(x.parent)
        return x # useful for testing...
    # insert a value into the tree
    def insert(self, val):
        # Descend to the insertion point (duplicates go to the right),
        # attach the new node, then splay it to the root.
        y = None
        x = self.root
        while x != None:
            y = x
            if val < x.val:
                x = x.left
            else:
                x = x.right
        node = Node(val)
        node.parent=y
        if y == None:
            self.root = node
        elif node.val < y.val:
            y.left = node
        else:
            y.right = node
        # splay the node
        self.__splay(node)
        assert self.root==node
        return val
    #SM:changed to joinNodes
    #input: node2, everything in self is smaller than the tree of node
    #output: one node that is the merge of them
    def __joinNodes(self,node2):
        # Splay self's maximum to the root (it then has no right child) and
        # hang node2's tree off its right side.
        if self.root==None:
            self.root=node2
            return self.root
        if node2==None:
            return self.root
        node=self.root.findMax()
        self.__splay(node) #needs splaying
        assert self.root==node
        node.right=node2
        node2.parent=node
        return node
    #input: value of node to split at; must be in the tree.
    #output: root nodes of two trees, everything in node1 is less than or equal to node; subtree of rightroot is greater
    #found node is placed at the root of tree, with no right child.
    def __splitNode(self,val):
        node=self.find(val) #find the node and splay to top
        assert self.root==node
        if node.right!=None:
            rightroot=node.right
            rightroot.parent=None
        else:
            rightroot=None
        node.right=None
        return [node,rightroot]
    #input:value
    #output:self if successfully deleted
    def delete(self,val):
        # Split at val (which splays it to the root with no right child),
        # then rejoin the left subtree with the detached right subtree.
        [valnode,rightroot]=self.__splitNode(val)
        if valnode.left != None:
            self.root=valnode.left
            self.root.parent=None
            self.__joinNodes(rightroot)
        elif valnode.left==None and rightroot!=None:
            self.root=rightroot
        elif valnode.left==None and rightroot==None:
            self.root=None
            return self
        return self
    # In-Order traversal
    # Left Subtree -> Node -> Right Subtree
    def inorder(self):
        # NOTE: the local name `list` shadows the builtin within this method.
        list=[]
        self.__in_order_helper(self.root,list)
        return list
    def __in_order_helper(self, node, list):
        if node != None:
            self.__in_order_helper(node.left,list)
            list.append(node.val)
            self.__in_order_helper(node.right,list)
    def plot(self):
        # Delegates plotting to each stored value (assumes values expose
        # a .plot() method, e.g. Segment -- TODO confirm for all callers).
        list=self.inorder()
        for i in range(len(list)):
            list[i].plot()
    #Bundle oriented operations
    #input: flag
    #output: the node containing the bundle containing or directly below the flag
    def findFlag(self,flag):
        node=self.__findFlagHelper(flag,self.root)
        self.__splay(node)
        assert self.root==node
        return node.val
    def __findFlagHelper(self,flag,node):
        # Descend using the bundle ordering: go left when the flag is below
        # this bundle's minimum, right when it is at/above the bundle two
        # links up the `abv` chain, else this node is the answer.
        if flag.cmpSeg(node.val.min)<0:
            return self.__findFlagHelper(flag,node.left)
        elif node.val.abv.abv!=None and flag.cmpSeg(node.val.abv.abv.min)>=0:
            return self.__findFlagHelper(flag,node.right)
        else:
            assert flag.cmpSeg(node.val.min)>=0
            return node
    #Segment oriented operations
    #class method to create a tree for a single segment
    #NOTE(review): defined without self/cls, so call as Tree.fromSeg(seg).
    def fromSeg(seg):
        return Tree(Node(seg))
    #factory: single-segment tree from raw coordinates (call as Tree.fromCoord).
    def fromCoord(px,py,qx,qy,color):
        return Tree(Node(Segment.fromCoord(px,py,qx,qy,color)))
    def contains(self,val):
        # Membership test; note find() splays, so this mutates the tree shape.
        try:
            self.find(val)
            return True
        except NameError:
            return False
    #input: other tree, self<other, no empty trees
    #output: other tree merged into self
    #SM: change other tree to have same value as self
    def joinTrees(self,other):
        self.__joinNodes(other.root)
        return self
    #input: value to make the split
    #output: two trees
    def splitTree(self,val):
        [node1,node2]=self.__splitNode(val)
        if node2==None:
            return [Tree(node1),None]
        return [Tree(node1),Tree(node2)]
|
[
"maoshengtan2011@gmail.com"
] |
maoshengtan2011@gmail.com
|
3cab7754e8b3fccdf95c33f381aace4fd8b901aa
|
24d8ef05ae0ee86f3be4511dd8d52ba021d4edc3
|
/MaximumSumCircularSubarray.py
|
7d2a5c47d22b7791dbded2f4985af859d5330eb9
|
[] |
no_license
|
Sree-vathsan/leetcode
|
ce57d03adcb3bdec910037ff7027bb9f67a4a0f4
|
8ccc9011a0021d423ea63ceae3384481fd814a1a
|
refs/heads/master
| 2021-04-15T02:14:17.658092 | 2020-06-22T13:03:25 | 2020-06-22T13:03:25 | 249,286,122 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,814 |
py
|
"""
Given a circular array C of integers represented by A, find the maximum possible sum of a non-empty subarray of C.
Here, a circular array means the end of the array connects to the beginning of the array. (Formally, C[i] = A[i] when 0 <= i < A.length, and C[i+A.length] = C[i] when i >= 0.)
Also, a subarray may only include each element of the fixed buffer A at most once. (Formally, for a subarray C[i], C[i+1], ..., C[j], there does not exist i <= k1, k2 <= j with k1 % A.length = k2 % A.length.)
Example 1:
Input: [1,-2,3,-2]
Output: 3
Explanation: Subarray [3] has maximum sum 3
Example 2:
Input: [5,-3,5]
Output: 10
Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10
Example 3:
Input: [3,-1,2,-1]
Output: 4
Explanation: Subarray [2,-1,3] has maximum sum 2 + (-1) + 3 = 4
Example 4:
Input: [3,-2,2,-3]
Output: 3
Explanation: Subarray [3] and [3,-2,2] both have maximum sum 3
Example 5:
Input: [-2,-3,-1]
Output: -1
Explanation: Subarray [-1] has maximum sum -1
Note:
-30000 <= A[i] <= 30000
1 <= A.length <= 30000
"""
from math import inf
from typing import List
class Solution:
    """Maximum circular subarray sum (LeetCode 918).

    The circular maximum is either the ordinary (non-wrapping) Kadane
    maximum, or ``total_sum - min_subarray_sum`` for a window that wraps
    around the end of the array.

    Note: requires ``from typing import List`` at module level — the
    annotations are evaluated when the methods are defined.
    """

    def apply_kandane_algorithm(self, arr: List[int]) -> int:
        """Return the maximum sum over all non-empty contiguous subarrays."""
        result, arr_len = -inf, len(arr)
        curr_sum = -inf
        for i in range(0, arr_len):
            # Either extend the best subarray ending at i-1 (if its sum is
            # positive) or restart a fresh subarray at arr[i].
            curr_sum = max(0, curr_sum) + arr[i]
            result = max(result, curr_sum)
        return result

    def maxSubarraySumCircular(self, A: List[int]) -> int:
        """Return the maximum subarray sum where the subarray may wrap."""
        non_circular_max_subarray_sum = self.apply_kandane_algorithm(A)
        # Kadane on the negated array yields -(minimum subarray sum), so
        # total_sum + min_sum == total - min_subarray = best wrapping sum.
        total_sum, min_sum = sum(A), self.apply_kandane_algorithm([-i for i in A])
        # total_sum + min_sum == 0 means the wrapping window would be empty
        # (every element is negative); fall back to the non-wrapping answer.
        return non_circular_max_subarray_sum if total_sum + min_sum == 0 else max(total_sum + min_sum, non_circular_max_subarray_sum)
|
[
"noreply@github.com"
] |
Sree-vathsan.noreply@github.com
|
5aa3773961b4396a82398e8848fb3685a0f7c180
|
b3ebed1d1d2d0b51498a98ba187f8693ed8e049f
|
/COMP9318-Project/helper.py
|
49b7390b63132d849a6fee84fb70e1a794c8fa5a
|
[] |
no_license
|
lindsay-unsw/Data-Mining-and-Warehouse
|
17e68fbedc883a253e52e8b76a13697a08f26222
|
59b408ed9ebc463e35ebbe372ae6e31c74ed1d6c
|
refs/heads/master
| 2022-02-20T16:08:22.773056 | 2019-08-15T08:44:46 | 2019-08-15T08:44:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,196 |
py
|
from sklearn import svm
## Please do not change these functions...
class countcalls(object):
    """Decorator class that counts how many times each wrapped callable
    is invoked.  All wrappers are registered in a class-level map so the
    totals can be queried (``count``) or summed and reset (``counts``)."""

    __instances = {}  # original callable -> its countcalls wrapper

    def __init__(self, f):
        self.__func = f
        self.__ncalls = 0
        countcalls.__instances[f] = self

    def __call__(self, *args, **kwargs):
        # Bump the counter, then delegate to the wrapped callable.
        self.__ncalls += 1
        return self.__func(*args, **kwargs)

    @staticmethod
    def count(f):
        """Return the number of calls made through the wrapper of *f*
        (keyed by the ORIGINAL callable, not the wrapper)."""
        return countcalls.__instances[f].__ncalls

    @staticmethod
    def counts():
        """Return the total calls across all wrappers and reset every
        counter to zero."""
        total = sum(countcalls.count(g) for g in countcalls.__instances)
        for g in countcalls.__instances:
            countcalls.__instances[g].__ncalls = 0
        return total
## Strategy() class provided to facilitate the implementation.
class strategy:
    """Assignment harness: loads the two training corpora from disk and
    enforces the project's constraints (call-counted SVM training with a
    bounded dataset size, and exactly 20 distinct modified tokens per
    record when comparing an original file to a modified one)."""
    ## Read in the required training data...
    def __init__(self):
        # Each line becomes a list of whitespace-separated tokens.
        with open('class-0.txt','r') as class0:
            class_0=[line.strip().split(' ') for line in class0]
        with open('class-1.txt','r') as class1:
            class_1=[line.strip().split(' ') for line in class1]
        self.class0=class_0
        self.class1=class_1
    # NOTE(review): decorated with @countcalls and defined WITHOUT `self`,
    # so this is a call-counted callable attached to the class rather than
    # a bound method; call it as strategy.train_svm(params, X, y).
    @countcalls
    def train_svm(parameters, x_train, y_train):
        ## Populate the parameters...
        gamma=parameters['gamma']
        C=parameters['C']
        kernel=parameters['kernel']
        degree=parameters['degree']
        coef0=parameters['coef0']
        ## Train the classifier...
        clf = svm.SVC(kernel=kernel, C=C, gamma=gamma, degree=degree, coef0=coef0)
        # Guard: the training matrix must stay within the assignment's
        # fixed size budget (rows x features).
        assert x_train.shape[0] <=541 and x_train.shape[1] <= 5720
        clf.fit(x_train, y_train)
        return clf
    ## Function to check the Modification Limits...(Modify EXACTLY 20- DISTINCT TOKENS)
    def check_data(self, original_file, modified_file):
        # Read both files into {line-index: token-list} maps.
        with open(original_file, 'r') as infile:
            data=[line.strip().split(' ') for line in infile]
        Original={}
        for idx in range(len(data)):
            Original[idx] = data[idx]
        with open(modified_file, 'r') as infile:
            data=[line.strip().split(' ') for line in infile]
        Modified={}
        for idx in range(len(data)):
            Modified[idx] = data[idx]
        count_ = 0
        for k in sorted(Original.keys()):
            record=set(Original[k])
            sample=set(Modified[k])
            #print('1: ',len((set(record)-set(sample)))) # added for debug
            #print('2: ',len((set(sample)-set(record)))) # added for debug
            count_ += 1
            if True:
                # The symmetric difference between the original and modified
                # token sets must contain exactly 20 distinct tokens; dump
                # diagnostics before the assert fires when it does not.
                if len((set(record)-set(sample)) | (set(sample)-set(record)))!=20:
                    print('original K:',Original[k])
                    print('modified :',Modified[k])
                    print('Kth line: ',k)
                    print('record - sample \n',set(record) - set(sample))
                    print(len(set(record) - set(sample)))
                    print('sample - record \n',set(sample) - set(record))
                    print('count_: ',count_)
                    #print('good \n', set(record) - set(sample))
                    #print('good \n', set(sample) - set(record))
                assert len((set(record)-set(sample)) | (set(sample)-set(record)))==20
        return True
|
[
"z5143329@student.unsw.edu.au"
] |
z5143329@student.unsw.edu.au
|
fa6733cb185c67c67b988073f556300b9b2f3a8e
|
8f71437a5ec334d869e01b78adeed7d67a9184c5
|
/0x07-python-test_driven_development/3-say_my_name.py
|
ffb297a31e737e8397dfdc7a61c74c9687bd64f9
|
[] |
no_license
|
KFredlund/holbertonschool-higher_level_programming
|
6c16cb909ed1993a2dbe26aa5f207b43a01dfee0
|
18b2489084e7b4377e1d1db539561f7683526eb6
|
refs/heads/master
| 2020-09-29T01:06:05.394859 | 2020-03-29T17:37:42 | 2020-03-29T17:37:42 | 226,909,654 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 612 |
py
|
#!/usr/bin/python3
def say_my_name(first_name, last_name=""):
    """Print 'My name is <first name> <last name>' to stdout.

    Args:
        first_name: first string
        last_name: second string (optional; defaults to an empty string)

    Returns:
        None; the sentence is printed, not returned.

    Raises:
        TypeError: if either argument is not a string

    Doctest Examples:
        see dir: /tests/3-say_my_name.txt
    """
    # isinstance (rather than `type(...) is str`) also accepts str subclasses.
    if not isinstance(first_name, str):
        raise TypeError("first_name must be a string")
    if not isinstance(last_name, str):
        raise TypeError("last_name must be a string")
    print("My name is {} {}".format(first_name, last_name))
|
[
"katihomes@gmail.com"
] |
katihomes@gmail.com
|
bea63d8479eaaba6baada75d8a5eca0a2fbdf7ac
|
41e3f749d61dda95febed9ee9fec82dc4dcfc6fe
|
/binancechain/exceptions.py
|
cb9ed6ef0907fcc5ec115d5444c39dea7dcce670
|
[
"MIT"
] |
permissive
|
lmacken/binance-chain-python
|
e798d1d6910f11889963ee31d89f336e7a078021
|
483e51394ebc9f9998f5248910ac7b7dff7198f9
|
refs/heads/develop
| 2020-05-10T00:02:19.730956 | 2019-05-01T01:51:50 | 2019-05-01T01:51:50 | 181,519,156 | 23 | 10 |
MIT
| 2019-05-01T04:14:40 | 2019-04-15T15:50:50 |
Python
|
UTF-8
|
Python
| false | false | 395 |
py
|
# Copyright 2019, Luke Macken, Kim Bui, and the binance-chain-python contributors
# SPDX-License-Identifier: MIT
import aiohttp
from typing import Optional
class BinanceChainException(Exception):
    """Base exception for Binance Chain API failures.

    Carries the optional ``aiohttp.ClientResponse`` that triggered the
    error so callers can inspect the HTTP status and body.
    """
    def __init__(self, response: Optional[aiohttp.ClientResponse] = None):
        self.response = response
    def __repr__(self):
        return f"<{self.__class__.__name__} {self.response}>"
|
[
"lmacken@users.noreply.github.com"
] |
lmacken@users.noreply.github.com
|
e15921c3602f09639e1a75b780376560ca94e509
|
0dc816af0b9feecc4ba672eca979654caa0c91bc
|
/main/ordinance/views.py
|
a7404d9f30bb4aefab716237a5b288cab1a41885
|
[] |
no_license
|
Stelmaszv/remote-learning
|
b57589ed5bde8387c0d114951b13ad37ebf80f68
|
ae567c473e50826edb98a4b434e63cc446be0852
|
refs/heads/master
| 2022-11-25T17:08:15.658486 | 2020-08-07T14:43:59 | 2020-08-07T14:43:59 | 256,490,629 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,921 |
py
|
from core.baseview import baseCreate,baseListView,baseShowView,baseUpdateView;
from core.decorators import login_required,login_manager,login_educator
from authorization.forms import educator,manager
from .forms import Lesson as LessonForm,TasksSolution,TasksSetRote,AccountForm,DashbordForm
from .models import Lesson,Tasks,Classroom,Dashbord,DashbordType
from authorization.models import Account,AccountType
from authorization.formMenager import passwordGeneartor
from helpel import email
from django.shortcuts import redirect,render
import datetime
class add_Student(baseCreate):
    """Educator-only view that registers a new student account and e-mails
    the generated credentials (currently to a hard-coded address)."""
    template_name = 'ordinance/addperson.html'
    success_url = '/ordinance/myStudents/'
    form=educator
    getObject = Account
    @login_educator
    def get(self, request, *args, **kwargs) ->baseCreate:
        return self.addGet(request)
    def postSave(self, request, *args, **kwargs)-> None:
        # NOTE(review): `latest('id')` assumes the row just created by the
        # base view is still the newest one (race-prone); sibling views use
        # self.item instead -- confirm which is intended.
        item = Account.objects.latest('id')
        item.username = self.form.cleaned_data['first_name'] + ' ' + self.form.cleaned_data['last_name']
        password = passwordGeneartor().setPassword()
        print(password)  # NOTE(review): leaks the plain-text password to stdout
        item.set_password(password)
        item.staff = True
        item.is_student = request.user.is_educator
        item.save()
        email().sent('Dane do konta', 'kotek',['zupartl@johnderasia.com'])
class add_Personel(baseCreate):
    """Manager-only view that registers a new staff account and e-mails
    the generated credentials (currently to a hard-coded address)."""
    template_name = 'ordinance/addperson.html'
    success_url = '/ordinance/myPersonel/'
    form=manager
    @login_manager
    def get(self, request, *args, **kwargs) ->baseCreate:
        return self.addGet(request)
    def postSave(self, request, *args, **kwargs) -> None:
        # NOTE(review): same `latest('id')` race and password-printing
        # concerns as add_Student.postSave.
        item = Account.objects.latest('id')
        item.username = self.form.cleaned_data['first_name'] + ' ' + self.form.cleaned_data['last_name']
        password = passwordGeneartor().setPassword()
        print(password)
        item.set_password(password)
        item.staff = True
        item.save()
        email().sent('Dane do konta', 'kotek', ['zupartl@johnderasia.com'])
class addDashbord(baseCreate):
    """Create a Dashbord entry of the 'normal' type, authored by the
    currently logged-in user."""
    template_name = 'ordinance/addLesson.html'
    success_url = '/'
    form = DashbordForm
    def postSave(self, request, *args, **kwargs) -> None:
        # Tag the freshly created item (self.item, set by the base view)
        # with its author and the 'normal' dashboard type, then persist.
        Type = DashbordType.objects.get(name='normal')
        self.item.author=request.user
        self.item.type=Type
        self.item.save()
class addLesson(baseCreate):
    """Create a lesson: one Tasks row is generated per student in the
    lesson's classroom, and a matching 'lesson' Dashbord entry is posted
    to the student board."""
    template_name = 'ordinance/addLesson.html'
    success_url = '/'
    form=LessonForm
    def get(self, request, *args, **kwargs)->baseCreate:
        # Expose the current user's email to the form before rendering.
        self.form.email=request.user.email
        return self.addGet(request)
    def post(self,request, *args, **kwargs)->baseCreate:
        self.form.email = request.user.email
        print(request)  # NOTE(review): leftover debug print
        return self.addPost(request)
    def postSave(self, request, *args, **kwargs) -> None:
        # Fan out one (unreceived, ungraded) task per student of the class.
        classrom=Classroom.objects.get(name=self.item.classroom).students.all()
        for student in classrom:
            task = Tasks(student=student, data_recived=False,lessons=self.item,rote=0)
            task.save()
            self.item.tasks.add(task)
        self.item.save()
        # Mirror the lesson onto the students' dashboard.
        Type=DashbordType.objects.get(name='lesson')
        place=AccountType.objects.get(name='student')
        dashbord=Dashbord(theme=self.item.theme,description=self.item.description,place=place,lesson=self.item,type=Type,author=request.user)
        dashbord.save()
class myStudents(baseListView):
    """Educator-only list of the students attached to the current
    educator's class, newest last names first."""
    template_name = 'ordinance/myStudents.html'
    @login_educator
    def get(self, request, *args, **kwargs)->baseListView:
        return self.addGet(request)
    def setContext(self, request)->baseListView:
        self.context = {
            'items': Account.objects.filter(is_student__name=request.user.is_educator).order_by('-last_name')
        }
class myLesson(baseListView):
    """List the lessons taught by the current user, most recent first."""
    template_name = 'ordinance/myLessons.html'
    def get(self, request, *args, **kwargs)->baseListView:
        return self.addGet(request)
    def setContext(self, request)->baseListView:
        self.context = {
            'items': Lesson.objects.filter(teacher=request.user).order_by('-data')
        }
class myTask(baseListView):
    """Student task list: shows each lesson of the student's classroom
    annotated with the state of the student's own task."""
    template_name = 'ordinance/myTasks.html'
    def get(self, request, *args, **kwargs)->baseListView:
        return self.addGet(request)
    def setContext(self, request)->baseListView:
        self.context = {
            'items': self.set_Data(self.set_Objects(request),request)
        }
    def set_Data(self,objects,request)->list:
        # Annotate each lesson with this student's task id and state:
        # 'ToAceptRecived' -> 'ConfirmRecived' -> 'rote' (graded).
        for item in objects:
            for task in item.tasks.all():
                if task.student == request.user:
                    item.idAction=task.id
                    item.stan = 'ToAceptRecived'
                    if task.data_recived == True:
                        item.stan = 'ConfirmRecived'
                    if task.rote>0:
                        item.stan = 'rote'
                        item.rote = task.rote
        return objects
    def set_Objects(self,request)->list:
        # Keep only the lessons that belong to the student's classroom.
        lesson = Lesson.objects.all()
        lessonNewArray=[];
        for item in lesson:
            if item.classroom == request.user.is_student:
                lessonNewArray.append(item)
        return lessonNewArray
class sentSolution(baseUpdateView):
    """Let a student upload a solution file for a specific task (the task
    id comes from the URL kwargs)."""
    success_url = '/'
    template_name = 'ordinance/sentSolution.html'
    getObject = Tasks
    form = TasksSolution
    def setContext(self,request, *args, **kwarg)->baseUpdateView:
        self.context={
            'item':Tasks.objects.get(id=self.kwargs.get("id")),
            'form':self.form
        }
class setRote(baseUpdateView):
    """Grade a task: the form sets the mark and postSave timestamps it."""
    success_url = '/'
    template_name = 'ordinance/sentSolution.html'
    getObject = Tasks
    form = TasksSetRote
    def postSave(self, request, *args, **kwargs)-> None:
        # Record when the grade was assigned.
        self.item.rotedata=datetime.datetime.now()
        self.item.save()
class myRotes(baseListView):
    """List the current student's tasks (and hence their grades),
    looked up by the user's e-mail address."""
    getObject = Tasks
    template_name = 'ordinance/myrotes.html'
    def get(self, request, *args, **kwargs)->baseListView:
        return self.addGet(request)
    def setContext(self,request)->baseListView:
        self.context={
            'items':self.get_object(request),
        }
    def get_object(self,request):
        query=self.getObject.objects.filter(student__email=request.user.email)
        return query
class ShowLesson(baseShowView):
    """Teacher view of a single lesson plus the per-student task status."""
    template_name='ordinance/showlesson.html'
    getObject=Lesson
    def setContext(self,request)->baseShowView:
        self.context={
            'context':self.get_object(),
            'students':self.get_students()
        }
    def get_students(self)->list:
        # Derive a display status per task; later assignments override
        # earlier ones (an uploaded file clears the status entirely).
        tasks=self.get_object().tasks.all()
        for task in tasks:
            task.status = 'ToAceptRecived'
            if task.data_recived == True:
                task.status= 'ConfirmRecived'
            if task.taskfile:
                task.status = ''
        return tasks
class sentMess(baseUpdateView):
    """Compose and send an e-mail message to a single Account.

    GET (handled by the base view) renders the message form; POST
    validates it and, on success, sends the mail and redirects back to
    the student list; otherwise the form is re-rendered with errors.
    """
    success_url = '/ordinance/myStudents/'
    template_name = 'ordinance/sentMess.html'
    getObject = Account
    form = AccountForm
    def post(self,request, *args, **kwargs)->baseUpdateView:
        self.setContext(request)
        self.form = self.setform(request)
        if self.form.is_valid():
            # Deliver the composed message to the selected account's address.
            email().sent(self.form.cleaned_data['subject'], self.form.cleaned_data['message'], [self.get_object().email])
            return redirect(self.success_url)
        else:
            # Invalid form: re-render the page with validation errors.
            self.setContext(request)
            return render(request, self.template_name, self.context)
        # (An unreachable trailing `return render(...)` was removed: both
        # branches above already return.)
class passwordReset(baseShowView):
    """Generate a new password for the Account given in the URL kwargs,
    e-mail it to that account, and redirect to the student list."""
    template_name = 'ordinance/showlesson.html'
    success_url = '/ordinance/myStudents/'
    getObject = Account
    def get(self, request, *args, **kwargs)->baseShowView:
        password = passwordGeneartor().setPassword()
        print(password)  # NOTE(review): leaks the plain-text password to stdout
        item = Account.objects.get(id=self.kwargs.get("id"))
        # NOTE(review): the new password is sent in clear text over e-mail.
        mess= 'Email : '+item.email+' hasło: '+password
        email().sent('Nowe hasło', mess, [item.email])
        item.set_password(password)
        item.save()
        return redirect(self.success_url)
class ConfirmRecivedLesson(baseUpdateView):
    """Mark a task as received by the student, then jump back to the
    lesson detail page the task belongs to."""
    getObject = Tasks
    template_name = 'ordinance/showlesson.html'
    def get(self, request, *args, **kwargs)->baseUpdateView:
        id_ = self.kwargs.get("id")
        item=Tasks.objects.get(id=id_)
        item.data_recived=True
        item.save()
        # Redirect to the detail view of the task's parent lesson.
        self.success_url = '/ordinance/ShowLesson/'+str(item.lessons.id)
        return redirect(self.success_url)
class myPersonel(baseListView):
    """Manager-only staff list.

    NOTE(review): the queryset is identical to myStudents (filters on
    is_student / is_educator) -- looks copy-pasted; confirm it is really
    meant to list personnel rather than students.
    """
    template_name = 'ordinance/myPersonel.html'
    @login_manager
    def get(self, request, *args, **kwargs)->baseListView:
        return self.addGet(request)
    def setContext(self, request)->baseListView:
        self.context = {
            'items': Account.objects.filter(is_student__name=request.user.is_educator).order_by('-last_name')
        }
|
[
"stelmaszv@gmail.com"
] |
stelmaszv@gmail.com
|
0a42a8da2771438df95b9dddc11c2092dd852fdd
|
eb7c814bf3356d4a5653eac09ae8f2a353ea2cf9
|
/houzz_el/__manifest__.py
|
2e5dc6bcba408531cf2e77ba1cd8dd1772da0856
|
[] |
no_license
|
qq18436558/el_addons
|
bde3ef5770228ffc74dd921ebf6061738c013250
|
d5813f00fa9941b4677ced133262a5498a9c85a4
|
refs/heads/master
| 2023-03-17T07:45:59.814606 | 2019-03-14T09:41:39 | 2019-03-14T09:41:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
# -*- encoding: utf-8 -*-
# Odoo module manifest for the Houzz integration add-on.
# The dict below is evaluated by Odoo at module discovery time;
# the 'data' files are loaded in list order on install/upgrade.
{
'name': 'Houzz Eileen',
'description': 'Houzz API.',
'author': 'Joe Han',
# Builds on the stock `sale` module only.
'depends': ['sale'],
'application': True,
# Views and wizards first, access-control rules last (order matters).
'data': [
'views/houzz_view.xml',
'views/sale_view.xml',
'views/sale_order_tree.xml',
'wizard/houzz_order_import_view.xml',
'wizard/houzz_stock_view.xml',
'views/houzz_menu.xml',
'security/ir.model.access.csv'
]
}
|
[
"hanbiji@163.com"
] |
hanbiji@163.com
|
b7d924c237780a6793f9e6f29098b5740d905094
|
3943408748d4f94f243dd634b3aeb7d674ddd88c
|
/Day 6.py
|
3bca383d779f64a952d1c0cb689a0287bf396261
|
[] |
no_license
|
parv1512/EDUYEAR-PYTHON-20
|
0bf4ebd784d099786f8305b3b24c5e45eb93c689
|
b9afeff6f695d984859302f0eb11527073927930
|
refs/heads/main
| 2023-04-19T18:20:17.611315 | 2021-05-18T14:54:18 | 2021-05-18T14:54:18 | 360,242,839 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 636 |
py
|
# part - 1 [Give all the index values of vowels.]
first_str = "Python-Programming"
# enumerate() replaces the range(len(...)) indexing anti-pattern; a
# comprehension builds the index list directly instead of append-in-loop.
temp = [index for index, ch in enumerate(first_str) if ch in "aeiou"]
print(temp)

# part - 2 [Reverse the words of a string]
string = 'hello world happy birthday'
split_string = string.split(' ')
reversed_string = reversed(split_string)
final_string = ' '.join(reversed_string)
print(final_string)

# part - 3 [Remove duplicate elemnts without using set()]
test_list = [1, 3, 5, 6, 3, 5, 6, 1, 9, 8, 6, 9]
# Fix: the original used a list comprehension purely for its side effect
# ([temp.append(x) for x in ...]), which builds a throwaway list of Nones.
# A plain loop states the intent and keeps first-seen order.
temp = []
for x in test_list:
    if x not in temp:
        temp.append(x)
print(str(temp))
|
[
"noreply@github.com"
] |
parv1512.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.