hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbef40d1d77a7ea412c5b45aa8b16fa7be4ecbe1 | 23,124 | py | Python | authkit/authenticate/__init__.py | bobrock/AuthKit | ba82501d9dff699be9eef33266aecd03d016cec2 | [
"MIT"
] | null | null | null | authkit/authenticate/__init__.py | bobrock/AuthKit | ba82501d9dff699be9eef33266aecd03d016cec2 | [
"MIT"
] | null | null | null | authkit/authenticate/__init__.py | bobrock/AuthKit | ba82501d9dff699be9eef33266aecd03d016cec2 | [
"MIT"
] | 1 | 2020-06-24T19:20:13.000Z | 2020-06-24T19:20:13.000Z | """Authentication middleware
This module provides one piece of middleware named
``authkit.authenticate.middleware`` which is used to intercept responses with
a specified status code, present a user with a means of authenticating
themselves and handle the sign in process.
Each of the authentication methods supported by the middleware is described in
detail in the main AuthKit manual. The methods include:
* HTTP Basic (``basic``)
* HTTP Digest (``digest``)
* OpenID Passurl (``openid``)
* Form and Cookie (``form``)
* Forward (``forward``)
* Redirect (``redirect``)
The authenticate middleware can be configured directly or by means of a Paste
deploy config file as used by Pylons. It can be used directly like this:
.. code-block:: Python
from authkit.authenticate import middleware, test_app
from paste.httpserver import serve
import sys
app = middleware(
test_app,
enable = True,
method = 'passurl',
cookie_secret='some_secret',
)
serve(app, host='0.0.0.0', port=8000)
"""
import types
import warnings
import logging
import os
import os.path
from paste.util.import_string import eval_import
from multi import MultiHandler, status_checker
from pkg_resources import iter_entry_points, load_entry_point
from paste.deploy.converters import asbool
import paste.httpexceptions
import webob.exc
from authkit.authorize import authorize_request
from authkit.permissions import RemoteUser, no_authkit_users_in_environ, \
AuthKitConfigError
# Main middleware base classes
# Setting up logging
log = logging.getLogger('authkit.authenticate')
def valid_password(environ, username, password):
    """
    Validate ``username``/``password`` for the ``basic`` and ``form``
    authentication methods.

    This implementation is used by default if no other function is
    specified.  It looks up the user management object stored in the WSGI
    environ under the ``authkit.users`` key and returns ``True`` if the
    user exists and has the given password, ``False`` otherwise.  Any
    custom object exposing the same users API works too.

    Raises ``no_authkit_users_in_environ`` when no ``authkit.users``
    object has been placed in the environ by the middleware.
    """
    log.debug("valid_password called. username: %s", username)
    # 'in' instead of dict.has_key(): identical behaviour on Python 2 but
    # also valid on Python 3, where has_key() was removed.
    if 'authkit.users' not in environ:
        raise no_authkit_users_in_environ
    users = environ['authkit.users']
    if not users.user_exists(username):
        return False
    # NOTE(review): existence is checked with the raw username while the
    # password check lower-cases it -- presumably user_exists() is itself
    # case-insensitive; confirm against the users API.
    if users.user_has_password(username.lower(), password):
        return True
    return False
def digest_password(environ, realm, username):
    """
    Companion to ``valid_password()`` for the ``digest`` authentication
    method: look up the stored password for ``username`` via the
    ``authkit.users`` object in the WSGI environ and return the digest
    produced by ``authkit.authenticate.digest.digest_password(realm,
    username, password)``.  The browser-supplied digest is compared
    against this value by the digest middleware.

    Returns ``None`` when the user does not exist; raises
    ``no_authkit_users_in_environ`` when no users object is present.
    """
    log.debug(
        "digest_password called. username: %s, realm: %s", username, realm
    )
    # 'in' instead of dict.has_key(): identical behaviour on Python 2 but
    # also valid on Python 3, where has_key() was removed.
    if 'authkit.users' not in environ:
        raise no_authkit_users_in_environ
    users = environ['authkit.users']
    if users.user_exists(username):
        password = users.user(username)['password']
        return digest.digest_password(realm, username, password)
    # After speaking to Clark Evans who wrote the original code, this is the
    # correct thing: an unknown user yields no digest at all.
    return None
def get_authenticate_function(app, authenticate_conf, format, prefix):
    """
    Set up the users object, wrap ``app`` in middleware that adds the users
    object to the WSGI environ and return the authenticate function used to
    check a password (``basic``) or look up a digest (``digest``).

    Returns an ``(app, function, users)`` tuple.
    """
    function = None
    users = None
    if len(authenticate_conf) < 1:
        raise AuthKitConfigError('Expected at least one authenticate key, not'
            ' %r'%authenticate_conf)
    # list() so the comparison also behaves on Python 3 where keys() is a view
    if list(authenticate_conf.keys()) == ['function']:
        # An explicit authenticate function was configured; import it if given
        # as a dotted string.
        function = authenticate_conf['function']
        if isinstance(function, (str, unicode)):
            function = eval_import(function)
    else:
        user_conf = strip_base(authenticate_conf, 'user.')
        if not user_conf:
            raise AuthKitConfigError('No authenticate function or users specified')
        else:
            if 'encrypt' in user_conf:
                if format == 'digest':
                    raise AuthKitConfigError('Encryption cannot be used with '
                        'digest authentication because the server needs to '
                        'know the password to generate the digest, try basic '
                        'or form and cookie authentication instead')
                # BUG FIX: the encryption function was previously bound to
                # ``enc_func`` and never to ``encrypt``, so ``encrypt`` was
                # unbound (NameError) when building the users object below.
                encrypt = eval_import(user_conf['encrypt'])
                secret = user_conf.get('encrypt.secret','')  # NOTE(review): currently unused
            else:
                encrypt = None
            # Default users implementation; may be overridden by the
            # ``user.type`` option (object or dotted import string).
            user_object = 'authkit.users.UsersFromString'
            if 'type' in user_conf:
                user_object = user_conf['type']
            if isinstance(user_object, (str, unicode)):
                user_object = eval_import(user_object)
            if not hasattr(user_object, "api_version"):
                # Pre-0.4 users API: instantiate directly from the data string.
                users = user_object(user_conf['data'], encrypt)
                app = AddToEnviron(app, 'authkit.users', users)
                log.debug("authkit.users added to environ")
            elif user_object.api_version == 0.4:
                app = AddUsersObjectToEnviron(
                    app,
                    'authkit.users',
                    user_object,
                    encrypt=encrypt,
                    data=user_conf.get('data'),
                )
                log.debug("Setting up authkit.users middleware")
            else:
                # BUG FIX: was ``users.api_version`` -- ``users`` is still
                # None on this branch, which raised AttributeError instead of
                # the intended message.
                raise Exception(
                    'Unknown API version %s for user management API'%(
                        user_object.api_version,
                    )
                )
        # No explicit function was configured, so choose the built-in one
        # matching the authentication format.
        if format == 'basic':
            function = valid_password
            log.debug("valid_password chosen %r", function)
        elif format == 'digest':
            log.debug("digest_password chosen %r", function)
            function = digest_password
        else:
            raise Exception('Invalid format for authenticate function %r'
                % format)
    return app, function, users
def get_template(template_conf, prefix):
    """
    Parse a template from one of the available template options:

    ``string``
        The template as a string
    ``file``
        A file containing the template
    ``obj``
        A paste eval_import string or callable which returns a string

    e.g. ``authkit.form.template.string`` / ``.file`` / ``.obj``.

    If the configured template resolves to a plain string, a zero-argument
    ``render_template`` callable returning that string is returned;
    otherwise the imported object itself is returned.
    """
    template = None
    if len(template_conf) != 1:
        raise AuthKitConfigError('Expected one template entry, not %r' %
            (', '.join(template_conf.keys())))
    # Take the single option name once; list() keeps this working on
    # Python 3 as well, where dict.keys() is a non-indexable view.
    option = list(template_conf.keys())[0]
    if option not in ['string', 'file', 'obj']:
        raise AuthKitConfigError("Template option can only be 'string', 'file'"
            " or 'obj'")
    if option == 'string':
        template = template_conf['string']
    elif option == 'file':
        if not os.path.exists(template_conf['file']):
            raise AuthKitConfigError('No such file %r exists. It was specified'
                ' by config option %r' %
                (template_conf['file'], prefix+'file'))
        fp = open(template_conf['file'], 'r')
        try:
            template = fp.read()
        finally:
            # Close even if read() fails (the original leaked on error).
            fp.close()
        if not template:
            raise AuthKitConfigError('No data in template file %s specified by'
                ' config option %r' %
                (template_conf['file'], prefix+'file'))
    else:  # option == 'obj'
        template = eval_import(template_conf['obj'])
        if not template:
            raise AuthKitConfigError('No data in template obj %s specified by '
                'config option %r' %
                (template_conf['obj'], prefix+'obj'))
    if not template:
        raise AuthKitConfigError("The template loaded did not contain any data")
    try:
        string_types = (str, unicode)
    except NameError:  # Python 3 -- no separate unicode type
        string_types = (str,)
    if isinstance(template, string_types):
        # BUG FIX: the original returned the undefined name
        # ``render_template`` (NameError for every string template); wrap
        # the string in a renderer callable instead.
        def render_template():
            return template
        return render_template
    return template
#
# Main middleware creator
#
def get_methods():
    """Return a mapping of entry point name -> entry point for every
    handler registered in the ``authkit.method`` group."""
    return dict(
        (entry_point.name, entry_point)
        for entry_point in iter_entry_points(group='authkit.method', name=None)
    )
def load_method(name, from_these=None):
    """Load the named authkit method, either from the supplied mapping of
    entry points or straight from the installed AuthKit distribution."""
    if not from_these:
        return load_entry_point('AuthKit','authkit.method',name)
    return from_these[name].load()
def load_config(options, app_conf, prefix):
    """
    Build the merged AuthKit option dictionary: take every ``app_conf`` key
    starting with ``prefix`` (with the prefix stripped), then apply the
    directly specified ``options`` on top.  Directly specified option names
    use ``_`` where config-file names use ``.``, so keys are converted
    before being merged, and a warning is emitted whenever a config-file
    value is overridden.
    """
    merged = strip_base(app_conf, prefix)
    # Now override the config-file options with the manually specified ones
    for key, value in options.items():
        # Direct options use ``cookie_name`` style for ``cookie.name``.
        converted_key = key.replace('_', '.')
        if converted_key in merged:
            # BUG FIX: the original tested the unconverted key, referenced
            # an undefined ``auth_conf_options`` name, and applied ``%`` to
            # only half of a ``+``-joined format string, so this warning
            # either never fired or raised a TypeError.
            warnings.warn(
                'Key %s with value %r set in the config file is being '
                'replaced with value %r set in the application' % (
                    converted_key,
                    merged[converted_key],
                    value,
                )
            )
        merged[converted_key] = value
    return merged
def middleware(app, app_conf=None, global_conf=None, prefix='authkit.',
    handle_httpexception=True, middleware=None, **options):
    """
    This function sets up the AuthKit authenticate middleware and its use and
    options are described in detail in the AuthKit manual.
    The function takes the following arguments and returns a WSGI application
    wrapped in the appropriate AuthKit authentication middleware based on the
    options specified:
    ``app``
        The WSGI application the authenticate middleware should wrap
    ``app_conf``
        A paste deploy ``app_conf`` dictionary to be used to setup the
        middleware
    ``global_conf``
        A paste deploy ``global_conf`` dictionary
    ``prefix``
        The prefix which all authkit related options in the config file will
        have prefixed to their names. This defaults to ``authkit.`` and
        shouldn't normally need overriding.
    ``middleware``
        A make_middleware function which should be called directly instead of
        loading and calling a function based on the method name. If this is
        set then ``authkit.setup.method`` should not be set.
    ``**options``
        Any AuthKit options which are setup directly in Python code. If
        specified, these options will override any options specified in a
        config file.
    All option names specified in the config file will have their prefix
    removed and any ``.`` characters replaced by ``_`` before the options
    specified by ``options`` are merged in. This means that the option
    ``authkit.cookie.name`` specified in a config file sets the same options
    as ``cookie_name`` specified directly as an option.
    """
    if handle_httpexception:
        app = HTTPExceptionHandler(app)
    # Configure the config files
    if global_conf is None:
        global_conf = {}
    if app_conf is None:
        app_conf = {}
    if not isinstance(app_conf, dict):
        raise AuthKitConfigError(
            "Expected app_conf to be paste deploy app_conf dictionary "
            "from not %r" % app_conf
        )
    # Merge config file and options
    available_methods = get_methods()
    all_conf = load_config(options, app_conf, prefix)
    # ``middleware`` and ``setup.method`` are mutually exclusive ways of
    # choosing the authentication implementation.
    if middleware is not None and all_conf.has_key('setup.method'):
        raise AuthKitConfigError(
            'You cannot specify a middleware function '
            'and an authkit.setup.method'
        )
    if not middleware and not all_conf.has_key('setup.method'):
        raise AuthKitConfigError('No authkit.setup.method was specified')
    # Add the configuration to the environment
    enable_ = asbool(all_conf.get('setup.enable', True))
    all_conf['setup.enable'] = enable_
    app = AddToEnviron(app, 'authkit.config', all_conf)
    # ``setup.fakeuser`` forces REMOTE_USER for testing purposes.
    if all_conf.has_key('setup.fakeuser'):
        app = AddToEnviron(app, 'REMOTE_USER', all_conf['setup.fakeuser'])
    # Check to see if middleware is disabled
    if enable_ == False:
        warnings.warn("AuthKit middleware has been turned off by the config "
            "option authkit.setup.enable")
        return app
    # Status Checking/Changing Middleware: the list of response status codes
    # the authenticate middleware should intercept (default just 401).
    intercept = [str(x).strip() for x in \
        all_conf.get('setup.intercept','401').split(',')]
    if not '401' in intercept:
        warnings.warn(
            "AuthKit is configured via the authkit.setup.intercept option not "
            "to intercept 401 responses so the authentication middleware will "
            "not be triggered even if a 401 Unauthenticated response is "
            "returned.")
    if middleware:
        # A make_middleware callable was supplied directly; call it as-is.
        prefix_ = prefix
        app = middleware(
            app,
            auth_conf=all_conf,
            app_conf=app_conf,
            global_conf=global_conf,
            prefix=prefix_,
        )
    else:
        # Load and apply each configured method (comma-separated list) via
        # its ``authkit.method`` entry point, innermost method first.
        methods = [method.strip() for method in all_conf['setup.method'].split(',')]
        log.debug("Trying to load the following methods: %r", methods)
        for method in methods:
            if method in ['setup','config']:
                raise AuthKitConfigError("The name %s is reserved cannot be used "
                    "as a method name" % method)
            if not available_methods.has_key(method):
                raise AuthKitConfigError(
                    'The authkit method %r is not available. The available methods '
                    'are %s and %s'%(
                        all_conf['setup.method'],
                        ', '.join(available_methods.keys()[:-1]),
                        available_methods.keys()[-1],
                    )
                )
            prefix_ = prefix+method+'.'
            # Each method only sees the options namespaced under its name.
            auth_conf = strip_base(all_conf, method+'.')
            app = available_methods[method].load()(
                app,
                auth_conf=auth_conf,
                app_conf=app_conf,
                global_conf=global_conf,
                prefix=prefix_,
            )
    # Expose the effective configuration and intercept list to the wrapped
    # application via the WSGI environ.
    app = AddDictToEnviron(
        app,
        {
            'authkit.config':strip_base(all_conf, 'config.'),
            'authkit.intercept':intercept,
            'authkit.authenticate': True,
        }
    )
    return app
def sample_app(environ, start_response):
    """Demonstration WSGI application for the authenticate middleware.

    ``/private`` triggers ``authorize_request`` (and therefore a 401 that
    the middleware intercepts); ``/signout`` reports whether the user was
    signed in (useful with ``authkit.cookie.signout = /signout``);
    ``/signin`` is where the forward method would display its form; any
    other path dumps the WSGI environ.
    """
    path = environ['PATH_INFO']
    if path == '/private':
        authorize_request(environ, RemoteUser())
    plain_text = [('Content-type', 'text/plain; charset=UTF-8')]
    if path == '/signout':
        start_response('200 OK', plain_text)
        if environ.has_key('REMOTE_USER'):
            return ["Signed Out"]
        return ["Not signed in"]
    if path == '/signin':
        start_response('200 OK', plain_text)
        return ["Your application would display a \nsign in form here."]
    start_response('200 OK', plain_text)
    body = ['You Have Access To This Page.\n\nHere is the environment...\n\n']
    body.extend('%s: %s\n'%(k,v) for k, v in environ.items())
    return body
| 38.604341 | 84 | 0.625195 |
bbefdf91c1e6ecf066af1879e3918f12b778aa84 | 11,398 | py | Python | options_chain_pull.py | anupamsharma01/python | f415aa663c9e83ff8ab615da93a5a71ec877834b | [
"blessing"
] | 2 | 2020-12-25T22:30:52.000Z | 2021-11-26T14:08:12.000Z | options_chain_pull.py | anupamsharma01/python_options_trade | f415aa663c9e83ff8ab615da93a5a71ec877834b | [
"blessing"
] | null | null | null | options_chain_pull.py | anupamsharma01/python_options_trade | f415aa663c9e83ff8ab615da93a5a71ec877834b | [
"blessing"
] | 3 | 2020-04-10T15:00:10.000Z | 2021-08-19T21:20:19.000Z | from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
import requests
import ssl
import sys
import tdameritrade.auth #added as40183
import urllib
import urllib3 #as40183
from sys import argv
import pymysql.cursors
import datetime
import dateutil.relativedelta
import calendar
import time
import json
import ast
import pandas
import sqlite3
import string
import xlwt
import openpyxl
# TD Ameritrade API key.  NOTE(review): a hard-coded credential checked into
# source control -- should come from an environment variable or config file.
KEY = 'STOCKTIPS'
# Arguments: input ticker list, report output file and this script's path.
in_file = r'C:\Anupam\market\stock_options_api-master\trading_api\tdameritrade\my_programs\data\program_in.txt'
out_file=r'C:\Anupam\market\stock_options_api-master\trading_api\tdameritrade\my_programs\data\program_out.txt'
script='C:/Anupam/market/stock_options_api-master/trading_api/tdameritrade/my_programs/options_chain_pull.py'
debug = 'true'
# Read the equity list (one "TICKER,market_time" entry per line), dropping
# the trailing newlines.  NOTE(review): f_in is never closed.
f_in = open(in_file)
equity_list = f_in.readlines()
equity_list = [l.replace('\n','') for l in equity_list]
# Text report; stays open for the whole run and is written as we go.
f_out = open(out_file,'w')
print ('EQUITY | CMP | 52WkRange', file=f_out)
#sqlite3 connection
# NOTE(review): the path relies on '\A', '\T' etc. not being escape
# sequences; a raw string would be safer.
connection = sqlite3.connect('C:\Anupam\Technical\sqlite\db\mydb.db')
cursor = connection.cursor()
# Working table: one row per qualifying put contract, rebuilt on every run
# (created if missing, then emptied).
create_sql = """CREATE TABLE IF NOT EXISTS chain (
    equity text NOT NULL,
    symbol text NOT NULL,
    cmp real NOT NULL, --added from stocks
    _52WkRange text NOT NULL, --added from stocks
    strikePrice real NOT NULL,
    last real NOT NULL,
    bid real NOT NULL,
    ask real NOT NULL,
    bidSize real NOT NULL,
    askSize real NOT NULL,
    totalVolume real NOT NULL,
    volatility real NOT NULL,
    putCall text NOT NULL,
    inTheMoney text NOT NULL,
    daysToExpiration int NOT NULL,
    timeValue real NOT NULL,
    theoreticalVolatility real NOT NULL
);"""
drop_sql = "DROP TABLE CHAIN"
select_sql = "SELECT * FROM CHAIN"
delete_sql = "DELETE FROM CHAIN"
if (debug == 'true'):
    print ('create_sql==',create_sql)
    print ('delete_sql==',delete_sql)
#cursor.execute(drop_sql)
cursor.execute(create_sql)
cursor.execute(delete_sql)
connection.commit()
# Sanity check: table should now be empty.
cursor.execute(select_sql)
row=cursor.fetchall()
print (row)
# Declare
#start = datetime.now()
args_list = []
count = str(250)
myFormat = "%Y-%m-%d"
# Option-expiration window: nominally today through the next Friday...
today = datetime.date.today()
rd = dateutil.relativedelta.relativedelta(days=1, weekday=dateutil.relativedelta.FR)
next_friday = today + rd
if (debug == 'true'):
    print ('today=',today)
    print('next_friday=',str(next_friday))
#debug: Remove comment to use expiration of a future date
# NOTE(review): this override is active, so next_friday is always
# today + 17 days regardless of the relativedelta computed above.
next_friday=today+datetime.timedelta(days=17)
print('next_friday=', str(next_friday))
#debug starts
#equity='AAPL'
# Strike count requested from the chains endpoint (overrides str(250) above).
count=40
start_date=next_friday
#active_day variables start - syncup from excel_pull
#CUSTOMIZATION BLOCK starts
# NOTE(review): debug is switched off here, so the debug prints above ran
# but all later debug prints are skipped.
debug='false'
skip_days=0 #set to 0 if placing order today; update to 1 if need for tomorrow+day-after-tomorrow
#CUSTOMIZATION BLOCK ends
# Roll weekend dates forward to the next trading day, then compute the
# following trading day (skipping the weekend after Thursday/Friday).
curr_date = datetime.date.today() + datetime.timedelta(days=skip_days)
if (curr_date.isoweekday() == 6):
    curr_date = curr_date + datetime.timedelta(days=2)
elif (curr_date.isoweekday() == 7):
    curr_date = curr_date + datetime.timedelta(days=1)
if curr_date.isoweekday() in set((5, 6)):
    next_date = curr_date + datetime.timedelta(days=8 - curr_date.isoweekday())
else:
    next_date = curr_date + datetime.timedelta(days=1)
print (curr_date, calendar.day_name[curr_date.weekday()], curr_date.isoweekday())
print (next_date, calendar.day_name[next_date.weekday()], next_date.isoweekday())
# Weekday names select the worksheet ("Monday-Tuesday" etc.) written later.
active_day_today = calendar.day_name[curr_date.weekday()]
active_day_tomorrow = calendar.day_name[next_date.weekday()]
print (active_day_today, active_day_tomorrow)
#active_day variables end
# for NEXT WEEK FRIDAY DEBUG only
#next_friday = next_friday + datetime.timedelta(days=7)
#start_date = start_date + datetime.timedelta(days=7)
#print("NEXT WEEK next_friday-start_date", next_friday, start_date)
# END OF NEXT WEEK DEBUG
# For each ticker: fetch the quote, then the OTM put chain for the chosen
# expiration window, and insert qualifying strikes into the sqlite table.
for equity in equity_list:
    #EQUITY STOCK CODE
    # Throttle to stay under the TD Ameritrade unauthenticated rate limit.
    time.sleep(1.01)
    equity, mkt_time = equity.split(",")
    equity = equity.strip()
    print('equity=', equity)
    start_equity = datetime.datetime.now()
    url = 'https://api.tdameritrade.com/v1/marketdata/'+equity+'/quotes?apikey='+KEY
    #url1 = 'https://api.tdameritrade.com/v1/marketdata/AAPL/quotes?apikey=STOCKTIPS'
    r = requests.get(url)
    payload = r.json()
    if (debug=='true'):
        print(url)
        print ('r=',r)
        print ('r.text=',r.text)
        print ('payload=',payload)
    # Re-bind equity to the symbol reported by the API (presumably identical
    # to the requested ticker -- TODO confirm) and pull the quote fields.
    equity = payload[equity]['symbol']
    cmp = payload[equity]['regularMarketLastPrice'] #lastPrice
    _52WkLow = round(payload[equity]['52WkLow'])
    _52WkHigh = round(payload[equity]['52WkHigh'])
    if (debug=='true'):
        print ('equity=',equity)
        print ('cmp=',cmp)
    print ('EQUITY | CMP | 52WkRange', file=f_out)
    print (equity, '|', cmp, '|', _52WkLow, '-', _52WkHigh, file=f_out)
    #OPTION CHAIN CODE
    url = 'https://api.tdameritrade.com/v1/marketdata/chains?apikey=' + KEY + \
        '&symbol=' + equity + '&contractType=' + 'PUT' + '&range=OTM' + '&fromDate=' + \
        str(start_date) + '&toDate=' + str(next_friday) + '&strikeCount=' + str(count) # + '&strike<170.0'
    r = requests.get(url) # <Response [200]>
    payload = r.json()
    if (debug == 'true'):
        print('URL==', url)
        print(r.text)
        print(payload)
    symbol = payload['symbol']
    # Get Puts
    # putExpDateMap is keyed "YYYY-MM-DD:days"; each value maps strike ->
    # list of contract dicts.
    for keyy, valuee in payload["putExpDateMap"].items():
        d = datetime.datetime.strptime(keyy, "%Y-%m-%d:%f")
        ex_date = d.strftime(myFormat)
        for key, value in valuee.items():
            for v in value:
                args = [ v['symbol'], payload["symbol"], v['strikePrice'], v['last'], v['bid'], v['ask'], v['bidSize'], v['askSize'], v['totalVolume'], v['volatility'], v['putCall'], ex_date, v['inTheMoney'], v['daysToExpiration'], v['timeValue'], v['theoreticalVolatility'] ]
                if (debug == 'true'):
                    print (v['strikePrice'] ,'CMP=', float(cmp))
                # Keep only strikes below the current market price.
                if (v['strikePrice'] < float(cmp)):
                    args_list.append(args)
                    if (debug == 'true'):
                        print ('args_list=',args_list)
                    # NOTE(review): the INSERT is built by string
                    # concatenation rather than parameter binding -- fragile
                    # and injection-prone if symbols ever contain quotes.
                    insert_sql = "INSERT INTO CHAIN (" \
                        + " equity, symbol, cmp, _52WkRange, strikePrice, last, bid, ask, bidSize, askSize, totalVolume, volatility, putCall, inTheMoney, daysToExpiration, timeValue, theoreticalVolatility " \
                        + ") values ('" \
                        + payload['symbol'] + "','" \
                        + v['symbol'] + "'," \
                        + str(cmp) + "," \
                        + str("'" + str(_52WkLow) + "-" + str(_52WkHigh)) + "'" + "," \
                        + str(v['strikePrice']) + "," \
                        + str(v['last']) + "," \
                        + str(v['bid']) + "," \
                        + str(v['ask']) + "," \
                        + str(v['bidSize']) + "," \
                        + str(v['askSize']) + "," \
                        + str(v['totalVolume']) + "," \
                        + str(v['volatility']) + ",'" \
                        + str(v['putCall']) + "','" \
                        + str(v['inTheMoney']) + "'," \
                        + str(v['daysToExpiration']) + "," \
                        + str(v['timeValue']) + "," \
                        + str(v['theoreticalVolatility']) \
                        + ")"
                    if (debug == 'true'):
                        print ('insert_sql==',insert_sql)
                    cursor.execute(insert_sql)
                    connection.commit()
# FINAL RESULT SQLs
# Open the consolidated workbook and select the sheet for the active
# trading-day pair (e.g. "Monday-Tuesday").
wbkName_out = r'C:\Anupam\market\consolidated_excel_data.xlsx'
wbk_out = openpyxl.load_workbook(wbkName_out)
wks_out = wbk_out[active_day_today+'-'+active_day_tomorrow]
#WRITE OUTPUT TO EXCEL
# NOTE(review): select_sql1 is only printed, never executed (the chain
# table has no market_time column).
select_sql1 = "select distinct equity, market_time from chain order by equity;"
print ('select_sql1=',select_sql1)
print ("-----------------", file=f_out)
# Per-equity quote summary -> columns B (52-week range) and E (CMP).
select_sql2 = "select distinct equity, cmp, _52WkRange from chain order by equity;"
print ('select_sql2=',select_sql2)
cursor.execute(select_sql2)
rows = cursor.fetchall()
idx=2
#wks_out.cell(row=1, column=3).value = " ".join(["EQUITY" , " | " , "CMP" , "|" , "52WkRange"])
wks_out.cell(row=1, column=2).value = "52WkRange"
wks_out.cell(row=1, column=5).value = "CMP"
for row in rows:
    if (debug == 'true'):
        print('select_sql2:', row[0], "|" ,row[1], "|", row[2])
    #wks_out.cell(row=idx, column=3).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2])])
    wks_out.cell(row=idx, column=2).value = str(row[2])
    wks_out.cell(row=idx, column=5).value = row[1]
    idx += 1
if (debug == 'true'):
    print ('select_sql2:idx=',idx)
print ("-----------------", file=f_out)
# Highest strike per equity with its bid and premium percentage ->
# columns C, F, G, H.
select_sql3 = "select equity, strikeprice, bid, round(bid*100/strikeprice,2) prem_per from chain " + \
    "where equity||strikeprice in (select equity||max(strikeprice) from chain group by equity) order by equity;"
print ('select_sql3=',select_sql3)
cursor.execute(select_sql3)
rows = cursor.fetchall()
idx=2
#wks_out.cell(row=1, column=4).value = " ".join(["EQUITY" , " | " , "STRIKEPRICE" , "|" , "BID", "|", "PREM_PCT"])
wks_out.cell(row=1, column=3).value = "EQUITY"
wks_out.cell(row=1, column=6).value = "STRIKEPRICE"
wks_out.cell(row=1, column=7).value = "BID"
wks_out.cell(row=1, column=8).value = "PREM_PCT"
for row in rows:
    print(row[0], "|" ,row[1], "|", row[2], "|", row[3])
    #wks_out.cell(row=idx, column=4).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2]), "|" , str(row[3])])
    wks_out.cell(row=idx, column=3).value = str(row[0])
    wks_out.cell(row=idx, column=6).value = row[1]
    wks_out.cell(row=idx, column=7).value = row[2]
    wks_out.cell(row=idx, column=8).value = row[3]
    idx += 1
if (debug == 'true'):
    print ('select_sql3:idx=',idx)
print ("-----------------", file=f_out)
# Strikes in the 5-12% / 14-20% below-market bands with a meaningful bid ->
# pipe-joined summary in column I.
select_sql4 = "select equity, strikeprice, round(((cmp-strikeprice)*-100/cmp),1) prc_diff, bid, round(bid*100/strikeprice,1) prem_per from chain a " + \
    "where bid>=0.05 and (prc_diff <=-5 and prc_diff >= -12) or (prc_diff <= -14 and prc_diff >= -20) " + \
    "order by equity, prc_diff;"
print ('select_sql4=',select_sql4)
cursor.execute(select_sql4)
rows = cursor.fetchall()
idx=2
wks_out.cell(row=1, column=9).value = " ".join(["EQUITY" , " | " , "STRIKEPRICE" , "|", "PCT_DIFF", "|" , "BID", "|", "PREM_PCT"])
prev_eq = None
for row in rows:
    new_eq = row[0]
    # BUG FIX: the original assigned prev_eq and new_eq from the same row in
    # the same iteration, so the "---" separator between equities never
    # printed; track the previous iteration's equity instead.
    if (prev_eq is not None and prev_eq != new_eq):
        print ("---", file=f_out)
    if (debug == 'true'):
        print(row[0], "|" ,row[1], "|", row[2], "|", row[3], "|", row[4], file=f_out)
    wks_out.cell(row=idx, column=9).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2]), "|" , str(row[3]), "|" , str(row[4])])
    idx += 1
    if (debug == 'true'):
        print('idx=', idx)
    prev_eq = new_eq
wbk_out.save(wbkName_out)
# BUG FIX: was ``wbk_out.close`` -- a bare attribute access that never
# actually called close().
wbk_out.close()
| 37.993333 | 277 | 0.5887 |
bbf00ed1d2c63a8cbd6917e7f62b070f2c550c40 | 4,492 | py | Python | src/main.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 53 | 2018-10-28T20:07:16.000Z | 2021-12-17T02:25:57.000Z | src/main.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 3 | 2019-01-07T14:01:39.000Z | 2019-05-07T12:01:44.000Z | src/main.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 9 | 2018-10-28T22:31:29.000Z | 2021-10-14T02:54:27.000Z | import os
import time
import datetime
import ref
import torch
import torch.utils.data
from opts import opts
from model.Pose3D import Pose3D
from datahelpers.dataloaders.fusedDataLoader import FusionDataset
from datahelpers.dataloaders.h36mLoader import h36m
from datahelpers.dataloaders.mpiiLoader import mpii
from datahelpers.dataloaders.posetrackLoader import posetrack
from utils.utils import adjust_learning_rate
from utils.logger import Logger
from train import train,val
from inflateScript import *
if __name__ == '__main__':
    #torch.set_default_tensor_type('torch.DoubleTensor')
    # Entry point.  NOTE(review): main is presumably brought into scope by
    # the ``from inflateScript import *`` above -- confirm.
    main()
| 31.412587 | 163 | 0.70236 |
bbf0d14e96a9123beca18b5184daaba1114c19e3 | 4,129 | py | Python | simulated_crowds/simulated_heterogeneous_crowd_study.py | aburnap/JMD2015-When-Crowdsourcing-Fails | b0bdcba7e35a1678c5c42c8f69461d724887fb35 | [
"MIT"
] | 3 | 2015-03-19T11:49:05.000Z | 2019-06-06T22:25:49.000Z | simulated_crowds/simulated_heterogeneous_crowd_study.py | aburnap/JMD2015-When-Crowdsourcing-Fails | b0bdcba7e35a1678c5c42c8f69461d724887fb35 | [
"MIT"
] | null | null | null | simulated_crowds/simulated_heterogeneous_crowd_study.py | aburnap/JMD2015-When-Crowdsourcing-Fails | b0bdcba7e35a1678c5c42c8f69461d724887fb35 | [
"MIT"
] | null | null | null | #-----------------------------------------------------------------------------
#
# Paper: When Crowdsourcing Fails: A Study of Expertise on Crowdsourced
# Design Evaluation
# Author: Alex Burnap - aburnap@umich.edu
# Date: October 10, 2014
# License: Apache v2
# Description: Simulated Crowd Study for Heterogeneous Crowds. Used to
# generate data for Figure 5.
#
#-----------------------------------------------------------------------------
import simulation_heterogeneous as sim
import model
import numpy as np
import pymc
import csv
#-----------------------------------------------------------------------------
# Simulated Crowd Variables
# Crowd composition used to synthesize evaluators.
# NOTE(review): 'crowd_makeup' is 'homogeneous' even though this is the
# heterogeneous-cluster study -- presumably the cluster_parameters below
# drive the heterogeneity; confirm against simulation_heterogeneous.py.
crowd_parameters = {
    'num_participants' : 60,
    'crowd_makeup' : 'homogeneous',
    'homo_mean' : .8,
    'homo_std_dev' : .1,
    'mixture_means' : (.2, .8),
    'mixture_std_dev' : (.1, .1),
    'mixture_coefficients' : (.9, .1),
    }
# Design set: 8 designs with random true criteria scores and a shared
# evaluation difficulty of 0.5.
design_parameters = {
    'num_designs' : 8,
    'num_subcriteria' : 1,
    'true_design_criteria_score_makeup' : 'random',
    'true_design_evaluation_difficulty_makeup' : 'same',
    'true_design_evaluation_difficulty_score' : .5,
    }
# Per-cluster design biases: cluster 0 is unbiased; cluster 1 favors
# design index 6 by +0.5.
cluster_biases0 = np.zeros(design_parameters['num_designs'])
cluster_biases1 = np.zeros(design_parameters['num_designs'])
cluster_biases1[6] = 0.5
# Two evaluator clusters; proportions here are placeholders and are
# re-randomized every iteration of the experiment loop below.
cluster_parameters = {
    'num_clusters' : 2,
    'cluster_proportions' : (.8,.2),
    'cluster_biases' : (cluster_biases0 ,cluster_biases1), # this is on 0_1 scale
    }
# Evaluation process settings (queries per participant, designs shown per
# query, and the logistic noise scale).
evaluation_parameters = {
    'num_queries_per_participant' : 20,
    'num_designs_per_query' : 3,
    'interface_difficulty' : 0,
    'logistic_scale' : .1,
    }
# Run 250 independent simulated-crowd experiments; each draws a random
# cluster-0 proportion, simulates evaluations, fits the Bayesian network
# model via MCMC and appends the error metrics to a CSV.
for i in xrange(250):
    print '----------------------------------------------------'
    print "Iteration %i" % (i+1)
    print
    # Random split between the unbiased and biased evaluator clusters.
    i_cluster0_proportion = np.random.random()
    cluster_parameters['cluster_proportions']= (i_cluster0_proportion, 1-i_cluster0_proportion)
    env=sim.Environment(crowd_parameters, design_parameters, evaluation_parameters, cluster_parameters)
    # Pin design 6's true score so the cluster bias on it is measurable.
    env.designs[6].true_criteria_score = 0.2
    env.run_evaluations()
    raw_model = model.create_model(env.evaluations_matrix,
        crowd_parameters['num_participants'],
        design_parameters['num_designs'])
    model_instance = pymc.Model(raw_model)
    # Initial Values Set by MAP
    #pymc.MAP(model_instance).fit(method='fmin_powell')
    print '---------- Finished Running MAP to Set MCMC Initial Values ----------'
    # Run MCMC
    print '--------------------------- Starting MCMC ---------------------------'
    M = pymc.MCMC(model_instance)
    M.sample(200000,100000, thin=5, verbose=0)
    # NOTE(review): the comprehensions below reuse ``i`` as their loop
    # variable; on Python 2 list comprehensions leak it, clobbering the
    # outer iteration counter (harmless here since the for re-binds it).
    true_abilities = [env.participants[i].true_ability for i in xrange(crowd_parameters['num_participants'])]
    # True scores are mapped from the [0,1] simulation scale to the 1-5
    # rating scale via *4+1, matching the model outputs below.
    true_scores=[(env.designs[i].true_criteria_score*4+1) for i in xrange(design_parameters['num_designs'])]
    bayesian_network_scores = np.transpose(M.criteria_score_vector.stats()['mean'])*4+1
    bayesian_network_abilities = np.transpose(M.ability_vector.stats()['mean'])
    # Baseline: simple per-design averaging of the raw evaluations.
    averaging_scores = [np.average(env.evaluations_matrix[:,i]) for i in xrange(design_parameters['num_designs'])]
    averaging_MSqE = np.average((np.array(true_scores) - np.array(averaging_scores))**2)
    bayesian_network_MSqE = np.average((np.array(true_scores) - np.array(bayesian_network_scores))**2)
    bayesian_network_abilities_MSqE = np.average((np.array(true_abilities) - np.array(bayesian_network_abilities))**2)
    bayesian_network_logistic_scale = M.logistic_scale_num.stats()['mean']
    bayesian_network_design_difficulty = M.design_difficulty_num.stats()['mean']
    # Append this iteration's results; the file accumulates across runs.
    with open("./simulated_crowd_results/results_heterogeneous_clusters.csv","a") as csvfile:
        results=csv.writer(csvfile)
        results.writerow([i_cluster0_proportion, averaging_MSqE,
            bayesian_network_MSqE,
            bayesian_network_abilities_MSqE,
            bayesian_network_logistic_scale,
            bayesian_network_design_difficulty])
| 43.463158 | 118 | 0.627755 |
bbf0f0dbbea749b29ef7a61b2ac5e680c12f1409 | 1,053 | py | Python | basic-part-1/07-print-file-extension.py | inderpal2406/python-practice-2022 | 59e280a5babefc96b1a9c773a79fb5176e876f7a | [
"MIT"
] | null | null | null | basic-part-1/07-print-file-extension.py | inderpal2406/python-practice-2022 | 59e280a5babefc96b1a9c773a79fb5176e876f7a | [
"MIT"
] | null | null | null | basic-part-1/07-print-file-extension.py | inderpal2406/python-practice-2022 | 59e280a5babefc96b1a9c773a79fb5176e876f7a | [
"MIT"
] | null | null | null | # This script will accept a filename from the user and print the extension of that.
# Ask the user for a filename and report the text after its final period.
# A name containing no period at all is reported as extension-less.
# Membership tests with "in"/"not in" work on strings just like on lists
# and tuples.

# Import modules.
import platform
import os

# Clear the terminal with the command appropriate for the detected OS.
system_name = platform.system()
if system_name == "Windows":
    os.system("cls")
elif system_name == "Linux":
    os.system("clear")

# Tell the user what this script does.
print(f"This script will accept filename from the user and print its extension.\n")

# Read the filename to inspect.
filename = input("Enter the filename: ")

# Report the extension, or the lack of one.
if "." in filename:
    parts = filename.split(".")
    print(f"\nFile extension: {parts[-1]}\n")
else:
    print(f"\nThe filename doesn't contain . in it. It seems to be a file without extension.\n")
| 30.970588 | 115 | 0.716999 |
bbf2ae61952632fab35bb3d4da6625e30a6cc5d4 | 1,279 | py | Python | src/Xtb/Python/__init__.py | qcscine/xtb_wrapper | 5295244771ed5efe3d9e1582e07ed9d26545d387 | [
"BSD-3-Clause"
] | null | null | null | src/Xtb/Python/__init__.py | qcscine/xtb_wrapper | 5295244771ed5efe3d9e1582e07ed9d26545d387 | [
"BSD-3-Clause"
] | null | null | null | src/Xtb/Python/__init__.py | qcscine/xtb_wrapper | 5295244771ed5efe3d9e1582e07ed9d26545d387 | [
"BSD-3-Clause"
] | 1 | 2022-02-04T13:40:00.000Z | 2022-02-04T13:40:00.000Z | __copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import os
import scine_utilities as utils
from distutils import ccompiler
# Singleton managing SCINE module shared objects.
manager = utils.core.ModuleManager()
# If the Xtb module is not yet loaded, verify that its shared library can be
# found next to this package or in the enclosing lib directory.
if not manager.module_loaded('Xtb'):
    # Platform-specific shared-library suffix (e.g. ".so", ".dll").
    shlib_suffix = ccompiler.new_compiler().shared_lib_extension
    module_filename = "xtb.module" + shlib_suffix
    # Look within the python module directory (module is here in the case of
    # python packages) and the lib folder the site packages are in
    current_path = os.path.dirname(os.path.realpath(__file__))
    lib_path = os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
    test_paths = [current_path, lib_path]
    # NOTE(review): ``exists_and_could_load`` is defined elsewhere in this
    # file (not visible in this chunk) -- presumably it checks for and loads
    # the module file in a candidate directory.
    if not any(map(exists_and_could_load, test_paths)):
        raise ImportError('{} could not be located.'.format(module_filename))
| 36.542857 | 78 | 0.693511 |
bbf40515dd7d835260533fe653dd331f52016415 | 5,062 | py | Python | perch/validators.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 3 | 2016-05-03T20:07:25.000Z | 2020-12-22T07:16:11.000Z | perch/validators.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 17 | 2016-04-26T09:35:42.000Z | 2016-08-18T10:07:40.000Z | perch/validators.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 1 | 2019-05-20T01:40:56.000Z | 2019-05-20T01:40:56.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""Voluptuous validotor functions"""
import re
from urlparse import urlsplit
from voluptuous import AllInvalid, Invalid, Schema, ALLOW_EXTRA
from .model import State
def partial_schema(schema, filtered_fields):
    """
    Build a copy of ``schema`` that ignores the given fields.

    :param schema: the Schema
    :param filtered_fields: fields to filter out
    """
    kept = {}
    for key, validator in schema.schema.items():
        # Marker keys (Required/Optional) wrap the field name in ``.schema``;
        # plain keys are used as-is.
        if getattr(key, 'schema', key) not in filtered_fields:
            kept[key] = validator
    return Schema(kept, extra=ALLOW_EXTRA)
def valid_email(email):
    """Return *email* unchanged if it contains an ``@``, else raise Invalid."""
    if "@" in email:
        return email
    raise Invalid('This email is invalid.')
def validate_hex(color):
    """Return *color* if it is a ``#RGB`` or ``#RRGGBB`` hex code, else raise."""
    # Three or six hex digits after a leading '#'.
    if re.match(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color) is None:
        raise Invalid('Invalid Hex Color')
    return color
def validate_url(url):
    """Return *url* if it is a well-formed URL, else raise Invalid.

    NOTE: only support http & https
    """
    # Optional user:pass auth, dotted host labels, optional 2-5 digit port.
    host_pattern = re.compile(
        r'^'
        r'(?:\S+(?::\S*)?@)?'  # user:pass auth
        r'(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])'
        r'(?:\.(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9]))*'  # host
        r'(?::[0-9]{2,5})?'  # port
        r'$', re.IGNORECASE
    )
    try:
        parts = urlsplit(url)
    except ValueError:
        raise Invalid('Invalid URL')
    if parts.scheme not in ('http', 'https'):
        raise Invalid('Missing URL scheme')
    if host_pattern.search(parts.netloc) is None:
        raise Invalid('Invalid URL')
    return url
def validate_reference_links(reference_links):
    """
    Validate the reference links data structure and return it unchanged.

    Expected shape::

        {
            "links": {
                id_type1: url1,
                id_type2: url2
            },
            "redirect_id_type": id_type1 | id_type2
        }

    ``links`` is optional but must be a mapping of ID type to URL when
    present. ``redirect_id_type`` is optional but must name one of the ID
    types in ``links``; the resolution service uses it as the default
    redirect URL.
    """
    if not isinstance(reference_links, dict):
        raise Invalid('Expected reference_links to be an object')
    if 'links' in reference_links and not isinstance(reference_links['links'], dict):
        raise Invalid('Expected links in reference_links to be an object')
    # Reject any key outside the documented pair.
    for key in reference_links:
        if key not in ('links', 'redirect_id_type'):
            raise Invalid('Key {} is not allowed'.format(key))
    links = reference_links.get('links', {})
    redirect = reference_links.get('redirect_id_type')
    if redirect and redirect not in links:
        raise Invalid('Redirect ID type must point to one of the links\' ID types')
    # Each linked URL must itself be valid.
    for url in links.values():
        validate_url(url)
    return reference_links
# Names of every state defined on the State enum.
VALID_STATES = {x.name for x in State}
# Subset of state names applicable here -- presumably the states a user
# account can be put into; verify against the State model.
VALID_USER_STATES = {x.name for x in [State.approved, State.deactivated]}
def _validate_state(state, valid_states):
    """Validate a state, accepting a State member or a state-name string.

    Returns the state's name string, or raises Invalid.
    """
    # NOTE(review): ``state in State`` assumes membership testing on the
    # State class accepts arbitrary values -- confirm against .model.State
    # (plain-string containment on a std enum raises TypeError before 3.12).
    if state in State:
        return state.name
    elif state in valid_states:
        return state
    else:
        raise Invalid('Invalid state')
| 29.260116 | 107 | 0.644212 |
bbf5f66b6a4f40cea15c174917bd79930606ce25 | 189 | py | Python | tests/controls/scroller.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | tests/controls/scroller.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | tests/controls/scroller.py | whitegreyblack/PyWin | 78f3637b4c03c11d7f6ef15b20a1acf699d4be24 | [
"MIT"
] | null | null | null | """ScrollList Component Test"""
import curses
from source.controls import Window
from source.controls import ScrollList as Scroller
# Module author metadata (dunder convention).
__author__ = "Samuel Whang"
| 18.9 | 50 | 0.777778 |
bbf6bf0479cef19ff010cf6f671d185104dd03d3 | 9,060 | py | Python | glycan_profiling/tandem/evaluation_dispatch/task.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 4 | 2019-04-26T15:47:57.000Z | 2021-04-20T22:53:58.000Z | glycan_profiling/tandem/evaluation_dispatch/task.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 8 | 2017-11-22T19:20:20.000Z | 2022-02-14T01:49:58.000Z | glycan_profiling/tandem/evaluation_dispatch/task.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 3 | 2017-11-21T18:05:28.000Z | 2021-09-23T18:38:33.000Z | import os
from collections import deque
from glycan_profiling.task import TaskBase
# True when the GLYCRESOFTDEBUG environment variable is set to a non-empty
# value; presumably toggles extra diagnostics elsewhere in the module.
debug_mode = bool(os.environ.get("GLYCRESOFTDEBUG"))
| 37.438017 | 115 | 0.562252 |
bbf71be865b8e26676ff85c557b20b334f5953a8 | 4,420 | py | Python | tests/queries/recursive/query_typeddict.py | s1s5/python-gql-compiler | 52e0ed7c9fa6deafe2c169c8340d66e8cc168491 | [
"MIT"
] | null | null | null | tests/queries/recursive/query_typeddict.py | s1s5/python-gql-compiler | 52e0ed7c9fa6deafe2c169c8340d66e8cc168491 | [
"MIT"
] | null | null | null | tests/queries/recursive/query_typeddict.py | s1s5/python-gql-compiler | 52e0ed7c9fa6deafe2c169c8340d66e8cc168491 | [
"MIT"
] | null | null | null | # @generated AUTOGENERATED file. Do not Change!
# flake8: noqa
# fmt: off
# isort: skip_file
import typing
from gql import gql, Client
# Autogenerated GraphQL response types for the GetRecursive query
# (see the "@generated" header above -- do not edit the definitions).
Episode = typing.Literal["NEWHOPE", "EMPIRE", "JEDI"]
# Droid branch of hero, with its nested friends selections.
GetRecursive__hero__Droid__friends__Droid__friends = typing.TypedDict("GetRecursive__hero__Droid__friends__Droid__friends", {"name": str})
GetRecursive__hero__Droid__friends__Droid = typing.TypedDict("GetRecursive__hero__Droid__friends__Droid", {"__typename": typing.Literal["Droid"], "id": str, "name": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Droid__friends__Droid__friends]]})
GetRecursive__hero__Droid__friends__Human__starships = typing.TypedDict("GetRecursive__hero__Droid__friends__Human__starships", {"name": str})
GetRecursive__hero__Droid__friends__Human = typing.TypedDict("GetRecursive__hero__Droid__friends__Human", {"__typename": typing.Literal["Human"], "id": str, "name": str, "starships": typing.List[typing.Optional[GetRecursive__hero__Droid__friends__Human__starships]]})
# Fallback interface shape plus the union of concrete friend types.
__GetRecursive__hero__Droid__friends = typing.TypedDict("__GetRecursive__hero__Droid__friends", {"__typename": typing.Literal["Character"], "id": str})
GetRecursive__hero__Droid__friends = typing.Union[__GetRecursive__hero__Droid__friends, GetRecursive__hero__Droid__friends__Human, GetRecursive__hero__Droid__friends__Droid]
GetRecursive__hero__Droid = typing.TypedDict("GetRecursive__hero__Droid", {"__typename": typing.Literal["Droid"], "name": str, "primaryFunction": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Droid__friends]]})
# Human branch of hero.
GetRecursive__hero__Human__friends__Droid = typing.TypedDict("GetRecursive__hero__Human__friends__Droid", {"__typename": typing.Literal["Droid"], "id": str, "name": str})
GetRecursive__hero__Human__friends__Human = typing.TypedDict("GetRecursive__hero__Human__friends__Human", {"__typename": typing.Literal["Human"], "name": str})
__GetRecursive__hero__Human__friends = typing.TypedDict("__GetRecursive__hero__Human__friends", {"__typename": typing.Literal["Character"]})
GetRecursive__hero__Human__friends = typing.Union[__GetRecursive__hero__Human__friends, GetRecursive__hero__Human__friends__Human, GetRecursive__hero__Human__friends__Droid]
GetRecursive__hero__Human = typing.TypedDict("GetRecursive__hero__Human", {"__typename": typing.Literal["Human"], "name": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Human__friends]]})
# Top-level hero union and full response envelope.
__GetRecursive__hero = typing.TypedDict("__GetRecursive__hero", {"__typename": typing.Literal["Character"], "name": str})
GetRecursive__hero = typing.Union[__GetRecursive__hero, GetRecursive__hero__Human, GetRecursive__hero__Droid]
GetRecursiveResponse = typing.TypedDict("GetRecursiveResponse", {"hero": GetRecursive__hero})
# Query variables, split into required and optional halves.
_GetRecursiveInput__required = typing.TypedDict("_GetRecursiveInput__required", {"episode": Episode})
_GetRecursiveInput__not_required = typing.TypedDict("_GetRecursiveInput__not_required", {}, total=False)
| 43.762376 | 267 | 0.721493 |
bbf803380db0ef251842437e33a2f97c28f09e88 | 795 | py | Python | core/render.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | core/render.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | core/render.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed May 16 09:32:56 2018
@author: gamer
"""
import pygame as pg
import numpy as np
import skimage.transform as transform | 24.090909 | 73 | 0.548428 |
bbf98d99386d0154fceea52ba139487cd08f628c | 660 | py | Python | scripts/branching_recursion.py | ithasnext/python_fractals | 1eea4e464d2073ddd0f9dd2000af101cad23c0f8 | [
"MIT"
] | null | null | null | scripts/branching_recursion.py | ithasnext/python_fractals | 1eea4e464d2073ddd0f9dd2000af101cad23c0f8 | [
"MIT"
] | null | null | null | scripts/branching_recursion.py | ithasnext/python_fractals | 1eea4e464d2073ddd0f9dd2000af101cad23c0f8 | [
"MIT"
] | null | null | null | import pygame
import sys
# branching recursion
# Prompt for canvas size and initial radius; converted to int at the
# ``setup`` call on the next line (which is defined elsewhere in the file).
width = input("Enter a width: ")
height = input("Enter a height: ")
radius = input("Enter a radius: ")
setup(int(width), int(height), int(radius)) | 27.5 | 73 | 0.689394 |
bbfa57bb471088a16fc1c6466ecf225acd101941 | 684 | py | Python | WorkInProgress/MagnetoMeter/callibrate.py | SpudGunMan/LMS-uart-esp | 95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4 | [
"BSD-3-Clause"
] | 8 | 2021-03-21T21:34:59.000Z | 2022-03-25T20:51:47.000Z | WorkInProgress/MagnetoMeter/callibrate.py | SpudGunMan/LMS-uart-esp | 95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4 | [
"BSD-3-Clause"
] | 7 | 2021-04-07T07:40:23.000Z | 2022-01-22T21:05:40.000Z | WorkInProgress/MagnetoMeter/callibrate.py | SpudGunMan/LMS-uart-esp | 95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4 | [
"BSD-3-Clause"
] | 5 | 2022-01-21T18:37:20.000Z | 2022-02-17T00:35:28.000Z | from hmc5883l import HMC5883L
# Stream 3000 raw magnetometer readings to cal.csv for offline calibration.
sensor = HMC5883L(scl=5, sda=4)
# Per-axis running min/max and scaled values -- only used by the
# commented-out scaling experiment below, kept for reference.
valmin=[0,0,0]
valmax=[0,0,0]
valscaled=[0,0,0]
# Use a context manager so the CSV is flushed and closed even if a sensor
# read raises mid-run (the original left the file open on error).
with open("cal.csv",'w') as f:
    for count in range(3000):
        valread = sensor.read()
    #    for i in range(3):
    #        if valread[i]<valmin[i]: valmin[i]=valread[i]
    #        if valread[i]>valmax[i]: valmax[i]=valread[i]
    #        valscaled[i]=convert(valread[i],valmin[i],valmax[i],-100,100)
        #degrees, minutes = sensor.heading(valscaled[0], valscaled[1])
        print("%04d"%count,valmin,valmax,valread)
        # NOTE(review): assumes sensor.read() returns a 3-tuple of numbers.
        f.write("%f,%f,%f\n"%valread)
| 27.36 | 75 | 0.631579 |
bbfb94b2d81a97cc98431123af9d98c4a0ea9623 | 16,554 | py | Python | edgetpu/swig/edgetpu_cpp_wrapper.py | TinkerEdgeT/mendel-edgetpu | 5df7f62a2d88dc0d9e98c8c794717d77b62daa89 | [
"Apache-2.0"
] | null | null | null | edgetpu/swig/edgetpu_cpp_wrapper.py | TinkerEdgeT/mendel-edgetpu | 5df7f62a2d88dc0d9e98c8c794717d77b62daa89 | [
"Apache-2.0"
] | null | null | null | edgetpu/swig/edgetpu_cpp_wrapper.py | TinkerEdgeT/mendel-edgetpu | 5df7f62a2d88dc0d9e98c8c794717d77b62daa89 | [
"Apache-2.0"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated bootstrap: on Python >= 2.6 load the native extension via
# the helper (defined in the full generated file, not visible in this chunk),
# otherwise import it directly.
if version_info >= (2, 6, 0):
    _edgetpu_cpp_wrapper = swig_import_helper()
    del swig_import_helper
else:
    import _edgetpu_cpp_wrapper
del version_info
# Compatibility shims for very old Pythons, emitted by SWIG.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
try:
    _object = object
    _newclass = 1
except AttributeError:
    _newclass = 0
# Register the proxy classes with the native module (class definitions are
# elsewhere in the generated file).
SwigPyIterator_swigregister = _edgetpu_cpp_wrapper.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
StringVector_swigregister = _edgetpu_cpp_wrapper.StringVector_swigregister
StringVector_swigregister(StringVector)
def GetRuntimeVersion():
    """
    Returns runtime (libedgetpu.so) version.
    The version is dynamically retrieved from shared object.
    Returns:
      string.
    """
    # Delegate to the native SWIG extension.
    return _edgetpu_cpp_wrapper.GetRuntimeVersion()
# Register remaining proxy classes and mirror native constants into this
# module's namespace (all generated by SWIG).
BasicEngine_swigregister = _edgetpu_cpp_wrapper.BasicEngine_swigregister
BasicEngine_swigregister(BasicEngine)
cvar = _edgetpu_cpp_wrapper.cvar
kEdgeTpuCppWrapperVersion = cvar.kEdgeTpuCppWrapperVersion
kSupportedRuntimeVersion = cvar.kSupportedRuntimeVersion
ImprintingEngine_swigregister = _edgetpu_cpp_wrapper.ImprintingEngine_swigregister
ImprintingEngine_swigregister(ImprintingEngine)
# EdgeTpuState enum values exported from the native module.
_edgetpu_cpp_wrapper.EdgeTpuState_kNone_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kNone = _edgetpu_cpp_wrapper.EdgeTpuState_kNone
_edgetpu_cpp_wrapper.EdgeTpuState_kAssigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kAssigned = _edgetpu_cpp_wrapper.EdgeTpuState_kAssigned
_edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kUnassigned = _edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned
def ListEdgeTpuPaths(state):
    """
    Lists paths of Edge TPU devices available to host.
    Args:
      state: device's current state. Can be:
        EDGE_TPU_STATE_ASSIGNED: devices that are associated with BasicEngine instance.
        EDGE_TPU_STATE_UNASSIGNED: devices that are available.
        EDGE_TPU_STATE_NONE: ASSIGNED or UNASSIGNED, all devices detected by host.
    Returns:
      tuple of strings, which represents device paths in certain state.
    """
    # Delegate to the native SWIG extension.
    return _edgetpu_cpp_wrapper.ListEdgeTpuPaths(state)
# This file is compatible with both classic and new-style classes.
| 31.411765 | 100 | 0.691434 |
bbfba00ada95ca4b323dab1489addc7b7c3e9bf4 | 13,774 | py | Python | pyriemann/utils/mean.py | qbarthelemy/pyRiemann | b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3 | [
"BSD-3-Clause"
] | 1 | 2021-09-30T01:18:51.000Z | 2021-09-30T01:18:51.000Z | pyriemann/utils/mean.py | qbarthelemy/pyRiemann | b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3 | [
"BSD-3-Clause"
] | null | null | null | pyriemann/utils/mean.py | qbarthelemy/pyRiemann | b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3 | [
"BSD-3-Clause"
] | null | null | null | """Mean covariance estimation."""
from copy import deepcopy
import numpy as np
from .base import sqrtm, invsqrtm, logm, expm
from .ajd import ajd_pham
from .distance import distance_riemann
from .geodesic import geodesic_riemann
def _get_sample_weight(sample_weight, data):
"""Get the sample weights.
If none provided, weights init to 1. otherwise, weights are normalized.
"""
if sample_weight is None:
sample_weight = np.ones(data.shape[0])
if len(sample_weight) != data.shape[0]:
raise ValueError("len of sample_weight must be equal to len of data.")
sample_weight /= np.sum(sample_weight)
return sample_weight
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None,
                 sample_weight=None):
    r"""Return the mean covariance matrix according to the Riemannian metric.
    The procedure is similar to a gradient descent minimizing the sum of
    riemannian distance to the mean.
    .. math::
            \mathbf{C} = \arg\min{(\sum_i \delta_R ( \mathbf{C} , \mathbf{C}_i)^2)}
    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop the gradient descent
    :param maxiter: The maximum number of iteration, default 50
    :param init: A covariance matrix used to initialize the gradient descent. If None the Arithmetic mean is used
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    """ # noqa
    # init
    sample_weight = _get_sample_weight(sample_weight, covmats)
    n_trials, n_channels, _ = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    # nu is an adaptive step size, shrunk whenever progress stalls.
    nu = 1.0
    tau = np.finfo(np.float64).max
    crit = np.finfo(np.float64).max
    # stop when J<10^-9 or max iteration = 50
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        # J accumulates the weighted log-maps of the trials at the current
        # estimate, i.e. the gradient in the tangent space at C.
        J = np.zeros((n_channels, n_channels))
        for index in range(n_trials):
            tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
            J += sample_weight[index] * logm(tmp)
        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        # Move C along the geodesic in the direction of J, step size nu.
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        # Accept larger steps while h keeps decreasing, otherwise halve nu.
        if h < tau:
            nu = 0.95 * nu
            tau = h
        else:
            nu = 0.5 * nu
    return C
def mean_logeuclid(covmats, sample_weight=None):
    r"""Return the mean covariance matrix according to the log-Euclidean
    metric.

    The mean is the matrix exponential of the weighted average of the
    matrix logarithms:

    .. math::
            \mathbf{C} = \exp{(\frac{1}{N} \sum_i \log{\mathbf{C}_i})}

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    """
    weights = _get_sample_weight(sample_weight, covmats)
    # Weighted sum of the matrix logs, then map back with expm.
    log_sum = sum(w * logm(c) for w, c in zip(weights, covmats))
    return expm(log_sum)
def mean_kullback_sym(covmats, sample_weight=None):
    """Return the mean covariance matrix according to KL divergence.

    This is the geometric mean (midpoint of the Riemannian geodesic)
    between the arithmetic and the harmonic means, as shown in [1]_.

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix

    References
    ----------
    .. [1] Moakher, Maher, and Philipp G. Batchelor. "Symmetric
        positive-definite matrices: From geometry to applications and
        visualization." In Visualization and Processing of Tensor Fields, pp.
        285-298. Springer Berlin Heidelberg, 2006.
    """
    arithmetic = mean_euclid(covmats, sample_weight)
    harmonic = mean_harmonic(covmats, sample_weight)
    return geodesic_riemann(arithmetic, harmonic, 0.5)
def mean_harmonic(covmats, sample_weight=None):
    r"""Return the harmonic mean of a set of covariance matrices.

    .. math::
            \mathbf{C} = \left(\frac{1}{N} \sum_i {\mathbf{C}_i}^{-1}\right)^{-1}

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    """
    weights = _get_sample_weight(sample_weight, covmats)
    # Weighted sum of the inverses, inverted back at the end.
    inv_sum = sum(w * np.linalg.inv(c) for w, c in zip(weights, covmats))
    return np.linalg.inv(inv_sum)
def mean_logdet(covmats, tol=10e-5, maxiter=50, init=None, sample_weight=None):
    r"""Return the mean covariance matrix according to the logdet metric.
    This is an iterative procedure where the update is:
    .. math::
            \mathbf{C} = \left(\sum_i \left( 0.5 \mathbf{C} + 0.5 \mathbf{C}_i \right)^{-1} \right)^{-1}
    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop the gradient descent
    :param maxiter: The maximum number of iteration, default 50
    :param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    """ # noqa
    sample_weight = _get_sample_weight(sample_weight, covmats)
    n_trials, n_channels, _ = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    crit = np.finfo(np.float64).max
    # stop when J<10^-9 or max iteration = 50
    while (crit > tol) and (k < maxiter):
        k = k + 1
        # J: weighted sum of inverses of the midpoints between the current
        # estimate C and each trial matrix.
        J = np.zeros((n_channels, n_channels))
        for index, Ci in enumerate(covmats):
            J += sample_weight[index] * np.linalg.inv(0.5 * Ci + 0.5 * C)
        Cnew = np.linalg.inv(J)
        # Convergence measured by the Frobenius norm of the update.
        crit = np.linalg.norm(Cnew - C, ord='fro')
        C = Cnew
    return C
def mean_wasserstein(covmats, tol=10e-4, maxiter=50, init=None,
                     sample_weight=None):
    r"""Return the mean covariance matrix according to the Wasserstein metric.
    This is an iterative procedure where the update is [1]_:
    .. math::
            \mathbf{K} = \left(\sum_i \left( \mathbf{K} \mathbf{C}_i \mathbf{K} \right)^{1/2} \right)^{1/2}
    with :math:`\mathbf{K} = \mathbf{C}^{1/2}`.
    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop the gradient descent
    :param maxiter: The maximum number of iteration, default 50
    :param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    References
    ----------
    .. [1] Barbaresco, F. "Geometric Radar Processing based on Frechet distance:
        Information geometry versus Optimal Transport Theory", Radar Symposium
        (IRS), 2011 Proceedings International.
    """ # noqa
    sample_weight = _get_sample_weight(sample_weight, covmats)
    n_trials, n_channels, _ = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    # The iteration runs on K = sqrtm(C), not on C itself.
    K = sqrtm(C)
    crit = np.finfo(np.float64).max
    # stop when J<10^-9 or max iteration = 50
    while (crit > tol) and (k < maxiter):
        k = k + 1
        # J: weighted sum of sqrtm(K Ci K) over the trials.
        J = np.zeros((n_channels, n_channels))
        for index, Ci in enumerate(covmats):
            tmp = np.dot(np.dot(K, Ci), K)
            J += sample_weight[index] * sqrtm(tmp)
        Knew = sqrtm(J)
        # Convergence measured on K in Frobenius norm.
        crit = np.linalg.norm(Knew - K, ord='fro')
        K = Knew
    if k == maxiter:
        print('Max iter reach')
    # Recover C from its square root.
    C = np.dot(K, K)
    return C
def mean_euclid(covmats, sample_weight=None):
    r"""Return the mean covariance matrix according to the Euclidean metric.

    .. math::
            \mathbf{C} = \frac{1}{N} \sum_i \mathbf{C}_i

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    """
    # np.average handles both the unweighted and the weighted case.
    mean = np.average(covmats, axis=0, weights=sample_weight)
    return mean
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
    """Return the mean covariance matrix according using the AJD-based
    log-Euclidean Mean (ALE). See [1].
    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop the gradient descent
    :param maxiter: The maximum number of iteration, default 50
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    Notes
    -----
    .. versionadded:: 0.2.4
    References
    ----------
    [1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
    Diagonalization and Geometric Mean of Symmetric Positive Definite
    Matrices', PLoS ONE, 2015
    """
    sample_weight = _get_sample_weight(sample_weight, covmats)
    n_trials, n_channels, _ = covmats.shape
    crit = np.inf
    k = 0
    # init with AJD
    B, _ = ajd_pham(covmats)
    # Refine the joint diagonalizer B until the diagonal update is close
    # to the identity in the Riemannian distance.
    while (crit > tol) and (k < maxiter):
        k += 1
        # J: weighted sum of the logs of the diagonalized trials.
        J = np.zeros((n_channels, n_channels))
        for index, Ci in enumerate(covmats):
            tmp = logm(np.dot(np.dot(B.T, Ci), B))
            J += sample_weight[index] * tmp
        # Keep only the diagonal part of the exponential as the correction.
        update = np.diag(np.diag(expm(J)))
        B = np.dot(B, invsqrtm(update))
        crit = distance_riemann(np.eye(n_channels), update)
    # Map the log-Euclidean mean back through the (inverse) diagonalizer.
    A = np.linalg.inv(B)
    J = np.zeros((n_channels, n_channels))
    for index, Ci in enumerate(covmats):
        tmp = logm(np.dot(np.dot(B.T, Ci), B))
        J += sample_weight[index] * tmp
    C = np.dot(np.dot(A.T, expm(J)), A)
    return C
def mean_alm(covmats, tol=1e-14, maxiter=100,
             verbose=False, sample_weight=None):
    r"""Return Ando-Li-Mathias (ALM) mean
    Find the geometric mean recursively [1]_, generalizing from:
    .. math::
            \mathbf{C} = A^{\frac{1}{2}}(A^{-\frac{1}{2}}B^{\frac{1}{2}}A^{-\frac{1}{2}})^{\frac{1}{2}}A^{\frac{1}{2}}
    require a high number of iterations.
    This is the adaptation of the Matlab code proposed by Dario Bini and
    Bruno Iannazzo, http://bezout.dm.unipi.it/software/mmtoolbox/
    Extremely slow, due to the recursive formulation.
    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param tol: the tolerance to stop iterations
    :param maxiter: maximum number of iteration, default 100
    :param verbose: indicate when reaching maxiter
    :param sample_weight: the weight of each sample
    :returns: the mean covariance matrix
    Notes
    -----
    .. versionadded:: 0.2.8.dev
    References
    ----------
    .. [1] T. Ando, C.-K. Li and R. Mathias, "Geometric Means", Linear Algebra
        Appl. 385 (2004), 305-334.
    """ # noqa
    sample_weight = _get_sample_weight(sample_weight, covmats)
    C = covmats
    C_iter = np.zeros_like(C)
    n_trials = covmats.shape[0]
    # Base case of the recursion: the geodesic between two matrices.
    if n_trials == 2:
        alpha = sample_weight[1] / sample_weight[0] / 2
        X = geodesic_riemann(covmats[0], covmats[1], alpha=alpha)
        return X
    else:
        for k in range(maxiter):
            # Each new matrix h is the ALM mean of the other n-1 matrices
            # (indices rotated so h itself is excluded).
            for h in range(n_trials):
                s = np.mod(np.arange(h, h + n_trials - 1) + 1, n_trials)
                C_iter[h] = mean_alm(C[s], sample_weight=sample_weight[s])
            # Relative change of the first matrix decides convergence.
            norm_iter = np.linalg.norm(C_iter[0] - C[0], 2)
            norm_c = np.linalg.norm(C[0], 2)
            if (norm_iter / norm_c) < tol:
                break
            # deepcopy so the next pass reads the previous iterate, not the
            # buffer being overwritten in place.
            C = deepcopy(C_iter)
        else:
            # for/else: only reached when the loop exhausts maxiter.
            if verbose:
                print('Max number of iterations reached')
        return C_iter.mean(axis=0)
def mean_identity(covmats, sample_weight=None):
    r"""Return the identity matrix sized to match the covariance set.

    .. math::
            \mathbf{C} = \mathbf{I}_d

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :returns: the identity matrix of size n_channels
    """
    # Weights are irrelevant for the identity "mean"; only the channel
    # dimension of the input matters.
    return np.eye(covmats.shape[1])
def mean_covariance(covmats, metric='riemann', sample_weight=None, *args):
    """Return the mean covariance matrix according to the metric.

    :param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param metric: the metric (default 'riemann'), can be : 'riemann',
        'logeuclid', 'euclid', 'logdet', 'identity', 'wasserstein', 'ale',
        'alm', 'harmonic', 'kullback_sym' or a callable function
    :param sample_weight: the weight of each sample
    :param args: the argument passed to the sub function
    :returns: the mean covariance matrix
    """
    # A callable is used directly; a string is looked up in mean_methods.
    mean_fn = metric if callable(metric) else mean_methods[metric]
    return mean_fn(covmats, sample_weight=sample_weight, *args)
# Dispatch table mapping metric names to their mean implementations;
# consumed by mean_covariance and _check_mean_method.
mean_methods = {'riemann': mean_riemann,
                'logeuclid': mean_logeuclid,
                'euclid': mean_euclid,
                'identity': mean_identity,
                'logdet': mean_logdet,
                'wasserstein': mean_wasserstein,
                'ale': mean_ale,
                'harmonic': mean_harmonic,
                'kullback_sym': mean_kullback_sym,
                'alm': mean_alm}
def _check_mean_method(method):
"""checks methods """
if isinstance(method, str):
if method not in mean_methods.keys():
raise ValueError('Unknown mean method')
else:
method = mean_methods[method]
elif not hasattr(method, '__call__'):
raise ValueError('mean method must be a function or a string.')
return method
| 32.795238 | 116 | 0.635908 |
bbfe214e8149d499ea33bd81fda220abbce8fdef | 3,578 | py | Python | fabfile.py | nprapps/sitemaps | 6be4393d881b3c8766d35fbe479873247f05c13b | [
"FSFAP"
] | null | null | null | fabfile.py | nprapps/sitemaps | 6be4393d881b3c8766d35fbe479873247f05c13b | [
"FSFAP"
] | null | null | null | fabfile.py | nprapps/sitemaps | 6be4393d881b3c8766d35fbe479873247f05c13b | [
"FSFAP"
] | 1 | 2021-02-18T11:24:28.000Z | 2021-02-18T11:24:28.000Z | #!/usr/bin/env python
from fabric.api import *
import app
import app_config
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
"""
Template-specific functions
Changing the template functions should produce output
with fab render without any exceptions. Any file used
by the site templates should be rendered by fab render.
"""
def update_index():
"""
Downloads a Google Doc as an .xls file.
"""
base_url = 'https://docs.google.com/spreadsheet/pub?key=%s&output=csv'
doc_url = base_url % app_config.SITEMAP_GOOGLE_DOC_KEY
local('curl -o data/index.csv "%s"' % doc_url)
def render():
"""
Render HTML templates and compile assets.
"""
update_index()
# Fake out deployment target
app_config.configure_targets(env.get('settings', None))
for rule in app.app.url_map.iter_rules():
rule_string = rule.rule
name = rule.endpoint
if name == 'static':
continue
filename = 'www' + rule_string
print 'Rendering %s' % (filename)
with app.app.test_request_context(path=rule_string):
view = app.__dict__[name]
content = view()[0]
with open(filename, 'w') as f:
f.write(content.encode('utf-8'))
# Un-fake-out deployment target
app_config.configure_targets(app_config.DEPLOYMENT_TARGET)
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
def _deploy_to_s3():
"""
Deploy the gzipped stuff to S3.
"""
s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'
s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude "*" --include-from gzip_types.txt put gzip/ %s'
for bucket in env.s3_buckets:
env.s3_bucket = bucket
local(s3cmd % ('s3://%(s3_bucket)s/' % env))
local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))
def _gzip_www():
"""
Gzips everything in www and puts it all in gzip
"""
local('python gzip_www.py')
local('rm -rf gzip/live-data')
def deploy():
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging])
render()
_gzip_www()
_deploy_to_s3()
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
def shiva_the_destroyer():
    """
    Deletes the app from s3

    Recursively removes this project's files from every configured S3
    bucket.  Irreversible, so an interactive confirmation is required.
    """
    # Refuse to run unless an explicit deployment target was selected.
    require('settings', provided_by=[production, staging])
    _confirm("You are about to destroy everything deployed to %(settings)s for this project.\nDo you know what you're doing?" % env)
    # warn_only: keep going even if a bucket delete fails.
    with settings(warn_only=True):
        s3cmd = 's3cmd del --recursive %s'
        for bucket in env.s3_buckets:
            # env.s3_bucket feeds the %(s3_bucket)s placeholder below.
            env.s3_bucket = bucket
            local(s3cmd % ('s3://%(s3_bucket)s/%(project_slug)s' % env))
| 27.312977 | 186 | 0.66322 |
bbfebfa3a6e07ffb390ccc9c51bbfd1c5eb387b7 | 2,531 | py | Python | img-xlsx.py | jherskovic/img-xlsx | ba301b43c8a3df2282622e70904fcb2d55bad2a3 | [
"CNRI-Python"
] | null | null | null | img-xlsx.py | jherskovic/img-xlsx | ba301b43c8a3df2282622e70904fcb2d55bad2a3 | [
"CNRI-Python"
] | 4 | 2019-08-25T13:16:03.000Z | 2021-01-07T23:20:24.000Z | img-xlsx.py | jherskovic/img-xlsx | ba301b43c8a3df2282622e70904fcb2d55bad2a3 | [
"CNRI-Python"
] | null | null | null | from PIL import Image
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.utils import get_column_letter
from functools import partial
import sys
import argparse
if __name__ == "__main__":
    # Parse the command-line options, then run the image-to-spreadsheet
    # conversion (both helpers are defined earlier in the original file).
    args = handle_arguments()
    convert(args)
| 33.746667 | 111 | 0.600948 |
bbff69aa5097c6b5253948d0d9595188ebebf3c2 | 7,502 | py | Python | tests/test_multithread_access.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | null | null | null | tests/test_multithread_access.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | null | null | null | tests/test_multithread_access.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | [
"MIT"
] | null | null | null | from unittest import TestCase
from dat_analysis.dat_object.dat_hdf import DatHDF
from dat_analysis.hdf_file_handler import HDFFileHandler
from dat_analysis.dat_object.make_dat import get_dat, get_dats, DatHandler
from tests.helpers import get_testing_Exp2HDF
from dat_analysis.data_standardize.exp_specific.Feb21 import Feb21Exp2HDF
import concurrent.futures
import os
import h5py
import numpy as np
import shutil
import time
from tests import helpers
# Directory holding the experiment fixture files these tests read.
dat_dir = os.path.abspath('fixtures/dats/2021Feb')
# Where to put outputs (i.e. DatHDFs)
output_dir = os.path.abspath('Outputs/test_multithread_access')
hdf_folder_path = os.path.join(output_dir, 'Dat_HDFs')
# Exp2HDF test class wired to the fixture/output dirs, based on the
# Feb21 experiment configuration.
Testing_Exp2HDF = get_testing_Exp2HDF(dat_dir, output_dir, base_class=Feb21Exp2HDF)
| 40.551351 | 132 | 0.681818 |
a51f8b0d486e0ae6fcf2e60b6ae5a88312c39cab | 2,721 | py | Python | early_projects/theater.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | [
"MIT"
] | null | null | null | early_projects/theater.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | [
"MIT"
] | null | null | null | early_projects/theater.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | [
"MIT"
] | null | null | null | import myshop
theater()
| 32.011765 | 79 | 0.484748 |
a51f8b0f6e2a6c5f1924803b2a7a2c961da769d4 | 43,469 | py | Python | TSScall-master/TSScall.py | AdelmanLab/GetGeneAnnotation_GGA | ae8c8328640892a4e50408ba566dd95e70f18d52 | [
"MIT"
] | 1 | 2021-04-02T14:36:12.000Z | 2021-04-02T14:36:12.000Z | TSScall-master/TSScall.py | AdelmanLab/GetGeneAnnotation_GGA | ae8c8328640892a4e50408ba566dd95e70f18d52 | [
"MIT"
] | 3 | 2018-02-23T19:47:31.000Z | 2019-07-15T16:58:54.000Z | TSScall-master/TSScall.py | AdelmanLab/GetGeneAnnotation_GGA | ae8c8328640892a4e50408ba566dd95e70f18d52 | [
"MIT"
] | 1 | 2017-01-06T20:16:07.000Z | 2017-01-06T20:16:07.000Z | #!/usr/bin/env python
# CREATED BY CHRISTOPHER LAVENDER
# BASED ON WORK BY ADAM BURKHOLDER
# INTEGRATIVE BIOINFORMATICS, NIEHS
# WORKING OBJECT ORIENTED VERSION
import os
import math
import argparse
import sys
from operator import itemgetter
# STRAND_STATUS IS USED TO DETERMINE IF STRAND IS USED IN SORT
# ENTRY 1 IS LESS THAN ENTRY 2?
# ENTRY 1 IS WITHIN ENTRY 2?
if __name__ == '__main__':
    # Command-line front end for TSS calling: optional flags tune the
    # thresholding / window parameters, the four positional arguments
    # are the required input and output files.
    parser = argparse.ArgumentParser()
    # --- read-threshold selection ---
    parser.add_argument('--fdr', default=None, type=float,
                        help='set read threshold by FDR (FLOAT) (Default \
                        method: less than 0.001)')
    parser.add_argument('--false_positives', default=None, type=int,
                        help='set read threshold by false positive count')
    # --- uTSS calling and clustering parameters ---
    parser.add_argument('--utss_filter_size', default=750, type=int,
                        help='set uTSS filter size; any read within INTEGER \
                        of obsTSS/annoTSS is filtered prior to uTSS calling \
                        (Default: 750)')
    parser.add_argument('--utss_search_window', default=250, type=int,
                        help='set uTSS search window size to INTEGER \
                        (Default: 250)')
    parser.add_argument('--bidirectional_threshold', default=1000, type=int,
                        help='INTEGER threshold to associate bidirectional \
                        TSSs (Default: 1000)')
    parser.add_argument('--detail_file', default=None, type=str,
                        help='create a tab-delimited TXT file with details \
                        about TSS calls')
    parser.add_argument('--cluster_threshold', default=1000, type=int,
                        help='INTEGER threshold to associate TSSs into \
                        clusters (Default: 1000)')
    # --- annotation-guided calling ---
    parser.add_argument('--annotation_file', '-a', type=str,
                        help='annotation in GTF format')
    parser.add_argument('--call_method', type=str, default='bin_winner',
                        choices=['global', 'bin_winner'],
                        help='TSS calling method to use (Default: bin_winner)')
    parser.add_argument('--annotation_join_distance', type=int, default=200,
                        help='set INTEGER distace threshold for joining search \
                        windows from annotation (Default: 200)')
    parser.add_argument('--annotation_search_window', type=int, default=1000,
                        help='set annotation search window size to INTEGER \
                        (Default: 1000)')
    parser.add_argument('--set_read_threshold', type=float, default=None,
                        help='set read threshold for TSS calling to FLOAT; do \
                        not determine threshold from data')
    parser.add_argument('--bin_winner_size', type=int, default=200,
                        help='set bin size for call method bin_winner \
                        (Default: 200)')
    parser.add_argument('--cluster_bed', type=str, default=None,
                        help='write clusters to output bed file')
    # --- required positional inputs and output ---
    parser.add_argument('forward_bedgraph', type=str,
                        help='forward strand Start-seq bedgraph file')
    parser.add_argument('reverse_bedgraph', type=str,
                        help='reverse strand Start-seq bedgraph file')
    parser.add_argument('chrom_sizes', type=str,
                        help='standard tab-delimited chromosome sizes file')
    parser.add_argument('output_bed', type=str, help='output TSS BED file')
    args = parser.parse_args()
    # Every parsed option maps one-to-one onto a TSSCalling keyword.
    TSSCalling(**vars(args))
| 43.996964 | 80 | 0.494835 |
a51fd6b2b0c4c430c0e920bd959a2e1d06f3221b | 234 | py | Python | grayToBinary.py | gaurav3dua/OpenCV | d816158c40c35b897ce9873c176ce72735220069 | [
"MIT"
] | 1 | 2018-11-25T19:30:22.000Z | 2018-11-25T19:30:22.000Z | grayToBinary.py | gaurav3dua/OpenCV | d816158c40c35b897ce9873c176ce72735220069 | [
"MIT"
] | null | null | null | grayToBinary.py | gaurav3dua/OpenCV | d816158c40c35b897ce9873c176ce72735220069 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
# Load the test image directly as a single-channel grayscale image.
img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
# Midpoint cutoff for 8-bit pixels: values above thresh become 255.
thresh = 127
# cv2.threshold returns (retval, image); keep only the binarized image.
im_bw = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imshow('image', im_bw)
cv2.waitKey(0)
cv2.destroyAllWindows() | 21.272727 | 62 | 0.713675 |
a52068720298fd51fbb513a22dc8a2e7f0bdd3f1 | 652 | py | Python | 006-argparse.py | KitchenTableCoders/cli-video | 35cacc059f6ac86c7bf6b1f86f42ea178e16165c | [
"MIT"
] | 6 | 2016-03-06T05:51:06.000Z | 2017-01-10T05:49:03.000Z | 006-argparse.py | KitchenTableCoders/cli-video | 35cacc059f6ac86c7bf6b1f86f42ea178e16165c | [
"MIT"
] | null | null | null | 006-argparse.py | KitchenTableCoders/cli-video | 35cacc059f6ac86c7bf6b1f86f42ea178e16165c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Introduces the "argparse" module, which is used to parse more complex argument strings
eg: ./006-argparse.py --name Jeff mauve
"""
import argparse # http://docs.python.org/2/library/argparse.html#module-argparse
import subprocess
if __name__ == '__main__':
main() | 28.347826 | 99 | 0.707055 |
a5209d004c35406d08483e6a8a94534fc1c1b17b | 4,573 | py | Python | solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py | ZLLentz/solid-attenuator | 766ac1df169b3b9459222d979c9ef77a9be2b509 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-04-21T02:55:11.000Z | 2021-04-21T02:55:11.000Z | solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py | ZLLentz/solid-attenuator | 766ac1df169b3b9459222d979c9ef77a9be2b509 | [
"BSD-3-Clause-LBNL"
] | 27 | 2020-12-07T23:11:42.000Z | 2022-02-02T23:59:03.000Z | solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py | ZLLentz/solid-attenuator | 766ac1df169b3b9459222d979c9ef77a9be2b509 | [
"BSD-3-Clause-LBNL"
] | 2 | 2020-04-01T05:52:03.000Z | 2020-07-24T16:56:36.000Z | """
This is the IOC source code for the unique AT2L0, with its 18 in-out filters.
"""
from typing import List
from caproto.server import SubGroup, expand_macros
from caproto.server.autosave import RotatingFileManager
from .. import calculator, util
from ..filters import InOutFilterGroup
from ..ioc import IOCBase
from ..system import SystemGroupBase
from ..util import State
def create_ioc(prefix, filter_group, macros, **ioc_options):
    """
    IOC Setup.

    Builds one InOutFilterGroup PV sub-group per filter plus a system
    group, derives the motor-state PV prefixes for the full index range,
    creates the IOC class and instance, and wires up autosave before
    returning the IOC.
    """
    # Map each filter index to the attribute name used on the IOC class.
    index_to_attr = {}
    for index, suffix in filter_group.items():
        index_to_attr[index] = f'filter_{suffix}'

    # One PV sub-group per filter, keyed by that attribute name.
    groups = {}
    for index, suffix in filter_group.items():
        groups[index_to_attr[index]] = SubGroup(
            InOutFilterGroup, prefix=f':FILTER:{suffix}:', index=index)
    groups['sys'] = SubGroup(SystemGroup, prefix=':SYS:')

    # Motor-state PVs cover the full contiguous range of filter indices.
    low = min(index_to_attr)
    high = max(index_to_attr)
    motor_prefix = expand_macros(macros["motor_prefix"], macros)
    motor_prefixes = {
        idx: f'{motor_prefix}{idx:02d}:STATE'
        for idx in range(low, high + 1)
    }

    IOCMain = IOCBase.create_ioc_class(index_to_attr, groups, motor_prefixes)
    ioc = IOCMain(prefix=prefix, macros=macros, **ioc_options)

    # Point the autosave helper at the macro-configured backing file.
    autosave_path = expand_macros(macros['autosave_path'], macros)
    ioc.autosave_helper.filename = autosave_path
    ioc.autosave_helper.file_manager = RotatingFileManager(autosave_path)
    return ioc
| 36.293651 | 79 | 0.650776 |
a520cc9aad5c8512bee199a8b970862484795d67 | 4,530 | py | Python | mysite/urls.py | jtkim03/Find-a-QT | a330c95f76bcc148febf39284c07d3ac4f909b4e | [
"BSD-3-Clause"
] | null | null | null | mysite/urls.py | jtkim03/Find-a-QT | a330c95f76bcc148febf39284c07d3ac4f909b4e | [
"BSD-3-Clause"
] | 9 | 2021-03-30T13:42:35.000Z | 2022-03-12T00:36:19.000Z | mysite/urls.py | jtkim03/Find-a-QT | a330c95f76bcc148febf39284c07d3ac4f909b4e | [
"BSD-3-Clause"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include, re_path
from find_a_qt.views import home, QuestionListView, \
QuestionDetailView, question_post, answer_post, room_post, \
AnswerListView, user_history, UserQuestionView, question_answers, upvote_question_detail,\
upvote_answer_question, downvote_question_detail, downvote_answer_question
from django.views.generic import TemplateView
from users import views as user_views
from find_a_qt import views as find_a_qt_views
from django.conf import settings
from django.conf.urls.static import static
from chat.models import Room
from find_a_qt.models import Question
urlpatterns = [
    # Core site pages.
    path('',TemplateView.as_view(template_name = 'find_a_qt/home.html'), name='faqt-home'), #TODO Merge this login template with homepage
    path('admin/', admin.site.urls),
    url(r'^', include('chat.urls')),
    path('accounts/', include('allauth.urls')),
    path('about/', TemplateView.as_view(template_name = 'find_a_qt/about.html')),
    # Authentication and profile management.
    path('register/', user_views.register, name='register'),
    path('login/', auth_views.LoginView.as_view(template_name = 'users/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name = 'users/logout.html'), name = 'logout'),
    path('profile/', user_views.view_profile, name='profile'),
    url(r'^profile/(?P<pk>\d+)/$', user_views.view_profile, name='profile_with_pk'),
    path('profile/edit/', user_views.edit_profile, name='edit_profile'),
    #path('profile/MyQuestions/', UserQuestionView.as_view(), name='myqs'),
    url(r'^profile/(?P<username>\w+)/$', user_views.profile_page, name='public_profile'),
    # Question and answer browsing / creation.
    path('questions/', QuestionListView.as_view(), name='viewquestions'),
    path('answers/', AnswerListView.as_view(), name='viewanswers'),
    path('questions/new/', question_post, name='createquestions'),
    path('questions/<int:pk>/', QuestionDetailView.as_view(), name = 'viewquestions-detail'),
    path('choose_question', TemplateView.as_view(template_name = 'find_a_qt/choose_question.html')),
    # NOTE(review): the two routes below both register name='search';
    # reverse('search') will resolve to only one of them -- confirm this
    # duplication is intentional.
    path('questions/search/', TemplateView.as_view(template_name = 'find_a_qt/search_question.html'), name = 'search'),
    path('s/', find_a_qt_views.search_view, name = 'search'),
    path('answer/new/', answer_post, name='createqs'),
    path('chat/new/', room_post, name='createroom'),
    # Password-reset flow built on Django's auth views.
    path('reset-password/', auth_views.PasswordResetView.as_view(), name='reset_password'),
    path('reset-password/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    re_path(r'^reset-password/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,23})/$',
        auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset-password/complete/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    # User history and voting endpoints.
    path('my-questions/', user_history, name='user_question'),
    # path('answer-question/',question_answers,name='answer_question'),
    path('answers/<int:pk>/',question_answers,name='answer_question'),
    url(r'^like/(?P<username>\w+)/$', user_views.like, name='like'),
    url(r'^dislike/(?P<username>\w+)/$', user_views.dislike, name='dislike'),
    url(r'^upvote_q_d/(?P<answer_id>\d+)/(?P<pk>\d+)/$', upvote_question_detail, name='upvote_question_detail'),
    url(r'^upvote_a_q/(?P<answer_id>\d+)/(?P<pk>\d+)/$', upvote_answer_question, name='upvote_answer_question'),
    url(r'^downvote_q_d/(?P<answer_id>\d+)/(?P<pk>\d+)/$', downvote_question_detail, name='downvote_question_detail'),
    url(r'^downvote_a_q/(?P<answer_id>\d+)/(?P<pk>\d+)/$', downvote_answer_question, name='downvote_answer_question'),
]
# Serve user-uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 56.625 | 137 | 0.722296 |
a5212eabcb877d5b4c6f79a259ed99bcf35ed6f2 | 396 | py | Python | app/forms/login.py | mkorcha/CoyoteLab | 8932d9cc35fb840e468368c2e1249ca4811b59d0 | [
"MIT"
] | 2 | 2016-12-01T00:10:46.000Z | 2016-12-31T19:18:35.000Z | app/forms/login.py | mkorcha/CoyoteLab | 8932d9cc35fb840e468368c2e1249ca4811b59d0 | [
"MIT"
] | null | null | null | app/forms/login.py | mkorcha/CoyoteLab | 8932d9cc35fb840e468368c2e1249ca4811b59d0 | [
"MIT"
] | null | null | null | from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired
| 28.285714 | 66 | 0.765152 |
a522e39d60daf369e5808e7febdbf847f905a859 | 880 | py | Python | budgetcalc/admin.py | MAPC/MBTA | a1e669004509832a42ca49ef4d7d06d05e3a88fd | [
"BSD-3-Clause"
] | null | null | null | budgetcalc/admin.py | MAPC/MBTA | a1e669004509832a42ca49ef4d7d06d05e3a88fd | [
"BSD-3-Clause"
] | null | null | null | budgetcalc/admin.py | MAPC/MBTA | a1e669004509832a42ca49ef4d7d06d05e3a88fd | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from budgetcalc.models import Category, Optiongroup, Option, Submission
# Expose the budget-calculator models in the Django admin, each paired
# with its *Admin options class.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Optiongroup, OptiongroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(Submission, SubmissionAdmin) | 32.592593 | 91 | 0.718182 |
a52614c1f178a95384236fabe39f5251f3b714f1 | 117 | py | Python | USP_Curso/Semana3/Exercicio4.py | IagoAntunes/Python__learning | cb96a1ae902c290270479c7a7f4e97b56c538297 | [
"MIT"
] | null | null | null | USP_Curso/Semana3/Exercicio4.py | IagoAntunes/Python__learning | cb96a1ae902c290270479c7a7f4e97b56c538297 | [
"MIT"
] | null | null | null | USP_Curso/Semana3/Exercicio4.py | IagoAntunes/Python__learning | cb96a1ae902c290270479c7a7f4e97b56c538297 | [
"MIT"
] | null | null | null | num = int(input("Digite um numero: "))
if(num % 5 == 0 and num % 3 == 0):
print("FizzBuzz")
else:
print(num) | 19.5 | 38 | 0.555556 |
a528fb1f9441de07bad65e4dc6932a2f3895273e | 3,720 | py | Python | gpg_reaper.py | kacperszurek/gpg_reaper | 8fd0de32944900c813f8dbb5b83bb83abcea037f | [
"MIT"
] | 95 | 2018-03-05T18:20:00.000Z | 2021-09-28T18:51:40.000Z | gpg_reaper.py | n0ncetonic/gpg_reaper | 8fd0de32944900c813f8dbb5b83bb83abcea037f | [
"MIT"
] | null | null | null | gpg_reaper.py | n0ncetonic/gpg_reaper | 8fd0de32944900c813f8dbb5b83bb83abcea037f | [
"MIT"
] | 15 | 2018-03-13T01:44:00.000Z | 2021-12-20T09:59:26.000Z | # GPG Reaper
#
# MIT License
#
# Copyright (c) 2018 Kacper Szurek
# https://security.szurek.pl/
from pgpy.packet.fields import MPI, RSAPriv
from pgpy.constants import PubKeyAlgorithm, KeyFlags, HashAlgorithm, SymmetricKeyAlgorithm, CompressionAlgorithm
from pgpy import PGPKey
from pgpy.packet.packets import PrivKeyV4
import json
import codecs
import sys
import os
# Markers that delimit the JSON payload inside the input dump file.
begin_block = '--START_GPG_REAPER--'
end_block = '--END_GPG_REAPER--'
if len(sys.argv) != 2:
    print "Usage: " + __file__ + " output.txt"
    os._exit(0)
file_path = sys.argv[1]
if not os.path.isfile(file_path):
    print "[-] File not exist"
    os._exit(0)
try:
    # Detect an optional BOM so the dump can be decoded correctly.
    file_encoding = detect_by_bom(file_path)
    content = open(file_path).read()
    if file_encoding:
        content = content.decode(file_encoding)
    # Locate the JSON payload between the start/end markers.
    begin_find = content.find(begin_block)
    end_find = content.find(end_block)
    if begin_find != -1 and end_find != -1:
        data = json.loads(content[begin_find+len(begin_block):end_find])
        # A single-key dump may be a bare object; normalize to a list.
        if type(data) is not list:
            data = [data]
        for gpg in data:
            try:
                # Rebuild the RSA private key material from the
                # hex-encoded components in the dump.
                rsa_priv = RSAPriv()
                rsa_priv.e = MPI(int(gpg['e'], 16))
                rsa_priv.n = MPI(int(gpg['n'], 16))
                rsa_priv.d = MPI(int(gpg['d'], 16))
                rsa_priv.p = MPI(int(gpg['p'], 16))
                rsa_priv.q = MPI(int(gpg['q'], 16))
                rsa_priv.u = MPI(int(gpg['u'], 16))
                rsa_priv._compute_chksum()
                # Wrap the material in a v4 private-key packet and a
                # PGPKey object (pgpy private internals).
                restored_priv_key = PrivKeyV4()
                restored_priv_key.pkalg = PubKeyAlgorithm.RSAEncryptOrSign
                restored_priv_key.keymaterial = rsa_priv
                restored_priv_key.update_hlen()
                pgp_key = PGPKey()
                pgp_key._key = restored_priv_key
                public_key, _ = PGPKey.from_blob(gpg['public'])
                # The fingerprint embeds the creation date, so copy the
                # creation date over from the public key explicitly.
                pgp_key._key.created = public_key._key.created
                pgp_key.add_uid(
                    public_key.userids[0],
                    usage={
                        KeyFlags.Sign,
                        KeyFlags.EncryptCommunications,
                        KeyFlags.EncryptStorage
                    },
                    hashes=[
                        HashAlgorithm.SHA256,
                        HashAlgorithm.SHA384,
                        HashAlgorithm.SHA512,
                        HashAlgorithm.SHA224],
                    ciphers=[
                        SymmetricKeyAlgorithm.AES256,
                        SymmetricKeyAlgorithm.AES192,
                        SymmetricKeyAlgorithm.AES128],
                    compression=[
                        CompressionAlgorithm.ZLIB,
                        CompressionAlgorithm.BZ2,
                        CompressionAlgorithm.ZIP,
                        CompressionAlgorithm.Uncompressed])
                # Write the reconstructed key to <fingerprint>.key.
                key_fingeprint = pgp_key.fingerprint.replace(" ", "")
                print "[+] Dump {} - {}".format(key_fingeprint, public_key.userids[0])
                open(key_fingeprint+".key", "w").write(str(pgp_key))
            except Exception as e:
                # Keep processing the remaining dumped keys on failure.
                print "[-] Error: "+str(e)
    else:
        print "[-] No info"
except Exception as e:
    print "[-] Error: "+str(e) | 35.09434 | 112 | 0.552419 |
a52c5d9d6fb9e5755519e9da6cf0e9e7b0ba2f4d | 221 | py | Python | db.py | HoolaBoola/tsoha_article_library | 9c1d79eb06811a97c6984d4c970ee71a18724df7 | [
"MIT"
] | null | null | null | db.py | HoolaBoola/tsoha_article_library | 9c1d79eb06811a97c6984d4c970ee71a18724df7 | [
"MIT"
] | 2 | 2021-04-26T18:19:39.000Z | 2021-04-26T19:43:35.000Z | db.py | HoolaBoola/tsoha_article_library | 9c1d79eb06811a97c6984d4c970ee71a18724df7 | [
"MIT"
] | 1 | 2021-05-06T09:10:35.000Z | 2021-05-06T09:10:35.000Z | from app import app
from flask_sqlalchemy import SQLAlchemy
from os import getenv
# Database connection string comes from the DATABASE_URL env variable.
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
# Flask-SQLAlchemy modification tracking is explicitly disabled.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Shared SQLAlchemy handle used by the rest of the application.
db = SQLAlchemy(app)
| 24.555556 | 62 | 0.809955 |
a52cc5e0156fbef790ecdf07862d92b75464ebf8 | 399 | py | Python | classifier/nets/build.py | yidarvin/firstaid_classification | 5cb1ec5a896766ec4670e0daca23014a879e6c14 | [
"MIT"
] | null | null | null | classifier/nets/build.py | yidarvin/firstaid_classification | 5cb1ec5a896766ec4670e0daca23014a879e6c14 | [
"MIT"
] | null | null | null | classifier/nets/build.py | yidarvin/firstaid_classification | 5cb1ec5a896766ec4670e0daca23014a879e6c14 | [
"MIT"
] | null | null | null |
import torch
from os.path import join
from fvcore.common.registry import Registry
ARCHITECTURE_REGISTRY = Registry("ARCHITECTURE")
| 26.6 | 91 | 0.749373 |
a52e756102241b8ea4824f9de3490cd248e22558 | 14,034 | py | Python | graphGenerator.py | carlklier/flappai-bird | ea640005494eaf70abc22c41e502593a8aff436f | [
"Apache-2.0"
] | null | null | null | graphGenerator.py | carlklier/flappai-bird | ea640005494eaf70abc22c41e502593a8aff436f | [
"Apache-2.0"
] | null | null | null | graphGenerator.py | carlklier/flappai-bird | ea640005494eaf70abc22c41e502593a8aff436f | [
"Apache-2.0"
] | null | null | null | #%%
import base64
import matplotlib.pyplot as plt
import numpy as np
import json
from ast import literal_eval
data1encoded = 'eyJkZGFFbmFibGVkIjpmYWxzZSwiZGF0YSI6W3sic3RhcnRUaW1lIjoxNjE5MTM3MjUxMzg1LCJkdXJhdGlvbiI6NjQ1Miwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxMzcyNjMwMDYsImR1cmF0aW9uIjo3NTYwLCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzI3NTI0NywiZHVyYXRpb24iOjEyNzQ4LCJzY29yZSI6NCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzI5ODc5OSwiZHVyYXRpb24iOjczOTcsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzEwNzQxLCJkdXJhdGlvbiI6MTUyNTAsInNjb3JlIjo1LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzMxMDg0LCJkdXJhdGlvbiI6MjYyNjgsInNjb3JlIjoxMiwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTEzNzM2MTc0NiwiZHVyYXRpb24iOjkxNzAsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3Mzc1Mjg1LCJkdXJhdGlvbiI6MTI2MzEsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzkyMzM1LCJkdXJhdGlvbiI6MjA0MjcsInNjb3JlIjo3LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NDE3MTMyLCJkdXJhdGlvbiI6OTQwNSwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc0MzA3MjgsImR1cmF0aW9uIjoxNjAxNiwic2NvcmUiOjUsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxMzc0NjEwNDUsImR1cmF0aW9uIjo4MTU2LCJz
Y29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzU1Njk1NCwiZHVyYXRpb24iOjg2NzIsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3NTY5NzA4LCJkdXJhdGlvbiI6MTIwNDAsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NTg2MjU1LCJkdXJhdGlvbiI6MTI3NTIsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NjAzNzE5LCJkdXJhdGlvbiI6OTM1MCwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2MTY5OTcsImR1cmF0aW9uIjoxNzczMSwic2NvcmUiOjcsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2MzkwODMsImR1cmF0aW9uIjo4Nzk3LCJzY29yZSI6MSwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzY1MjMyNywiZHVyYXRpb24iOjc4OTMsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3NjY1MTMxLCJkdXJhdGlvbiI6Njg1Miwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2NzYzODcsImR1cmF0aW9uIjo4ODg5LCJzY29yZSI6MSwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTEzNzY4OTEwMiwiZHVyYXRpb24iOjcwMjAsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3Njk5Nzk3LCJkdXJhdGlvbiI6ODcxNSwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn1dfQ=='
data1bytes = base64.b64decode(data1encoded)
data1 = json.loads(data1bytes.decode('utf8'))
data2encoded = 'eyJkZGFFbmFibGVkIjpmYWxzZSwiZGF0YSI6W3sic3RhcnRUaW1lIjoxNjE5MTg0NTQ1Nzk0LCJkdXJhdGlvbiI6NjMyMiwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxODQ1NzMyMDgsImR1cmF0aW9uIjo2NTQ1LCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NDU4NzE3NSwiZHVyYXRpb24iOjY5NjEsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg0NjI2ODk3LCJkdXJhdGlvbiI6MTIzODYsInNjb3JlIjo0LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg0NjY5OTE3LCJkdXJhdGlvbiI6MzA4NjUsInNjb3JlIjoxOCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NDk5NTc0NSwiZHVyYXRpb24iOjc1MjAsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1MDA4MTAzLCJkdXJhdGlvbiI6MTc0NTYsInNjb3JlIjo4LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1Mzg2ODE4LCJkdXJhdGlvbiI6MTA4ODIsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NDk1NTE3LCJkdXJhdGlvbiI6MjA1NzcsInNjb3JlIjoxMCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NTUyMzk3MCwiZHVyYXRpb24iOjE0MjczLCJzY29yZSI6NiwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTE4NTU0NTAwOSwiZHVyYXRpb24iOjY2MDksInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTU2OTg2LCJkdXJhdGlvbiI6MTAwMTgs
InNjb3JlIjoyLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTcxMDg4LCJkdXJhdGlvbiI6MTA3MzcsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTg5MTEyLCJkdXJhdGlvbiI6NjIxMCwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU1OTk4MjQsImR1cmF0aW9uIjo3MjAxLCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NTYxMTY3MywiZHVyYXRpb24iOjgxMTMsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NjI0MDAxLCJkdXJhdGlvbiI6ODc4NSwic2NvcmUiOjIsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU2MzY4MjMsImR1cmF0aW9uIjoxNTI2NSwic2NvcmUiOjYsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU2NTYzNjgsImR1cmF0aW9uIjoyMjg4MSwic2NvcmUiOjEyLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NjgzNjM3LCJkdXJhdGlvbiI6MTIxNDQsInNjb3JlIjo0LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1Njk5NTgyLCJkdXJhdGlvbiI6MTQyNzMsInNjb3JlIjo2LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NzIwOTkwLCJkdXJhdGlvbiI6ODgzMywic2NvcmUiOjIsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxODU3MzY1MzAsImR1cmF0aW9uIjoxMDczNywic2NvcmUiOjMsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX1d
fQ=='
data2bytes = base64.b64decode(data2encoded)
data2 = json.loads(data2bytes.decode('utf8'))
def _extract_run_fields(runs):
    """Split a list of per-run telemetry dicts into parallel metric lists.

    runs: list of dicts carrying 'score', 'gravity', 'pipeInterval' and
    'pipeheight' keys (one dict per playthrough, as decoded from the
    exported base64/JSON payload above).
    Returns (scores, gravities, pipe_intervals, pipe_heights).
    """
    scores = [run['score'] for run in runs]
    gravities = [run['gravity'] for run in runs]
    pipe_intervals = [run['pipeInterval'] for run in runs]
    pipe_heights = [run['pipeheight'] for run in runs]
    return scores, gravities, pipe_intervals, pipe_heights


# Per-player metric series, kept as module-level parallel lists because
# the plotting code below refers to them by these exact names.
scores1, gravity1, pipeInterval1, pipeHeight1 = _extract_run_fields(data1["data"])
scores2, gravity2, pipeInterval2, pipeHeight2 = _extract_run_fields(data2["data"])
# One x position per playthrough (23 runs in this data set).
x = np.arange(1, 24, 1)


def _plot_pair(xvals, series_a, series_b, label_a, label_b, ylabel, title):
    """Plot two players' per-playthrough series on a fresh figure.

    Returns the (figure, axes) pair so callers can keep references to
    each figure, exactly as the original inline code did.
    """
    fig, ax = plt.subplots()  # one figure (single axes) per metric
    ax.plot(xvals, series_a, label=label_a)
    ax.plot(xvals, series_b, label=label_b)
    plt.xticks(xvals)  # one tick per playthrough
    ax.set_xlabel('Playthrough')
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.legend()
    return fig, ax


fig, ax = _plot_pair(x, scores1, scores2, 'JOE', 'DALTON',
                     'Score', 'Scores over the playthroughs')
fig2, ax2 = _plot_pair(x, gravity1, gravity2, 'player1', 'player2',
                       'Gravity', 'Gravity strength over 10 playthroughs')
fig3, ax3 = _plot_pair(x, pipeInterval1, pipeInterval2, 'player1', 'player2',
                       'Pipe Interval Distance',
                       'Pipe interval distance over 10 playthroughs')
fig4, ax4 = _plot_pair(x, pipeHeight1, pipeHeight2, 'player1', 'player2',
                       'Pipe Height', 'Pipe height over 10 playthroughs')
# %%
import csv
import base64
import matplotlib.pyplot as plt
import numpy as np
import json

# Baseline (DDA disabled) telemetry: one base64-encoded JSON blob per
# tester, stored in the first CSV column.
data = []
with open('notddaEnabledData.csv') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        data.append(row[0])

allData = []
for testerData in data:
    dataBytes = base64.b64decode(testerData)
    jsonData = json.loads(dataBytes.decode('utf8'))
    scores = []
    pipeInterval = []
    pipeHeight = []
    # Stop at the first run (after the opening one) whose pipeInterval is
    # 1410.5 -- assumed to be a session-reset sentinel value; TODO confirm
    # against the game's reset logic.
    for i, run in enumerate(jsonData["data"]):
        if i != 0 and run['pipeInterval'] == 1410.5:
            break
        scores.append(run['score'])
        pipeInterval.append(run['pipeInterval'])
        # NOTE: the telemetry key really is lower-case 'pipeheight'.
        pipeHeight.append(run['pipeheight'])
    # 1-based playthrough index for this tester's (possibly truncated) run list.
    xvals = np.arange(1, len(scores) + 1, 1)
    playerData = {"xvals": xvals, "scores": scores,
                  "pipeInterval": pipeInterval, "pipeHeight": pipeHeight}
    allData.append(playerData)


def _plot_per_player(key, ylabel, title):
    """Plot metric *key* of every tester in allData on a fresh figure.

    Returns the (figure, axes) pair for further tweaking.
    """
    fig, ax = plt.subplots()
    ax.set_xlabel('Playthrough')
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    for i, player in enumerate(allData):
        ax.plot(player["xvals"], player[key], label='player' + str(i))
    ax.legend()
    return fig, ax


# BUG FIX: this cell reads notddaEnabledData.csv (the baseline data), yet
# the original score plot was titled 'DDA Enabled Scores' -- retitled to
# match the sibling 'Baseline ...' plots below.
fig, ax = _plot_per_player("scores", 'Score', 'Baseline Scores')
fig, ax = _plot_per_player("pipeInterval", 'pipeInterval',
                           'Baseline pipeInterval Distances')
fig, ax = _plot_per_player("pipeHeight", 'pipeHeight',
                           'Baseline pipeHeight Distances')
# %%
import csv
import base64
import matplotlib.pyplot as plt
import numpy as np
import json


def _read_blob_column(path):
    """Return the first column of *path*: one base64 blob per tester."""
    blobs = []
    with open(path) as csvfile:
        for row in csv.reader(csvfile):
            blobs.append(row[0])
    return blobs


def _parse_tester(encoded):
    """Decode one tester's base64 JSON blob into plottable metric lists.

    Reading stops at the first run (after the opening one) whose
    pipeInterval equals 1410.5 -- assumed to be a session-reset sentinel
    value; TODO confirm against the game's reset logic.
    """
    jsonData = json.loads(base64.b64decode(encoded).decode('utf8'))
    scores = []
    pipeInterval = []
    pipeHeight = []
    for i, run in enumerate(jsonData["data"]):
        if i != 0 and run['pipeInterval'] == 1410.5:
            break
        scores.append(run['score'])
        pipeInterval.append(run['pipeInterval'])
        # NOTE: the telemetry key really is lower-case 'pipeheight'.
        pipeHeight.append(run['pipeheight'])
    # 1-based playthrough index for this tester's (possibly truncated) runs.
    xvals = np.arange(1, len(scores) + 1, 1)
    return {"xvals": xvals, "scores": scores,
            "pipeInterval": pipeInterval, "pipeHeight": pipeHeight}


# The original cell duplicated the whole read/parse pipeline for each of
# the two CSVs; both now go through the same helpers.
dataEnabled = _read_blob_column('ddaEnabledData.csv')
dataNotEnabled = _read_blob_column('notddaEnabledData.csv')

allDataEnabled = [_parse_tester(testerData) for testerData in dataEnabled]
allDataNotEnabled = [_parse_tester(testerData) for testerData in dataNotEnabled]

fig, ax = plt.subplots()
ax.set_xlabel('Playthrough')
ax.set_ylabel('Score')
ax.set_title('DDA vs. Baseline Scores')
# One line per tester; every DDA line shares one label/colour and every
# baseline line another, so the legend must be deduplicated afterwards.
for player in allDataEnabled:
    ax.plot(player["xvals"], player["scores"], label='DDA_Enabled',
            color='green')
for player in allDataNotEnabled:
    ax.plot(player["xvals"], player["scores"], label='Baseline', color='red')
# Collapse the repeated legend entries down to one handle per label.
handles, labels = plt.gca().get_legend_handles_labels()
labels, ids = np.unique(labels, return_index=True)
handles = [handles[i] for i in ids]
plt.legend(handles, labels, loc='best')
# %%
| 64.972222 | 4,005 | 0.862619 |
a5320d08df77982d660989950f89ae694eb0d00c | 2,870 | py | Python | C45Tree/apply.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | 1 | 2018-07-31T08:29:11.000Z | 2018-07-31T08:29:11.000Z | C45Tree/apply.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | null | null | null | C45Tree/apply.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 16:08:04 2016
@author: Manuel
"""
from C45Tree_own import split
import pandas as pa
| 32.247191 | 81 | 0.501394 |
a5323fdb26fc504a82070a2ba96f6ac67837b9e8 | 1,545 | py | Python | application_form/migrations/0005_occupancy_id_to_int.py | frwickst/apartment-application-service | 40387327a0f82ba01bfcb6ab8532ea4aec40d37a | [
"MIT"
] | 1 | 2021-03-15T11:29:12.000Z | 2021-03-15T11:29:12.000Z | application_form/migrations/0005_occupancy_id_to_int.py | frwickst/apartment-application-service | 40387327a0f82ba01bfcb6ab8532ea4aec40d37a | [
"MIT"
] | 130 | 2020-09-07T08:30:29.000Z | 2022-03-29T11:49:27.000Z | application_form/migrations/0005_occupancy_id_to_int.py | frwickst/apartment-application-service | 40387327a0f82ba01bfcb6ab8532ea4aec40d37a | [
"MIT"
] | 4 | 2020-09-07T05:34:13.000Z | 2021-11-07T12:51:21.000Z | # Generated by Django 2.2.16 on 2020-10-28 06:58
from django.db import migrations, models
| 31.530612 | 84 | 0.487379 |
a5344db9359a04a85d968f45541c787bd80db9a0 | 579 | py | Python | sample.py | aakibqureshi/goibibo_python | fb8b43eb85bad9baf016e701051a3196ac84a23d | [
"MIT"
] | null | null | null | sample.py | aakibqureshi/goibibo_python | fb8b43eb85bad9baf016e701051a3196ac84a23d | [
"MIT"
] | null | null | null | sample.py | aakibqureshi/goibibo_python | fb8b43eb85bad9baf016e701051a3196ac84a23d | [
"MIT"
] | null | null | null | """
Sample Example
"""
from goibibo import goibiboAPI
GO = goibiboAPI("ae51f09b", "da2d83a905110d15a51795b018605026")
print GO.FlightSearch("BLR", "HYD", 20141028)
print GO.MinimumFare("BLR", "HYD", 20141028)
print GO.BusSearch("bangalore", "hyderabad", 20141028)
print GO.BusSeatMap("vJ52KC0ymd0635qTD9bDDy9GHBkGl5FJMJje0aFX\
_GQTyev_4N9Y62TTfrmS-Re3dCHl0-UxLq4AsoQ%3D")
print GO.SearchHotelsByCity(6771549831164675055)
print GO.GetHotelData([1017089108070373346, 6085103403340214927])
print GO.GetHotelPriceByCity(6771549831164675055, 20141101, 20141102)
| 36.1875 | 69 | 0.789292 |
a534ca263cb0f9b6e64a0efc7bf4cd98c4b776a8 | 1,237 | py | Python | aiogithub/objects/rate_limit.py | flying-sheep/aiogithub | 566252cac036d9abe2b1eb2acb268547e01ca63e | [
"BSD-3-Clause"
] | 10 | 2016-09-13T15:50:10.000Z | 2021-05-27T15:36:58.000Z | aiogithub/objects/rate_limit.py | flying-sheep/aiogithub | 566252cac036d9abe2b1eb2acb268547e01ca63e | [
"BSD-3-Clause"
] | 5 | 2017-03-26T13:51:40.000Z | 2020-04-22T19:46:36.000Z | aiogithub/objects/rate_limit.py | flying-sheep/aiogithub | 566252cac036d9abe2b1eb2acb268547e01ca63e | [
"BSD-3-Clause"
] | 2 | 2020-03-05T06:07:18.000Z | 2022-02-11T14:23:46.000Z | from datetime import datetime
from dateutil.tz import tzutc
from aiogithub.objects.response import BaseResponseObject
from aiogithub.utils import return_key
| 22.089286 | 74 | 0.611964 |
a5357ab347b4e01cb284298a5ffef482b143a7cb | 736 | py | Python | python/basic_opertions.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | python/basic_opertions.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | python/basic_opertions.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""how to user basic math operations in python """
def do_bitwise_operations():
"""bitwise operations"""
assert 5 & 3 == 1
assert ~4 == -5
assert 5 ^ 3 == 6
assert 5 >> 1 == 2
assert 5 << 2 == 20
print("bitwise operations done")
if __name__ == "__main__":
math_operations()
do_bitwise_operations()
| 16 | 50 | 0.539402 |
a5379fd45bcc411d7e294e71572901a73fd67651 | 8,204 | py | Python | cogs/original_command.py | RT-Team/rt-bot | 39698efb6b2465de1e84063cba9d207a5bf07fa5 | [
"BSD-4-Clause"
] | 26 | 2021-11-30T02:48:16.000Z | 2022-03-26T04:47:25.000Z | cogs/original_command.py | RT-Team/rt-bot | 39698efb6b2465de1e84063cba9d207a5bf07fa5 | [
"BSD-4-Clause"
] | 143 | 2021-11-04T07:47:53.000Z | 2022-03-31T23:13:33.000Z | cogs/original_command.py | RT-Team/rt-bot | 39698efb6b2465de1e84063cba9d207a5bf07fa5 | [
"BSD-4-Clause"
] | 14 | 2021-11-12T15:32:27.000Z | 2022-03-28T04:04:44.000Z | # RT - Original Command
from __future__ import annotations
from discord.ext import commands
import discord
from aiomysql import Pool, Cursor
from rtutil import DatabaseManager
def setup(bot):
bot.add_cog(OriginalCommand(bot))
| 30.385185 | 104 | 0.50902 |
a53a2c90ed2f68c611f75caaa74a581e8ab0f1b5 | 12,626 | py | Python | cli_stats/get_data/api_scraper/api_scraper.py | timoudas/premier_league_api | 2b850466ed1c910ee901c68e660706d55f53df61 | [
"MIT"
] | 2 | 2020-02-13T12:30:47.000Z | 2020-03-21T16:32:47.000Z | cli_stats/get_data/api_scraper/api_scraper.py | timoudas/premier_league_api | 2b850466ed1c910ee901c68e660706d55f53df61 | [
"MIT"
] | 2 | 2021-04-06T18:27:57.000Z | 2021-06-02T03:51:47.000Z | cli_stats/get_data/api_scraper/api_scraper.py | timoudas/premier_league_api | 2b850466ed1c910ee901c68e660706d55f53df61 | [
"MIT"
] | null | null | null | import re
import requests
import sys
sys.path.append('cli_stats')
from directory import Directory
from pprint import pprint
from storage_config import StorageConfig
from tqdm import tqdm
session = requests.Session()
#TODO
"""
*Program is not scaling well
"""
"""***HOW TO USE***
1. Create an instance of Football, this initiates the leagues dict which holds
all the leagueIDs.
fb = Football()
2. To get the all the seasons for all leagues, first run the the method
fb.load_leagues()
this fills the leagues dict with nessesery info to make further querys.
To get season values the league abbreviation has to be passed like below:
fb.leagues['EN_PR'].load_seasons()
This selects the key 'EN_PR' which is the parent key in leagues and loads
the season for that league by running the method load.seasons() which is in
class Leagues(). This returns a dict seasons holding the following:
1992/93': {'competition': 1, 'id': 1, 'label': '1992/93'}
Where the '1992/93' is the key containing that seasons information.
***WHAT IS NEEDED FOR ARBITRAIRY QUERYS***
League abbreviation
Season label
Team name
"""
def load_raw_data(url):
"""Retreives Ids for different pages on the API"""
page = 0
data_temp = []
while True:
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://www.premierleague.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
params = (('pageSize', '100'),
('page', str(page),))
# request to obtain the team info
try:
response = session.get(url, headers=headers, params=params).json()
if url.endswith('staff'):
data = response['players']
return data
elif 'fixtures' in url:
data = response["content"]
#loop to get info for each game
data_temp.extend(data)
else:
data = response['content']
# note: bit of a hack, for some reason 'id' is a float, but everywhere it's referenced, it's an int
for d in data:
d['id'] = int(d['id'])
return data
except Exception as e:
print(e, 'Something went wrong with the request')
return {}
page += 1
if page >= response["pageInfo"]["numPages"]:
break
for d in data_temp:
d['id'] = int(d['id'])
return data_temp
#NO IDE HOW THIS WORKS - REPLICATE SeasonTeams
if __name__ == '__main__':
# ValidateParams().main()
# Dir = Directory()
fb = Football()
# lg = League()
# fx = FixtureInfo()
fb.load_leagues()
pprint(fb.leagues['EN_PR'].load_seasons())
pprint(fb.leagues['EN_PR'].seasons['2019/2020'].load_teams())
# pprint(fb.leagues['EN_PR'].seasons['2016/2017'].teams['Arsenal'].load_players())
# ds = fb.leagues['EU_CL'].load_seasons()
# fb.leagues['EU_CL'].seasons['2016/2017'].load_teams()
# pprint(fb.leagues['EU_CL'].seasons['2016/2017'].teams['Atltico'].load_players())
| 34.497268 | 165 | 0.606368 |
a53ba70350cca6563c1076848345c71f8f783379 | 826 | py | Python | web/checkout/models.py | Arvind-4/E-Commerce | d7d2f395a4e64a683dd73fed29c627a2210f479a | [
"MIT"
] | null | null | null | web/checkout/models.py | Arvind-4/E-Commerce | d7d2f395a4e64a683dd73fed29c627a2210f479a | [
"MIT"
] | null | null | null | web/checkout/models.py | Arvind-4/E-Commerce | d7d2f395a4e64a683dd73fed29c627a2210f479a | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
from django_countries import countries
COUNTRY_CHOICES = tuple(countries)
User = get_user_model()
# Create your models here.
| 30.592593 | 71 | 0.746973 |
a53bcdd38f44a14806e05907ccae272513b9cf1c | 1,787 | py | Python | archive/least_squares_BCES.py | Alexander-Serov/abortive-initiation-analysis | 2a036a5186459b79e7cdbd84aa8a7b130226b5e1 | [
"MIT"
] | null | null | null | archive/least_squares_BCES.py | Alexander-Serov/abortive-initiation-analysis | 2a036a5186459b79e7cdbd84aa8a7b130226b5e1 | [
"MIT"
] | null | null | null | archive/least_squares_BCES.py | Alexander-Serov/abortive-initiation-analysis | 2a036a5186459b79e7cdbd84aa8a7b130226b5e1 | [
"MIT"
] | null | null | null |
import numpy as np
def least_squares_BCES(Y1, Y2, V11, V22, V12=0, origin=False):
"""
Make a least-squares fit for non-NaN values taking into account the errors in both rho and J variables. This implementation is based on Akritas1996 article. It is a generalization of the least-squares method. The variance of the slope is also calculated. The intersect is checked to be 0, otherwise a warning is issued.
The fit is performed for the model
X2i = alpha + beta * X1i + ei
Yki = Xki + eki
alpha = 0
so the slope is for X2(X1) function and not the inverse.
If origin == True, no intersect assumed. This doesn't change the lest-squares slope, but changes it's error estimate.
Input:
vectors of data points and errors corresponding to different embryos and ncs.
Output:
(beta, beta_V, alpha, alpha_V)
"""
# Find and drop nans
inds_not_nan = list(set(np.flatnonzero(~np.isnan(Y1))) & set(
np.flatnonzero(~np.isnan(Y2))))
Y1, Y2, V11, V22 = [v[inds_not_nan] for v in (Y1, Y2, V11, V22)]
Y1m = Y1.mean()
Y2m = Y2.mean()
n = len(Y1)
# Estimates for slope (beta) and intersect (alpha)
beta = (
np.sum((Y1 - Y1m) * (Y2 - Y2m) - V12) /
np.sum((Y1 - Y1m)**2 - V11)
)
if not origin:
alpha = (Y2m - beta * Y1m)
else:
alpha = 0
# Error on the estimates
ksi = ((Y1 - Y1m) * (Y2 - beta * Y1 - alpha) + beta * V11 - V12) / (Y1.var() - V11.mean())
zeta = Y2 - beta * Y1 - Y1m * ksi
beta_V = ksi.var() / n
alpha_V = zeta.var() / n
# T, _, _, _ = np.linalg.lstsq(slopes[:, np.newaxis], Ns, rcond=None)
# print(beta, np.sqrt(beta_V), alpha, np.sqrt(alpha_V))
# print('Finished!')
return (beta, beta_V, alpha, alpha_V)
| 33.716981 | 323 | 0.613318 |
a53c77391ca18888fe3d4f6374d65264bcebc717 | 7,696 | py | Python | tests/test_face.py | andfranklin/ErnosCube | a9dd7feda4bc0e9162cd884cd450f47c6b19c350 | [
"MIT"
] | null | null | null | tests/test_face.py | andfranklin/ErnosCube | a9dd7feda4bc0e9162cd884cd450f47c6b19c350 | [
"MIT"
] | 4 | 2020-10-28T19:27:47.000Z | 2020-11-04T00:12:25.000Z | tests/test_face.py | andfranklin/ErnosCube | a9dd7feda4bc0e9162cd884cd450f47c6b19c350 | [
"MIT"
] | null | null | null | from ErnosCube.face_enum import FaceEnum
from ErnosCube.orient_enum import OrientEnum
from ErnosCube.sticker import Sticker
from ErnosCube.face import Face
from ErnosCube.face import RowFaceSlice, ColFaceSlice
from plane_rotatable_tests import PlaneRotatableTests
from hypothesis import given
from strategies import sticker_matrices
from strategies_face import faces, faces_minus_c2, faces_minus_c4
from utils import N_and_flatten
from copy import deepcopy
from pytest import mark, fixture
| 37 | 79 | 0.652677 |
a53cb8a72414679c109b52c99f7c00abcac934ad | 19,752 | py | Python | tests/test_djangoes.py | Exirel/djangoes | 7fee0ec0383077fc8ac5da8515c51a0b304f84be | [
"CC0-1.0"
] | 4 | 2015-01-05T21:04:20.000Z | 2015-09-16T12:56:47.000Z | tests/test_djangoes.py | Exirel/djangoes | 7fee0ec0383077fc8ac5da8515c51a0b304f84be | [
"CC0-1.0"
] | 15 | 2015-01-14T10:08:01.000Z | 2021-06-02T07:09:49.000Z | tests/test_djangoes.py | Exirel/djangoes | 7fee0ec0383077fc8ac5da8515c51a0b304f84be | [
"CC0-1.0"
] | 2 | 2015-02-17T11:11:31.000Z | 2016-05-06T07:11:24.000Z | from unittest.case import TestCase
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from djangoes import (ConnectionHandler,
IndexDoesNotExist,
ConnectionDoesNotExist,
load_backend)
from djangoes.backends.abstracts import Base
from djangoes.backends import elasticsearch
| 31.552716 | 90 | 0.571132 |
a53ce607d2484b47e38e0b6a97b11b56e4d3bb58 | 8,497 | py | Python | bin/yap_conflict_check.py | Novartis/yap | 8399e87e6083e6394d1f9340e308a01751465a03 | [
"Apache-2.0"
] | 23 | 2015-01-14T21:32:11.000Z | 2021-07-19T12:59:10.000Z | bin/yap_conflict_check.py | Novartis/yap | 8399e87e6083e6394d1f9340e308a01751465a03 | [
"Apache-2.0"
] | 1 | 2017-06-30T10:54:57.000Z | 2017-06-30T10:54:57.000Z | bin/yap_conflict_check.py | Novartis/yap | 8399e87e6083e6394d1f9340e308a01751465a03 | [
"Apache-2.0"
] | 9 | 2015-09-02T17:44:24.000Z | 2021-07-05T18:59:16.000Z | #!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
| 37.933036 | 79 | 0.578086 |
a53d6a324052f390797cf713195803de6c9fa43f | 1,148 | py | Python | PS4/ps4a.py | PanPapag/MIT-OCW-Introduction-to-Computer-Science-and-Programming-in-Python-6.0001 | f9aeb55c1473920a7d283bfc09726bdef5614331 | [
"MIT"
] | 3 | 2019-05-20T19:37:49.000Z | 2020-05-16T08:57:04.000Z | PS4/ps4a.py | PanPapag/MIT-OCW-6.0001 | f9aeb55c1473920a7d283bfc09726bdef5614331 | [
"MIT"
] | null | null | null | PS4/ps4a.py | PanPapag/MIT-OCW-6.0001 | f9aeb55c1473920a7d283bfc09726bdef5614331 | [
"MIT"
] | null | null | null | def get_permutations(sequence):
'''
Enumerate all permutations of a given string
sequence (string): an arbitrary string to permute. Assume that it is a
non-empty string.
You MUST use recursion for this part. Non-recursive solutions will not be
accepted.
Returns: a list of all permutations of sequence
Example:
>>> get_permutations('abc')
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: depending on your implementation, you may return the permutations in
a different order than what is listed here.
'''
if len(sequence) == 0 or len(sequence) == 1:
result = [sequence]
else:
x = sequence[0]
permutations = get_permutations(sequence[1:])
result = []
for p in permutations:
for i in range(len(p) + 1):
result.append(p[:i] + x + p[i:])
return result
if __name__ == '__main__':
example_input = 'abc'
print('Input:', example_input)
print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
print('Actual Output:', get_permutations(example_input))
| 30.210526 | 79 | 0.595819 |
a53fd665444f0740f577cb5726aba9622f38b8eb | 3,246 | py | Python | streambox/test/regression.py | chenzongxiong/streambox | 76f95780d1bf6c02731e39d8ac73937cea352b95 | [
"Unlicense"
] | 3 | 2019-07-03T14:03:31.000Z | 2021-12-19T10:18:49.000Z | streambox/test/regression.py | chenzongxiong/streambox | 76f95780d1bf6c02731e39d8ac73937cea352b95 | [
"Unlicense"
] | 6 | 2020-02-17T12:01:30.000Z | 2021-12-09T22:02:33.000Z | streambox/test/regression.py | chenzongxiong/streambox | 76f95780d1bf6c02731e39d8ac73937cea352b95 | [
"Unlicense"
] | 2 | 2020-12-03T04:41:18.000Z | 2021-01-11T21:44:42.000Z | all_tests = [
{
"name" : "wc-fast",
"exec" : "./test-wc.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 100,
"target_ms" : 1000,
"input_file" : "/ssd/1g.txt",
# --- optional: soft delay --- #
#"softdelay_maxbad_ratio" : 0.1, # okay if aonmaly delay % is less than this in a window
#"softdelay_maxbad_ms" : 2000, # upper bound of anonmaly delay
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5200, # used to be compared with the test results
"tput_hint" : 5000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # skip the test
},
{
"name" : "wingrep-fast",
"exec" : "./test-wingrep.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 1000,
"target_ms" : 1000,
"input_file" : "/ssd/9g.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 38500, # used to be compared with the test results
"tput_hint" : 37000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-join-2-fast",
"exec" : "./test-join-2.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 8, #sizeof(long)
"target_ms" : 1000,
"input_file" : "/ssd/test-digit.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5200, # used to be compared with the test results
"tput_hint" : 5000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-distinct-fast",
"exec" : "./test-distinct.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 100,
"target_ms" : 1000,
"input_file" : "/ssd/train-out.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 2000, # xzl: can do 2000? used to be compared with the test results
"tput_hint" : 2000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "networklatency-fast",
"exec" : "./networklatency.bin",
"records" : 500 * 1000, # records per epoch
"record_size" : 40, #sizeof(struct srcdst_rtt)
"target_ms" : 1000,
"input_file" : "/ssd/region_Raw_PingmeshData.result",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 1000, # xzl: 878
"tput_hint" : 800, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-tweet-fast",
"exec" : "./test-tweet.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 200,
"target_ms" : 1000,
"input_file" : "/ssd/twitter_download/filtered_tweets.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5000, # used to be compared with the test results
"tput_hint" : 4000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
]
| 33.8125 | 90 | 0.608133 |
a5411aefaed2b9a42a7bcdc0b02b6093311e2594 | 277 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(1)
| 23.083333 | 92 | 0.505415 |
a541ad6227bc2976b930cd5ee28105b474b1a9e3 | 1,350 | py | Python | flash_test/utils/log.py | nikolas-hermanns/flash-test | dda642e96f76113b42a7d64415eb3d8cdc03fca5 | [
"Apache-2.0"
] | null | null | null | flash_test/utils/log.py | nikolas-hermanns/flash-test | dda642e96f76113b42a7d64415eb3d8cdc03fca5 | [
"Apache-2.0"
] | null | null | null | flash_test/utils/log.py | nikolas-hermanns/flash-test | dda642e96f76113b42a7d64415eb3d8cdc03fca5 | [
"Apache-2.0"
] | null | null | null | '''
Created on Jan 16, 2016
@author: enikher
'''
import logging
import datetime
LOG = logging.getLogger(__name__)
LOG_LEVEL = logging.DEBUG
LOG_PATH = "./dlService.log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
filename=LOG_PATH,
datefmt='%Y-%m-%dT:%H:%M:%s', level=LOG_LEVEL)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console.setFormatter(formatter)
LOG.addHandler(console)
| 31.395349 | 71 | 0.545926 |
a544d907f7886fe6cd4c85cf8051e8844cf738ac | 54 | py | Python | flask_api/app/common/__init__.py | brennanhfredericks/network-monitor-server | 7c811d7851aee5d069569306c46dff39d8d52400 | [
"MIT"
] | null | null | null | flask_api/app/common/__init__.py | brennanhfredericks/network-monitor-server | 7c811d7851aee5d069569306c46dff39d8d52400 | [
"MIT"
] | null | null | null | flask_api/app/common/__init__.py | brennanhfredericks/network-monitor-server | 7c811d7851aee5d069569306c46dff39d8d52400 | [
"MIT"
] | null | null | null | from .appifaceprog import api
from .database import db | 27 | 29 | 0.833333 |
a546651f1dcad01340583064244d142fb1215fd5 | 1,061 | py | Python | EasyPortfolioExplorer/app/utils/resource_loader.py | jblemoine/EasyPortfolioExplorer | 88484a1acb8f41f7497129ffefc89608af2d34d5 | [
"MIT"
] | null | null | null | EasyPortfolioExplorer/app/utils/resource_loader.py | jblemoine/EasyPortfolioExplorer | 88484a1acb8f41f7497129ffefc89608af2d34d5 | [
"MIT"
] | null | null | null | EasyPortfolioExplorer/app/utils/resource_loader.py | jblemoine/EasyPortfolioExplorer | 88484a1acb8f41f7497129ffefc89608af2d34d5 | [
"MIT"
] | 1 | 2018-05-07T23:44:40.000Z | 2018-05-07T23:44:40.000Z | from EasyPortfolioExplorer.app.easy.base import EasyBase
| 33.15625 | 122 | 0.600377 |
a5477960eb696e3d1bcdbdddd2a93a52054fb340 | 11,884 | py | Python | pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
| 61.57513 | 496 | 0.73073 |
a548314328afb7d3cb5f380d9d16cde6403fb2e0 | 1,275 | py | Python | lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py | Rhadow/leetcode | 43209626720321113dbfbac67b3841e6efb4fab3 | [
"MIT"
] | 3 | 2017-04-03T12:18:24.000Z | 2018-06-25T08:31:04.000Z | lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py | Rhadow/leetcode | 43209626720321113dbfbac67b3841e6efb4fab3 | [
"MIT"
] | null | null | null | lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py | Rhadow/leetcode | 43209626720321113dbfbac67b3841e6efb4fab3 | [
"MIT"
] | null | null | null | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
| 27.12766 | 62 | 0.481569 |
a5488a57c13d79bfc459f46fd458c1c896f8b4d3 | 1,268 | py | Python | Python/1289.MatrixSpiral.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 263 | 2020-10-05T18:47:29.000Z | 2022-03-31T19:44:46.000Z | Python/1289.MatrixSpiral.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 1,264 | 2020-10-05T18:13:05.000Z | 2022-03-31T23:16:35.000Z | Python/1289.MatrixSpiral.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 760 | 2020-10-05T18:22:51.000Z | 2022-03-29T06:06:20.000Z | """This program takes a matrix of size mxn as input, and prints the matrix in a spiral format
for example: input ->> [[1,2,3],
[4,5,6],
[7,8,9],
[10,11,12]]
output ->> 1 2 3 6 9 12 11 10 7 4 5 8"""
| 37.294118 | 93 | 0.502366 |
a54a675c308dee0b53b78a00aef279613875fd2d | 4,694 | py | Python | lib/sde.py | NCIA-Diffusion/ScoreSDE | b5a562908daf66e6dcf0b791beb83f1fcb61174b | [
"MIT"
] | 2 | 2022-03-02T06:54:28.000Z | 2022-03-02T06:56:45.000Z | lib/sde.py | NCIA-Diffusion/ScoreSDE | b5a562908daf66e6dcf0b791beb83f1fcb61174b | [
"MIT"
] | null | null | null | lib/sde.py | NCIA-Diffusion/ScoreSDE | b5a562908daf66e6dcf0b791beb83f1fcb61174b | [
"MIT"
] | 2 | 2022-02-23T11:49:15.000Z | 2022-03-02T06:56:46.000Z | import abc
import numpy as np
import torch
import torch.nn as nn
class VPSDE(AbstractSDE):
# def proposal_distribution(self):
# def g2(t):
# return self.beta_0 + t * (self.beta_1 - self.beta_0)
# def a2(t):
# log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) \
# - 0.5 * t * self.beta_0
# return 1. - torch.exp(2. * log_mean_coeff)
# t = torch.arange(1, 1001) / 1000
# p = g2(t) / a2(t)
# normalizing_const = p.sum()
# return p, normalizing_const
| 31.503356 | 85 | 0.532169 |
a54a95f758a5621c7d99991bc1935abca6851391 | 799 | py | Python | tests/lanczos/build.py | weikengchen/Libra | 7ad48800febee0d4426a6146d54906476b7acc5a | [
"Apache-2.0"
] | 28 | 2020-01-05T12:05:57.000Z | 2021-11-23T16:18:40.000Z | tests/lanczos/build.py | weikengchen/Libra | 7ad48800febee0d4426a6146d54906476b7acc5a | [
"Apache-2.0"
] | 1 | 2020-08-10T17:15:38.000Z | 2020-08-11T16:14:46.000Z | tests/lanczos/build.py | weikengchen/Libra | 7ad48800febee0d4426a6146d54906476b7acc5a | [
"Apache-2.0"
] | 13 | 2020-01-31T05:53:37.000Z | 2021-08-02T14:05:43.000Z | import os
os.system('./build.sh')
os.system('g++ parser_sha_data_parallel.cpp -o psdp -O3')
os.system('./psdp lanczos2_16.pws lanczos2_16_112_N=16_rdl.pws lanczos2_112_N=16_circuit.txt lanczos2_112_N=16_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_176_N=64_rdl.pws lanczos2_176_N=64_circuit.txt lanczos2_176_N=64_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_304_N=256_rdl.pws lanczos2_304_N=256_circuit.txt lanczos2_304_N=256_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_560_N=1024_rdl.pws lanczos2_560_N=1024_circuit.txt lanczos2_560_N=1024_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_1072_N=4096_rdl.pws lanczos2_1072_N=4096_circuit.txt lanczos2_1072_N=4096_meta.txt')
os.system('make -C ../.. linear_gkr_zk')
os.system('cp ../../bin/main_zk .')
| 61.461538 | 130 | 0.807259 |
a54b68b3a18c130ef71abef51b17c638d75ff918 | 1,166 | py | Python | diagrams/seq-tables.py | PerFuchs/master-thesis | 85386c266fecf72348114bcbafeeb896a9e74601 | [
"MIT"
] | 1 | 2019-11-02T20:23:03.000Z | 2019-11-02T20:23:03.000Z | diagrams/seq-tables.py | PerFuchs/master-thesis | 85386c266fecf72348114bcbafeeb896a9e74601 | [
"MIT"
] | null | null | null | diagrams/seq-tables.py | PerFuchs/master-thesis | 85386c266fecf72348114bcbafeeb896a9e74601 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from diagrams.base import *
DATASET = DATASET_FOLDER + "ama0302.csv"
tabulize_data(DATASET_FOLDER + "ama0302.csv", GENERATED_PATH + "seq-table-ama0302.tex")
tabulize_data(DATASET_FOLDER + "ama0601.csv", GENERATED_PATH + "seq-table-ama0601.tex")
tabulize_data(DATASET_FOLDER + "snb-sf1.csv", GENERATED_PATH + "seq-table-snb-sf1.tex")
| 32.388889 | 117 | 0.596913 |
a54b6dc0f255b7a92415a48a23ac09a9d0e01321 | 1,513 | py | Python | instance-segmentation/detectron_train_PointRend.py | diwgan32/IKEA_ASM_Dataset | 8f41c15c4a7fb47f53235d2292d0eff8136ae492 | [
"MIT"
] | null | null | null | instance-segmentation/detectron_train_PointRend.py | diwgan32/IKEA_ASM_Dataset | 8f41c15c4a7fb47f53235d2292d0eff8136ae492 | [
"MIT"
] | null | null | null | instance-segmentation/detectron_train_PointRend.py | diwgan32/IKEA_ASM_Dataset | 8f41c15c4a7fb47f53235d2292d0eff8136ae492 | [
"MIT"
] | null | null | null | # Run training with PointRend head
# uses default configuration from detectron2
# The model is initialized via pre-trained coco models from detectron2 model zoo
#
# Fatemeh Saleh <fatemehsadat.saleh@anu.edu.au>
import os
from detectron2.config import get_cfg
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer
import sys; sys.path.insert(1, "projects/PointRend")
import point_rend
from detectron2.utils.logger import setup_logger
setup_logger()
if __name__=='__main__':
    # Register the IKEA ASM training split (COCO format) with detectron2's
    # dataset catalog.  NOTE(review): the annotation/image paths are
    # placeholders and must be filled in before running.
    register_coco_instances("ikea_train", {}, "path/to/annotation/train_manual_coco_format.json", "/path/to/images/")
    cfg = get_cfg()
    # Extend the base config with the PointRend-specific options, then load
    # the R-50 FPN 3x PointRend instance-segmentation recipe.
    point_rend.add_pointrend_config(cfg)
    cfg.merge_from_file("projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml")
    cfg.MODEL.POINT_HEAD.NUM_CLASSES = 7  # 7 dataset classes (see ROI_HEADS below)
    cfg.DATASETS.TRAIN = ("ikea_train",)
    cfg.DATASETS.TEST = ()  # no evaluation during training
    cfg.DATALOADER.NUM_WORKERS = 2
    # initialize training from the COCO-pretrained PointRend checkpoint in
    # the detectron2 model zoo
    cfg.MODEL.WEIGHTS = "detectron2://PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0025 # pick a good LR
    cfg.SOLVER.MAX_ITER = 60000
    cfg.SOLVER.STEPS = (20000, 40000)  # LR decay milestones
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)  # start fresh; do not resume from OUTPUT_DIR
    trainer.train()
a54c32d9df76ea887a0b0bac9c4f21cd01fe50ff | 3,991 | py | Python | verify-local-sendnsca-client/update_nagios_cfg.py | raychorn/svn_jenkins_projects | 93d22c28735c9fe6cb4ac632b6e79d89530e3bfb | [
"CC0-1.0"
] | null | null | null | verify-local-sendnsca-client/update_nagios_cfg.py | raychorn/svn_jenkins_projects | 93d22c28735c9fe6cb4ac632b6e79d89530e3bfb | [
"CC0-1.0"
] | null | null | null | verify-local-sendnsca-client/update_nagios_cfg.py | raychorn/svn_jenkins_projects | 93d22c28735c9fe6cb4ac632b6e79d89530e3bfb | [
"CC0-1.0"
] | null | null | null | import re, os, sys
# Rewrites the live nagios.cfg so it contains an active "cfg_file=" entry for
# the config file named on the command line, regrouping all active entries at
# the position of the first one.  Python 2 code (print statements,
# dict.has_key) -- do not run under Python 3.

# WINGDB_ACTIVE is set by the Wing IDE debugger; use it to switch to a local
# test tree instead of the real /usr installation.
isBeingDebugged = False if (not os.environ.has_key('WINGDB_ACTIVE')) else int(os.environ['WINGDB_ACTIVE']) == 1
# Matches "cfg_file=<path>" lines, capturing an optional leading "#"
# (commented-out entries) and the path itself.
__re__ = re.compile("(?P<commented>(#|))cfg_file=(?P<cfg_file>.*)", re.DOTALL | re.MULTILINE)
__top__ = '/usr' if (not isBeingDebugged) else r'J:\@11.1'
# NOTE(review): find_nagios_cfg is not defined in this file as shown --
# presumably provided elsewhere; it should return the nagios.cfg path under
# __top__.
fpath = find_nagios_cfg(__top__, 'nagios.cfg')
# Destination config file whose cfg_file= entry must end up present; taken
# from the last command-line argument.
fdest = sys.argv[-1]
print 'INFO(1): nagios.cfg is %s' % (fpath)
# Normalize the destination filename: drop any middle "_"-separated tokens
# (e.g. "name_retired1_retired2_suffix" -> "name_suffix").
fdest_dir = os.path.dirname(fdest)
fdest_base = os.path.basename(fdest)
toks = fdest_base.split('_')
retirees = toks[1:-1]
if (len(retirees) > 0):
    del toks[1:-1]
    fdest_base = '_'.join(toks)
    fdest = os.sep.join([fdest_dir,fdest_base])
print 'INFO(2): fdest is %s' % (fdest)
print 'INFO: nagios.cfg is %s' % (fpath)
if (os.path.exists(fdest)):
    if (os.path.exists(fpath)):
        # Read the whole current nagios.cfg into memory.
        fIn = open(fpath, 'r')
        lines = fIn.readlines()
        fIn.close()
        # Create a sibling ".../tmp" directory next to the config directory
        # if missing.  NOTE(review): __temp_path__ is never used afterwards
        # -- looks like leftover scaffolding.
        __temp_path__ = os.path.dirname(fpath)
        toks = __temp_path__.split(os.sep)
        if (len(toks) > 1):
            del toks[-1]
            toks.append('tmp')
            __temp_path__ = os.sep.join(toks)
            if (not os.path.exists(__temp_path__)):
                os.mkdir(__temp_path__)
        __lines__ = []  # lines to keep verbatim in the output
        __matches__ = []  # active (uncommented) cfg_file= entries, regrouped later
        first_time_used = -1  # index of the first active cfg_file= line seen
        count = 0
        __was__ = False  # True once an entry for fdest has been seen
        for l in lines:
            __is__ = False  # keep this line verbatim?
            matches = __re__.search(l)
            if (matches):
                print 'FOUND: %s' % (matches.groupdict())
                is_commented = len(matches.groupdict().get('commented','')) > 0
                if (not is_commented):
                    # Active entry: pull it out of the line stream so it can
                    # be re-emitted in the regrouped block below.
                    cfg_file = matches.groupdict().get('cfg_file',None)
                    if (cfg_file):
                        cfg_file = str(cfg_file).rstrip()
                        if (cfg_file == fdest):
                            __was__ = True
                        __matches__.append(matches.groupdict())
                        if (first_time_used == -1):
                            first_time_used = count
                else: # is a match but is commented so use the line.
                    __is__ = True
            else: # not a match so use the line.
                __is__ = True
            if (__is__):
                __lines__.append(str(l).rstrip())
            count += 1
        # Collapse runs of 3+ consecutive blank lines left behind by the
        # removed entries (scan backwards so deletion is index-safe).
        i = len(__lines__)-1
        while (i > 2):
            if (len(__lines__[i]) == 0) and (len(__lines__[i-1]) == 0) and (len(__lines__[i-2]) == 0):
                del __lines__[i]
            i -= 1
        # Ensure fdest gets an uncommented entry even if it was absent.
        if (not __was__):
            d = {'commented': '', 'cfg_file': fdest}
            print 'APPEND: %s' % (d)
            __matches__.append(d)
        # Write the new file alongside the original, then swap it in.
        fOut = open(fpath+'.new', mode='w')
        count = 0
        for l in __lines__:
            print >> fOut, str(l).rstrip()
            # NOTE(review): first_time_used indexes the ORIGINAL lines list
            # while count here indexes the filtered __lines__ list, so the
            # regrouped entries may be inserted at a shifted position --
            # confirm this offset is intended.
            if (count == first_time_used):
                for m in __matches__:
                    is_commented = len(m.get('commented','')) > 0
                    comment = ''
                    if (is_commented):
                        comment = '#'
                    cfg_file = m.get('cfg_file',None)
                    print >> fOut, '%s%s' % (comment,'cfg_file=%s' % (cfg_file))
            count += 1
        fOut.flush()
        fOut.close()
        # Not atomic: a crash between remove and rename loses the original.
        os.remove(fpath)
        os.rename(fOut.name,fpath)
    else:
        print >> sys.stderr, 'WARNING: Cannot find "%s".' % (fpath)
else:
    print >> sys.stderr, 'WARNING: Cannot find dest config file "%s"; make sure this file is mentioned on the command line as the 1st argument.' % (fdest)
| 35.954955 | 155 | 0.491606 |
a54c3694831528b032a63a41c9cef6f955e863a0 | 11,775 | py | Python | dataviva/attrs/views.py | dogobox/datavivamaster | c89596778e2d8d01a2193b02ca5960bd17f4468d | [
"MIT"
] | null | null | null | dataviva/attrs/views.py | dogobox/datavivamaster | c89596778e2d8d01a2193b02ca5960bd17f4468d | [
"MIT"
] | null | null | null | dataviva/attrs/views.py | dogobox/datavivamaster | c89596778e2d8d01a2193b02ca5960bd17f4468d | [
"MIT"
] | null | null | null | import urllib2
from sqlalchemy import func, distinct, asc, desc, and_, or_
from flask import Blueprint, request, jsonify, abort, g, render_template, make_response, redirect, url_for, flash
from dataviva import db, __latest_year__
from dataviva.attrs.models import Bra, Wld, Hs, Isic, Cbo, Yb
from dataviva.secex.models import Yp, Yw
from dataviva.rais.models import Yi, Yo
from dataviva.ask.models import Question
from dataviva.utils.gzip_data import gzip_data
from dataviva.utils.cached_query import cached_query
from dataviva.utils.exist_or_404 import exist_or_404
from dataviva.utils.title_case import title_case
# Flask blueprint for all attribute endpoints, mounted under /attrs.
mod = Blueprint('attrs', __name__, url_prefix='/attrs')
############################################################
# ----------------------------------------------------------
# All attribute views
#
############################################################
| 34.429825 | 148 | 0.545563 |
a54cde621c4d8d9c2e11ad32222e88ab799ae414 | 701 | py | Python | leetcode/easy/sort-array-by-parity.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 8 | 2019-05-14T12:50:29.000Z | 2022-03-01T09:08:27.000Z | leetcode/easy/sort-array-by-parity.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 46 | 2019-03-24T20:59:29.000Z | 2019-04-09T16:28:43.000Z | leetcode/easy/sort-array-by-parity.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 1 | 2022-01-28T12:46:29.000Z | 2022-01-28T12:46:29.000Z | """
Given an array A of non-negative integers, return an array consisting of all the even elements of A,
followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
"""
# Smoke test: the evens [2, 4] should precede the odds in the result.
# NOTE(review): the Solution class is not shown in this excerpt.
result = Solution().sortArrayByParity([3,1,2,4])
print(result)
| 20.028571 | 100 | 0.617689 |
a54d9516f3cf42047c6d21bb9815568bd1e67161 | 2,922 | py | Python | meerschaum/_internal/docs/index.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 32 | 2020-09-14T16:29:19.000Z | 2022-03-08T00:51:28.000Z | meerschaum/_internal/docs/index.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 3 | 2020-10-04T20:03:30.000Z | 2022-02-02T21:04:46.000Z | meerschaum/_internal/docs/index.py | bmeares/Meerschaum | 37bd7a9923efce53e91c6a1d9c31f9533b9b4463 | [
"Apache-2.0"
] | 5 | 2021-04-22T23:49:21.000Z | 2022-02-02T12:59:08.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
<img src="https://meerschaum.io/assets/banner_1920x320.png" alt="Meerschaum banner">
| PyPI | GitHub | License |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
|  |  |  |
|  |  | |
# What is Meerschaum?
Meerschaum is a platform for quickly creating and managing time-series data streams called **pipes**. With Meerschaum, you can have a data visualization stack running in minutes.
The power of the Meerschaum system comes from projects like [pandas](https://pandas.pydata.org/), [sqlalchemy](https://www.sqlalchemy.org/), [fastapi](https://fastapi.tiangolo.com/), and more.
# Why Meerschaum?
If you've worked with time-series data, you know the headaches that come with ETL. Meerschaum is a system that makes consolidating and syncing data easy.
Don't rely on scripts that will silently break in a year. Meerschaum instead gives you better tools to define and sync your data streams. And don't worry — you can always incorporate Meerschaum into your existing scripts.
# Quick Start
For a more thorough setup guide, visit the [Getting Started](https://meerschaum.io/get-started/) page at [meerschaum.io](https://meerschaum.io).
## TL;DR
```bash
pip install -U --user meerschaum
mrsm stack up -d db grafana
mrsm bootstrap pipes
```
## Usage Documentation
Please visit [meerschaum.io](https://meerschaum.io) for setup, usage, and troubleshooting information. You can find technical documentation at [docs.meerschaum.io](https://docs.meerschaum.io).
## Plugins
Here is the [list of community plugins](https://meerschaum.io/reference/plugins/list-of-plugins/).
For details on installing, using, and writing plugins, check out the [plugins documentation](https://meerschaum.io/reference/plugins/types-of-plugins) at [meerschaum.io](https://meerschaum.io).
# Support Meerschaum's Development
I'm a full-time graduate student, and I work on Meerschaum in my free time. If you enjoy Meerschaum and want to support its development, you can [buy me a beer (or coffee)](https://www.buymeacoffee.com/bmeares).
"""
| 57.294118 | 292 | 0.647502 |
a54e64b95c67ef2ea40471b8b49ce1a8e5671cf2 | 1,065 | py | Python | insights/parsers/tests/test_kpatch_patches.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | null | null | null | insights/parsers/tests/test_kpatch_patches.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | null | null | null | insights/parsers/tests/test_kpatch_patches.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | null | null | null | from insights.parsers import kpatch_patches
from insights.tests import context_wrap
from insights.core.plugins import ContentException
import pytest
# Sample `ls /var/lib/kpatch/<kver>` output covering the filename shapes a
# parser must accept: plain and xz-compressed modules (.ko / .ko.xz), with
# underscores and dashes in the module name.
ASSORTED_KPATCHES = """
asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko
asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko.xz
foo-bar.ko
foo-bar.ko.xz
foo.ko
foo.ko.xz
test_klp_callbacks_demo.ko
test_klp_callbacks_demo.ko.xz
""".strip()
# `ls` error output for a kernel that has no kpatch directory at all.
NO_KPATCH = """
/bin/ls: cannot access '/var/lib/kpatch/4.18.0-147.8.el8.x86_64': No such file or directory
""".strip()
# Try a bunch of random potential patch names
# Compare to expected module names
# Try the case of no patches installed
| 27.307692 | 91 | 0.753991 |
a54eed00dc082ef6adf720e7a6dc2ace18221748 | 127 | py | Python | tests/ev3dev/brick/battery.py | GianCann/pybricks-micropython | f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a | [
"MIT"
] | null | null | null | tests/ev3dev/brick/battery.py | GianCann/pybricks-micropython | f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a | [
"MIT"
] | null | null | null | tests/ev3dev/brick/battery.py | GianCann/pybricks-micropython | f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a | [
"MIT"
] | null | null | null | from pybricks.hubs import EV3Brick
# Print the EV3 brick's battery readings; the trailing comments show typical
# values (presumably millivolts / milliamps per the pybricks API -- confirm).
ev3 = EV3Brick()
print(ev3.battery.voltage()) # 7400
print(ev3.battery.current()) # 180
| 18.142857 | 36 | 0.724409 |
a5504cacd4d378cc9aecf50aa2070a23b003b4f8 | 3,878 | py | Python | app/service/messages/dispatcher.py | ryan4yin/flypy-backend | 7fcc2971ac27d3b44e352dfed73acd12e1913d65 | [
"MIT"
] | 6 | 2019-03-14T02:39:17.000Z | 2021-10-31T11:43:58.000Z | app/service/messages/dispatcher.py | ryan4yin/flypy-backend | 7fcc2971ac27d3b44e352dfed73acd12e1913d65 | [
"MIT"
] | null | null | null | app/service/messages/dispatcher.py | ryan4yin/flypy-backend | 7fcc2971ac27d3b44e352dfed73acd12e1913d65 | [
"MIT"
] | 2 | 2020-02-04T07:44:37.000Z | 2021-04-02T23:02:20.000Z | # -*- coding: utf-8 -*-
import copy
import logging
from operator import attrgetter
from typing import Dict
from app.service.messages.handler import Handler
logger = logging.getLogger(__name__)
| 34.318584 | 90 | 0.566787 |
a55166529d4d734a528fe78b010050a25360e8b0 | 1,647 | py | Python | StyleTransferTensorFlow/style.py | LordHarsh/Neural_Style_Transfer | a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4 | [
"MIT"
] | 1 | 2021-01-31T06:41:29.000Z | 2021-01-31T06:41:29.000Z | build/lib/StyleTransferTensorFlow/style.py | LordHarsh/Neural_Style_Transfer | a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4 | [
"MIT"
] | null | null | null | build/lib/StyleTransferTensorFlow/style.py | LordHarsh/Neural_Style_Transfer | a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from pytube import YouTube
import os
import cv2
from PIL import Image
import shutil
import glob
import ffmpy
if __name__ == "__main__":
import sys
transfer(sys.argv[1], sys.argv[2]) | 37.431818 | 148 | 0.757134 |
a551e5731106adef0abaef205055eb2d9ca12152 | 15,493 | py | Python | bfs/bfs.py | NordFk/bfs-soap-api-wrapper | f149e33db9a19f325e3ae335bb6682e15b667e6a | [
"Apache-2.0"
] | 2 | 2021-11-20T14:16:56.000Z | 2021-12-15T10:33:01.000Z | bfs/bfs.py | NordFk/bfs-soap-api-wrapper | f149e33db9a19f325e3ae335bb6682e15b667e6a | [
"Apache-2.0"
] | null | null | null | bfs/bfs.py | NordFk/bfs-soap-api-wrapper | f149e33db9a19f325e3ae335bb6682e15b667e6a | [
"Apache-2.0"
] | 2 | 2021-11-20T16:49:38.000Z | 2021-11-20T21:26:16.000Z | from collections import OrderedDict
from zeep import Client
from zeep import xsd
import zeep.helpers
import zeep.exceptions
import logging.config
import re
from .constants import methods
| 40.033592 | 128 | 0.606274 |
a554983edfe142d8b785a94b5027ce1bfbe95b20 | 1,370 | py | Python | booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py | 7552-2020C2-grupo5/bookings-microservice | 92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161 | [
"Apache-2.0"
] | null | null | null | booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py | 7552-2020C2-grupo5/bookings-microservice | 92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161 | [
"Apache-2.0"
] | null | null | null | booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py | 7552-2020C2-grupo5/bookings-microservice | 92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161 | [
"Apache-2.0"
] | null | null | null | """booking_status
Revision ID: 7eb209b7ab1e
Revises: 0a95c6679356
Create Date: 2021-02-22 01:19:10.744915
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from booking_microservice.constants import BookingStatus
# revision identifiers, used by Alembic.
revision = '7eb209b7ab1e'  # this migration's id
down_revision = '0a95c6679356'  # migration this one applies on top of
branch_labels = None
depends_on = None
| 24.909091 | 67 | 0.642336 |
a555224273d739957311d97daec8970ec07b9037 | 669 | py | Python | cookbookex/c01/3.2.3.py | fengchunhui/cookbookex | 0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79 | [
"Apache-2.0"
] | null | null | null | cookbookex/c01/3.2.3.py | fengchunhui/cookbookex | 0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79 | [
"Apache-2.0"
] | null | null | null | cookbookex/c01/3.2.3.py | fengchunhui/cookbookex | 0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79 | [
"Apache-2.0"
] | null | null | null | records = [('foo', 1, 2), ('bar', 'hello'), ('foo', 3, 4)]
for tag, *args in records:
if tag == 'foo':
do_foo(*args)
elif tag == 'bar':
do_bar(*args)#
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/user/bin/flase'
uname, *fields, homedir, sh = line.split(':')
print(uname)
print(fields)
print(homedir)
print(sh)
record = ('ACME', 50, 123.45, (12, 18, 2017))
name, *_, (*_, year) = record
print(name)
print(year)
items = [1, 10, 7, 4, 5, 9]
print(sum(items))#
| 19.114286 | 68 | 0.578475 |
a5559dfa11b05a0a8b6fa50b10ff68e791bc3b1c | 4,577 | py | Python | mobilenet_v3_configs.py | 1e100/mobilenet_v3 | 4c5058db6960741e849294c7701e7fddfa241a15 | [
"BSD-3-Clause"
] | 8 | 2019-09-25T08:41:27.000Z | 2020-07-26T12:49:41.000Z | mobilenet_v3_configs.py | 1e100/mobilenet_v3 | 4c5058db6960741e849294c7701e7fddfa241a15 | [
"BSD-3-Clause"
] | null | null | null | mobilenet_v3_configs.py | 1e100/mobilenet_v3 | 4c5058db6960741e849294c7701e7fddfa241a15 | [
"BSD-3-Clause"
] | 4 | 2019-10-04T21:32:56.000Z | 2020-08-05T12:36:52.000Z | """ Configurations shared between PyTorch and Keras. """
CONFIG = {
"large": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 160, 5, 2, 1, 0.25, "hardswish"],
[160, 960, 160, 5, 1, 1, 0.25, "hardswish"],
[160, 960, 160, 5, 1, 1, 0.25, "hardswish"],
],
"small": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 96, 5, 2, 1, 0.25, "hardswish"],
[96, 576, 96, 5, 1, 1, 0.25, "hardswish"],
[96, 576, 96, 5, 1, 1, 0.25, "hardswish"],
],
"large_detection": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 80, 5, 2, 1, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 1, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 1, 0.25, "hardswish"],
],
"small_detection": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 2, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 1, 0.25, "hardswish"],
],
# Stride 16, last 3 blocks dilated by 2.
"large_segmentation": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 80, 5, 1, 2, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 2, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 2, 0.25, "hardswish"],
],
# Stride 16, last 3 blocks dilated by 2.
"small_segmentation": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
],
}
| 44.436893 | 66 | 0.444614 |
a555c2aabfb2fed9428a296a73e22048b9b84d87 | 14,288 | py | Python | rotkehlchen/exchanges/iconomi.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 137 | 2018-03-05T11:53:29.000Z | 2019-11-03T16:38:42.000Z | rotkehlchen/exchanges/iconomi.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 385 | 2018-03-08T12:43:41.000Z | 2019-11-10T09:15:36.000Z | rotkehlchen/exchanges/iconomi.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 59 | 2018-03-08T10:08:27.000Z | 2019-10-26T11:30:44.000Z | import base64
import hashlib
import hmac
import json
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_ICONOMI_ASSETS, asset_from_iconomi
from rotkehlchen.constants import ZERO
from rotkehlchen.constants.assets import A_AUST
from rotkehlchen.errors.asset import UnknownAsset, UnsupportedAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Location,
MarginPosition,
Price,
Trade,
TradeType,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeQueryBalances
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_fee,
deserialize_fval,
)
from rotkehlchen.types import ApiKey, ApiSecret, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_iconomi(raw_trade: Dict) -> Trade:
    """Turn an iconomi trade entry to our own trade format

    May raise:
    - UnknownAsset
    - UnsupportedAsset
    - DeserializationError
    - KeyError
    """
    timestamp = raw_trade['timestamp']

    if raw_trade['type'] == 'buy_asset':
        trade_type = TradeType.BUY
        tx_asset = asset_from_iconomi(raw_trade['target_ticker'])
        tx_amount = deserialize_asset_amount(raw_trade['target_amount'])
        native_asset = asset_from_iconomi(raw_trade['source_ticker'])
        native_amount = deserialize_asset_amount(raw_trade['source_amount'])
    elif raw_trade['type'] == 'sell_asset':
        trade_type = TradeType.SELL
        tx_asset = asset_from_iconomi(raw_trade['source_ticker'])
        tx_amount = deserialize_asset_amount(raw_trade['source_amount'])
        native_asset = asset_from_iconomi(raw_trade['target_ticker'])
        native_amount = deserialize_asset_amount(raw_trade['target_amount'])
    else:
        # Previously an unrecognized type fell through both branches and
        # crashed with a NameError below; fail with the documented error.
        raise DeserializationError(
            f'unknown iconomi trade type {raw_trade["type"]}',
        )

    if tx_amount == ZERO:
        # Guard the rate division below against a malformed zero-amount entry
        raise DeserializationError('iconomi trade has a zero transaction amount')

    amount = tx_amount
    # Rate is the price of one unit of tx_asset expressed in native_asset
    rate = Price(native_amount / tx_amount)
    fee_amount = deserialize_fee(raw_trade['fee_amount'])
    fee_asset = asset_from_iconomi(raw_trade['fee_ticker'])

    return Trade(
        timestamp=timestamp,
        location=Location.ICONOMI,
        base_asset=tx_asset,
        quote_asset=native_asset,
        trade_type=trade_type,
        amount=amount,
        rate=rate,
        fee=fee_amount,
        fee_currency=fee_asset,
        link=str(raw_trade['transactionId']),
    )
| 35.542289 | 95 | 0.568309 |
a555df3734f412141194b24eeb0c8ebadcbf1a97 | 1,010 | py | Python | doc/conf.py | rerobots/cli | e97a20d8aa4b1f118272ded9675b6c0747db321f | [
"Apache-2.0"
] | 1 | 2021-04-17T22:51:42.000Z | 2021-04-17T22:51:42.000Z | doc/conf.py | rerobots/cli | e97a20d8aa4b1f118272ded9675b6c0747db321f | [
"Apache-2.0"
] | null | null | null | doc/conf.py | rerobots/cli | e97a20d8aa4b1f118272ded9675b6c0747db321f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# parts of this were originally generated by sphinx-quickstart on Thu Aug 31 17:31:36 2017.
import os.path
import sys
# Make the parent directory importable so autodoc can find the package.
sys.path.append(os.path.abspath('..'))
project = 'CLI'
copyright = '2021 rerobots, Inc | <a href="https://github.com/rerobots/cli">source code</a>'
author = 'rerobots, Inc.'
html_logo = '_static/logo.svg'
# Version strings intentionally left empty (not displayed in the docs).
version = ''
release = ''
language = None
extensions = ['sphinx.ext.autodoc']
# Use only the __init__ docstring for autoclass-documented classes.
autoclass_content = 'init'
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = []
templates_path = ['_templates']
pygments_style = 'sphinx'
# read more about customization of this style at
# http://alabaster.readthedocs.io/en/stable/customization.html
html_theme = 'alabaster'
html_sidebars = {
}
html_theme_options = {
    'show_powered_by': 'false'
}
# Prepare to build on hosts of https://readthedocs.org/
import os
if os.environ.get('READTHEDOCS', 'False') == 'True':
    import subprocess
    subprocess.check_call('./get-deps.sh')
| 21.489362 | 92 | 0.705941 |
a555e99a46c6efc7e9dda4b03dbc6e9937a3b54b | 620 | py | Python | pytorch-extension/pytorch_extension_official/cpp/perform_test.py | xdr940/utils | c4b7b1479956475a7feee90a723541904ec82306 | [
"MIT"
] | null | null | null | pytorch-extension/pytorch_extension_official/cpp/perform_test.py | xdr940/utils | c4b7b1479956475a7feee90a723541904ec82306 | [
"MIT"
] | null | null | null | pytorch-extension/pytorch_extension_official/cpp/perform_test.py | xdr940/utils | c4b7b1479956475a7feee90a723541904ec82306 | [
"MIT"
] | null | null | null | import time
from lltm.lltm import LLTM
import torch
batch_size = 16
input_features = 32
state_size = 128
X = torch.randn(batch_size, input_features)
h = torch.randn(batch_size, state_size)
C = torch.randn(batch_size, state_size)
rnn = LLTM(input_features, state_size)#net init
forward = 0
backward = 0
for _ in range(1000):
start = time.time()
new_h, new_C = rnn(X, (h, C))
forward += time.time() - start
start = time.time()
(new_h.sum() + new_C.sum()).backward()
backward += time.time() - start
print('Forward: {:.3f} us | Backward {:.3f} us'.format(forward * 1e6/1e3, backward * 1e6/1e3)) | 23.846154 | 94 | 0.675806 |
a55636a8a913811f2be1912dad1aedac22c6a849 | 1,980 | py | Python | helper/create_functions_table.py | Abhisheknishant/iteration_utilities | b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc | [
"Apache-2.0"
] | 72 | 2016-09-12T03:01:02.000Z | 2022-03-05T16:54:45.000Z | helper/create_functions_table.py | Abhisheknishant/iteration_utilities | b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc | [
"Apache-2.0"
] | 127 | 2016-09-14T02:07:33.000Z | 2022-03-19T13:17:32.000Z | helper/create_functions_table.py | Abhisheknishant/iteration_utilities | b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc | [
"Apache-2.0"
] | 11 | 2017-02-22T20:40:37.000Z | 2022-03-05T16:55:40.000Z | # Licensed under Apache License Version 2.0 - see LICENSE
"""This is a helper that prints the content of the function overview tables .
- docs/index.rst
- README.rst
Both contain a table of functions defined in iteration_utilities and
manually updating them is a pain. Therefore this file can be executed and the
contents can be copy pasted there. Just use::
>>> python helper/create_functions_table.py
Unfortunately the header lines of these tables have to be removed manually,
I haven't found a way to remove them programmatically using the
astropy.io.ascii.RST class.
It's actually important to call this helper from the main repo directory
so the file resolution works correctly.
"""
def _create_overview_table(repo_path, readme=False):
    """Print an RST table listing every documented function of the package.

    With ``readme=True`` the cells are readthedocs hyperlinks (for
    "Readme.rst"); otherwise they are ``:py:func:`` cross-references
    (for "docs/index.rst").

    Requires `astropy`!
    """
    from iteration_utilities import Iterable
    from astropy.table import Table
    from astropy.io.ascii import RST
    import pathlib

    # Every generated .rst stub corresponds to one public function.
    generated = pathlib.Path(repo_path) / 'docs' / 'generated'
    names = sorted((stub.name.split('.rst')[0] for stub in generated.glob('*.rst')),
                   key=str.lower)

    if readme:
        link_template = ('`{0} <https://iteration-utilities.readthedocs.io/'
                         'en/latest/generated/{0}.html>`_')
    else:
        link_template = ':py:func:`~iteration_utilities.{0}`'

    # Turn each name into a link and lay the links out four per table row,
    # padding the final row with empty cells.
    rows = (Iterable(names)
            .map(link_template.format)
            .grouper(4, fillvalue='')
            .as_list())

    print('\n'.join(RST().write(Table(rows=rows))))
if __name__ == '__main__':
    import pathlib
    # Must be run from the repository root so docs/generated resolves.
    repo_path = pathlib.Path.cwd()
    # Print the docs/index.rst style table first, then the README style one,
    # separated by blank lines so each can be copy-pasted independently.
    _create_overview_table(repo_path=repo_path, readme=False)
    print('\n\n\n')
    _create_overview_table(repo_path=repo_path, readme=True)
| 32.459016 | 77 | 0.685354 |
a55746c92e9741f67f15dac2983e811ae99e916b | 1,092 | py | Python | Python Scripts/lesson_49_tuples.py | jessequinn/udemy_python_complete | b97e657dea2a8680557949f01ac80d3230c82c41 | [
"MIT"
] | null | null | null | Python Scripts/lesson_49_tuples.py | jessequinn/udemy_python_complete | b97e657dea2a8680557949f01ac80d3230c82c41 | [
"MIT"
] | null | null | null | Python Scripts/lesson_49_tuples.py | jessequinn/udemy_python_complete | b97e657dea2a8680557949f01ac80d3230c82c41 | [
"MIT"
] | null | null | null | # Given the tuple below that represents the Imelda May album "More Mayhem", write
# code to print the album details, followed by a listing of all the tracks in the album.
#
# Indent the tracks by a single tab stop when printing them (remember that you can pass
# more than one item to the print function, separating them with a comma).
# imelda = "More Mayhem", "Imelda May", 2011, (
# (1, "Pulling the Rug"), (2, "Psycho"), (3, "Mayhem"), (4, "Kentish Town Waltz"))
#
# print(imelda)
#
# title, artist, year, tracks = imelda
# print(title)
# print(artist)
# print(year)
# for song in tracks:
# track, title = song
# print("\tTrack number {}, Title: {}".format(track, title))
#
imelda = "More Mayhem", "Imelda May", 2011, (
[(1, "Pulling the Rug"), (2, "Psycho"), (3, "Mayhem"), (4, "Kentish Town Waltz")])
print(imelda)
imelda[3].append((5, "All For You"))
title, artist, year, tracks = imelda
tracks.append((6, "Eternity"))
print(title)
print(artist)
print(year)
for song in tracks:
track, title = song
print("\tTrack number {}, Title: {}".format(track, title)) | 31.2 | 88 | 0.659341 |
a557d896bbb7713624a8d9ae1db240388f2eb7f7 | 1,785 | py | Python | MyWriter/testdragdrop.py | haha517/mywriter | 8ddd5ce3b2f31491480dee9beb7367c8d6182282 | [
"MIT"
] | null | null | null | MyWriter/testdragdrop.py | haha517/mywriter | 8ddd5ce3b2f31491480dee9beb7367c8d6182282 | [
"MIT"
] | null | null | null | MyWriter/testdragdrop.py | haha517/mywriter | 8ddd5ce3b2f31491480dee9beb7367c8d6182282 | [
"MIT"
] | null | null | null | import sys
import os
from PyQt4 import QtGui, QtCore
def main():
app = QtGui.QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_()
if __name__ == '__main__':
main()
| 28.790323 | 78 | 0.576471 |
a5582803ca69b47af8a599a971fe68204b6f9492 | 3,392 | py | Python | apps/ndn_demoapps_wldr.py | theuerse/emulation_lib | d9388202d7ec9283404f9ab4d2448ff19922b44f | [
"MIT"
] | 2 | 2018-12-11T10:02:06.000Z | 2019-04-01T10:39:09.000Z | apps/ndn_demoapps_wldr.py | theuerse/emulation_lib | d9388202d7ec9283404f9ab4d2448ff19922b44f | [
"MIT"
] | null | null | null | apps/ndn_demoapps_wldr.py | theuerse/emulation_lib | d9388202d7ec9283404f9ab4d2448ff19922b44f | [
"MIT"
] | null | null | null | import os
from .. import constants
from . import application
| 50.626866 | 163 | 0.645047 |
a55b04ba7921f1a3ec26bc5a38d932e27524c9ac | 1,918 | py | Python | catoclient/commands/scheduletasks.py | cloudsidekick/catoclient | 26907127e38d01f56959618263f4bf61e60784ee | [
"Apache-2.0"
] | 1 | 2017-08-31T03:26:50.000Z | 2017-08-31T03:26:50.000Z | catoclient/commands/scheduletasks.py | cloudsidekick/catoclient | 26907127e38d01f56959618263f4bf61e60784ee | [
"Apache-2.0"
] | null | null | null | catoclient/commands/scheduletasks.py | cloudsidekick/catoclient | 26907127e38d01f56959618263f4bf61e60784ee | [
"Apache-2.0"
] | null | null | null | #########################################################################
# Copyright 2011 Cloud Sidekick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
import catoclient.catocommand
from catoclient.param import Param
import json
| 39.142857 | 155 | 0.588634 |
a55cd95076293cb8d38f62d5a86be378db28011c | 7,589 | py | Python | highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py | ethz-asl/high_level_planning | 094a73e993a6a9924f6ed067dcdbee70d1ead80e | [
"BSD-3-Clause"
] | null | null | null | highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py | ethz-asl/high_level_planning | 094a73e993a6a9924f6ed067dcdbee70d1ead80e | [
"BSD-3-Clause"
] | null | null | null | highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py | ethz-asl/high_level_planning | 094a73e993a6a9924f6ed067dcdbee70d1ead80e | [
"BSD-3-Clause"
] | null | null | null | import pybullet as p
import numpy as np
from icecream import ic
from scipy.spatial.transform import Rotation as R
from highlevel_planning_py.tools.util import (
homogenous_trafo,
invert_hom_trafo,
pos_and_orient_from_hom_trafo,
SkillExecutionError,
)
| 34.339367 | 91 | 0.573593 |
a55d8714cd1710f5fc46c5b77d8879d3591e23b3 | 39 | py | Python | pynairus/actions/__init__.py | venairus/pynairus | 76227072aa0f0f98a36a3a04eb6a436473cfd9a6 | [
"MIT"
] | 2 | 2018-02-15T12:16:10.000Z | 2018-09-11T12:05:12.000Z | pynairus/actions/__init__.py | venairus/pynairus | 76227072aa0f0f98a36a3a04eb6a436473cfd9a6 | [
"MIT"
] | null | null | null | pynairus/actions/__init__.py | venairus/pynairus | 76227072aa0f0f98a36a3a04eb6a436473cfd9a6 | [
"MIT"
] | 1 | 2019-10-30T09:40:28.000Z | 2019-10-30T09:40:28.000Z | # coding: utf-8
"""Actions package"""
| 9.75 | 21 | 0.615385 |
a55f46928d283fccaf2605f89dce3d22df548a5c | 5,844 | py | Python | Eager/elk-experiment/appserver/service_time_analyzer.py | UCSB-CS-RACELab/eager-appscale | d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d | [
"Apache-2.0"
] | 3 | 2016-06-12T01:18:49.000Z | 2018-07-16T18:20:23.000Z | Eager/elk-experiment/appserver/service_time_analyzer.py | UCSB-CS-RACELab/eager-appscale | d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d | [
"Apache-2.0"
] | null | null | null | Eager/elk-experiment/appserver/service_time_analyzer.py | UCSB-CS-RACELab/eager-appscale | d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d | [
"Apache-2.0"
] | 1 | 2020-05-25T02:59:15.000Z | 2020-05-25T02:59:15.000Z | import argparse
import httplib
import json
import numpy
import re
import time
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analyzes execution time of cloud services.')
parser.add_argument('--server', '-s', dest='server', default='128.111.179.159')
parser.add_argument('--port', '-p', type=int, dest='port', default=9200)
parser.add_argument('--index', '-i', dest='index', default='appscale-internal')
parser.add_argument('--app', '-a', dest='app', default='watchtower')
parser.add_argument('--time_window', '-t', dest='time_window', default='1h')
parser.add_argument('--order', '-o', dest='order', action='store_true')
parser.add_argument('--filtered_services', '-fs', nargs='+', dest='filtered_services', default=[])
args = parser.parse_args()
time_window_ms = parse_time_delta(args.time_window)
requests = get_request_info(args.server, args.port, args.index, args.app, time_window_ms, args.filtered_services)
if requests:
print_output(requests, args.order)
else:
print 'No request information found'
| 41.15493 | 117 | 0.5936 |
a55fff704388ce5d543d33e5e01d893ff1080816 | 523 | py | Python | favorites/models.py | plegulluche/OPC-P11 | 9705d56bb77bb548495954c80af02d421dcbf3a2 | [
"Unlicense"
] | null | null | null | favorites/models.py | plegulluche/OPC-P11 | 9705d56bb77bb548495954c80af02d421dcbf3a2 | [
"Unlicense"
] | null | null | null | favorites/models.py | plegulluche/OPC-P11 | 9705d56bb77bb548495954c80af02d421dcbf3a2 | [
"Unlicense"
] | null | null | null | from django.db import models
from products.models import Product
from account.models import Account
| 29.055556 | 87 | 0.707457 |
a56370a9c455e4054cc211abc2c3f2c8a9e7a1f6 | 171 | py | Python | tests/const.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | [
"Apache-2.0"
] | 1 | 2021-05-20T01:34:28.000Z | 2021-05-20T01:34:28.000Z | tests/const.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | [
"Apache-2.0"
] | 44 | 2020-11-15T01:17:38.000Z | 2021-07-20T13:45:12.000Z | tests/const.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | [
"Apache-2.0"
] | 1 | 2021-07-17T16:56:03.000Z | 2021-07-17T16:56:03.000Z | from gmocoin.common.const import ConstMeta
| 21.375 | 42 | 0.77193 |
a56450c0dab785583a1aaf64015a7d73ea36fb2a | 2,443 | py | Python | pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 700 | 2018-11-17T15:56:51.000Z | 2022-03-30T22:53:17.000Z | pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 542 | 2018-11-17T13:39:42.000Z | 2022-03-31T11:24:00.000Z | pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py | Numerlor/site | e4cec0aeb2a791e622be8edd94fb4e82d150deab | [
"MIT"
] | 178 | 2018-11-21T09:06:56.000Z | 2022-03-31T07:43:28.000Z | # Generated by Django 2.2.5 on 2019-09-30 12:15
import logging
from django.db import migrations
from django.db.models import Q
log = logging.getLogger('nomination_migration')
def migrate_nominations_to_new_model(apps, schema_editor):
"""
Migrations nominations from the infraction model to the nomination model.
This migration works by replaying the nomination history in chronological order, adding and
ending nominations as we've recorded them.
"""
Infraction = apps.get_model('api', 'Infraction')
Nomination = apps.get_model('api', 'Nomination')
all_nominations = (
Q(reason__startswith="Helper nomination:") | Q(reason__startswith="Unwatched (talent-pool):")
)
for infraction in Infraction.objects.filter(all_nominations).order_by('inserted_at'):
if infraction.reason.startswith("Helper nomination:"):
if Nomination.objects.filter(user=infraction.user, active=True).exists():
log.error(
f"User `{infraction.user.id}` already has an active nomination, aborting."
)
continue
nomination = Nomination(
user=infraction.user,
inserted_at=infraction.inserted_at,
reason=infraction.reason[19:], # Strip "Helper nomination: " prefix
actor=infraction.actor,
active=True,
)
nomination.save()
infraction.delete()
elif infraction.reason.startswith("Unwatched (talent-pool):"):
if not Nomination.objects.filter(user=infraction.user, active=True).exists():
log.error(
f"User `{infraction.user.id}` has no active nomination, can't end it!"
)
continue
nomination = Nomination.objects.get(user=infraction.user, active=True)
nomination.end_reason = infraction.reason[25:] # Strip "Unwatched (talent-pool):"
nomination.ended_at = infraction.inserted_at
nomination.active = False
nomination.save()
infraction.delete()
else:
log.error(f"I don't understand this infraction: {infraction}")
| 37.584615 | 101 | 0.632419 |
a565f1cec83237287d55c5339e0a84e9756b4648 | 276 | py | Python | python_Ashwin-A-K/day_22.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | python_Ashwin-A-K/day_22.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | python_Ashwin-A-K/day_22.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import datetime
x = datetime.datetime.now()
print(x) # current date and time
print(x.year) # current year
print(x.strftime("%A")) # current day
y = datetime.datetime(2020, 5, 17) # set date
print(y)
print(y.strftime("%B")) # Month name, full version
| 21.230769 | 51 | 0.681159 |
a567756da5a12285509d78272354ba05926525ee | 587 | py | Python | DesignPatterns/Node.py | QuantumFractal/Python-Scripts | 9959af9fe835abd550365e98e4fc63e6b8357d1f | [
"MIT"
] | 2 | 2015-01-30T04:51:27.000Z | 2015-12-31T08:47:22.000Z | DesignPatterns/Node.py | QuantumFractal/Python-Scripts | 9959af9fe835abd550365e98e4fc63e6b8357d1f | [
"MIT"
] | null | null | null | DesignPatterns/Node.py | QuantumFractal/Python-Scripts | 9959af9fe835abd550365e98e4fc63e6b8357d1f | [
"MIT"
] | null | null | null |
n1 = Node(None)
n2 = Node(n1)
n3 = Node(n1)
n4 = Node(n2)
n5 = Node(n2)
n1.print_tree_below()
| 16.771429 | 38 | 0.654174 |
a5690fcf0124a35633cc811501e412b22c1aa270 | 60 | py | Python | tests/__init__.py | ziliac/pyrmx | 54f1c79f85f2142b8fb755f815642c4701e5a57b | [
"MIT"
] | null | null | null | tests/__init__.py | ziliac/pyrmx | 54f1c79f85f2142b8fb755f815642c4701e5a57b | [
"MIT"
] | null | null | null | tests/__init__.py | ziliac/pyrmx | 54f1c79f85f2142b8fb755f815642c4701e5a57b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for pyrmx."""
| 15 | 34 | 0.55 |
a569dd73bf4c737b5da9b60bab3083b5192099d3 | 5,151 | py | Python | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | weakest_link/game.py | jmattfong/weakest-link | c4dba2b51a7271b83d3cc14b1329836805019671 | [
"Apache-2.0"
] | null | null | null | from weakest_link.util import wait_for_choice, green, red, dollars, get_random_mean_word, starts_with_vowel, format_time
| 40.559055 | 165 | 0.628422 |
a56b8b89c70b03cbae514c630dd4557886c37a12 | 1,338 | py | Python | infiltrate/models/card/expedition.py | Qazzquimby/eternalCardEvaluator | ef8640ed819a89e5198f8aedf0861a29c57c5720 | [
"MIT"
] | 4 | 2019-04-08T09:30:10.000Z | 2020-09-15T19:25:30.000Z | infiltrate/models/card/expedition.py | Qazzquimby/eternalCardEvaluator | ef8640ed819a89e5198f8aedf0861a29c57c5720 | [
"MIT"
] | 19 | 2019-04-09T19:02:14.000Z | 2020-12-25T05:22:45.000Z | infiltrate/models/card/expedition.py | Qazzquimby/eternalCardEvaluator | ef8640ed819a89e5198f8aedf0861a29c57c5720 | [
"MIT"
] | null | null | null | import typing as t
import infiltrate.browsers as browsers
import infiltrate.eternal_warcy_cards_browser as ew_cards
import infiltrate.models.card as card_mod
from infiltrate import db
def update_is_in_expedition():
"""Sets the is_in_expedition column of the cards table
to match Eternal Warcry readings."""
card_mod.Card.query.update({"is_in_expedition": False})
expedition_card_ids = _get_expedition_card_ids()
for card_id in expedition_card_ids:
card_mod.Card.query.filter(
card_mod.Card.set_num == card_id.set_num,
card_mod.Card.card_num == card_id.card_num,
).update({"is_in_expedition": True})
db.session.commit()
if __name__ == "__main__":
result = _get_expedition_card_ids()
| 32.634146 | 74 | 0.750374 |
a56e7c7d3eb512b85fa07082bf02be47726e19fd | 8,373 | py | Python | attribution/authorship_pipeline/classifiers/BaseClassifier.py | yangzhou6666/authorship-detection | f28701dea256da70eb8ba216c2572e1975c99b54 | [
"MIT"
] | 14 | 2020-10-26T06:05:55.000Z | 2022-03-08T08:32:17.000Z | attribution/authorship_pipeline/classifiers/BaseClassifier.py | yangzhou6666/authorship-detection | f28701dea256da70eb8ba216c2572e1975c99b54 | [
"MIT"
] | 10 | 2020-02-29T16:55:20.000Z | 2021-11-06T10:40:32.000Z | attribution/authorship_pipeline/classifiers/BaseClassifier.py | yangzhou6666/authorship-detection | f28701dea256da70eb8ba216c2572e1975c99b54 | [
"MIT"
] | 4 | 2021-07-28T12:27:46.000Z | 2021-10-04T18:12:33.000Z | from collections import namedtuple
from math import ceil
from typing import Tuple, Dict, Union, List, Counter
import numpy as np
import pandas as pd
from classifiers.config import Config
from data_loading.PathMinerDataset import PathMinerDataset
from data_loading.PathMinerLoader import PathMinerLoader
from data_loading.PathMinerSnapshotLoader import PathMinerSnapshotLoader
from preprocessing.context_split import PickType, ContextSplit
from util import ProcessedFolder, ProcessedSnapshotFolder
ClassificationResult = namedtuple(
'ClassificationResult',
('accuracy', 'macro_precision', 'macro_recall', 'fold_ind')
)
def compute_classification_result(
true_labels: List, predicted_labels: List, fold_ind: Union[int, Tuple[int, int]]
) -> ClassificationResult:
"""
Compute metric values (accuracy, precision, recall), given the predictions.
:param true_labels: true authors
:param predicted_labels: model's predictions
:param fold_ind: index that is used to refer to the fold in cross-validation
:return: an instance of ClassificationResult that contains the computed metric values
"""
true_labels = np.array(true_labels, dtype=np.int)
predicted_labels = np.array(predicted_labels, dtype=np.int)
labels, counts = np.unique(true_labels, return_counts=True)
tp, fp, tn, fn = 0, 0, 0, 0
precisions = []
recalls = []
# print('===========')
# for true_label, predicted_label in zip(true_labels, predicted_labels):
# if true_label != predicted_label:
# print(f'true: {true_label} predicted: {predicted_label}')
# print('===========')
for label, count in zip(labels, counts):
true_positive = np.sum(np.logical_and(true_labels == label, predicted_labels == label))
false_positive = np.sum(np.logical_and(true_labels != label, predicted_labels == label))
true_negative = np.sum(np.logical_and(true_labels != label, predicted_labels != label))
false_negative = np.sum(np.logical_and(true_labels == label, predicted_labels != label))
tp += true_positive
fp += false_positive
tn += true_negative
fn += false_negative
precisions.append(tp / (tp + fp) if (tp + fp > 0) else 0.)
recalls.append(tp / (tp + fn))
return ClassificationResult(
accuracy=np.mean(true_labels == predicted_labels),
macro_precision=np.mean(precisions),
macro_recall=np.mean(recalls),
fold_ind=fold_ind
)
| 47.845714 | 128 | 0.651618 |
a56ef28284a9ee515302682dd5904409e87c4d93 | 4,965 | py | Python | irekua_rest_api/views/__init__.py | IslasGECI/irekua-rest-api | 35cf5153ed7f54d12ebad2ac07d472585f04e3e7 | [
"BSD-4-Clause"
] | null | null | null | irekua_rest_api/views/__init__.py | IslasGECI/irekua-rest-api | 35cf5153ed7f54d12ebad2ac07d472585f04e3e7 | [
"BSD-4-Clause"
] | 11 | 2020-03-28T18:51:50.000Z | 2022-01-13T01:47:40.000Z | irekua_rest_api/views/__init__.py | IslasGECI/irekua-rest-api | 35cf5153ed7f54d12ebad2ac07d472585f04e3e7 | [
"BSD-4-Clause"
] | 1 | 2021-05-06T19:38:14.000Z | 2021-05-06T19:38:14.000Z | # pylint: disable=C0301
from .annotations.annotation_tools import AnnotationToolViewSet
from .annotations.annotation_votes import AnnotationVoteViewSet
from .annotations.annotations import AnnotationViewSet
from .data_collections.collection_devices import CollectionDeviceViewSet
from .data_collections.collection_sites import CollectionSiteViewSet
from .data_collections.collection_users import CollectionUserViewSet
from .data_collections.data_collections import CollectionViewSet
from .data_collections.metacollections import MetaCollectionViewSet
from .data_collections.administrators import CollectionAdministratorViewSet
from .devices.device_brands import DeviceBrandViewSet
from .devices.devices import DeviceViewSet
from .devices.physical_devices import PhysicalDeviceViewSet
from .items.items import ItemViewSet
from .licences import LicenceViewSet
from .object_types.annotation_types import AnnotationTypeViewSet
from .object_types.data_collections.collection_administrators import CollectionTypeAdministratorViewSet
from .object_types.data_collections.collection_annotation_types import CollectionTypeAnnotationTypeViewSet
from .object_types.data_collections.collection_licence_types import CollectionTypeLicenceTypeViewSet
from .object_types.data_collections.collection_sampling_event_types import CollectionTypeSamplingEventTypeViewSet
from .object_types.data_collections.collection_site_types import CollectionTypeSiteTypeViewSet
from .object_types.data_collections.collection_event_types import CollectionTypeEventTypeViewSet
from .object_types.data_collections.collection_types import CollectionTypeViewSet
from .object_types.data_collections.collection_item_types import CollectionTypeItemTypeViewSet
from .object_types.data_collections.collection_device_types import CollectionTypeDeviceTypeViewSet
from .object_types.data_collections.collection_roles import CollectionTypeRoleViewSet
from .object_types.device_types import DeviceTypeViewSet
from .object_types.entailment_types import EntailmentTypeViewSet
from .object_types.event_types import EventTypeViewSet
from .object_types.item_types import ItemTypeViewSet
from .object_types.mime_types import MimeTypeViewSet
from .object_types.licence_types import LicenceTypeViewSet
from .object_types.sampling_events.sampling_event_type_device_types import SamplingEventTypeDeviceTypeViewSet
from .object_types.sampling_events.sampling_event_type_site_types import SamplingEventTypeSiteTypeViewSet
from .object_types.sampling_events.sampling_event_types import SamplingEventTypeViewSet
from .object_types.site_types import SiteTypeViewSet
from .object_types.term_types import TermTypeViewSet
from .sampling_events.sampling_event_devices import SamplingEventDeviceViewSet
from .sampling_events.sampling_events import SamplingEventViewSet
from .items.secondary_items import SecondaryItemViewSet
from .sites import SiteViewSet
from .items.tags import TagViewSet
from .terms.entailments import EntailmentViewSet
from .terms.synonym_suggestions import SynonymSuggestionViewSet
from .terms.synonyms import SynonymViewSet
from .terms.term_suggestions import TermSuggestionViewSet
from .terms.terms import TermViewSet
from .users.institutions import InstitutionViewSet
from .users.roles import RoleViewSet
from .users.users import UserViewSet
from .models.model import ModelViewSet
from .models.model_version import ModelVersionViewSet
from .models.model_prediction import ModelPredictionViewSet
__all__ = [
'AnnotationToolViewSet',
'AnnotationTypeViewSet',
'AnnotationViewSet',
'AnnotationVoteViewSet',
'CollectionDeviceViewSet',
'CollectionSiteViewSet',
'CollectionTypeAdministratorViewSet',
'CollectionTypeAnnotationTypeViewSet',
'CollectionTypeLicenceTypeViewSet',
'CollectionTypeSamplingEventTypeViewSet',
'CollectionTypeItemTypeViewSet',
'CollectionTypeSiteTypeViewSet',
'CollectionTypeEventTypeViewSet',
'CollectionTypeViewSet',
'CollectionUserViewSet',
'CollectionViewSet',
'DeviceBrandViewSet',
'DeviceTypeViewSet',
'DeviceViewSet',
'EntailmentTypeViewSet',
'EntailmentViewSet',
'EventTypeViewSet',
'InstitutionViewSet',
'ItemTypeViewSet',
'ItemViewSet',
'LicenceTypeViewSet',
'LicenceViewSet',
'MetaCollectionViewSet',
'PhysicalDeviceViewSet',
'RoleViewSet',
'SamplingEventDeviceViewSet',
'SamplingEventTypeDeviceTypeViewSet',
'SamplingEventTypeSiteTypeViewSet',
'SamplingEventTypeViewSet',
'SamplingEventViewSet',
'SecondaryItemViewSet',
'SiteTypeViewSet',
'SiteViewSet',
'SynonymSuggestionViewSet',
'SynonymViewSet',
'TagViewSet',
'TermSuggestionViewSet',
'TermTypeViewSet',
'TermViewSet',
'UserViewSet',
'CollectionTypeDeviceTypeViewSet',
'CollectionTypeRoleViewSet',
'CollectionAdministratorViewSet',
'MimeTypeViewSet',
'ModelViewSet',
'ModelVersionViewSet',
'ModelPredictionViewSet'
]
| 45.136364 | 113 | 0.848338 |
a5722597309534c03d51a4f6182a1dcb0d277f2d | 2,765 | py | Python | acurite/AcuriteManager.py | jamespauly/udi-acurite-poly | 8c4866c3b18cf1c27f37ead392e732aa49e1bc07 | [
"MIT"
] | null | null | null | acurite/AcuriteManager.py | jamespauly/udi-acurite-poly | 8c4866c3b18cf1c27f37ead392e732aa49e1bc07 | [
"MIT"
] | null | null | null | acurite/AcuriteManager.py | jamespauly/udi-acurite-poly | 8c4866c3b18cf1c27f37ead392e732aa49e1bc07 | [
"MIT"
] | null | null | null | from datetime import datetime, timezone
import udi_interface
import requests
import json
from enums import BatteryLevel, DeviceStatus
from nodes import AcuriteDeviceNode
LOGGER = udi_interface.LOGGER
Custom = udi_interface.Custom
| 37.876712 | 120 | 0.58264 |
a5724709634797eaf22a9f27d89e8c87596f3423 | 115 | py | Python | recvCases/conf.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | recvCases/conf.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | recvCases/conf.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | ErrorMsg = {'BadZipFile' : 'Uploaded zip file is bad',
'EmptyZipFile' : 'Uploaded zip file is empty',} | 57.5 | 59 | 0.626087 |
a5728dfb70546b1b777313ae3ec58c3a19425e50 | 5,650 | py | Python | DTL/db/models/graphicsmodels.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 1 | 2015-03-23T18:52:12.000Z | 2015-03-23T18:52:12.000Z | DTL/db/models/graphicsmodels.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | null | null | null | DTL/db/models/graphicsmodels.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 2 | 2017-05-21T12:50:41.000Z | 2021-10-17T03:32:45.000Z | from DTL.qt import QtCore, QtGui
from DTL.qt.QtCore import Qt
#------------------------------------------------------------
#------------------------------------------------------------
#------------------------------------------------------------
#------------------------------------------------------------
| 37.417219 | 94 | 0.420354 |
a5734519608276ff9f8fee5a5bd77871ef93780f | 4,461 | py | Python | tests/test_renderers.py | adamchainz/classy-django-rest-framework | 19f57d88d13f5ddd2ee33a3239c51e97829e5e6f | [
"MIT"
] | null | null | null | tests/test_renderers.py | adamchainz/classy-django-rest-framework | 19f57d88d13f5ddd2ee33a3239c51e97829e5e6f | [
"MIT"
] | null | null | null | tests/test_renderers.py | adamchainz/classy-django-rest-framework | 19f57d88d13f5ddd2ee33a3239c51e97829e5e6f | [
"MIT"
] | null | null | null | import unittest
from mock import mock_open, patch
from rest_framework.generics import ListAPIView
from rest_framework_ccbv.renderers import (
BasePageRenderer, IndexPageRenderer, LandPageRenderer, ErrorPageRenderer,
SitemapRenderer, DetailPageRenderer,
)
from rest_framework_ccbv.config import VERSION
from rest_framework_ccbv.inspector import Attributes
KLASS_FILE_CONTENT = (
'{"2.2": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]},'
'"%s": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]}}' % VERSION
)
| 44.61 | 101 | 0.726743 |
a57349956429b4d3071a79222d869b969895aec7 | 1,320 | py | Python | emailpal/tests/test_views.py | 18F/django-email-pal | 7471342741d814d19713d4353a3f566e490177a4 | [
"CC0-1.0"
] | 5 | 2017-05-25T00:51:55.000Z | 2020-06-13T16:37:42.000Z | emailpal/tests/test_views.py | 18F/django-email-pal | 7471342741d814d19713d4353a3f566e490177a4 | [
"CC0-1.0"
] | 30 | 2017-05-25T00:41:45.000Z | 2017-09-15T23:27:45.000Z | emailpal/tests/test_views.py | 18F/django-email-pal | 7471342741d814d19713d4353a3f566e490177a4 | [
"CC0-1.0"
] | 2 | 2017-05-25T17:30:30.000Z | 2021-02-14T11:32:33.000Z | import pytest
from django.conf.urls import include, url
from django.test import Client, override_settings
from .util import all_template_engines
from .test_sendable_email import MY_SENDABLE_EMAIL
urlpatterns = [
url(r'^examples/', include('emailpal.urls')),
]
def test_invalid_example_raises_404(client):
response = client.get('/examples/blarg.html')
assert response.status_code == 404
def test_valid_html_example_works(client):
response = client.get('/examples/{}.html'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am HTML' in response.content.decode('utf-8')
def test_valid_plaintext_example_works(client):
response = client.get('/examples/{}.txt'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am plaintext' in response.content.decode('utf-8')
| 31.428571 | 72 | 0.731818 |
a5738d0cf40642a74790a9e4436c0e8da30c46ce | 259 | py | Python | db_tools/data/user_data.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | db_tools/data/user_data.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | db_tools/data/user_data.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | from common.utils import crypt_utils
from bright import settings
row_data = [
{
'username': 'admin',
'password': crypt_utils.md5('admin', settings.APP_SALT),
'cellphone': '13966660426',
'email': 'admin@wesoft.com'
}
]
| 21.583333 | 64 | 0.6139 |
a57546dcf10db7ae680036449e4ff2d0de0b36d3 | 2,328 | py | Python | employee-management/app.py | desitomato/flask-docker | 03dadddfbda478180554f3364e91af41b72dce87 | [
"MIT"
] | null | null | null | employee-management/app.py | desitomato/flask-docker | 03dadddfbda478180554f3364e91af41b72dce87 | [
"MIT"
] | null | null | null | employee-management/app.py | desitomato/flask-docker | 03dadddfbda478180554f3364e91af41b72dce87 | [
"MIT"
] | null | null | null | import os
from flask import Flask, request, jsonify
from flask_restful import Api
from resources.company import Company, Companylist
from resources.employee import Employee, EmployeeList
from db import db
from resources.user import UserRegister, UserLogin, UserLogout
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'prateek'
api = Api(app)
api.add_resource(Company, '/company/<string:name>')
api.add_resource(Companylist, '/company')
api.add_resource(Employee, '/employee/<string:name>')
api.add_resource(EmployeeList, '/employee')
api.add_resource(UserRegister, '/register')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogout, '/logout/<string:username>')
if __name__ == '__main__':
db.init_app(app)
app.run(port=5000, debug=True)
#API's without flask_restful
""" companies = [{
'name': 'samsung',
'employees': [{
'name':'prateek',
'salary':10000
}]
}]
@app.route('/company', methods=['POST'])
def create_company():
request_data = request.get_json()
new_company = {'name': request_data['name'],
'employees': []
}
companies.append(new_company)
return jsonify(new_company), 201
@app.route('/company/<string:name>')
def get_company(name):
for company in companies:
if company['name'] == name:
return jsonify(company), 200
@app.route('/company')
def get_company_list():
return jsonify(companies), 200
@app.route('/company/<string:name>/employee', methods=['POST'])
def create_employee_in_company(name):
request_data = request.get_json()
print(request_data)
for company in companies:
if company['name'] == name:
new_employee = {
'name' : request_data['name'],
'salary': request_data['salary']
}
company['employees'].append(new_employee)
return jsonify(new_employee), 201
@app.route('/company/<string:name>/employee')
def get_employee_in_company(name):
for company in companies:
if company['name'] == name:
return jsonify(company['employees']), 200 """
| 25.582418 | 91 | 0.668814 |
a57794912fc7fe375ace5545bb257b022affaf27 | 1,381 | py | Python | workon/contrib/security/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/contrib/security/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/contrib/security/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
from django.db import models
from django.conf import settings | 44.548387 | 107 | 0.735699 |
a57859ecd89b9b31c6238458c1c3953448a728df | 1,234 | py | Python | leetcode/31.py | sputnikW/algorithm | 2c9412d7fc4fdb7f71c31ee3310833014272f0c9 | [
"MIT"
] | null | null | null | leetcode/31.py | sputnikW/algorithm | 2c9412d7fc4fdb7f71c31ee3310833014272f0c9 | [
"MIT"
] | null | null | null | leetcode/31.py | sputnikW/algorithm | 2c9412d7fc4fdb7f71c31ee3310833014272f0c9 | [
"MIT"
] | null | null | null |
"""
T=O(N)
""" | 30.097561 | 80 | 0.423015 |
a57be98b089324586aa986fd832a393072298d21 | 2,412 | py | Python | leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py | sumeet2605/CRM | 1c9a740ef052d0e51b2689dd3e1666ff4673db98 | [
"MIT"
] | null | null | null | leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py | sumeet2605/CRM | 1c9a740ef052d0e51b2689dd3e1666ff4673db98 | [
"MIT"
] | null | null | null | leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py | sumeet2605/CRM | 1c9a740ef052d0e51b2689dd3e1666ff4673db98 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-02-06 09:46
from django.db import migrations, models
import leads.models
import rizcrm.storage_backends
| 43.071429 | 161 | 0.665837 |
a57e42b92567d730da83f49a7ddb9cffb40477e6 | 28,338 | py | Python | ipm.py | AVilezhaninov/STM32_IAR_ProjectManager | 906c34c70715d5ceec4937fb8d9705318017b3e9 | [
"MIT"
] | null | null | null | ipm.py | AVilezhaninov/STM32_IAR_ProjectManager | 906c34c70715d5ceec4937fb8d9705318017b3e9 | [
"MIT"
] | 4 | 2017-03-10T13:06:46.000Z | 2017-03-10T13:24:00.000Z | ipm.py | AVilezhaninov/STM32_IAR_ProjectManager | 906c34c70715d5ceec4937fb8d9705318017b3e9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# MIT License
# Copyright (c) 2017 Aleksey Vilezhaninov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import sys
import shutil
from lxml import etree
# ------------------------------------------------------------------------------
# Help messages ----------------------------------------------------------------
# ------------------------------------------------------------------------------
# Top-level usage text printed for an unknown command or bare "ipm".
# Fixes user-facing typos present in the original text
# ("standart" -> "standard", "ddd" -> "add", "extentions" -> "extensions",
# "devided" -> "divided").
MAIN_HELP_MESSAGE = '''
IPM - IAR Embedded Workbench project manager for STM32F M0, M3, M4, M7 MCU.
Program capabilities:
    - create new project with standard ST CMSIS files;
    - add folder struct to existing project;
    - clean EWARM workspace folder;
    - rename existing workspace and project;
usage: ipm <command> <args> [-h | --help]
commands:
    create              Create new project
    add_folder          Copy folder to project and add folder to project file
    clean               Clean workspace folder
    rename_workspace    Rename workspace
    rename_project      Rename project
    rename              Rename both workspace and project
For details use: ipm <command> -h
IPM v0.1 Copyright (c) 2017 Aleksey Vilezhaninov a.vilezhaninov@gmail.com
'''

# Usage text for the "create" command.
CREATE_HELP_MESSAGE = '''
Create new IAR EWARM project with specified name and device.
usage: ipm create <name> <device> [-h | --help]
parameters:
    -n, --name <name>       New project name
    -d, --device <device>   New project device
Device must be specified as in "CMSIS/Device/ST/STM32Fxxx/Include/stm32fxxx.h".
For usage - download IPM executable file, IPM "template" folder and
standard ST CMSIS folder in the same folder and run program.
'''

# Usage text for the "add_folder" command.
ADD_FOLDER_HELP_MESSAGE = '''
Copy folder to project source directory and add folder to project file.
usage: ipm add_folder <project_path> <folder_path> [ignore] [-h | --help]
parameters:
    -p, --project_path <path>   Project path
    -f, --folder_path <path>    Folder path
    -i, --ignore <ignore>       Ignore file extensions
For usage - just specify project path, folder to add path and ignore
extensions divided with "/" char (for example "-i c/h/cpp/icf/").
'''

# Usage text for the "clean" command.
CLEAN_HELP_MESSAGE = '''
Clean workspace folder - delete all files and folders except *.eww and *.ewp.
usage: ipm clean <workspace_path> [-h | --help]
parameters:
    -w, --workspace_path <path>     Workspace path
For usage - just specify workspace path.
'''

# Usage text for the "rename_workspace" command.
RENAME_WORKSPACE_HELP_MESSAGE = '''
Rename workspace with specified name.
usage: ipm rename_workspace <workspace_path> <name> [-h | --help]
parameters:
    -w, --workspace_path <path>     Workspace path
    -n, --name <name>               New workspace name
For usage - just specify workspace path and new workspace name.
'''

# Usage text for the "rename_project" command.
RENAME_PROJECT_HELP_MESSAGE = '''
Rename project with specified name.
usage: ipm rename_project <project_path> <workspace_path> <name> [-h | --help]
parameters:
    -p, --project_path <path>       Project path
    -w, --workspace_path <path>     Workspace path
    -n, --name <name>               New project name
For usage - just specify project path, workspace containing this project path
and new project name.
'''

# Usage text for the "rename" command (renames project and workspace together).
RENAME_HELP_MESSAGE = '''
Rename both workspace and project with specified name.
usage: ipm rename <project_path> <workspace_path> <name> [-h | --help]
parameters:
    -p, --project_path <path>       Project path
    -w, --workspace_path <path>     Workspace path
    -n, --name <name>               New project name
For usage - just specify project path, workspace containing this project path
and new project name.
'''
# ------------------------------------------------------------------------------
# Argparser configuration
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Create new IAR EWARM project with specified name and device
# ------------------------------------------------------------------------------
# Copy and rename EWARM workspace and project template files
# Copy CMSIS files in project CMSIS folder
# Change template lines in project file
# ------------------------------------------------------------------------------
# Copy folder to project source directory. Add folder in project file
# ------------------------------------------------------------------------------
# Parse foder and add subfolders and files in XML tree
# Append node in XML tree
# ------------------------------------------------------------------------------
# Clean workspace folder - delete all files and folders except *.eww and *.ewp
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Rename workspace with specified name
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Rename project with specified name
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Common functions
# ------------------------------------------------------------------------------
# Replace text in file
# Copy folder tree
# Make directory
# Copy file
# Decorate path to next template "folder/subfolder/file.xxx"
# Print message and exit
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Parse the command line once; each command branch below validates the
    # arguments it needs and prints its own help text when they are missing
    # (or when -h / --help was given).
    ns = CreateArgParser().parse_args()

    if ns.command == "create":
        # "create" needs both a project name and a target device.
        if ns.help or ns.name is None or ns.device is None:
            Exit(CREATE_HELP_MESSAGE)
        else:
            Create(ns.name, ns.device)

    elif ns.command == "add_folder":
        # "add_folder" needs a project path and a folder path; the list of
        # ignored extensions is optional and passed through as-is.
        if ns.help or ns.project_path is None or ns.folder_path is None:
            Exit(ADD_FOLDER_HELP_MESSAGE)
        else:
            AddFolder(ns.project_path, ns.folder_path, ns.ignore)

    elif ns.command == "clean":
        # "clean" only needs the workspace path.
        if ns.help or ns.workspace_path is None:
            Exit(CLEAN_HELP_MESSAGE)
        else:
            Clean(ns.workspace_path)

    elif ns.command == "rename_workspace":
        if ns.help or ns.workspace_path is None or ns.name is None:
            Exit(RENAME_WORKSPACE_HELP_MESSAGE)
        else:
            RenameWorkspace(ns.workspace_path, ns.name)

    elif ns.command == "rename_project":
        if (ns.help or ns.project_path is None or
                ns.workspace_path is None or ns.name is None):
            Exit(RENAME_PROJECT_HELP_MESSAGE)
        else:
            RenameProject(ns.project_path, ns.workspace_path, ns.name)

    elif ns.command == "rename":
        # "rename" applies both operations: project first, then workspace
        # (same order as the standalone commands would be run).
        if (ns.help or ns.project_path is None or
                ns.workspace_path is None or ns.name is None):
            Exit(RENAME_HELP_MESSAGE)
        else:
            RenameProject(ns.project_path, ns.workspace_path, ns.name)
            RenameWorkspace(ns.workspace_path, ns.name)

    else:
        # Unknown or missing command: show the general usage text.
        Exit(MAIN_HELP_MESSAGE)
| 38.979367 | 80 | 0.571071 |