hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9440e6319530ec77d36ca44c2d10c5d28d16894 | 3,227 | py | Python | pullintradayprices.py | rtstock/rtstock4 | 040b3409cfb022767dde467578f359210a689512 | [
"MIT"
] | null | null | null | pullintradayprices.py | rtstock/rtstock4 | 040b3409cfb022767dde467578f359210a689512 | [
"MIT"
] | null | null | null | pullintradayprices.py | rtstock/rtstock4 | 040b3409cfb022767dde467578f359210a689512 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Retrieve intraday stock data from Google Finance.
"""
import csv
import datetime
import re
import pandas as pd
import requests
def intradaystockprices(ticker, period=60, days=1):
"""
Retrieve intraday stock data from Google Finance.
Parameters
----------
ticker : str
Company ticker symbol.
period : int
Interval between stock values in seconds.
days : int
Number of days of data to retrieve.
Returns
-------
df : pandas.DataFrame
DataFrame containing the opening price, high price, low price,
closing price, and volume. The index contains the times associated with
the retrieved price values.
"""
#import pytz
#localtz = pytz.timezone('America/Los_Angeles')
uri = 'https://finance.google.com/finance/getprices?q={ticker}&x=&p={days}d&i={period}&f=d,c,o,h,l,v'.format(
ticker=ticker,
period=str(period),
days=str(days)
)
## uri = 'http://www.google.com/finance/getprices?i={period}&p={days}d&f=d,o,h,l,c,v&df=cpct&q={ticker}'.format(
## ticker=ticker,
## period=period,
## days=days
## )
#uri= 'http://www.google.com/finance/getprices?q=GOOG&x=NASD&i=86400&p=40Y&f=d,c,v,k,o,h,l&df=cpct&auto=0&ei=Ef6XUYDfCqSTiAKEMg'
#uri= 'http://www.google.com/finance/getprices?q=MSFT&x=&i=86400&p=3d&f=d,c,v,k,o,h,l&df=cpct&auto=0&ei=Ef6XUYDfCqSTiAKEMg'
#uri = 'https://finance.google.com/finance/getprices?q=BX&x=&p=1d&i=60&f=d,c,o,h,l,v'
page = requests.get(uri)
#print uri
reader = csv.reader(page.content.splitlines())
columns = ['Open', 'High', 'Low', 'Close', 'Volume']
rows = []
times = []
for row in reader:
#print row
if re.match('^[a\d]', row[0]):
if row[0].startswith('a'):
start = datetime.datetime.fromtimestamp(int(row[0][1:]))
times.append(start)
else:
times.append(start+datetime.timedelta(seconds=period*int(row[0])))
rows.append(map(float, row[1:]))
if len(rows):
df_final = pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'),columns=columns)
#return pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'),columns=columns)
else:
df_final = pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'))
#return pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'))
df_final['Ticker']=ticker
df_final.sort_index(inplace=True)
return df_final
if __name__=='__main__':
df = intradaystockprices(ticker='BX',period=60, days=1)
print df
| 40.3375 | 132 | 0.515029 |
a946ab769d869df40935f6c4d6219757e390f7ee | 1,750 | py | Python | auto/lookup.py | ggicci/fuck-leetcode | 45b488530b9dbcc8b7c0b90160ea45b1ab4f8475 | [
"MIT"
] | null | null | null | auto/lookup.py | ggicci/fuck-leetcode | 45b488530b9dbcc8b7c0b90160ea45b1ab4f8475 | [
"MIT"
] | null | null | null | auto/lookup.py | ggicci/fuck-leetcode | 45b488530b9dbcc8b7c0b90160ea45b1ab4f8475 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import json
from argparse import ArgumentParser
ROOT = os.path.dirname(os.path.abspath(__file__))
DB_FILE = os.path.join(ROOT, 'problems.json')
def parse_args():
"""Parse CLI tool options.
"""
parser = ArgumentParser()
parser.add_argument('problem_id', type=int)
parser.add_argument('--field', type=str, help='extract field value')
parser.add_argument('--markdown',
type=bool,
default=False,
help='print markdown content')
parser.add_argument('--context',
type=str,
help='additional context to lookup')
return parser.parse_args()
if __name__ == '__main__':
main()
| 25 | 74 | 0.582286 |
a9470a504b0eced5d1fe21002e68de978c63f971 | 6,789 | py | Python | src/application/dungeon.py | meteoric-minks/code-jam | b094350176e54d873a04a483dc37d70533013c37 | [
"MIT"
] | 1 | 2021-07-09T14:41:12.000Z | 2021-07-09T14:41:12.000Z | src/application/dungeon.py | meteoric-minks/code-jam | b094350176e54d873a04a483dc37d70533013c37 | [
"MIT"
] | null | null | null | src/application/dungeon.py | meteoric-minks/code-jam | b094350176e54d873a04a483dc37d70533013c37 | [
"MIT"
] | null | null | null | from __future__ import annotations # Fixes an issue with some annotations
from .ascii_box import Light, LineChar
from .ascii_drawing import DrawingChar
| 32.328571 | 112 | 0.531153 |
a947b10cb0870e9e229f94e7dbdc49713a33eb91 | 1,216 | py | Python | ntc103f397/ntc103f397.py | hhk7734/avr_proj | cb0c5c53af7eb8a0924f8c483a1a010be4b92636 | [
"MIT"
] | null | null | null | ntc103f397/ntc103f397.py | hhk7734/avr_proj | cb0c5c53af7eb8a0924f8c483a1a010be4b92636 | [
"MIT"
] | null | null | null | ntc103f397/ntc103f397.py | hhk7734/avr_proj | cb0c5c53af7eb8a0924f8c483a1a010be4b92636 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# NTC103F397F datasheet
d_T = np.array(np.arange(-40, 121, 5))
d_R = np.array([333.110, 240.704, 175.794, 129.707, 96.646, 72.691, 55.169, 42.234, 32.6,
25.364, 19.886, 15.705, 12.490, 10.0, 8.0584, 6.5341, 5.3297, 4.3722,
3.6065, 2.9906, 2.4925, 2.0875, 1.7565, 1.4848, 1.2605, 1.0746, 0.91983,
0.79042, 0.68178, 0.59020, 0.51271, 0.44690, 0.39080])
d_B = 3970
d_T0 = 273.15 + 25
d_R0 = 10
# B parameter equation
b_T = 1/d_T0 - (1/d_B)*np.log(d_R0) + (1/d_B)*np.log(d_R)
# SteinhartHart equation
s_T = b_T + 0.000000254 * (np.log(d_R)**3)
s_T = 1/s_T - 273.15
b_T = 1/b_T - 273.15
# B, SH
plt.figure(1)
plt.plot(d_T, d_R, label="datasheet", marker='*')
plt.plot(b_T, d_R, label="B equ")
plt.plot(s_T, d_R, label="SH equ")
plt.yscale('log')
plt.grid()
plt.legend()
plt.xlabel(r"$\degree C$")
plt.ylabel(r"$k\Omega$")
# find optimal resistan
plt.figure(2)
for R in [3, 5, 10, 20]:
T_v = d_R*5/(R+d_R)
plt.plot(d_T, T_v, label=r"{0} $k\Omega$".format(R))
plt.xticks(np.arange(-40, 121, 10))
plt.yticks(np.arange(0, 5.1, 0.2))
plt.grid()
plt.xlabel(r"$\degree C$")
plt.ylabel("V")
plt.legend()
plt.show()
| 26.434783 | 89 | 0.613487 |
a94809d2a1b0b2d4efef4518fceb1a00c7233013 | 3,836 | py | Python | math2/graph/graphs.py | AussieSeaweed/math2 | 9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca | [
"MIT"
] | 2 | 2021-03-29T03:15:57.000Z | 2021-03-29T03:23:21.000Z | math2/graph/graphs.py | AussieSeaweed/math2 | 9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca | [
"MIT"
] | 1 | 2021-04-07T11:07:17.000Z | 2021-04-07T11:07:17.000Z | math2/graph/graphs.py | AussieSeaweed/math2 | 9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from collections import defaultdict
from functools import partial
from auxiliary import default
| 25.573333 | 79 | 0.574035 |
a9486ecd7a389e48b19a2604872a64308d6ffd50 | 216 | py | Python | anasyspythontools/__init__.py | quiksand/anasys-pytools | 50c21711c742d3201a72f9eab4986317b8590095 | [
"MIT"
] | 3 | 2019-11-05T16:44:45.000Z | 2020-07-27T17:02:02.000Z | anasyspythontools/__init__.py | quiksand/anasys-pytools | 50c21711c742d3201a72f9eab4986317b8590095 | [
"MIT"
] | 1 | 2018-05-30T15:44:02.000Z | 2018-05-30T17:05:53.000Z | anasyspythontools/__init__.py | quiksand/anasys-pytools | 50c21711c742d3201a72f9eab4986317b8590095 | [
"MIT"
] | 3 | 2019-02-21T13:11:27.000Z | 2022-02-21T17:27:32.000Z | from . import anasysfile
from . import anasysdoc
from . import heightmap
from . import irspectra
from . import anasysio
| 21.6 | 44 | 0.759259 |
a948b58d4adf86897d648d15d474fef3166794ec | 5,734 | py | Python | src/models/test_ensemble.py | nybupt/athena | 2808f5060831382e603e5dc5ec6a9e9d8901a3b2 | [
"MIT"
] | null | null | null | src/models/test_ensemble.py | nybupt/athena | 2808f5060831382e603e5dc5ec6a9e9d8901a3b2 | [
"MIT"
] | 8 | 2020-09-25T22:32:00.000Z | 2022-02-10T01:17:17.000Z | src/models/test_ensemble.py | nybupt/athena | 2808f5060831382e603e5dc5ec6a9e9d8901a3b2 | [
"MIT"
] | 1 | 2021-08-12T12:48:51.000Z | 2021-08-12T12:48:51.000Z | import os
import sys
import time
import numpy as np
from sklearn.metrics import accuracy_score
from utils.config import TRANSFORMATION
from utils.ensemble import load_models, prediction, ensemble_defenses_util
BSLabelFP=sys.argv[1]
samplesDir=sys.argv[2]
modelsDir=sys.argv[3]
AETypes = {
"biml2": ["bim_ord2_nbIter100_eps1000", "bim_ord2_nbIter100_eps250", "bim_ord2_nbIter100_eps500"],
"bimli":["bim_ordinf_nbIter100_eps100", "bim_ordinf_nbIter100_eps90", "bim_ordinf_nbIter100_eps75"],
"cwl2":["cw_l2_lr350_maxIter100", "cw_l2_lr500_maxIter100", "cw_l2_lr700_maxIter100"],
"dfl2":["deepfool_l2_overshoot20", "deepfool_l2_overshoot30", "deepfool_l2_overshoot50"],
"fgsm":["fgsm_eps100", "fgsm_eps250", "fgsm_eps300"],
"jsma":["jsma_theta30_gamma50", "jsma_theta50_gamma50", "jsma_theta50_gamma70"],
"mim":["mim_eps20_nbIter1000", "mim_eps30_nbIter1000", "mim_eps50_nbIter1000"],
"op":["onepixel_pxCount15_maxIter30_popsize100", "onepixel_pxCount30_maxIter30_popsize100", "onepixel_pxCount5_maxIter30_popsize100"],
"pgd":["pgd_eps250", "pgd_eps100", "pgd_eps300"]
}
sampleSubDirs=[
"legitimates"#, "fgsm"
#"biml2", "bimli", "cwl2", "dfl2"
#"fgsm", "jsma", "mim", "op", "pgd"
]
# (nSamples, <sample dimension>, nChannels)
# (nClasses)
trueLabelVec=np.load(BSLabelFP)
trueLabels = np.argmax(trueLabelVec, axis=1)
nClasses = trueLabelVec.shape[1]
EnsembleIDs=[0,1,2,3]
rows=0
cols=1+len(EnsembleIDs)
if "legitimates" in sampleSubDirs:
rows=1+3*(len(sampleSubDirs) - 1)
else:
rows=3*len(sampleSubDirs)
accs = np.zeros((rows, cols))
modelFilenamePrefix="mnist-cnn" # dataset name and network architecture
# include "clean" type: no transformation.
# transformationList[0] is "clean"
transformationList=TRANSFORMATION.supported_types()
# remove "clean" because the correspondingly model will not be used in ensemble
transformationList.remove("clean")
nTrans = len(transformationList)
transTCs_Prob = np.zeros((rows, nTrans))
transTCs_Logit = np.zeros((rows, nTrans))
predTCs_Prob = np.zeros((rows, nTrans))
predTCs_Logit = np.zeros((rows, nTrans))
ensembleTCs = np.zeros((rows, 5))
rowIdx=0
rowHeaders=[]
AEFilenamePrefix="test_AE-mnist-cnn-clean"
datasetFilePaths = []
for subDirName in sampleSubDirs:
if subDirName == "legitimates": # BS
datasetFilePaths.append(
os.path.join(os.path.join(samplesDir, subDirName), "test_BS-mnist-clean.npy"))
rowHeaders.append("BS")
else: # AE
AETags = AETypes[subDirName]
for AETag in AETags:
datasetFilePaths.append(
os.path.join(os.path.join(samplesDir, subDirName), AEFilenamePrefix+"-"+AETag+".npy"))
rowHeaders.append(AETag)
useLogit = False
print("Loading prob models")
models = load_models(modelsDir, modelFilenamePrefix, transformationList, convertToLogit=useLogit)
for datasetFilePath in datasetFilePaths:
accs[rowIdx, 0:4], transTCs_Prob[rowIdx], predTCs_Prob[rowIdx], ensembleTCs[rowIdx, 0:4] = testOneData(
datasetFilePath,
models,
nClasses,
transformationList,
EnsembleIDs,
trueLabels,
useLogit=useLogit
)
rowIdx+=1
del models
useLogit=True
print("Loading logit models")
logitModels = load_models(modelsDir, modelFilenamePrefix, transformationList, convertToLogit=useLogit)
rowIdx=0
for datasetFilePath in datasetFilePaths:
accs[rowIdx, 4], transTCs_Logit[rowIdx], predTCs_Logit[rowIdx], ensembleTCs[rowIdx, 4] = testOneData(
datasetFilePath,
logitModels,
nClasses,
transformationList,
EnsembleIDs,
trueLabels,
useLogit=useLogit
)
rowIdx+=1
del logitModels
np.save("acc_ensemble_test.npy", accs)
with open("acc_ensemble_test.txt", "w") as fp:
fp.write("Acc\tRD\tMV\tAVEP\tT2MV\tAVEL\n")
for ridx in range(len(rowHeaders)):
fp.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(
rowHeaders[ridx],
accs[ridx, 0],
accs[ridx, 1],
accs[ridx, 2],
accs[ridx, 3],
accs[ridx, 4]))
transTCs = (transTCs_Prob + transTCs_Logit)/2
np.save("transTCs.npy", transTCs)
np.save("predTCs_Prob.npy", predTCs_Prob)
np.save("predTCs_Logit.npy", predTCs_Logit)
np.save("ensembleTCs.npy", ensembleTCs)
| 33.144509 | 142 | 0.671085 |
a9497a37feb2dcdef1ff9d5ca11f00c665e15759 | 20,716 | py | Python | ems/views.py | abhi20161997/Apogee-2017 | e4ae1b379bd5111a3bd7d3399d081dda897a8566 | [
"BSD-3-Clause"
] | null | null | null | ems/views.py | abhi20161997/Apogee-2017 | e4ae1b379bd5111a3bd7d3399d081dda897a8566 | [
"BSD-3-Clause"
] | null | null | null | ems/views.py | abhi20161997/Apogee-2017 | e4ae1b379bd5111a3bd7d3399d081dda897a8566 | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from ems.models import Score, Level, Judge, Label, Team
from Event.models import Event
from registration.models import Participant
from django.contrib.auth.models import User
# Create your views here.
EMSADMINS = [
# have access to all events
'admin',
'controls',
'webmasterdvm',
'deepak'
]
def events_levels(request, eventid):
event = Event.objects.get(id=eventid)
levels = Level.objects.filter(event=event)
emsadmin = True if request.user.username in EMSADMINS else False
if request.method == 'POST':
if 'delete-level' in request.POST:
levelid = request.POST['delete-level']
level = Level.objects.get(id=levelid)
level.teams.clear()
level.delete()
if 'delete-judge' in request.POST:
judgeid = request.POST['delete-judge']
judge = Judge.objects.get(id=judgeid)
judge.level_set.clear()
judge.user.delete()
judge.delete()
if 'delete-label' in request.POST:
labelid = request.POST['delete-label']
label = Label.objects.get(id=labelid)
label.delete()
context = {
'event' : event,
'levels' : levels,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_levels.html', context)
def events_levels_add(request, eventid):
event = Event.objects.get(id=eventid)
levels = Level.objects.filter(event=event)
emsadmin = True if request.user.username in EMSADMINS else False
if request.method == 'POST':
if 'add' in request.POST:
name = request.POST['name']
position = int(request.POST['position'])
level = Level.objects.create(name=name, position=position, event=event)
if 'judgesheet' in request.POST:
labelid = request.POST['label']
label = Label.objects.get(id=labelid)
level.label = label
level.save()
judges = request.POST.getlist('judge')
for judgeid in judges:
judge = Judge.objects.get(id=judgeid)
level.judges.add(judge)
return redirect('ems:events_levels', event.id)
context = {
'event' : event,
'levels' : levels,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_levels_add.html', context)
def events_labels_add(request, eventid):
event = Event.objects.get(id=eventid)
levels = Level.objects.filter(event=event)
emsadmin = True if request.user.username in EMSADMINS else False
if request.method == 'POST':
if 'add' in request.POST:
names = request.POST.getlist("name")
maxvalues = request.POST.getlist("max")
label = Label(event=event)
for i, name in enumerate(names):
attr = "var" + str(i+1) + "name"
setattr(label, attr, name)
for i, maxvalue in enumerate(maxvalues):
attr = "var" + str(i+1) + "max"
setattr(label, attr, maxvalue)
label.save()
return redirect('ems:events_levels', event.id)
context = {
'event' : event,
'levels' : levels,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_labels_add.html', context)
def events_judges_add(request, eventid):
event = Event.objects.get(id=eventid)
levels = Level.objects.filter(event=event)
emsadmin = True if request.user.username in EMSADMINS else False
if request.method == 'POST':
if 'add' in request.POST:
name = request.POST['name']
username = request.POST['username']
password = request.POST['password']
try:
user = User.objects.create_user(username=username, password=password)
except:
return HttpResponse("Please use a different username. Press the back button to continue")
judge = Judge.objects.create(name=name, event=event, user=user)
judge.user = user
judge.save()
return redirect('ems:events_levels', event.id)
context = {
'event' : event,
'levels' : levels,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_judges_add.html', context)
def events_levels_edit(request, eventid, levelid):
emsadmin = True if request.user.username in EMSADMINS else False
event = Event.objects.get(id=eventid)
level = Level.objects.get(id=levelid)
if 'save' in request.POST:
name = request.POST['name']
position = int(request.POST['position'])
level.name = name
level.position = position
level.label = None
level.save()
level.judges.clear()
if 'judgesheet' in request.POST:
labelid = request.POST['label']
label = Label.objects.get(id=labelid)
level.label = label
level.save()
judges = request.POST.getlist('judge')
for judgeid in judges:
judge = Judge.objects.get(id=judgeid)
level.judges.add(judge)
return redirect('ems:events_levels', event.id)
if 'delete' in request.POST:
level.delete()
return redirect('ems:events_home', event.id)
context = {
'event' : event,
'level' : level,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_levels_edit.html', context)
def events_judge_home(request, eventid):
event = Event.objects.get(id=eventid)
context = {
'event' : event,
}
return render(request, 'ems/events_judge_home.html', context)
def events_judge_login(request, eventid, levelid, judgeid):
event = Event.objects.get(id=eventid)
judge = Judge.objects.get(id=judgeid)
level = Level.objects.get(id=levelid)
context = {
'event' : event,
'level' : level,
'judge' : judge,
}
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active and user == judge.user:
login(request, user)
return redirect('ems:events_levels_judge', event.id, level.id, judge.id)
else:
context['status'] = 0
return render(request, 'ems/login.html', context)
def events_levels_judge(request, eventid, levelid, judgeid):
event = Event.objects.get(id=eventid)
level = Level.objects.get(id=levelid)
judge = Judge.objects.get(id=judgeid)
emsadmin = True if request.user.username in EMSADMINS else False
if not emsadmin and request.user != judge.user:
return redirect('ems:events_judge_login', event.id, level.id, judge.id)
if request.method == 'POST':
if 'leave' in request.POST:
for team in level.teams.all():
try:
score = Score.objects.get(level=level, team=team, judge=judge)
if not score.is_frozen:
score.delete()
score = Score.objects.create(level=level, team=team, judge=judge)
score.is_frozen = True
score.save()
except:
score = Score.objects.create(level=level, team=team, judge=judge)
score.is_frozen = True
score.save()
elif "save" or "freeze" in request.POST:
for team in level.teams.all():
scores = request.POST.getlist(str(team.id))
try:
score = Score.objects.get(level=level, team=team, judge=judge)
except:
score = Score.objects.create(level=level, team=team, judge=judge)
for i, val in enumerate(scores):
attr = 'var' + str(i+1)
if val == '':
val = None
setattr(score, attr, val)
comments = request.POST['comment-'+str(team.id)]
score.comments = comments
if "freeze" in request.POST:
score.is_frozen = True
score.save()
teams = []
for team in level.teams.all():
try:
score = Score.objects.get(level=level, team=team, judge=judge)
team.score = score
total = 0
for x in range(1, 11):
attr = 'var' + str(x)
try:
val = getattr(score, attr)
total += val
except:
pass
team.score.total = total
except:
pass
teams.append(team)
context = {
'event' : event,
'level' : level,
'judge' : judge,
'teams' : teams,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_judge_edit.html', context)
def events_participants(request, eventid):
event = Event.objects.get(id=eventid)
emsadmin = True if request.user.username in EMSADMINS else False
if event.is_team:
return redirect('ems:events_teams', event.id)
teams = Team.objects.filter(event=event)
if request.method == 'POST':
if 'delete-team' in request.POST:
teamid = request.POST['delete-team']
team = Team.objects.get(id=teamid)
team.delete()
context = {
'event' : event,
'teams' : teams,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_participants.html', context)
def events_participants_add(request, eventid):
event = Event.objects.get(id=eventid)
emsadmin = True if request.user.username in EMSADMINS else False
parts = []
if request.method == 'POST':
if 'fetch' in request.POST:
partid = request.POST['aadhaar']
partids = map(lambda s:s.strip().replace("BITS", "").replace("MSPS", ""), partid.split())
print partids
for partid in partids:
try:
part = Participant.bitsians.get(uniqueid__iexact=partid)
parts.append(part)
except:
pass
try:
part = Participant.objects.get(id__iexact=partid)
parts.append(part)
except:
pass
if 'add' in request.POST:
partids = request.POST.getlist('part')
for partid in partids:
part = Participant.objects.get(id=partid)
try:
team = Team.objects.get(leader=part, event=event, members=None)
except:
team = Team.objects.create(leader=part, event=event)
registered = Level.objects.get(name="Registered", event=event)
team.levels.add(registered)
return redirect('ems:events_participants', event.id)
context = {
'event' : event,
'parts' : parts,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_participants_add.html', context)
def events_teams(request, eventid):
event = Event.objects.get(id=eventid)
emsadmin = True if request.user.username in EMSADMINS else False
if not event.is_team:
return redirect('ems:events_participants', event.id)
teams = Team.objects.filter(event=event)
if request.method == 'POST':
if 'delete-team' in request.POST:
teamid = request.POST['delete-team']
team = Team.objects.get(id=teamid)
team.delete()
context = {
'event' : event,
'teams' : teams,
'emsadmin' : emsadmin,
}
return render(request, 'ems/events_teams.html', context)
def events_teams_add(request, eventid):
event = Event.objects.get(id=eventid)
parts = []
select = []
errors = []
if request.method == 'POST':
if 'fetch' in request.POST:
partid = request.POST['aadhaar']
if ";" in partid:
existingteams = Team.objects.filter(event=event)
newteams = partid.split(";")
for newteam in newteams:
team_parts = []
team_partids = map(lambda s:s.strip().replace("BITS", "").replace("MSPS", ""), newteam.split())
for team_partid in team_partids:
try:
part = Participant.bitsians.get(uniqueid__iexact=team_partid)
parts.append(part)
except:
pass
try:
part = Participant.objects.get(id__iexact=team_partid)
team_parts.append(part)
except:
pass
if team_parts:
leader = team_parts[0]
team = Team.objects.create(leader=leader, event=event)
members = team_parts[1:]
for member in members:
team.members.add(member)
registered = Level.objects.get(name="Registered", event=event)
team.levels.add(registered)
return redirect('ems:events_participants', event.id)
else:
partids = partid.split()
partids = map(lambda s:s.strip().replace("BITS", "").replace("MSPS", ""), partids)
for partid in partids:
try:
part = Participant.bitsians.get(uniqueid__iexact=partid)
parts.append(part)
except:
pass
try:
part = Participant.objects.get(id__iexact=partid)
parts.append(part)
except:
pass
if 'next' in request.POST:
teams = event.team_set.all()
partids = request.POST.getlist('part')
for partid in partids:
part = Participant.objects.get(id=partid)
if not errors:
for partid in partids:
part = Participant.objects.get(id=partid)
select.append(part)
if "add" in request.POST:
teams = event.team_set.all()
partids = request.POST.getlist('part')
leaderid = request.POST['leader']
name = request.POST['name']
comments = request.POST['comments']
leader = Participant.objects.get(id=leaderid)
team = Team.objects.create(leader=leader, event=event, name=name, comments=comments)
for partid in partids:
if partid != leaderid:
part = Participant.objects.get(id=partid)
team.members.add(part)
registered = Level.objects.get(name="Registered", event=event)
team.levels.add(registered)
return redirect('ems:events_participants', event.id)
context = {
'event' : event,
'parts' : parts,
'select' : select,
'errors' : errors,
}
return render(request, 'ems/events_teams_add.html', context)
def events_teams_edit(request, eventid, teamid):
event = Event.objects.get(id=eventid)
team = Team.objects.get(id=teamid)
if request.method == 'POST':
if 'save' in request.POST:
name = request.POST['team_name']
comments = request.POST['comments']
position = request.POST['position']
team.name = name
team.comments = comments
team.position = position
team.save()
context = {
'event' : event,
'team' : team,
}
return render(request, 'ems/events_teams_edit.html', context) | 32.217729 | 203 | 0.695791 |
a949db919cd36868c22671e2839695a92034044f | 3,117 | py | Python | config.py | eicc27/Pixcrawl-Full | dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb | [
"MIT"
] | null | null | null | config.py | eicc27/Pixcrawl-Full | dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb | [
"MIT"
] | null | null | null | config.py | eicc27/Pixcrawl-Full | dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb | [
"MIT"
] | null | null | null | from msedge.selenium_tools import Edge, EdgeOptions
from lxml import html
import time
import curses
stdscr = curses.initscr()
max_y = stdscr.getmaxyx()[0] - 1
if max_y < 16:
raise Exception("Terminal row size must be more then 17, but now it is %d." % (max_y + 1))
# changelog: more OOP.
# class: illust,illustName,picList(made up of pic classes)
stdscr.addstr("Config R18?\nWarning: you must quit all edge browsers and kill their process in task manager!")
# When getstr(), auto-refresh
f0_config = bytes.decode(stdscr.getstr())
if f0_config == 'Y' or f0_config == 'y' or f0_config == '':
driver = driver_init()
driver.get("https://www.pixiv.net/setting_user.php")
etree = html.etree
initial_page = driver.page_source
initial_dom = etree.HTML(initial_page)
r18Switch = initial_dom.xpath(
'//input[(@name="r18" or @name="r18g") and @checked]/@value')
if r18Switch[0] == 'hide':
stdscr.addstr('R-18 disabled.\n')
else:
stdscr.addstr('R-18 enabled.\n')
if r18Switch[1] == '1':
stdscr.addstr('R-18G disabled.\n')
else:
stdscr.addstr('R-18G enabled.\n')
stdscr.refresh()
stdscr.addstr(
'Do you want confirm the r-18 settings?\nPress Y or Enter to navigate you to the settings page, or by default '
'NO.\n')
f1_config = bytes.decode(stdscr.getstr())
if f1_config == 'y' or f1_config == 'Y' or f1_config == '':
stdscr.addstr('Unleash R-18?\n')
r18Config = bytes.decode(stdscr.getstr())
stdscr.addstr('Unleash R-18G?\n')
r18gConfig = bytes.decode(stdscr.getstr())
if r18Config == 'y' or r18Config == 'Y' or r18Config == '':
driver.find_element_by_xpath(
'//input[@name="r18" and @value="show"]').click()
stdscr.addstr('R-18 has been ON.\n')
else:
driver.find_element_by_xpath(
'//input[@name="r18" and @value="hide"]').click()
stdscr.addstr('R-18 is now OFF.\n')
# Give a timely feedback
stdscr.refresh()
if r18gConfig == 'Y' or r18gConfig == 'y' or r18gConfig == '':
driver.find_element_by_xpath(
'//input[@name="r18g" and @value="2"]').click()
stdscr.addstr('R-18G has been ON.\n')
else:
driver.find_element_by_xpath(
'//input[@name="r18g" and @value="1"]').click()
stdscr.addstr('R-18G is now OFF.\n')
stdscr.refresh()
driver.find_element_by_xpath('//input[@name="submit"]').click()
time.sleep(2)
stdscr.addstr('Config saved. Now refreshing...\n')
stdscr.refresh()
driver.refresh()
driver.quit()
| 39.961538 | 120 | 0.600898 |
a94bba226fe399a457f809ece3327258a884ffc0 | 1,181 | py | Python | dev/tools/roadnet_convert/geo/formats/osm.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 50 | 2018-12-21T08:21:38.000Z | 2022-01-24T09:47:59.000Z | dev/tools/roadnet_convert/geo/formats/osm.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 2 | 2018-12-19T13:42:47.000Z | 2019-05-13T04:11:45.000Z | dev/tools/roadnet_convert/geo/formats/osm.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 27 | 2018-11-28T07:30:34.000Z | 2022-02-05T02:22:26.000Z | from geo.position import Location
import geo.helper
| 31.078947 | 92 | 0.675699 |
a94bd224bee59029c8d307451756cf94ded0c086 | 375 | py | Python | profiles/tables/role_binding_by_team_table.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 112 | 2021-04-21T08:52:55.000Z | 2022-03-01T15:09:19.000Z | profiles/tables/role_binding_by_team_table.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 216 | 2021-04-21T09:06:47.000Z | 2022-03-30T14:21:28.000Z | profiles/tables/role_binding_by_team_table.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 21 | 2021-04-20T13:53:54.000Z | 2022-03-30T21:43:04.000Z | from django_tables2 import tables, Column
from profiles.models import TeamRoleBinding
| 31.25 | 91 | 0.712 |
a94dde590f87aeb3b20de4c6b4b586cab3f571b5 | 1,441 | py | Python | Sorts/bubble_sort_recursive.py | Neiva07/Algorithms | cc2b22d1f69f0af7b91a8326550e759abfba79c8 | [
"MIT"
] | 199 | 2019-12-01T01:23:34.000Z | 2022-02-28T10:30:40.000Z | Sorts/bubble_sort_recursive.py | Neiva07/Algorithms | cc2b22d1f69f0af7b91a8326550e759abfba79c8 | [
"MIT"
] | 35 | 2020-06-08T17:59:22.000Z | 2021-11-11T04:00:29.000Z | Sorts/bubble_sort_recursive.py | Neiva07/Algorithms | cc2b22d1f69f0af7b91a8326550e759abfba79c8 | [
"MIT"
] | 106 | 2020-02-05T01:28:19.000Z | 2022-03-11T05:38:54.000Z | # Script: bubble_sort_recursive.py
# Author: Joseph L. Crandal
# Purpose: Demonstrate bubble sort with recursion
# data will be the list to be sorted
data = [ 0, 5, 2, 3, 10, 123, -53, 23, 9, 2 ]
dataOrig = [ 0, 5, 2, 3, 10, 123, -53, 23, 9, 2 ]
# In a bubble sort you will work your way through the dataset
# and move the elements that are adjacent
# Recursive functions call on themselves to process data until a goal has been met or it runs out of items to process
# In this example it continues to go over the dataset until it doesn't see any further change in position from sorting
# Execute the sort
bubbleSort(data)
# Show sorted array versus original
print("Unsorted array: ")
for i in range(len(dataOrig)):
print(dataOrig[i])
print("Sorted array: ")
for i in range(len(data)):
print(data[i])
| 35.146341 | 118 | 0.668286 |
a94f07dd94305ef8cca149684b5c8e4ef5b6072f | 19,260 | py | Python | mcv_consoler/plugins/tempest/runner.py | vladryk/mcv | ee74beafc65053ce200e03da423784cee0724e23 | [
"Apache-2.0"
] | null | null | null | mcv_consoler/plugins/tempest/runner.py | vladryk/mcv | ee74beafc65053ce200e03da423784cee0724e23 | [
"Apache-2.0"
] | null | null | null | mcv_consoler/plugins/tempest/runner.py | vladryk/mcv | ee74beafc65053ce200e03da423784cee0724e23 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 Mirantis, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import datetime
import json
import logging
import os.path
import subprocess
import traceback
from oslo_config import cfg
from mcv_consoler.common.config import DEFAULT_CIRROS_IMAGE
from mcv_consoler.common.config import MOS_TEMPEST_MAP
from mcv_consoler.common.config import TIMES_DB_PATH
from mcv_consoler.common.errors import TempestError
from mcv_consoler.plugins.rally import runner as rrunner
from mcv_consoler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Extra values layered on top of the generated tempest configuration.
# Keys mirror tempest.conf section names; the fixed network comes from the
# consoler's own networking settings.
# NOTE(review): consumed elsewhere in this module by the tempest runner —
# confirm the merge semantics against the full file.
tempest_additional_conf = {
    'compute':
        {'fixed_network_name': CONF.networking.network_ext_name},
    'object-storage':
        {'operator_role': 'admin',
         'reseller_admin_role': 'admin'},
    'auth': {}
}
| 40.125 | 79 | 0.546625 |
a9508614454f6bf05864b1611a7fc47f5a0baa76 | 299 | py | Python | numpy_demo/version.py | mpmkp2020/numpy_demo | 796262e06c84b7e9aa446b244a3faf3891d9ece1 | [
"BSD-3-Clause"
] | null | null | null | numpy_demo/version.py | mpmkp2020/numpy_demo | 796262e06c84b7e9aa446b244a3faf3891d9ece1 | [
"BSD-3-Clause"
] | null | null | null | numpy_demo/version.py | mpmkp2020/numpy_demo | 796262e06c84b7e9aa446b244a3faf3891d9ece1 | [
"BSD-3-Clause"
] | null | null | null |
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy_demo.lib.NumpyVersion`
release = True
git_revision = 'dd8e9be4d35e25e795d8d139ff4658715767c211'
short_version = '1.29.0'
full_version = '1.29.0'
# On a release build the public version is the plain release string;
# otherwise fall back to the full (dev) version.
version = '1.29.0' if release else full_version
| 23 | 65 | 0.755853 |
a9525cb4b63e18ce45a9ca957c592c3c20ea53fe | 1,385 | py | Python | docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py | EricHughesABC/pygamma_gallery | 64565d364e68a185aeee25b904813d795ecbe87c | [
"MIT"
] | null | null | null | docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py | EricHughesABC/pygamma_gallery | 64565d364e68a185aeee25b904813d795ecbe87c | [
"MIT"
] | null | null | null | docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py | EricHughesABC/pygamma_gallery | 64565d364e68a185aeee25b904813d795ecbe87c | [
"MIT"
] | null | null | null | """
#################
Hammersley Sphere
#################
"""
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def return_point(m, n, p, l=10):
    """Return the m-th of n Hammersley point coordinates for base *p*.

    For ``p == 1`` the coordinate is simply ``m / n``.  For ``p > 1`` it is
    the radical inverse of *m* in base *p* (the digits of *m* mirrored
    around the radix point), scanned over digit positions ``p**l .. p**0``.

    :param m: index of the Hammersley point to calculate
    :param n: maximum number of points
    :param p: order of the Hammersley point: 1, 2, 3, 4, ... etc
    :param l: highest power of *p* to consider (defaults to 10, the value
        that was previously hard-coded)
    :return: float in [0, 1)
    """
    if p == 1:
        return m / n
    v = 0.0
    # Peel off base-p digits of m from the most significant position down,
    # placing each digit one position to the right of the radix point.
    for j in range(l, -1, -1):
        num = m // p ** j
        if num > 0:
            m -= num * p ** j
            v += num / p ** (j + 1)
    return v
if __name__ == "__main__":
    # Build a 2D Hammersley set (bases 1 and 7), map it onto the unit
    # sphere, and scatter-plot the points in 3D.
    npts = 500
    h_1 = np.zeros(npts)
    h_7 = np.zeros(npts)
    for m in range(npts):
        h_1[m] = return_point(m, npts, 1)
        h_7[m] = return_point(m, npts, 7)
    # Base-1 coordinate -> azimuth angle in [0, 2*pi).
    phirad = h_1 * 2.0 * np.pi
    h7 = 2.0 * h_7 - 1.0 # map from [0,1] to [-1,1]
    # sin(theta) for a point whose z-coordinate on the unit sphere is h7.
    st = np.sqrt(1.0 - h7 * h7)
    xxx = st * np.cos(phirad)
    yyy = st * np.sin(phirad)
    zzz = h7
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot(xxx, yyy, zzz, '.')
    ax.set_xticks([-1.0, -0.5, 0.0, 0.5, 1.0]);
    ax.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0]);
    ax.set_zticks([-1.0, -0.5, 0.0, 0.5, 1.0]);
    ax.set_title("Ham Points, 1 and 7", fontsize=14)
    plt.show()
a953cb0fff14bcb71d5e717da31296569a25a401 | 11,261 | py | Python | org/heather/setup/__init__.py | PandaLunatiquePrivate/Heather | a50ce59a7a61ac103003434fc0defc0e3bb4860c | [
"Apache-2.0"
] | 2 | 2021-03-06T20:15:14.000Z | 2021-03-28T16:58:13.000Z | org/heather/setup/__init__.py | PandaLunatiquePrivate/Heather | a50ce59a7a61ac103003434fc0defc0e3bb4860c | [
"Apache-2.0"
] | null | null | null | org/heather/setup/__init__.py | PandaLunatiquePrivate/Heather | a50ce59a7a61ac103003434fc0defc0e3bb4860c | [
"Apache-2.0"
] | null | null | null | import enum
import json
import os
import requests
import yaml
import socket
import sqlite3
import traceback
from org.heather.api.tools import Tools
from org.heather.api.log import Log, LogLevel
| 42.334586 | 534 | 0.610958 |
a955fd4758fdef6a817f379d021c4f3cc6b7730c | 5,421 | py | Python | utils/belief_prop.py | atitus5/ocr-869 | 1d714dd28e933fb320b099a4631d25e93bb01678 | [
"MIT"
] | null | null | null | utils/belief_prop.py | atitus5/ocr-869 | 1d714dd28e933fb320b099a4631d25e93bb01678 | [
"MIT"
] | null | null | null | utils/belief_prop.py | atitus5/ocr-869 | 1d714dd28e933fb320b099a4631d25e93bb01678 | [
"MIT"
] | null | null | null | import math
import sys
import time
from nltk import word_tokenize
import numpy as np
| 46.333333 | 120 | 0.643239 |
a9562047bea821bb81235f635245c2aa193d719c | 619 | py | Python | PWN/jarvisoj.com/level1/exploit.py | WinDDDog/Note | 5489ffeabe75d256b8bffffb24ab131cc74f3aed | [
"Apache-2.0"
] | null | null | null | PWN/jarvisoj.com/level1/exploit.py | WinDDDog/Note | 5489ffeabe75d256b8bffffb24ab131cc74f3aed | [
"Apache-2.0"
] | null | null | null | PWN/jarvisoj.com/level1/exploit.py | WinDDDog/Note | 5489ffeabe75d256b8bffffb24ab131cc74f3aed | [
"Apache-2.0"
] | null | null | null | from pwn import *
# x86 shellcode: the escaped bytes spell "/bin//sh" (\x2f\x62\x69\x6e,
# \x2f\x2f\x73\x68), so this appears to be an execve("/bin//sh") payload —
# verify by disassembling before reuse.
shellcode = "\x31\xc0\x31\xdb\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\x31\xd2\xb0\x0b\x51\x52\x55\x89\xe5\x0f\x34\x31\xc0\x31\xdb\xfe\xc0\x51\x52\x55\x89\xe5\x0f\x34"
# Remote CTF target (jarvisoj.com pwn challenge) and its service port.
HOST = 'pwn2.jarvisoj.com'
PORT = 9877
# NOTE(review): PrettyTommy is not defined in this snippet; presumably the
# exploit driver that connects and delivers the shellcode — confirm.
PrettyTommy(HOST,PORT)
a956dee6345202cc212985e79e8f74cb1e26aa99 | 1,065 | py | Python | botx/clients/methods/errors/unauthorized_bot.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | botx/clients/methods/errors/unauthorized_bot.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | botx/clients/methods/errors/unauthorized_bot.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | """Definition for "invalid bot credentials" error."""
from typing import NoReturn
from botx.clients.methods.base import APIErrorResponse, BotXMethod
from botx.clients.types.http import HTTPResponse
from botx.exceptions import BotXAPIError
def handle_error(method: BotXMethod, response: HTTPResponse) -> NoReturn:
    """Process a BotX API "invalid bot credentials" error response.

    Arguments:
        method: the API method that was executed before the error occurred.
        response: HTTP response received from the BotX API.

    Raises:
        InvalidBotCredentials: unconditionally, after parsing the payload.
    """
    body = response.json_body
    # Validate that the payload really is an API error envelope.
    APIErrorResponse[dict].parse_obj(body)
    raise InvalidBotCredentials(
        url=method.url,
        method=method.http_method,
        response_content=body,
        status_content=response.status_code,
        bot_id=method.bot_id,  # type: ignore
    )
| 30.428571 | 80 | 0.71831 |
a9578f51cee02781b1cf946c958d1259116e97c7 | 16,515 | py | Python | ld38/game_scene.py | irskep/rogue_basement | f92637d7870662a401ca7bb745e3855364b5ac9c | [
"MIT"
] | 16 | 2017-04-24T02:29:43.000Z | 2021-07-31T15:53:15.000Z | ld38/game_scene.py | irskep/rogue_basement | f92637d7870662a401ca7bb745e3855364b5ac9c | [
"MIT"
] | 4 | 2017-04-24T20:13:45.000Z | 2017-05-07T16:22:52.000Z | ld38/game_scene.py | irskep/rogue_basement | f92637d7870662a401ca7bb745e3855364b5ac9c | [
"MIT"
] | 2 | 2017-05-14T20:57:38.000Z | 2017-05-19T22:08:37.000Z | # This file has a lot going on in it because really ties the game together,
# just like The Dude's rug. You can probably read it start to finish, but
# by all means start jumping around from here.
# Dependencies for rendering the UI
from clubsandwich.ui import (
LabelView,
LayoutOptions,
UIScene,
)
# including some ones written specifically for this game
from .views import ProgressBarView, GameView, StatsView
# Whenever you go to another "screen," you're visiting a scene. These are the
# scenes you can get to from the game scene.
from .scenes import PauseScene, WinScene, LoseScene
# This object stores the state of the whole game, so we're definitely gonna
# need that.
from .game_state import GameState
# When keys are pressed, we'll call these functions to have the player do
# things.
from .actions import (
action_throw,
action_close,
action_move,
action_pickup_item,
)
# When things happen, we need to show status messages at the bottom of the
# screen. Since more than one thing can happen in a frame, there's some
# subtle logic encapsulated in this Logger object.
from .logger import Logger
# Constructing arbitrary English sentences from component parts is not always
# simple. This function makes it read nicer in code.
from .sentences import simple_declarative_sentence
# There are four tracks that can play at any given time. Pyglet (the library
# used for audio) doesn't have easy "fade" support, so this object tracks and
# modifies volumes for each track per frame.
from .music import NTrackPlayer
# const.py does some interesting things that you should look at when you're
# interested. For now, here are some hints:
from .const import (
# Enums are collections of unique identifiers. In roguelikes it's usually
# better to keep everything in data files, but for a small game like this
# it's not a big deal to have a few small ones.
EnumEventNames,
EnumFeature,
EnumMonsterMode,
# These are collections of values from data files:
verbs, # from verbs.csv
key_bindings, # from key_bindings.csv
# This is a reverse mapping of key_bindings.csv so we can turn
# a raw key value into a usable command.
BINDINGS_BY_KEY,
# Map of key binding ID to a clubsandwich.geom.Point object representing a
# direction.
KEYS_TO_DIRECTIONS,
)
# At some point this game was slow. This flag enables profiling. You can
# ignore it.
DEBUG_PROFILE = False
if DEBUG_PROFILE:
  import cProfile
  pr = cProfile.Profile()
# `pr` only exists when DEBUG_PROFILE is True, so any code using it must
# guard on the same flag.
# All game scenes share an instance of the player because the audio should be
# continuous. It's a bit of a hack that it's a global variable, but this was a
# 48-hour game, so deal with it.
N_TRACK_PLAYER = NTrackPlayer(['Q1.mp3', 'Q2.mp3', 'Q3.mp3', 'Q4.mp3'])
# This is the text that appears at the bottom left of the screen.
# (.strip() removes the leading/trailing newlines of the literal.)
TEXT_HELP = """
======= Keys =======
Move: arrows, numpad
      hjklyubn
Get rock: g
Throw rock: t
Close: c
""".strip()
# While you're playing the game, there are actually 3 modes of input:
#
# * Normal: move, wait, get, close, throw
# * Prompting for throw direction
# * Prompting for close direction
#
# These states were originally handled with a "mode" property, but it turns out
# to be MUCH simpler if there are just 3 completely different scenes for these
# things that happen to draw the screen the same way. That way you never have
# any "if mode == PROMPT_THROW_DIRECTION" blocks or anything.
#
# So those 3 scenes all inherit from this base class.
# This is another abstract base class, subclassing the one above. Two of the
# three game scenes are just waiting for a single keystroke for input. This
# class abstracts that behavior.
# Finally, some real action! This is the main game scene, as the name says.
# This object has a lot of responsibilities:
#
# * Reset things for a new game
# * Display world events to the user
# * Act on main game input
# * Assorted hacks
#
# Let's dive in!
# At this point, you should be able to read the last two classes yourself
# without my help. From here, you should jump around to whatever interests you!
# I would suggest a reading order of something like:
# * const.py
# * entity.py
# * game_state.py
# * level_state.py
# * behavior.py
# * actions.py
# * level_generator.py
# * views.py
# * draw_game.py
| 37.44898 | 102 | 0.711051 |
a958227f8764279c1268ab44258acb82a4b5a6c0 | 4,882 | py | Python | main.py | yanxurui/portfolio | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | [
"MIT"
] | null | null | null | main.py | yanxurui/portfolio | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | [
"MIT"
] | null | null | null | main.py | yanxurui/portfolio | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | [
"MIT"
] | null | null | null | import os
import shutil
import argparse
from pathlib import Path
from time import time
from collections import defaultdict
import torch
import numpy as np
import pandas as pd
torch.manual_seed(0)
if __name__ == '__main__':
    # CLI entry point: `python main.py <experiment_path> [--test]`.
    parser = argparse.ArgumentParser(description='Null')
    parser.add_argument('path', help='path of experiment, must contain config.py')
    parser.add_argument('--test', action='store_true', help='test only')
    args = parser.parse_args()
    # Expose the experiment directory via the environment; presumably read
    # by config_global when it is imported below — confirm.
    os.environ['CONFIG_LOCAL_DIR'] = args.path
    # variables defined here are global/model level
    save_dir = Path(args.path)
    if not os.path.isfile(os.path.join(save_dir, 'config.py')):
        raise Exception('{}: wrong path or no local config'.format(save_dir))
    # Imported late on purpose, after CONFIG_LOCAL_DIR has been set.
    from config_global import epoch, net, optimizer, criterion, data, online_train
    if not args.test:
        train()
    # Reload the saved checkpoint before evaluating.
    # NOTE(review): train(), load_model() and test() are defined elsewhere
    # in this module — confirm against the full file.
    net, optimizer, criterion = load_model(save_dir.joinpath('state.pt'))
    test()
| 33.438356 | 88 | 0.621262 |
a958be1401872e502507925a19d9666e8b808383 | 736 | py | Python | listing_manager/utils.py | mikezareno/listing-manager | 7cf07c2f654925254949b8d0000104cd0cfcafe9 | [
"MIT"
] | null | null | null | listing_manager/utils.py | mikezareno/listing-manager | 7cf07c2f654925254949b8d0000104cd0cfcafe9 | [
"MIT"
] | null | null | null | listing_manager/utils.py | mikezareno/listing-manager | 7cf07c2f654925254949b8d0000104cd0cfcafe9 | [
"MIT"
] | null | null | null | #
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
import json
from frappe.utils import flt, cstr, nowdate, nowtime
from six import string_types
| 26.285714 | 107 | 0.754076 |
a95b262364cdeaaec9745537650c70b839330456 | 1,368 | py | Python | package_installer.py | LukasDoesDev/deployarch | 775e15220003ddbf30774167a0c3d145d84489a0 | [
"MIT"
] | 2 | 2021-02-07T14:47:28.000Z | 2021-02-08T10:42:54.000Z | package_installer.py | LukasDoesDev/deployarch | 775e15220003ddbf30774167a0c3d145d84489a0 | [
"MIT"
] | 1 | 2021-02-08T13:46:39.000Z | 2021-02-08T13:46:39.000Z | package_installer.py | LukasDoesDev/deployarch | 775e15220003ddbf30774167a0c3d145d84489a0 | [
"MIT"
] | 1 | 2021-02-07T14:47:35.000Z | 2021-02-07T14:47:35.000Z | import json
import subprocess
# Shell command templates; {} is filled with a space-separated package list.
PACMAN_SYNC_DB_COMMAND = 'sudo pacman --noconfirm -Sy'
AUR_HELPER_COMMAND = 'paru --noconfirm -S {}'
PACMAN_COMMAND = 'sudo pacman --noconfirm -S {}'
# config.json drives everything: a "packages" list and a "setup" mapping.
with open('./config.json') as f:
    config = json.load(f)
packages_raw = config.get('packages', {})
setup_raw = config.get('setup', {})
# Collect unique package names per source; scripts run after installation.
pacman_packages = set()
aur_packages = set()
scripts = []
for package_raw in packages_raw:
    # TODO: select with maybe website? or fzf?
    # Skip unnamed entries and the two custom builds handled separately.
    if package_raw.get('name') not in ['', None, 'custom dmenu', 'custom dwm']:
        if package_raw.get('script'):
            # NOTE(review): the ['echo error'] default is dead code — the
            # truthiness check above already guarantees 'script' is present.
            scripts.append(package_raw.get('script', ['echo error']))
        pacman_packages.update(package_raw.get('pacman', []))
        aur_packages.update(package_raw.get('aur', []))
pacman_packages_str = ' '.join(pacman_packages)
aur_packages_str = ' '.join(aur_packages)
# NOTE(review): run_cmds is not defined in this snippet; presumably a helper
# that executes each command via subprocess — confirm.
run_cmds([
    PACMAN_SYNC_DB_COMMAND, # Synchronize package database
    PACMAN_COMMAND.format(pacman_packages_str), # Install Pacman packages
    AUR_HELPER_COMMAND.format(aur_packages_str), # Install AUR packages
])
# Per-package post-install scripts collected above.
for script in scripts:
    run_cmds(script)
# Extra "setup" commands run only for packages that were actually selected.
for package_name, cmds in setup_raw.items():
    if package_name in pacman_packages or package_name in aur_packages:
        run_cmds(cmds)
| 31.090909 | 82 | 0.706871 |
a95cd5050cc5338cb7021667243f069114685055 | 2,293 | py | Python | utils/forms.py | JakubBialoskorski/notes | 1016581cbf7d2024df42f85df039c7e2a5b03205 | [
"MIT"
] | null | null | null | utils/forms.py | JakubBialoskorski/notes | 1016581cbf7d2024df42f85df039c7e2a5b03205 | [
"MIT"
] | 1 | 2021-06-22T20:26:20.000Z | 2021-06-22T20:26:20.000Z | utils/forms.py | JakubBialoskorski/notes | 1016581cbf7d2024df42f85df039c7e2a5b03205 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectMultipleField, HiddenField
from flask_pagedown.fields import PageDownField
from wtforms import validators
| 61.972973 | 286 | 0.756651 |
a95cdc019a431df0ac19c35d5980e2ea22fe3fdc | 2,508 | py | Python | toolkit/retry.py | blackmatrix7/iphone_hunter | 1df7bee48f4d67397fae821f8a675115525f4ef8 | [
"Apache-2.0"
] | 2 | 2017-09-27T14:11:59.000Z | 2022-02-28T06:38:30.000Z | toolkit/retry.py | blackmatrix7/iphone_hunter | 1df7bee48f4d67397fae821f8a675115525f4ef8 | [
"Apache-2.0"
] | 1 | 2021-06-01T21:38:59.000Z | 2021-06-01T21:38:59.000Z | toolkit/retry.py | blackmatrix7/iphone_hunter | 1df7bee48f4d67397fae821f8a675115525f4ef8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/18 9:50
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : retry.py
# @Software: PyCharm
import time
from functools import wraps
__author__ = 'blackmatrix'
"""
"""
def retry(max_retries: int = 5, delay: float = 0, step: float = 0,
          exceptions=BaseException,
          sleep=time.sleep, callback=None, validate=None):
    """Decorator that automatically retries the wrapped function.

    :param max_retries: maximum number of attempts before giving up
    :param delay: seconds to wait before the first retry
    :param step: amount added to the wait time after every retry
    :param exceptions: exception class, tuple or list of classes that
        trigger a retry; anything else propagates immediately
    :param sleep: sleep function, ``time.sleep`` by default (pass an
        event-loop friendly sleep when blocking is unacceptable)
    :param callback: optional ``callback(exception)`` invoked when a retried
        exception is caught; return ``True`` to keep retrying, anything else
        re-raises the exception immediately
    :param validate: optional ``validate(result)`` invoked after a successful
        call; return ``False`` to treat the result as invalid and retry
    :return: the decorator wrapping the target function
    """
    # ``except`` accepts a class or tuple, not a list; normalize so the
    # documented list form works too.
    if isinstance(exceptions, list):
        exceptions = tuple(exceptions)

    def wrapper(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            wait = delay
            last_exception = None
            result = None
            for attempt in range(max_retries):
                # Sleep only between attempts, never before the first one.
                if attempt and (wait or step):
                    sleep(wait)
                    wait += step
                try:
                    result = func(*args, **kwargs)
                except exceptions as ex:
                    if callable(callback) and callback(ex) is True:
                        last_exception = ex
                        continue
                    raise
                if callable(validate) and validate(result) is False:
                    last_exception = None
                    continue
                return result
            # Attempts exhausted: re-raise the last swallowed exception,
            # otherwise hand back the last (possibly invalid) result.
            if last_exception is not None:
                raise last_exception
            return result
        return _wrapper
    return wrapper
# No CLI behavior; importing this module is the only supported use.
if __name__ == '__main__':
    pass
| 30.585366 | 77 | 0.598086 |
a95ff83ce47a3bbf06a07f81219704a5cbadd7ad | 2,564 | py | Python | data_loader/data_loader.py | Lishjie/LUPVisQ | b2f4bcc0e5bf4ce97f9dcfff4901a3469ce04163 | [
"Apache-2.0"
] | null | null | null | data_loader/data_loader.py | Lishjie/LUPVisQ | b2f4bcc0e5bf4ce97f9dcfff4901a3469ce04163 | [
"Apache-2.0"
] | null | null | null | data_loader/data_loader.py | Lishjie/LUPVisQ | b2f4bcc0e5bf4ce97f9dcfff4901a3469ce04163 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/01/02 16:26
# @Author : lishijie
import torch
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms.transforms import Resize
from .databases import *
| 45.785714 | 128 | 0.585023 |
a96249e5e27a4fb1f8a7acc8336f250dfb9992c9 | 64,609 | py | Python | pyeccodes/defs/grib2/localConcepts/eswi/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 7 | 2020-04-14T09:41:17.000Z | 2021-08-06T09:38:19.000Z | pyeccodes/defs/grib2/localConcepts/eswi/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | null | null | null | pyeccodes/defs/grib2/localConcepts/eswi/name_def.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 3 | 2020-04-30T12:44:48.000Z | 2020-12-15T08:40:26.000Z | import pyeccodes.accessors as _
| 42.646205 | 102 | 0.638208 |
a968e5c87a6a2fba1534a27a1696dd6c0f7117a1 | 1,568 | py | Python | apetools/proletarians/setuprun.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/proletarians/setuprun.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null | apetools/proletarians/setuprun.py | rsnakamura/oldape | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | [
"Apache-2.0"
] | null | null | null |
# apetools Libraries
from apetools.baseclass import BaseClass
from apetools.builders import builder
from apetools.lexicographers.lexicographer import Lexicographer
# end SetUp
| 28 | 80 | 0.588648 |
a969c4d30c2cfa4664e0f50b541bf7d5cc4223f3 | 11,423 | py | Python | bleu.py | divyang02/English_to_Hindi_Machine_language_translator | 0502b7bb1f86f45d452868a8701009d421765b64 | [
"MIT"
] | 1 | 2022-02-22T04:10:34.000Z | 2022-02-22T04:10:34.000Z | bleu.py | divyang02/English_to_Hindi_Machine_language_translator | 0502b7bb1f86f45d452868a8701009d421765b64 | [
"MIT"
] | null | null | null | bleu.py | divyang02/English_to_Hindi_Machine_language_translator | 0502b7bb1f86f45d452868a8701009d421765b64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sentence level and Corpus level BLEU score calculation tool
"""
from __future__ import division, print_function
import io
import os
import math
import sys
import argparse
from fractions import Fraction
from collections import Counter
from functools import reduce
from operator import or_
try:
from nltk import ngrams
except:
def chen_and_cherry(references, hypothesis, p_n, hyp_len,
                    smoothing=0, epsilon=0.1, alpha=5, k=5):
    """
    Smooth the modified n-gram precisions *p_n* using one of the seven
    techniques from:

    Boxing Chen and Collin Cherry (2014) A Systematic Comparison of Smoothing
    Techniques for Sentence-Level BLEU. In WMT14.

    :param references: reference token lists (used by methods 5 and 7)
    :param hypothesis: hypothesis token list (used by methods 5-7)
    :param p_n: list of per-order precisions (Fraction instances)
    :param hyp_len: hypothesis length, used by method 4
    :param smoothing: which method to apply, 0 (none) through 7
    :param epsilon: additive count for method 1
    :param alpha: interpolation weight for method 6
    :param k: scaling constant for method 4
    :return: the list of smoothed precisions

    NOTE(review): methods 3-6 mutate and return *p_n* in place, while
    methods 0-2 return a new list.  Method 1 passes the float *epsilon* as
    the numerator of ``Fraction(numerator, denominator)``, which the stdlib
    ``fractions`` module rejects for non-rational arguments — confirm this
    path against the Fraction implementation actually in use.
    """
    # No smoothing.
    if smoothing == 0:
        return p_n
    # Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
    if smoothing == 1:
        return [Fraction(p_i.numerator + epsilon, p_i.denominator)
                if p_i.numerator == 0 else p_i for p_i in p_n]
    # Smoothing method 2: Add 1 to both numerator and denominator (Lin and Och 2004)
    if smoothing == 2:
        return [Fraction(p_i.numerator + 1, p_i.denominator + 1)
                for p_i in p_n]
    # Smoothing method 3: NIST geometric sequence smoothing
    # The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
    # precision score whose matching n-gram count is null.
    # k is 1 for the first 'n' value for which the n-gram match count is null.
    # For example, if the text contains:
    # - one 2-gram match
    # - and (consequently) two 1-gram matches
    # the n-gram count for each individual precision score would be:
    # - n=1 => prec_count = 2 (two unigrams)
    # - n=2 => prec_count = 1 (one bigram)
    # - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)
    # - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
    if smoothing == 3:
        incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.
        for i, p_i in enumerate(p_n):
            if p_i == 0:
                p_n[i] = 1 / 2**incvnt
                incvnt+=1
        return p_n
    # Smoothing method 4:
    # Shorter translations may have inflated precision values due to having
    # smaller denominators; therefore, we give them proportionally
    # smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
    # suggests dividing by 1/ln(len(T)), where T is the length of the translation.
    if smoothing == 4:
        incvnt = 1
        for i, p_i in enumerate(p_n):
            if p_i == 0:
                p_n[i] = incvnt * k / math.log(hyp_len) # Note that this K is different from the K from NIST.
                incvnt+=1
        return p_n
    # Smoothing method 5:
    # The matched counts for similar values of n should be similar. To
    # calculate the n-gram matched count, it averages the n-1, n and n+1 gram
    # matched counts.
    if smoothing == 5:
        m = {}
        # Requires a precision value for an additional ngram order.
        # NOTE(review): modified_precision is presumably defined elsewhere
        # in this module — confirm against the full file.
        p_n_plus5 = p_n + [modified_precision(references, hypothesis, 5)]
        m[-1] = p_n[0] + 1
        for i, p_i in enumerate(p_n):
            p_n[i] = (m[i-1] + p_i + p_n_plus5[i+1]) / 3
            m[i] = p_n[i]
        return p_n
    # Smoothing method 6:
    # Interpolates the maximum likelihood estimate of the precision *p_n* with
    # a prior estimate *pi0*. The prior is estimated by assuming that the ratio
    # between pn and pn-1 will be the same as that between pn-1 and pn-2.
    if smoothing == 6:
        for i, p_i in enumerate(p_n):
            if i in [1,2]: # Skips the first 2 orders of ngrams.
                continue
            else:
                pi0 = p_n[i-1]**2 / p_n[i-2]
                # No. of ngrams in translation.
                l = sum(1 for _ in ngrams(hypothesis, i+1))
                p_n[i] = (p_i + alpha * pi0) / (l + alpha)
        return p_n
    # Smoothing method 7: apply method 4, then method 5 on its output.
    if smoothing == 7:
        p_n = chen_and_cherry(references, hypothesis, p_n, hyp_len, smoothing=4)
        p_n = chen_and_cherry(references, hypothesis, p_n, hyp_len, smoothing=5)
        return p_n
if __name__ == '__main__':
    # Command-line front end: parse options, then score either two files
    # (line-aligned reference/hypothesis) or two literal strings.
    parser = argparse.ArgumentParser(description='Arguments for calculating BLEU')
    parser.add_argument('-t', '--translation', type=str, required=True,
                        help="translation file or string")
    parser.add_argument('-r', '--reference', type=str, required=True,
                        help="reference file or string")
    parser.add_argument('-s', '--smooth', type=int, default=3, metavar='INT', required=False,
                        help="smoothing method type (default: %(default)s)")
    parser.add_argument('-w', '--weights', type=str, default='0.25 0.25 0.25 0.25',
                        help="weights for ngram (default: %(default)s)")
    parser.add_argument('-sl', '--sentence-level', action='store_true',
                        help="print sentence level BLEU score (default: %(default)s)")
    parser.add_argument('-se', '--smooth-epsilon', type=float, default=0.1,
                        help="empirical smoothing parameter for method 1 (default: %(default)s)")
    parser.add_argument('-sk', '--smooth-k', type=int, default=5,
                        help="empirical smoothing parameter for method 4 (default: %(default)s)")
    parser.add_argument('-sa', '--smooth-alpha', type=int, default=5,
                        help="empirical smoothing parameter for method 6 (default: %(default)s)")
    args = parser.parse_args()
    hypothesis_file = args.translation
    reference_file = args.reference
    weights = tuple(map(float, args.weights.split()))
    segment_level = args.sentence_level
    smoothing_method = args.smooth
    epsilon = args.smooth_epsilon
    alpha = args.smooth_alpha
    k = args.smooth_k
    # Calculate BLEU scores.
    # Set --sentence-level and other params to calc sentence-level BLEU in a FILE or string
    # NOTE(review): corpus_bleu is presumably defined elsewhere in this
    # module — confirm against the full file.
    if os.path.isfile(reference_file):
        # File mode: one segment per line; generators keep memory flat.
        with io.open(reference_file, 'r', encoding='utf8') as reffin, \
            io.open(hypothesis_file, 'r', encoding='utf8') as hypfin:
            list_of_references = ((r.split(),) for r in reffin)
            hypotheses = (h.split() for h in hypfin)
            corpus_bleu(list_of_references, hypotheses,
                        weights=weights, segment_level=segment_level,
                        smoothing=smoothing_method, epsilon=epsilon, alpha=alpha, k=k)
    else:
        # String mode: treat the arguments themselves as single segments and
        # always report at sentence level.
        reffin = [reference_file]
        hypfin = [hypothesis_file]
        list_of_references = ((r.split(),) for r in reffin)
        hypotheses = (h.split() for h in hypfin)
        corpus_bleu(list_of_references, hypotheses,
                    weights=weights, segment_level=True,
                    smoothing=smoothing_method, epsilon=epsilon, alpha=alpha, k=k)
| 45.692 | 109 | 0.604044 |
a96d1ad4941b2f2e2aed74363daf53f8103f7801 | 54 | py | Python | configs/postgres.py | enabokov/chat | 4a3a11c68c5089c119ebe5bec1dfebe699aa7c78 | [
"MIT"
] | 1 | 2019-04-14T16:49:32.000Z | 2019-04-14T16:49:32.000Z | configs/postgres.py | enabokov/Chat | 4a3a11c68c5089c119ebe5bec1dfebe699aa7c78 | [
"MIT"
] | 1 | 2021-03-25T21:44:52.000Z | 2021-03-25T21:44:52.000Z | configs/postgres.py | enabokov/chat | 4a3a11c68c5089c119ebe5bec1dfebe699aa7c78 | [
"MIT"
] | null | null | null | DSN = 'postgresql://edward:edward@postgres:5432/chat'
| 27 | 53 | 0.759259 |
a96d57b61d8688819ccbbbd9291ae22fdd80039b | 566 | py | Python | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | [
"Apache-2.0"
] | 1 | 2020-08-29T12:42:11.000Z | 2020-08-29T12:42:11.000Z | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | [
"Apache-2.0"
] | 4 | 2018-05-07T19:36:30.000Z | 2018-05-29T05:18:13.000Z | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | [
"Apache-2.0"
] | null | null | null | from sqlite_framework.sql.item.base import SqlItem
from sqlite_framework.sql.item.column import Column
| 29.789474 | 75 | 0.701413 |
a97015a85173cf78e85bed10e73b68dc69502a9d | 78 | py | Python | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | [
"MIT"
] | null | null | null | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | [
"MIT"
] | null | null | null | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | [
"MIT"
] | null | null | null | from .report_group import GroupReport
from .report_single import SingleReport
| 26 | 39 | 0.871795 |
a97282eeac0597449d543922ed87821479844a39 | 724 | py | Python | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | [
"MIT"
] | 6 | 2019-07-15T19:10:59.000Z | 2022-02-01T04:25:26.000Z | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | [
"MIT"
] | 86 | 2019-07-02T17:59:46.000Z | 2022-02-01T23:23:08.000Z | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | [
"MIT"
] | 7 | 2019-10-16T12:58:21.000Z | 2022-03-08T00:31:32.000Z | """
Mixin for returning history frames.
"""
from collections import deque
from typing import Deque
from rx.core.typing import Observable
from baboon_tracking.models.frame import Frame
| 25.857143 | 84 | 0.68232 |
a972d95469a20ffc4d590103acea6ae8f6b2b426 | 1,746 | py | Python | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 29 | 2017-02-01T11:58:44.000Z | 2021-05-21T15:18:33.000Z | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 143 | 2017-07-26T17:34:44.000Z | 2022-03-01T18:01:43.000Z | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 7 | 2018-03-09T10:04:45.000Z | 2021-10-19T19:17:40.000Z | import json
import html
from pathlib import Path
from elm_doc.utils import Namespace
# Note: title tag is omitted, as the Elm app sets the title after
# it's initialized.
PAGE_TEMPLATE = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="shortcut icon" size="16x16, 32x32, 48x48, 64x64, 128x128, 256x256" href="{mount_point}/assets/favicon.ico">
<link rel="stylesheet" href="{mount_point}/assets/style.css">
<script src="{mount_point}/artifacts/elm.js"></script>
<script src="{mount_point}/assets/highlight/highlight.pack.js"></script>
<link rel="stylesheet" href="{mount_point}/assets/highlight/styles/default.css">
</head>
<body>
<script>
try {{
const fontsLink = document.createElement("link");
fontsLink.href = "{mount_point}/assets/fonts/" + ((navigator.userAgent.indexOf("Macintosh") > -1) ? "_hints_off.css" : "_hints_on.css");
fontsLink.rel = "stylesheet";
document.head.appendChild(fontsLink);
}} catch(e) {{
// loading the font is not essential; log the error and move on
console.log(e);
}}
Elm.Main.init({init});
</script>
</body>
</html>
''' # noqa: E501
| 30.103448 | 142 | 0.643757 |
a9747260a42549b174eafc1943184e3614f86276 | 1,031 | py | Python | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | null | null | null | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z | '''
v1.0
2021.1
01Studio www.01Studio.org
'''
#
import time
from machine import Pin,SoftI2C,ADC
from ssd1306 import SSD1306_I2C
#oled
i2c = SoftI2C(scl=Pin(10), sda=Pin(11)) #I2Cscl--> 10, sda --> 11
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c) #OLED128*64,OLEDI2C0x3c
#ADC1,Pin=27
Water_level = ADC(1)
while True:
oled.fill(0) #
oled.text('01Studio', 0, 0) # 01Studio
oled.text('Water Level test', 0, 15) #
value=Water_level.read_u16() #ADC
#
oled.text(str(value)+' (65535)',0,40)
#0-40950-3V'%.2f'%2
oled.text(str('%.2f'%(value/65535*3.3))+' V',0,55)
#50-4cm
if 0 <= value <=9602:
oled.text('0cm', 60, 55)
if 9602 < value <= 14403:
oled.text('1cm', 60, 55)
if 14403 < value <= 19204:
oled.text('2cm', 60, 55)
if 19204 < value <= 20804:
oled.text('3cm', 60, 55)
if 20804 < value:
oled.text('4cm', 60, 55)
oled.show()
time.sleep_ms(1000)
| 19.826923 | 81 | 0.651794 |
a975a7568cae17acd3d7b4203c548d145cfe9d6a | 147 | py | Python | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | [
"Apache-2.0"
] | null | null | null | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | [
"Apache-2.0"
] | null | null | null | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | [
"Apache-2.0"
] | null | null | null | from typing import Annotated
# A Codeforces problem code string, e.g. "1348B" (contest id + letter).
Problem = Annotated[str, "code cfs problems have, ex. 1348B"]
# NOTE(review): semantics of the next two aliases are inferred from their
# names — confirm against usage elsewhere in the project.
ProblemWidth = int
CFSSectionsData = tuple[int, ...]
| 18.375 | 61 | 0.734694 |
a9767449042e9e6827a47f70074761e36edb412a | 2,666 | py | Python | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | [
"MIT"
] | null | null | null | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | [
"MIT"
] | null | null | null | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
###
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
###
from loadRandom import loadRandom2
ps = PorterStemmer()
# lemmatizer = WordNetLemmatizer()
if __name__ == '__main__':
    _seed = 123
    _observations = 1e4
    _subsets = [1, 2, 3, 4]
    location = '/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv'
    # Load a random sample of reviews, keeping only the columns used below.
    data = loadRandom2(location, _observations, seed=_seed, n=3778803).loc[:,
        ('text', 'useful', 'cool', 'funny', 'stars_x')]
    # Calculate "interaction" score: total of all reader reactions.
    data['interactions'] = data.useful + data.cool + data.funny
    data = data[data['interactions'] >= _subsets[0]].dropna()
    # Subset to get equal amounts of low-useful and high-useful reviews:
    # one mask per interaction count in _subsets, plus one for the tail.
    masks = [data.interactions == x for x in _subsets]
    masks.append(data.interactions > _subsets[-1])
    subsetSize = min([sum(mask) for mask in masks])
    print("Creating subsets of size %i" % subsetSize)
    newData = pd.DataFrame([])
    for mask in masks:
        df = data[mask].sample(n=subsetSize, random_state=_seed)
        newData = newData.append(df)
    data = newData
    # Split interactions into quantiles (5)
    data['group'] = pd.qcut(data['interactions'], q=5, labels=False)
    print(pd.qcut(data['interactions'], q=5).cat.categories)
    # BUG FIX: DataFrame.rename returns a new frame; the original discarded
    # the result, so the column was never actually renamed.
    data = data.rename(columns={"stars_x": "stars"})
    # Create a bag of words and convert the text to a sparse matrix
    text = np.array(data['text'])
    bow = CountVectorizer(analyzer=textProcess).fit(text)
    print("Unique (Not Stop) Words:", len(bow.vocabulary_))
    text = bow.transform(text)
    # Split into features for testing and training at 30%
    xTrain, xTest, yTrain, yTest = train_test_split(
        text, np.array(data['group']), test_size=0.3, random_state=_seed)
    # Train model (Multinomial Naive Bayes)
    nb = MultinomialNB()
    nb.fit(xTrain, yTrain)
    # Test and Evaluate Model
    preds = nb.predict(xTest)
    print(confusion_matrix(yTest, preds))
    print('\n')
    print(classification_report(yTest, preds))
| 33.746835 | 122 | 0.686422 |
a9767fe03cd95cd1ee4f89e2a2b53d9dc840600a | 4,032 | py | Python | 1_Basics:warmup/2_TweetsFilter/twitter_exerciseB.py | ferreiro/Python_course | 73eb41e248d702741a4109a78b15ef8e5e6341f2 | [
"MIT"
] | 2 | 2016-02-15T04:12:22.000Z | 2021-09-05T23:26:53.000Z | 1_Basics:warmup/2_TweetsFilter/twitter_exerciseB.py | ferreiro/Python-course | 73eb41e248d702741a4109a78b15ef8e5e6341f2 | [
"MIT"
] | 10 | 2015-10-16T14:37:41.000Z | 2015-11-16T22:29:39.000Z | 2_TwitterAPI/twitter_exerciseB.py | ferreiro/Python | 9a0292d4898571fcef95546eec977d3138c7c23b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
import json
outdirectory = "outputCSV/"
tweetsFile = "tweets.txt";
outputFile = "mostUsedHasgtags.csv";
tweetsList = [] #List that contains all the tweets readed from a file
hashtagTable = {}; # Dictionary with key= hashtags and value= frecuency for this hashtag
""" Returns a list of tweets readen from a file.
if there's a problem None object will be returned """
""" Creates a hasmap frecuency table where keys are the hashtags and
values are the number or appeareances of that hashtag in all the twetts.
Returns None if we coudn't create the Hashmap and a dictionary if everything works"""
""" Returns a ordered hasmap, where the sorting was made taking into acccount
the value of each key on the hasmap and desdending order. """
""" This function writes header and data to a .csv file pass by value
If the outputFile passed is not a .csv type. A failure will returned (False) """
tweetsList = loadTweets(tweetsFile); #Loading a list of twetts from a file
if (tweetsList != None): print "Loading twetts from file...[OK]"
else: "Loading twetts from file...[ERROR]"
hashtagTable = createHashtagFrecuencyTable(tweetsList);
if (hashtagTable != None): print "Creating hashtags table with its frecuencies...[OK]"
else: "Creating hashtags table with its frecuencies...[ERROR]"
orderedHashtagTable = orderHashtagTable(hashtagTable)
if (orderedHashtagTable != None): print "Ordering hashtags table in desdending order...[OK]"
else: "Ordering hashtags table in desdending order...[ERROR]"
headerList = ["hashtag", "frecuency"] # .csv header to write on the file
if (writeFile(headerList, orderedHashtagTable[:10], outputFile)): print "Writing csv file with top used hashtags...[OK]"
else: "Writing csv file with top used hashtags...[ERROR]"
| 33.322314 | 202 | 0.726438 |
a976a9884a077db66cbb3f3d300b2d865662f9c4 | 4,346 | py | Python | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z | import time
import json
import argparse
import websocket
import requests
import github
MY_NAME = 'kit' # should be able to avoid this in the future
TOKEN = 'XXXXXXX'
GITHUB_USERNAME_BY_SLACK_USERNAME = {
"adam": "adamsmith",
# XXXXXXX ...
}
channel_ids_by_name = {}
channel_names_by_id = {}
next_id = 0
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| 24.834286 | 102 | 0.685228 |
a9775f738c3044fcff42b57c7ed49ac310db7479 | 656 | py | Python | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | [
"MIT"
] | null | null | null | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | [
"MIT"
] | 4 | 2022-02-03T18:24:32.000Z | 2022-02-03T19:24:51.000Z | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | [
"MIT"
] | 1 | 2022-02-03T18:12:44.000Z | 2022-02-03T18:12:44.000Z | import discord
import requests
from discord.ext import commands | 28.521739 | 87 | 0.617378 |
a977697bb7ffe10b5b5f5a391df5f58451adfd57 | 717 | py | Python | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | [
"MIT"
] | null | null | null | target_num = 0
# Project Euler 45: find the next number after 40755 that is triangular,
# pentagonal and hexagonal.  Hexagonal numbers H_j = j(2j-1) are enumerated
# and each candidate is tested by inverting the closed forms with the
# quadratic formula: the candidate qualifies when both inverted indices
# come out as whole numbers.
target_num = 0  # defensive re-init; holds the answer once found
j = 0
while target_num == 0:
    hexagonal = j*(2*j-1)
    # Pentagonal P_k = k(3k-1)/2  =>  k = (1 + sqrt(1 + 24*P)) / 6
    pent_ind = float((1 + (1 + 24*hexagonal)**.5)/6)
    # Triangular T_k = k(k+1)/2  =>  k = (-1 + sqrt(1 + 8*T)) / 2
    # BUG FIX: the original omitted the **.5 here; the printed answer was
    # still correct only because every hexagonal number happens to be
    # triangular, which made the broken test always pass.
    tri_ind = float((-1 + (1 + 8*hexagonal)**.5)/2)
    if pent_ind.is_integer() and tri_ind.is_integer():
        # Skip the two known TPH numbers given in the problem statement.
        if hexagonal != 1 and hexagonal != 40755:
            target_num = hexagonal
    j += 1
print(target_num) # 1533776805
"""
I had a brute force solution, but it was a bit over a minute.
By solving for the index values of pentagon and triangle numbers
in terms of the index value of the hexagon numbers,
the formulas in pent_ind and tri_ind pop out of the quadratic equation.
Basically those variables will only be integers if j is a valid index
for a pentagon number and triangle number as well.
""" | 29.875 | 71 | 0.661088 |
a97827ef5e7685a79286da4ad9d58d63d84d97d6 | 801 | py | Python | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | [
"MIT"
] | null | null | null | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | [
"MIT"
] | null | null | null | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Importo les llibreries
import socket
import RPi.GPIO as GPIO
import time
# Basic GPIO configuration.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT) # only pin 18 is used; a loop could enable several pins at once
# Server IP address and communication port.
host = "PLACE_YOUR_SERVER_IP_HERE"
port = 12345
# Infinite loop: poll the server once per cycle.
while 1:
    s = socket.socket() # create the socket
    s.connect((host, port)) # connect to the server
    data = s.recv(1024) # receive data
    # The received data names the GPIO pin to pulse HIGH
    # (assumes it decodes to an integer pin number -- TODO confirm server protocol).
    GPIO.output(int(data), GPIO.HIGH)
    time.sleep(1) # wait one second
    GPIO.output(int(data), GPIO.LOW) # drive the pin LOW again
    s.close() # close the connection
| 26.7 | 103 | 0.705368 |
a978a3e063f71ae417a8f86e87e70e36b033503d | 16,820 | py | Python | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | [
"Apache-2.0"
] | 5 | 2022-01-31T15:52:19.000Z | 2022-03-21T18:34:27.000Z | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | [
"Apache-2.0"
] | 61 | 2021-12-17T13:03:59.000Z | 2022-03-31T10:24:37.000Z | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | [
"Apache-2.0"
] | null | null | null | ## -------------------------------------------------------------------------------------------------
## -- Project : MLPro - A Synoptic Framework for Standardized Machine Learning Tasks
## -- Package : mlpro.rl.envmodels
## -- Module : mlp_robotinhtm
## -------------------------------------------------------------------------------------------------
## -- History :
## -- yyyy-mm-dd Ver. Auth. Description
## -- 2021-12-17 0.0.0 MRD Creation
## -- 2021-12-17 1.0.0 MRD Released first version
## -- 2021-12-20 1.0.1 DA Replaced 'done' by 'success'
## -- 2021-12-21 1.0.2 DA Class MLPEnvMdel: renamed method reset() to _reset()
## -- 2022-01-02 2.0.0 MRD Refactoring due to the changes on afct pool on
## -- TorchAFctTrans
## -- 2022-02-25 2.0.1 SY Refactoring due to auto generated ID in class Dimension
## -------------------------------------------------------------------------------------------------
"""
Ver. 2.0.1 (2022-02-25)
This module provides Environment Model based on MLP Neural Network for
robotinhtm environment.
"""
import torch
import transformations
from mlpro.rl.models import *
from mlpro.rl.pool.envs.robotinhtm import RobotArm3D
from mlpro.rl.pool.envs.robotinhtm import RobotHTM
from mlpro.sl.pool.afct.afctrans_pytorch import TorchAFctTrans
from torch.utils.data.sampler import SubsetRandomSampler
from collections import deque
# Buffer
| 39.299065 | 146 | 0.542866 |
a979eac6a7daaac0fe50d966818c9860d5136601 | 3,474 | py | Python | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | [
"Apache-2.0"
] | 15 | 2020-06-09T07:03:07.000Z | 2022-02-25T06:59:34.000Z | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | [
"Apache-2.0"
] | 5 | 2020-08-08T07:11:21.000Z | 2020-08-08T07:11:24.000Z | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | [
"Apache-2.0"
] | 2 | 2020-06-09T07:03:26.000Z | 2020-12-31T06:50:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author :
# @Email : 877362867@qq.com
# @Date : 2021/02/22 10:29
""" icdar2013
zip
"""
import re
from pyxllib.xl import File, Dir, shorten
| 35.814433 | 97 | 0.600173 |
a97a18817825892c952ac7174c04fcf55fabab56 | 6,441 | py | Python | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | [
"MIT"
] | 2 | 2020-11-19T21:22:53.000Z | 2021-02-25T00:29:38.000Z | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | [
"MIT"
] | null | null | null | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | [
"MIT"
] | 1 | 2021-02-05T22:45:51.000Z | 2021-02-05T22:45:51.000Z | import os
import numpy as np
import torch
from transformers import BertTokenizer
from tensorflow.keras.utils import to_categorical
from NewDataLoader import *
from config import *
import warnings
| 38.568862 | 127 | 0.621798 |
a97af6a55423ad89ce397dfb867db2824473473b | 1,233 | py | Python | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | [
"MIT"
] | null | null | null | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | [
"MIT"
] | null | null | null | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | [
"MIT"
] | null | null | null | from airflow import DAG
from operators import LoadDimensionOperator
def load_dim_subdag(
    parent_dag_name: str,
    task_id: str,
    redshift_conn_id: str,
    sql_statement: str,
    do_truncate: bool,
    table_name: str,
    **kwargs,
):
    """
    Build a sub-DAG that loads one dimension table via LoadDimensionOperator.

    The sub-DAG is named f'{parent_dag_name}.{task_id}', the naming
    scheme Airflow's SubDagOperator expects.

    Keyword arguments:
    parent_dag_name -- parent DAG name
    task_id -- task ID for the subdag (also used for the inner task)
    redshift_conn_id -- Airflow connection ID with the Redshift details
    sql_statement -- SQL statement that populates the table
    do_truncate -- whether the table must be truncated before running the
        SQL statement
    table_name -- dimension table name

    Any extra keyword arguments are forwarded to the DAG constructor
    (not to LoadDimensionOperator).

    Returns the constructed DAG object.
    """
    dag = DAG(f'{parent_dag_name}.{task_id}', **kwargs)
    # Single task: the operator registers itself with `dag` on construction,
    # so no further wiring is needed (the original kept a useless local
    # binding plus a no-op bare-expression statement; both removed).
    LoadDimensionOperator(
        task_id=task_id,
        dag=dag,
        redshift_conn_id=redshift_conn_id,
        sql_query=sql_statement,
        do_truncate=do_truncate,
        table_name=table_name,
    )
    return dag
| 26.804348 | 75 | 0.673155 |
a97bced1b47f7e35fb054962b9c59fd468c4c16b | 1,816 | py | Python | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | [
"MIT"
] | null | null | null | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | [
"MIT"
] | null | null | null | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | [
"MIT"
] | 1 | 2021-12-28T08:13:05.000Z | 2021-12-28T08:13:05.000Z | # coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from src.yolo_net import YoloTest, Yolo
import torch
import cv2
import numpy as np
# net = Yolo(10177)
# state_dict = torch.load('trained_models\\only_params_trained_yolo_coco')
# net.load_state_dict(state_dict)
# net.eval()
# img10 = readImage('D:\\dev\\dataset\\CASIA-WebFace\\0000045\\001.jpg')
# img11 = readImage('D:\\dev\\dataset\\CASIA-WebFace\\0000045\\002.jpg')
# img21 = readImage('D:\\dev\\dataset\\CASIA-WebFace\\0000099\\001.jpg')
#
# logits = net(img10)
# print(logits.view(1, 5, -1, 49).shape)
# output10 = net(img10).reshape((1024*7*7,)).detach().numpy()
# output11 = net(img11).reshape((1024*7*7,)).detach().numpy()
# output21 = net(img21).reshape((1024*7*7,)).detach().numpy()
# dis11 = np.linalg.norm(output10 - output11)
# dis21 = np.linalg.norm(output10 - output21)
#
# print(dis11)
# print(dis21)
#
#
# def cosdis(vec1, vec2):
# return np.dot(vec1,vec2)/(np.linalg.norm(vec1)*(np.linalg.norm(vec2)))
#
# cosdis11 = cosdis(output10, output11)
# cosdis21 = cosdis(output10, output21)
# print(cosdis11)
# print(cosdis21) | 24.876712 | 80 | 0.680617 |
a97e81a89bda65fad9ab35f52160822fa9349f8c | 11,572 | py | Python | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | [
"MIT"
] | null | null | null | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | [
"MIT"
] | null | null | null | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | [
"MIT"
] | null | null | null | # coding=utf-8
""" Google Earth Engine MODIS Collections """
from . import Collection, TODAY, Band
from functools import partial
IDS = [
'MODIS/006/MOD09GQ', 'MODIS/006/MYD09GQ',
'MODIS/006/MOD09GA', 'MODIS/006/MYD09GA',
'MODIS/006/MOD13Q1', 'MODIS/006/MYD13Q1'
]
START = {
'MODIS/006/MOD09GQ': '2000-02-24',
'MODIS/006/MYD09GQ': '2000-02-24',
'MODIS/006/MOD09GA': '2000-02-24',
'MODIS/006/MYD09GA': '2000-02-24',
'MODIS/006/MOD13Q1': '2000-02-18',
'MODIS/006/MYD13Q1': '2000-02-18',
}
END = {
'MODIS/006/MOD09GQ': TODAY,
'MODIS/006/MYD09GQ': TODAY,
'MODIS/006/MOD09GA': TODAY,
'MODIS/006/MYD09GA': TODAY,
'MODIS/006/MOD13Q1': TODAY,
'MODIS/006/MYD13Q1': TODAY,
}
| 38.317881 | 89 | 0.497753 |
a97f5a52d2112340dd02628abcf36314406fa57c | 338 | py | Python | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
] | null | null | null | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
] | null | null | null | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
] | null | null | null | """ This module provides a method to generate a random number between 0 and the specified number """
import random
import math
def random_num(max):
    """
    Generate a pseudo-random integer between 0 (inclusive) and the
    given upper limit (exclusive).

    Parameters:
    max(int): the range upper limit

    Returns:
    int: a random number in [0, max)
    """
    fraction = random.random()  # uniform float in [0.0, 1.0)
    scaled = fraction * max
    return math.floor(scaled)
| 19.882353 | 100 | 0.668639 |
a981fd9db88834f380bdfbae5402c0c579a7fa58 | 272 | py | Python | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
] | 3 | 2020-03-27T19:27:01.000Z | 2021-07-15T16:28:54.000Z | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
] | 6 | 2020-03-30T17:12:42.000Z | 2020-07-14T03:07:02.000Z | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
] | 6 | 2020-03-30T17:05:58.000Z | 2021-08-18T19:21:00.000Z | import math
import numpy as np
| 24.727273 | 55 | 0.602941 |
a982f1f9c012c80b9c26e9e99c4415060d09e04a | 166 | py | Python | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
] | null | null | null | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
] | null | null | null | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
] | null | null | null | import ctypes
import struct
from .dave_define import *
from .dave_enum import *
from .dave_msg_id import *
from .dave_msg_struct import *
from .dave_struct import * | 18.444444 | 30 | 0.789157 |
a9840415a7cc2a3662940dac6af33c62299a8276 | 551 | py | Python | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""@package Methods.Machine.Conductor.check
Check that the Conductor is correct
@date Created on Thu Jan 22 17:50:02 2015
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from pyleecan.Methods.Machine.LamSlotWind.check import Lam_WindCheckError
def check(self):
    """Check that the Conductor object is correct

    Parameters
    ----------
    self : Conductor
        A Conductor object

    Returns
    -------
    None
    """
    # No validation is performed at this level: the body is intentionally
    # empty and the method simply returns None.
    return None
| 17.774194 | 73 | 0.658802 |
a98465a5dbaaa69b7d18d16711f08102c5a830eb | 3,414 | py | Python | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
] | 1 | 2022-02-17T19:47:14.000Z | 2022-02-17T19:47:14.000Z | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
] | null | null | null | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import List
import cv2
import numpy as np
from shapely import geometry
from shapely.strtree import STRtree
from wholeslidedata.annotation.structures import Annotation, Point, Polygon
from wholeslidedata.image.wholeslideimage import WholeSlideImage
from wholeslidedata.image.wholeslideimagewriter import WholeSlideMaskWriter
from wholeslidedata.samplers.utils import shift_coordinates
| 32.207547 | 86 | 0.621558 |
a984e763170541feb20e89e4a6245f1b8e706963 | 578 | py | Python | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | null | null | null | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | 2 | 2019-04-15T06:29:55.000Z | 2019-04-19T17:34:32.000Z | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
] | 1 | 2019-11-19T04:51:18.000Z | 2019-11-19T04:51:18.000Z | import pytest
from ..slicing_tuples import tuple_slice
| 36.125 | 88 | 0.570934 |
a9856cedef8243944a78d8985c56e556db9faae0 | 28,653 | py | Python | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
] | null | null | null | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
] | null | null | null | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, Future
import importlib
import logging
import threading
import traceback
from typing import TYPE_CHECKING, Callable, Dict, List, Sequence, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.cli import curses_display_manager as cdm
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.containers.interface import AttributeContainer
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.module import ThreadAwareModule, BaseModule
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
T = TypeVar("T", bound="interface.AttributeContainer") # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
| 36.640665 | 144 | 0.682965 |
a98618135a8eb68ea555b4e82e1d790635fb2594 | 1,374 | py | Python | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
] | null | null | null | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
] | null | null | null | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
] | null | null | null | ### Implements database management for Authentication Server and TGS ###
from Query import *
from sqlite3 import *
from config import *
| 21.809524 | 72 | 0.697234 |
a987d4f7ac2585765bc67edb9138327e5465eec0 | 451 | py | Python | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | null | null | null | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | 6 | 2021-03-30T12:05:07.000Z | 2021-04-05T14:21:46.000Z | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | null | null | null | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import Person
from .serializers import PersonSerializer
| 22.55 | 60 | 0.75388 |
a98828e92b274eb6eae13e6556ae7fff3be2a963 | 8,867 | py | Python | simple_soccer/two_dimension.py | RyoheiGoto/reinforcement_learning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
] | 2 | 2015-11-18T17:47:19.000Z | 2016-03-20T08:22:42.000Z | simple_soccer/two_dimension.py | RyoheiGoto/reinforcement_learning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
] | 1 | 2015-11-19T18:15:13.000Z | 2016-02-09T16:48:23.000Z | simple_soccer/two_dimension.py | RyoheiGoto/ReinforcementLearning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# --- Field geometry (Python 2 script: note xrange and integer division) ---
field_width = 396 #cm
field_hight = 180 #cm
goal_length = 180 #cm
threshold = 36  # grid step in cm used to discretise the field
# Grid positions across the width, centred on 0 (-field_width/2 .. +field_width/2).
field_width_threshold_num = field_width / threshold + 1
field_width_threshold = [Y * threshold - field_width / 2.0 for Y in xrange(field_width_threshold_num)]
# Grid positions along the height (0 .. field_hight).
field_hight_threshold_num = field_hight / threshold + 1
field_hight_threshold = [X * threshold for X in xrange(field_hight_threshold_num)]
# Ball-velocity bucket boundaries (x: -100..0, y: -50..50); the bucket
# count is one more than the number of boundaries.
ball_velo_x_threshold = [X * 100.0 for X in [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0]]
ball_velo_x_threshold_num = len(ball_velo_x_threshold) + 1
ball_velo_y_threshold = [Y * 50.0 for Y in [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0]]
ball_velo_y_threshold_num = len(ball_velo_y_threshold) + 1
tau = 0.2 #sec
fall_time = 10
robot_states = 3
#epsilon = 0.1
epsilon = 0.00001  # presumably the exploration rate -- confirm in the Soccer class
alpha = 0.5  # presumably the learning rate -- confirm in the Soccer class
gamma = 0.5  # presumably the discount factor -- confirm in the Soccer class
STAND, LEFT, RIGHT, BALL = range(4)  # integer identifiers 0..3
# Episode status labels.
COMPLETED = "COMPLETED"
FAILED = "FAILED"
ACTIVE = "ACTIVE"
if __name__ == '__main__':
    #Soccer(max_episode=100, plot=True)
    Soccer(max_episode=10000000, plot=False)
| 33.587121 | 149 | 0.551483 |
a98a17680f92454408a66d8e581e032e851f1d31 | 1,089 | py | Python | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
] | 8 | 2020-03-06T02:03:40.000Z | 2022-01-22T15:57:17.000Z | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
] | 3 | 2020-03-06T01:48:53.000Z | 2021-10-06T04:15:55.000Z | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
] | 2 | 2019-12-01T18:41:07.000Z | 2020-07-15T14:52:17.000Z | from pytest import raises
from gsea_api.molecular_signatures_db import MolecularSignaturesDatabase
| 38.892857 | 90 | 0.747475 |
a98a271a4efe485ccb8f3daffb76dc91992cf6a3 | 11,387 | py | Python | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 2 | 2022-03-13T14:49:46.000Z | 2022-03-14T18:39:04.000Z | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 3 | 2022-03-18T11:52:46.000Z | 2022-03-18T14:13:43.000Z | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 1 | 2022-03-18T09:36:20.000Z | 2022-03-18T09:36:20.000Z | from django.contrib import admin, auth
from django.contrib.auth.models import Group
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import path, reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from adminsortable2.admin import SortableAdminMixin
from froide.api import api_router
from froide.follow.admin import FollowerAdmin
from froide.helper.admin_utils import make_choose_object_action, make_emptyfilter
from froide.helper.widgets import TagAutocompleteWidget
from froide.organization.models import Organization
from .api_views import GovernmentPlanViewSet
from .auth import get_allowed_plans, has_limited_access
from .forms import (
GovernmentPlanForm,
GovernmentPlanUpdateAcceptProposalForm,
GovernmentPlanUpdateForm,
)
from .models import (
Government,
GovernmentPlan,
GovernmentPlanFollower,
GovernmentPlanSection,
GovernmentPlanUpdate,
)
User = auth.get_user_model()
api_router.register(r"governmentplan", GovernmentPlanViewSet, basename="governmentplan")
def execute_assign_organization(admin, request, queryset, action_obj):
    """Bulk-set the ``organization`` field of every row in ``queryset``
    to ``action_obj`` (the organization picked in the admin action)."""
    update_kwargs = {"organization": action_obj}
    queryset.update(**update_kwargs)
def execute_assign_group(admin, request, queryset, action_obj):
    """Bulk-set the ``group`` field of every row in ``queryset``
    to ``action_obj`` (the permission group picked in the admin action)."""
    update_kwargs = {"group": action_obj}
    queryset.update(**update_kwargs)
PLAN_ACTIONS = {
"assign_organization": make_choose_object_action(
Organization, execute_assign_organization, _("Assign organization...")
),
"assign_group": make_choose_object_action(
Group, execute_assign_group, _("Assign permission group...")
),
}
admin.site.register(Government, GovernmentAdmin)
admin.site.register(GovernmentPlan, GovernmentPlanAdmin)
admin.site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
admin.site.register(GovernmentPlanSection, GovernmentPlanSectionAdmin)
admin.site.register(GovernmentPlanFollower, FollowerAdmin)
govplan_admin_site = GovPlanAdminSite(name="govplanadmin")
govplan_admin_site.register(GovernmentPlan, GovernmentPlanAdmin)
govplan_admin_site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
| 30.859079 | 88 | 0.596557 |
a98a8630e0f08cab9b6667bd3db9422e0508306a | 2,995 | py | Python | tests/test_xmltompd.py | thiblahute/python-mpegdash | e7702dec59fe61668888ba5c9e1cb2f495b72c17 | [
"MIT"
] | 1 | 2021-06-08T04:25:04.000Z | 2021-06-08T04:25:04.000Z | tests/test_xmltompd.py | thiblahute/python-mpegdash | e7702dec59fe61668888ba5c9e1cb2f495b72c17 | [
"MIT"
] | null | null | null | tests/test_xmltompd.py | thiblahute/python-mpegdash | e7702dec59fe61668888ba5c9e1cb2f495b72c17 | [
"MIT"
] | 1 | 2021-09-27T12:57:51.000Z | 2021-09-27T12:57:51.000Z | try:
import unittest2 as unittest
except:
import unittest
from mpegdash.parser import MPEGDASHParser
| 50.762712 | 126 | 0.686477 |
a98cc0ed5054e6dba3e35b5238cafe5ac890c96b | 513 | py | Python | algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py | dibyanshushekhardey/data_struct_and_algo_coursera | ce579ba0be19d0415dc5a9526fd04bcdb803dbc0 | [
"MIT"
] | null | null | null | algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py | dibyanshushekhardey/data_struct_and_algo_coursera | ce579ba0be19d0415dc5a9526fd04bcdb803dbc0 | [
"MIT"
] | null | null | null | algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py | dibyanshushekhardey/data_struct_and_algo_coursera | ce579ba0be19d0415dc5a9526fd04bcdb803dbc0 | [
"MIT"
] | null | null | null |
# Driver for an iterative binary search (BinarySearchIt is defined elsewhere).
arr = [3, 5, 8, 10, 12, 15, 18, 20, 20, 50, 60]
# Search bounds handed to BinarySearchIt; low=1 with high=11 (= len(arr))
# suggests 1-based indexing -- confirm against the BinarySearchIt definition.
low = 1
high = 11
key = 50  # value to look for
index = BinarySearchIt(arr, low, high, key)
# The branch below treats -1 as the "not found" sentinel.
if index != -1:
    print ("Element", key,"is present at index %d" %(index))
else:
    print ("Element %d is not present" %(key))
a98fe624f9604a44b5865d4659413307a64a58db | 2,133 | py | Python | 2016/day-02.py | mharty3/advent_of_code | f86e67eb772f4c328e30744610606fc154930aef | [
"MIT"
] | null | null | null | 2016/day-02.py | mharty3/advent_of_code | f86e67eb772f4c328e30744610606fc154930aef | [
"MIT"
] | null | null | null | 2016/day-02.py | mharty3/advent_of_code | f86e67eb772f4c328e30744610606fc154930aef | [
"MIT"
] | null | null | null | #--- Day 2: Bathroom Security ---
from typing import List
test_data = """ULL
RRDDD
LURDL
UUUUD"""
assert solve1(test_data) == '1985'
assert solve2(test_data) == '5DB3'
if __name__ == '__main__':
from aocd.models import Puzzle
puzzle = Puzzle(2016, 2)
answer_1 = solve1(puzzle.input_data)
print(answer_1)
puzzle.answer_a = answer_1
answer_2 = solve2(puzzle.input_data)
puzzle.answer_b = answer_2
| 21.118812 | 50 | 0.449602 |
a99348b5bc6c6ccf0bf508d81eb41b18d8e6cf18 | 2,875 | py | Python | compose.py | gicmo/koji-osbuild | d8107f23478ca12cd376098a79c7465cc5dd12d1 | [
"Apache-2.0"
] | null | null | null | compose.py | gicmo/koji-osbuild | d8107f23478ca12cd376098a79c7465cc5dd12d1 | [
"Apache-2.0"
] | null | null | null | compose.py | gicmo/koji-osbuild | d8107f23478ca12cd376098a79c7465cc5dd12d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import argparse
import koji
import os
from pprint import pprint
if __name__ == "__main__":
main()
| 39.930556 | 88 | 0.631652 |
a9939846090c5322d4926d75f10b1fc68c18dada | 153 | py | Python | {{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}.py | numengo/cc-py-setup | 392dfb85acb9052bf48586b9be98fc1f591d8991 | [
"ISC",
"Apache-2.0",
"MIT"
] | 3 | 2018-02-16T17:10:15.000Z | 2018-03-01T19:38:54.000Z | {{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}.py | numengo/cc-py-setup | 392dfb85acb9052bf48586b9be98fc1f591d8991 | [
"ISC",
"Apache-2.0",
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}.py | numengo/cc-py-setup | 392dfb85acb9052bf48586b9be98fc1f591d8991 | [
"ISC",
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Main module {{cookiecutter.project_name}} """
from __future__ import absolute_import
from __future__ import unicode_literals
| 25.5 | 48 | 0.751634 |
a995cea083a766e717127d27dd67556ccd2542a5 | 5,382 | py | Python | src/models/def_features.py | jshcs/cfe | dc6ca928a124a3e0e0dd64a1d3667a9b313e8c50 | [
"MIT"
] | null | null | null | src/models/def_features.py | jshcs/cfe | dc6ca928a124a3e0e0dd64a1d3667a9b313e8c50 | [
"MIT"
] | null | null | null | src/models/def_features.py | jshcs/cfe | dc6ca928a124a3e0e0dd64a1d3667a9b313e8c50 | [
"MIT"
] | null | null | null | from config import *
from utils import *
import datetime
import pickle
indir_vocab_jnames = VOCAB_JNAMES
indir_bio_srt = BIO_SRT
indir_sorted_fperson_fname = SORTED_FPERSON_FNAME
indir_sorted_lperson_fname = SORTED_LPERSON_FNAME
print indir_vocab_jnames
with open(indir_vocab_jnames,'rb') as v:
all_vocab=pickle.load(v)
with open(indir_bio_srt,'rb') as v:
all_bio_vocab=pickle.load(v)
all_bio_vocab = [a.decode('utf-8') for a in all_bio_vocab]
sorted_fname= read_sorted_file_into_array(indir_sorted_fperson_fname)
sorted_lname= read_sorted_file_into_array(indir_sorted_lperson_fname)
#test() | 31.290698 | 102 | 0.622074 |
a9971d06d9c16341c965038e22004beaf49e0586 | 2,182 | py | Python | profile_python/profile.py | heroesofcode/profile-python | e4e6ee2f3739ea6edad30999b74b3d42f754a86c | [
"MIT"
] | null | null | null | profile_python/profile.py | heroesofcode/profile-python | e4e6ee2f3739ea6edad30999b74b3d42f754a86c | [
"MIT"
] | 1 | 2021-10-09T01:26:29.000Z | 2021-10-09T01:26:29.000Z | profile_python/profile.py | heroesofcode/profile-python | e4e6ee2f3739ea6edad30999b74b3d42f754a86c | [
"MIT"
] | null | null | null | from rich.console import Console
from rich.table import Table
from rich.progress import track
from time import sleep
import sys
| 29.486486 | 76 | 0.47846 |
a99744e768b04af0c0bed6111d20060a12e0cfeb | 2,459 | py | Python | app/view/admin/notification_manage.py | G1NTOKI0522/WeChatterBot | 1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5 | [
"BSD-3-Clause"
] | 1 | 2020-04-03T02:54:18.000Z | 2020-04-03T02:54:18.000Z | app/view/admin/notification_manage.py | G1NTOKI0522/WeChatterBot | 1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5 | [
"BSD-3-Clause"
] | 7 | 2020-04-11T13:22:50.000Z | 2020-05-14T00:19:37.000Z | app/view/admin/notification_manage.py | G1NTOKI0522/WeChatterBot | 1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5 | [
"BSD-3-Clause"
] | 3 | 2020-04-11T12:09:56.000Z | 2020-12-16T13:26:20.000Z | # coding: utf-8
import datetime
from flask_login import login_required, current_user
from flask import Blueprint, request
from app.libs.http import jsonify, error_jsonify
from app.libs.db import session
from app.serializer.notice import NoticeParaSchema
from app.model.notice import Notice
bp_admin_notification = Blueprint('admin_notification', __name__, url_prefix='/admin/notification')
| 28.264368 | 99 | 0.699471 |
a998c1d627b7fcf20a5161fbb3c3b4a79699eea3 | 1,345 | py | Python | test/test_delete_contact_from_group.py | schukinp/python_training | 8140bbf1aae10052055f272c8deb3a7bdb7abcfb | [
"Apache-2.0"
] | null | null | null | test/test_delete_contact_from_group.py | schukinp/python_training | 8140bbf1aae10052055f272c8deb3a7bdb7abcfb | [
"Apache-2.0"
] | null | null | null | test/test_delete_contact_from_group.py | schukinp/python_training | 8140bbf1aae10052055f272c8deb3a7bdb7abcfb | [
"Apache-2.0"
] | null | null | null | from fixture.orm import ORMfixture
from model.group import Group
from model.contact import Contact
import random
db = ORMfixture(host='127.0.0.1', name='addressbook', user='root', password='')
| 44.833333 | 130 | 0.710781 |
a9993f306b253d20a5358a309289cc43d569a04f | 323 | py | Python | apps/accounts/views.py | martindwyer/Juntos | 0aac3add432f5f3fc42befc720b70253d4fef2b4 | [
"MIT"
] | null | null | null | apps/accounts/views.py | martindwyer/Juntos | 0aac3add432f5f3fc42befc720b70253d4fef2b4 | [
"MIT"
] | null | null | null | apps/accounts/views.py | martindwyer/Juntos | 0aac3add432f5f3fc42befc720b70253d4fef2b4 | [
"MIT"
] | null | null | null | from django.urls import reverse_lazy
from django.contrib.auth import get_user_model
from django.views.generic import CreateView
from . import forms
User = get_user_model()
| 23.071429 | 46 | 0.783282 |
a99aa91e73c38055d1f2d643a8c77c56216293f4 | 6,498 | py | Python | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
] | 1 | 2022-03-12T04:49:19.000Z | 2022-03-12T04:49:19.000Z | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
] | null | null | null | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.builder import build_gradient_handler
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_global_dist_logger
from colossalai.nn import (ZeroRedundancyOptimizer_Level_2,
ZeroRedundancyOptimizer_Level_3)
from .schedule import BaseSchedule
def train(self):
"""Sets the model to training mode.
"""
self.training = True
self._model.train()
def eval(self):
"""Sets the model to evaluation mode.
"""
self.training = False
self._model.eval()
def step(self,
data_iter,
is_last_iteration: bool = False,
return_loss=True):
"""A running step based on the schedule. Usually, it runs a training or
evaluation over a batch of dataset.
:param data_iter: Data iterator of the dataset
:param is_last_iteration: If True, this iteration is the last iteration in the epoch
:param return_loss: loss will be returned if True
:type data_iter: Iterator
:type is_last_iteration: bool, optional
:type return_loss: bool, optional
:return: (output, lablel, loss)
"""
if self.training:
self._optimizer.zero_grad()
# differentiate training and eval with grad accum
if self.training:
for i in range(self._grad_accum_size):
output, label, loss = self._schedule.forward_backward_step(
data_iter, self._model, self._criterion, self._optimizer,
forward_only=False,
grad_accum_size=self._grad_accum_size,
return_loss=return_loss)
if i == self._grad_accum_size - 1:
# all reduce gradients
self.handle_gradient()
self._schedule.optimizer_step(self._model, self._optimizer, self._grad_clip)
else:
output, label, loss = self._schedule.forward_backward_step(
data_iter, self._model, self._criterion, self._optimizer,
forward_only=True,
grad_accum_size=1,
return_loss=return_loss)
# consume the remaining dataset left out due to gradient accumulation
if is_last_iteration:
while True:
try:
_ = next(data_iter)
except StopIteration:
break
return output, label, loss
| 36.711864 | 99 | 0.622499 |
a99b36048f5d32ab6c9b6ad9baf0b5a681590fdd | 718 | py | Python | 11. Optical Flow/optical_flow.py | farhan0syakir/OpenCv-tutorial | b3d78f3567f4ea61b8955190f51097b6ceb4b318 | [
"MIT"
] | 15 | 2021-05-04T15:03:14.000Z | 2022-03-20T11:57:55.000Z | 11. Optical Flow/optical_flow.py | farhan0syakir/OpenCv-tutorial | b3d78f3567f4ea61b8955190f51097b6ceb4b318 | [
"MIT"
] | 12 | 2020-09-24T16:57:45.000Z | 2020-10-23T15:13:06.000Z | 11. Optical Flow/optical_flow.py | farhan0syakir/OpenCv-tutorial | b3d78f3567f4ea61b8955190f51097b6ceb4b318 | [
"MIT"
] | 18 | 2020-09-21T13:01:37.000Z | 2020-10-15T19:42:28.000Z | import numpy as np
import cv2
cap = cv2.VideoCapture('motion.avi')
ret, frame = cap.read()
gs_im0 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
points_prev = cv2.goodFeaturesToTrack(gs_im0, 100, 0.03, 9.0, False)
while(cap.isOpened()):
ret, frame = cap.read()
gs_im1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Call tracker.
points, st, err = cv2.calcOpticalFlowPyrLK(gs_im0, gs_im1, points_prev, None, (3,3))
for i,p in enumerate(points):
a,b = p.ravel()
frame = cv2.circle(frame,(a,b),3,(255,255,255),-1)
cv2.imshow('frame',frame)
points_prev = points
gs_im0 = gs_im1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | 25.642857 | 88 | 0.650418 |
a99d2fd19858a720fd9deb294de8995490e6da48 | 574 | py | Python | game/rendering.py | rajbala5479/asteroid | 73c6eab1bbdb68ff6c7f337c9517ba0ac1f34294 | [
"MIT"
] | null | null | null | game/rendering.py | rajbala5479/asteroid | 73c6eab1bbdb68ff6c7f337c9517ba0ac1f34294 | [
"MIT"
] | null | null | null | game/rendering.py | rajbala5479/asteroid | 73c6eab1bbdb68ff6c7f337c9517ba0ac1f34294 | [
"MIT"
] | null | null | null | import math
def make_circle(radius = 10, res = 20, filled = True):
points = []
for i in range(res):
ang = 2*math.pi * i / res
points.append((math.cos(ang) * radius, math.sin(ang) * radius) )
if filled:
return FilledPolygon(points)
else:
return PolyLine(points, True) | 19.793103 | 72 | 0.574913 |
a99e5850b3151bb654dd58f3e042f9310c260e3c | 2,770 | py | Python | tests/components/test_servo.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | tests/components/test_servo.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | tests/components/test_servo.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | """Tests for the servo classes."""
from typing import List, Optional, Type
import pytest
from j5.backends import Backend
from j5.boards import Board
from j5.components.servo import Servo, ServoInterface, ServoPosition
def test_servo_interface_implementation():
"""Test that we can implement the ServoInterface."""
MockServoDriver()
def test_servo_interface_class():
"""Test that the interface class is ServoInterface."""
assert Servo.interface_class() is ServoInterface
def test_servo_instantiation():
"""Test that we can instantiate a Servo."""
Servo(0, MockServoBoard(), MockServoDriver())
def test_servo_get_position():
"""Test that we can get the position of a servo."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
assert type(servo.position) is float
assert servo.position == 0.5
def test_servo_set_position():
"""Test that we can set the position of a servo."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
servo.position = 0.6
def test_servo_set_position_none():
"""Test that we can set the position of a servo to None."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
servo.position = None
def test_servo_set_position_out_of_bounds():
"""Test that we cannot set < -1 or > 1."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
with pytest.raises(ValueError):
servo.position = 2
with pytest.raises(ValueError):
servo.position = -2
| 26.634615 | 81 | 0.652708 |
a99e9b3110ca912a6a3fdcacc3a5951f95d02cb7 | 327 | py | Python | Desafios/des029.py | vitormrts/ExerciciosPython | 176b1c21e147670f7495678bdd4fc97241440d28 | [
"MIT"
] | 1 | 2021-02-07T18:58:57.000Z | 2021-02-07T18:58:57.000Z | Desafios/des029.py | vitormrts/ExerciciosPython | 176b1c21e147670f7495678bdd4fc97241440d28 | [
"MIT"
] | null | null | null | Desafios/des029.py | vitormrts/ExerciciosPython | 176b1c21e147670f7495678bdd4fc97241440d28 | [
"MIT"
] | null | null | null | frase = str(input('Digite uma frase: ')).lower()
print('Sobre a letra "a": \nQuantas vezes ela aparece? {} vezes;'.format(frase.count('a')))
print('Em que posio ela aparece pela primeira vez? {};'.format(frase.strip().index('a')+1))
print('Em que posio ela aparece pela ltima vez? {}.'.format(frase.strip().rfind('a')+1))
| 65.4 | 93 | 0.678899 |
a9a00c334939540391cc64f13f7f530cabcf615a | 7,546 | py | Python | unfold/transactions/views.py | wesny/unfold | 6594054f7408ac142fc6e902093b6fc8cbfda94e | [
"MIT"
] | null | null | null | unfold/transactions/views.py | wesny/unfold | 6594054f7408ac142fc6e902093b6fc8cbfda94e | [
"MIT"
] | null | null | null | unfold/transactions/views.py | wesny/unfold | 6594054f7408ac142fc6e902093b6fc8cbfda94e | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.views.generic import ListView
from django.utils.http import is_safe_url
from django.contrib import messages
from rest_framework import status
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect, render
from mama_cas.models import ServiceTicket
from mama_cas.utils import redirect as cas_redirect
from mama_cas.utils import to_bool
from rest_framework.response import Response
from decimal import Decimal
from django.urls import reverse
import urllib
from pinax.stripe.mixins import CustomerMixin
from pinax.stripe.models import Charge
from pinax.stripe.actions import charges
from stripe.error import CardError
from rest_framework_jwt.settings import api_settings
from unfold.transactions.models import Purchase, Article
from unfold.transactions.admin import PurchaseForm
from unfold.users.models import User
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
| 39.507853 | 111 | 0.658362 |
a9a1965586fb4160c10932687996645bcd809a1c | 1,843 | py | Python | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | """InterviewBit.
Programming > Arrays > Rotate Matrix.
"""
matrices = [
[
[1]
],
[
[1, 2],
[3, 4]
],
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
],
[
['a', 'b', 'c', 'd'],
['e', 'f', 'g', 'h'],
['i', 'j', 'k', 'l'],
['m', 'n', 'o', 'p'],
],
[
[str(x).zfill(2) for x in range(1, 6)],
[str(x).zfill(2) for x in range(6, 11)],
[str(x).zfill(2) for x in range(11, 16)],
[str(x).zfill(2) for x in range(16, 21)],
[str(x).zfill(2) for x in range(21, 26)]
],
[
[str(x).zfill(2) for x in range(1, 7)],
[str(x).zfill(2) for x in range(7, 13)],
[str(x).zfill(2) for x in range(13, 19)],
[str(x).zfill(2) for x in range(19, 25)],
[str(x).zfill(2) for x in range(25, 31)],
[str(x).zfill(2) for x in range(31, 37)]
]
]
solution = Solution()
for matrix in matrices:
print("Matrix before rotation:")
for row in matrix:
print('\t', row)
print("Matrix after rotation:")
for row in solution.rotate(matrix):
print('\t', row)
print('\n' * 3)
| 26.328571 | 119 | 0.429192 |
a9a1ee58b00c556118c2fed52b5d79faa8995835 | 2,334 | py | Python | integration-tests/src/test/resources/model-in-image/scripts/verify-jdbc-resource.py | tanmaygarg-oracle/weblogic-kubernetes-operator | 2920cf3d9ba5c63ef1af6d9e4a574995286f524e | [
"UPL-1.0",
"MIT"
] | null | null | null | integration-tests/src/test/resources/model-in-image/scripts/verify-jdbc-resource.py | tanmaygarg-oracle/weblogic-kubernetes-operator | 2920cf3d9ba5c63ef1af6d9e4a574995286f524e | [
"UPL-1.0",
"MIT"
] | null | null | null | integration-tests/src/test/resources/model-in-image/scripts/verify-jdbc-resource.py | tanmaygarg-oracle/weblogic-kubernetes-operator | 2920cf3d9ba5c63ef1af6d9e4a574995286f524e | [
"UPL-1.0",
"MIT"
] | null | null | null | # Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
connect('weblogic', 'welcome1', 't3://DOMAINNAME-admin-server:7001')
# get all JDBC Properties
dsCounter = 0
allJDBCResources = cmo.getJDBCSystemResources()
for jdbcResource in allJDBCResources:
dsCounter = dsCounter + 1
dsname = jdbcResource.getName()
dsResource = jdbcResource.getJDBCResource()
dsJNDIname = dsResource.getJDBCDataSourceParams().getJNDINames()#[0]
dsDriver = dsResource.getJDBCDriverParams().getDriverName()
conn = dsResource.getJDBCDriverParams().getUrl()
dsInitialCap = dsResource.getJDBCConnectionPoolParams().getInitialCapacity()
dsMaxCap = dsResource.getJDBCConnectionPoolParams().getMaxCapacity()
dsParams = dsResource.getJDBCDataSourceParams()
dsProps = dsResource.getJDBCDriverParams().getProperties()
dsParams = dsResource.getJDBCConnectionPoolParams()
user = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/user/Value")
readTimeOut = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/oracle.jdbc.ReadTimeout/Value")
connTimeOut = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/oracle.net.CONNECT_TIMEOUT/Value")
print 'datasource.name.' + str(dsCounter) +'=' + str(dsname)
print 'datasource.jndiname.' + str(dsCounter) + '=' + str(dsJNDIname)
print 'datasource.driver.class.' + str(dsCounter) + '=' + dsDriver
print 'datasource.url.' + str(dsCounter) + '=' + conn
print 'datasource.initialCapacity.' + str(dsCounter) + '=' + str(dsInitialCap)
print 'datasource.maxCapacity.' + str(dsCounter) + '=' + str(dsMaxCap)
print 'datasource.readTimeout.' + str(dsCounter) + '=' + readTimeOut
print 'datasource.connectionTimeout.' + str(dsCounter) + '=' + connTimeOut
print 'datasource.username.' + str(dsCounter) + '=' + str(user)
print 'datasource.dsProps.' + str(dsCounter) + '=' + str(dsProps)
print 'datasource.dsParams.' + str(dsCounter) + '=' + str(dsParams)
disconnect()
exit() | 61.421053 | 184 | 0.711225 |
a9a3856b6e71069b01f3d5066c6f323c68f21ce5 | 1,283 | py | Python | tests/dao_tests/test_stored_sample_dao.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | tests/test_stored_sample_dao.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | tests/test_stored_sample_dao.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z | from rdr_service import clock
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.participant import Participant
from tests.helpers.unittest_base import BaseTestCase
| 37.735294 | 92 | 0.694466 |
a9a3934109af932f3d04644fe8eb5b82a3bf255d | 2,769 | py | Python | server/pantryflask/__init__.py | jernaumorat/IntelligentPantry | 33d1ee867a5b6e0169fb44918069fbec5bfde259 | [
"MIT"
] | null | null | null | server/pantryflask/__init__.py | jernaumorat/IntelligentPantry | 33d1ee867a5b6e0169fb44918069fbec5bfde259 | [
"MIT"
] | null | null | null | server/pantryflask/__init__.py | jernaumorat/IntelligentPantry | 33d1ee867a5b6e0169fb44918069fbec5bfde259 | [
"MIT"
] | 1 | 2021-11-11T09:25:34.000Z | 2021-11-11T09:25:34.000Z | import socket, os, atexit
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask.helpers import send_from_directory, url_for
from zeroconf import ServiceInfo, Zeroconf
from pantryflask.config import FlaskConfig
from pantryflask.auth import token_auth, generate_pairing_code, generate_user_token
from pantryflask.models import AuthToken
from pantryflask.db import db
from pantryflask.pantry_api import bp as pantry_bp
from pantryflask.robot_api import bp as robot_bp
from pantryflask.util import bp as util_bp
ip = os.environ.get('LISTEN_IP')
httpZconf = ServiceInfo(
"_http._tcp.local.",
"intpantry._http._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5000)
httpsZconf = ServiceInfo(
"_https._tcp.local.",
"intpantry._https._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5443)
zc = Zeroconf()
zc.register_service(httpZconf)
print('Service Registered:', httpZconf)
app, db, migrate = app_factory() | 29.457447 | 91 | 0.669195 |
8d10162b60dc80362847021a74c900fd613e0ff7 | 39,370 | py | Python | lingua_franca/lang/parse_eu.py | OpenVoiceOS/ovos-lingua-franca | 392cc37cbfde3b8d6f11258c1e148e63ba2fb951 | [
"Apache-2.0"
] | null | null | null | lingua_franca/lang/parse_eu.py | OpenVoiceOS/ovos-lingua-franca | 392cc37cbfde3b8d6f11258c1e148e63ba2fb951 | [
"Apache-2.0"
] | 13 | 2022-01-26T03:43:46.000Z | 2022-03-25T17:00:18.000Z | lingua_franca/lang/parse_eu.py | OpenVoiceOS/ovos-lingua-franca | 392cc37cbfde3b8d6f11258c1e148e63ba2fb951 | [
"Apache-2.0"
] | 1 | 2022-01-18T21:11:44.000Z | 2022-01-18T21:11:44.000Z | #
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Parse functions for Basque (eu)
TODO: numbers greater than 999999
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.tz import gettz
from lingua_franca.lang.format_eu import pronounce_number_eu
from lingua_franca.lang.parse_common import *
from lingua_franca.lang.common_data_eu import _NUM_STRING_EU
def isFractional_eu(input_str):
"""
This function takes the given text and checks if it is a fraction.
Args:
text (str): the string to check if fractional
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
if input_str.endswith('s', -1):
input_str = input_str[:len(input_str) - 1] # e.g. "fifths"
aFrac = {"erdia": 2, "erdi": 2, "heren": 3, "laurden": 4,
"laurdena": 4, "bosten": 5, "bostena": 5, "seiren": 6, "seirena": 6,
"zazpiren": 7, "zapirena": 7, "zortziren": 8, "zortzirena": 8,
"bederatziren": 9, "bederatzirena": 9, "hamarren": 10, "hamarrena": 10,
"hamaikaren": 11, "hamaikarena": 11, "hamabiren": 12, "hamabirena": 12}
if input_str.lower() in aFrac:
return 1.0 / aFrac[input_str]
if (input_str == "hogeiren" or input_str == "hogeirena"):
return 1.0 / 20
if (input_str == "hogeita hamarren" or input_str == "hogeita hamarrena"):
return 1.0 / 30
if (input_str == "ehunen" or input_str == "ehunena"):
return 1.0 / 100
if (input_str == "milaren" or input_str == "milarena"):
return 1.0 / 1000
return False
# TODO: short_scale and ordinals don't do anything here.
# The parameters are present in the function signature for API compatibility
# reasons.
#
# Returns incorrect output on certain fractional phrases like, "cuarto de dos"
def extract_number_eu(text, short_scale=True, ordinals=False):
"""
This function prepares the given text for parsing by making
numbers consistent, getting rid of contractions, etc.
Args:
text (str): the string to normalize
Returns:
(int) or (float): The value of extracted number
"""
aWords = text.lower().split()
count = 0
result = None
while count < len(aWords):
val = 0
word = aWords[count]
next_next_word = None
if count + 1 < len(aWords):
next_word = aWords[count + 1]
if count + 2 < len(aWords):
next_next_word = aWords[count + 2]
else:
next_word = None
# is current word a number?
if word in _NUM_STRING_EU:
val = _NUM_STRING_EU[word]
elif word.isdigit(): # doesn't work with decimals
val = int(word)
elif is_numeric(word):
val = float(word)
elif isFractional_eu(word):
if next_word in _NUM_STRING_EU:
# erdi bat, heren bat, etab
result = _NUM_STRING_EU[next_word]
# hurrengo hitza (bat, bi, ...) salto egin
next_word = None
count += 2
elif not result:
result = 1
count += 1
result = result * isFractional_eu(word)
continue
if not val:
# look for fractions like "2/3"
aPieces = word.split('/')
# if (len(aPieces) == 2 and is_numeric(aPieces[0])
# and is_numeric(aPieces[1])):
if look_for_fractions(aPieces):
val = float(aPieces[0]) / float(aPieces[1])
if val:
if result is None:
result = 0
# handle fractions
if next_word == "en" or next_word == "ren":
result = float(result) / float(val)
else:
result = val
if next_word is None:
break
# number word and fraction
ands = ["eta"]
if next_word in ands:
zeros = 0
if result is None:
count += 1
continue
newWords = aWords[count + 2:]
newText = ""
for word in newWords:
newText += word + " "
afterAndVal = extract_number_eu(newText[:-1])
if afterAndVal:
if result < afterAndVal or result < 20:
while afterAndVal > 1:
afterAndVal = afterAndVal / 10.0
for word in newWords:
if word == "zero" or word == "0":
zeros += 1
else:
break
for _ in range(0, zeros):
afterAndVal = afterAndVal / 10.0
result += afterAndVal
break
elif next_next_word is not None:
if next_next_word in ands:
newWords = aWords[count + 3:]
newText = ""
for word in newWords:
newText += word + " "
afterAndVal = extract_number_eu(newText[:-1])
if afterAndVal:
if result is None:
result = 0
result += afterAndVal
break
decimals = ["puntu", "koma", ".", ","]
if next_word in decimals:
zeros = 0
newWords = aWords[count + 2:]
newText = ""
for word in newWords:
newText += word + " "
for word in newWords:
if word == "zero" or word == "0":
zeros += 1
else:
break
afterDotVal = str(extract_number_eu(newText[:-1]))
afterDotVal = zeros * "0" + afterDotVal
result = float(str(result) + "." + afterDotVal)
break
count += 1
# Return the $str with the number related words removed
# (now empty strings, so strlen == 0)
# aWords = [word for word in aWords if len(word) > 0]
# text = ' '.join(aWords)
if "." in str(result):
integer, dec = str(result).split(".")
# cast float to int
if dec == "0":
result = int(integer)
return result or False
# TODO Not parsing 'cero'
def extract_numbers_eu(text, short_scale=True, ordinals=False):
"""
Takes in a string and extracts a list of numbers.
Args:
text (str): the string to extract a number from
short_scale (bool): Use "short scale" or "long scale" for large
numbers -- over a million. The default is short scale, which
is now common in most English speaking countries.
See https://en.wikipedia.org/wiki/Names_of_large_numbers
ordinals (bool): consider ordinal numbers, e.g. third=3 instead of 1/3
Returns:
list: list of extracted numbers as floats
"""
return extract_numbers_generic(text, pronounce_number_eu, extract_number_eu,
short_scale=short_scale, ordinals=ordinals)
def normalize_eu(text, remove_articles=True):
""" Basque string normalization """
words = text.split() # this also removed extra spaces
normalized = ""
i = 0
while i < len(words):
word = words[i]
# Convert numbers into digits
r = eu_number_parse(words, i)
if r:
v, i = r
normalized += " " + str(v)
continue
normalized += " " + word
i += 1
return normalized[1:] # strip the initial space
return text
# TODO MycroftAI/mycroft-core#2348
| 36.218951 | 132 | 0.436297 |
8d1326f81b702308f07d05eaa330ea71663f64ad | 6,976 | py | Python | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
] | null | null | null | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
] | 1 | 2021-06-01T21:26:25.000Z | 2021-06-01T21:26:25.000Z | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
] | null | null | null | # Defines a velocity profile, which is the big object we've been
# working towards.
from math import sqrt, ceil
import json
| 38.32967 | 96 | 0.591886 |
8d1378b3e67d5a0964ccf48994e4da6105c0ae60 | 472 | py | Python | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
] | null | null | null | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
] | null | null | null | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
] | null | null | null | import os
import subprocess
test_set_path = '/Users/runelangergaard/Documents/SmartAnnotation/data/test_set'
test_imgs = os.listdir(test_set_path)
test_imgs
cwd_path = '/Users/runelangergaard'
os.chdir(cwd_path)
for img in test_imgs:
full_path = os.path.join(test_set_path, img)
subprocess.run([
"scp",
"-i",
"coco-anno.pem",
full_path,
"ec2-user@ec2-34-211-193-133.us-west-2.compute.amazonaws.com:/datasets/tmp"
])
| 23.6 | 83 | 0.684322 |
8d13e8253f51474a77c77b964813f16a0d1c345f | 304 | py | Python | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
] | 1 | 2019-06-29T18:53:31.000Z | 2019-06-29T18:53:31.000Z | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
] | null | null | null | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
] | null | null | null | import random
import grom
grom.debug(False)
dirName = "dump\\"
inputName = "example.bmp"
outputName = "output.bmp"
g = grom.Genome(dirName + inputName, partition=[
('head', 0x76),
('raw')
])
print(g)
print(g.partition)
g.apply(lambda x: 255 - x, ['raw'])
g(dirName + outputName, pause=False)
| 16 | 48 | 0.661184 |
8d14a69daed26d53510912624929725162594aec | 3,351 | py | Python | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
] | null | null | null | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
] | 7 | 2022-02-24T17:20:28.000Z | 2022-03-25T13:18:44.000Z | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
from statefun.core import ValueSpec
from statefun.context import Context
from statefun.messages import Message
from statefun.storage import make_address_storage_spec, StorageSpec
import inspect
| 39.423529 | 118 | 0.640107 |
8d17091c2b65264aa06f866332b484a8ae11e68d | 2,195 | py | Python | Solutions/236.py | ruppysuppy/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 70 | 2021-03-18T05:22:40.000Z | 2022-03-30T05:36:50.000Z | Solutions/236.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | null | null | null | Solutions/236.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 30 | 2021-03-18T05:22:43.000Z | 2022-03-17T10:25:18.000Z | """
Problem:
You are given a list of N points (x1, y1), (x2, y2), ..., (xN, yN) representing a
polygon. You can assume these points are given in order; that is, you can construct the
polygon by connecting point 1 to point 2, point 2 to point 3, and so on, finally
looping around to connect point N to point 1.
Determine if a new point p lies inside this polygon. (If p is on the boundary of the
polygon, you should return False).
"""
from typing import List, Tuple
Point = Tuple[int, int]
if __name__ == "__main__":
print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (3, 3)))
print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (5, 3)))
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(n)
"""
| 29.662162 | 87 | 0.596811 |
8d199b44ca6bfd408aa35f9d1da7c224cc1e44a1 | 968 | py | Python | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
] | 155 | 2019-12-16T00:04:03.000Z | 2022-03-28T11:22:55.000Z | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
] | 151 | 2019-11-22T13:13:22.000Z | 2022-03-30T21:27:32.000Z | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
] | 28 | 2020-01-15T15:30:43.000Z | 2022-03-22T08:58:06.000Z | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from appimagebuilder.modules.generate.package_managers.apt import (
PackageRepositoryResolver,
)
| 44 | 93 | 0.759298 |
8d19a458c0aeddafe12f42faf41b63a52a85ae7f | 2,546 | py | Python | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | # Author: Fabio Rodrigues Pereira
# E-mail: fabior@uio.no
# Author: Per Morten Halvorsen
# E-mail: pmhalvor@uio.no
# Author: Eivind Grnlie Guren
# E-mail: eivindgg@ifi.uio.no
try:
from Oblig3.packages.preprocess import load_raw_data, filter_raw_data, pad
from Oblig3.packages.preprocess import OurCONLLUDataset
from Oblig3.packages.model import Transformer
except:
from packages.preprocess import load_raw_data, filter_raw_data, pad
from packages.preprocess import OurCONLLUDataset
from packages.model import Transformer
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from transformers import BertTokenizer
import torch
# first step
# datapath = '/cluster/projects/nn9851k/IN5550/norne-nb-in5550-train.conllu'
# NORBERT = '/cluster/shared/nlpl/data/vectors/latest/216'
datapath = 'Oblig3/saga/norne-nb-in5550-train.conllu'
NORBERT = 'Oblig3/saga/216/'
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.cuda.empty_cache() if torch.cuda.is_available() else None
# loading raw data
con_df = load_raw_data(datapath=datapath)
con_df = filter_raw_data(df=con_df, min_entities=5)
# splitting data
train_df, val_df = train_test_split(
con_df,
# train_size=0.50,
test_size=0.25,
random_state=1,
shuffle=True,
)
tokenizer = BertTokenizer.from_pretrained(NORBERT)
# creating data sets
train_dataset = OurCONLLUDataset(
df=train_df,
tokenizer=tokenizer,
device=device
)
val_dataset = OurCONLLUDataset(
df=val_df,
tokenizer=tokenizer,
label_vocab=train_dataset.label_vocab,
device=device
)
# creating data loaders
train_loader = DataLoader(
train_dataset,
batch_size=32,
collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)
val_loader = DataLoader(
val_dataset,
batch_size=len(val_dataset),
collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)
# calling transformer model
transformer = Transformer(
NORBERT=NORBERT,
num_labels=len(train_dataset.label_indexer),
NOT_ENTITY_ID=train_dataset.label_indexer['O'],
device=device,
epochs=100, # 12 for the optimal
lr_scheduler=False,
factor=0.1,
patience=2,
loss_funct='cross-entropy',
random_state=1,
verbose=True,
lr=0.01,
momentum=0.9,
epoch_patience=1, # 0 for the optimal
label_indexer=train_dataset.label_indexer
)
transformer.fit(
loader=train_loader,
test=val_loader,
verbose=True
)
torch.save(transformer, "transformer_benchmark_12ep.pt")
| 24.480769 | 78 | 0.749411 |
8d1acd1c8212f19c55510b4dd8c3544bf2548519 | 11,176 | py | Python | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | null | null | null | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 2 | 2021-11-24T19:39:57.000Z | 2022-01-03T23:03:35.000Z | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import random
import string
import unittest
import warnings
from boxsdk.exception import BoxAPIException, BoxOAuthException
from parsons.box import Box
from parsons.etl import Table
"""Prior to running, you should ensure that the relevant environment
variables have been set, e.g. via
# Note: these are fake keys, provided as examples.
export BOX_CLIENT_ID=txqedp4rqi0cz5qckz361fziavdtdwxz
export BOX_CLIENT_SECRET=bk264KHMDLVy89TeuUpSRa4CN5o35u9h
export BOX_ACCESS_TOKEN=boK97B39m3ozIGyTcazbWRbi5F2SSZ5J
"""
TEST_CLIENT_ID = os.getenv('BOX_CLIENT_ID')
TEST_BOX_CLIENT_SECRET = os.getenv('BOX_CLIENT_SECRET')
TEST_ACCESS_TOKEN = os.getenv('BOX_ACCESS_TOKEN')
def generate_random_string(length):
"""Utility to generate random alpha string for file/folder names"""
return ''.join(random.choice(string.ascii_letters) for i in range(length))
| 42.656489 | 97 | 0.642895 |
8d1b66ad840bf7a208b29ea852c07fe8f18d11de | 3,961 | py | Python | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
] | null | null | null | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
] | null | null | null | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import cv2
'''Erosion Method'''
'''Point Detection Method'''
img = cv2.imread("point.jpg",0)
sample = img
kernel = np.array([[-1,-1,-1],
[-1,8,-1],
[-1,-1,-1]])
output, co_ord = point_detection(img, kernel)
output = output*255
output = np.asarray(output, np.uint8)
cv2.rectangle(output,(424,230),(464,272),(255,255,255),2)
cv2.imwrite("res_point.jpg",output)
'''Code for segmenting the object from the background'''
img2 = cv2.imread("segment.jpg", 0)
seg = check_segment(img2)
seg = np.asarray(seg, np.uint8)
cv2.rectangle(seg,(155,115),(208,172),(255,255,255),2)
cv2.rectangle(seg,(245,68),(300,223),(255,255,255),2)
cv2.rectangle(seg,(322,13),(370,291),(255,255,255),2)
cv2.rectangle(seg,(382,33),(430,264),(255,255,255),2)
'''Observed co-ordinates of bounding boxes, in col, row format'''
print("1st box: ")
print("Upper left: (155,115)")
print("Upper right: (208,115)")
print("Bottom left: (155,172)")
print("Bottom right: (208,172)\n")
print("2nd box: ")
print("Upper left: (245,68)")
print("Upper right: (300,68)")
print("Bottom left: (245,223)")
print("Bottom right: (300,223)\n")
print("3rd box: ")
print("Upper left: (322,13)")
print("Upper right: (370,13)")
print("Bottom left: (322,291)")
print("Bottom right: (370,291)\n")
print("4th box: ")
print("Upper left: (382,33)")
print("Upper right: (430,33)")
print("Bottom left: (382,264)")
print("Bottom right: (430,264)")
cv2.imwrite("res_segment.jpg",seg)
'''Plotting Histogram'''
my_dict = {}
for i in range(np.unique(img2).shape[0]):
a = np.unique(img2)[i]
count = np.sum(img2 == a)
my_dict[a] = count
sorted_by_value = sorted(my_dict.items(), key=lambda kv: kv[1])
uniq = list(np.unique(img2))
val = list(my_dict.values())
plt.plot(uniq[1:],val[1:])
plt.show()
| 30.705426 | 85 | 0.578642 |
8d1f2e38cdfd31edc3acb7a262903d61da73d831 | 1,652 | py | Python | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | [
"MIT"
] | null | null | null | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | [
"MIT"
] | null | null | null | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-07 08:19
from django.db import migrations, models
| 35.148936 | 114 | 0.552663 |
8d1f97cb6d168a2c8e3c97a6da76772adf11469f | 239 | py | Python | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | [
"MIT"
] | 1 | 2017-04-27T09:23:48.000Z | 2017-04-27T09:23:48.000Z | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | [
"MIT"
] | null | null | null | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager(app)
from app import views, models, oauth
| 21.727273 | 39 | 0.803347 |
8d212f11594f7ae449b95c565655219888507326 | 511 | py | Python | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | [
"MIT"
] | null | null | null | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | [
"MIT"
] | null | null | null | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/to-lower-case/
Difficulty: Easy
Given a string s, return the string after replacing every uppercase letter with the same lowercase letter.
Example 1:
Input: s = "Hello"
Output: "hello"
Example 2:
Input: s = "here"
Output: "here"
Example 3:
Input: s = "LOVELY"
Output: "lovely"
Constraints:
1 <= s.length <= 100
s consists of printable ASCII characters.
"""
| 17.033333 | 106 | 0.661448 |
8d213f69d083136ed499e8028606ef1e8d49f01e | 2,495 | py | Python | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | 1 | 2021-03-22T17:05:52.000Z | 2021-03-22T17:05:52.000Z | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | 6 | 2020-06-06T01:51:21.000Z | 2022-01-13T02:39:02.000Z | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | null | null | null | import align_tools as at
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
if __name__ == '__main__':
main() | 34.178082 | 161 | 0.658116 |
8d21b09432278f9368a292eca49b25d9da12e492 | 88 | py | Python | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 1 | 2019-07-31T07:34:38.000Z | 2019-07-31T07:34:38.000Z | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 9 | 2019-12-05T00:39:29.000Z | 2022-02-10T14:13:29.000Z | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.666667 | 33 | 0.738636 |
8d21d5ac301b7c2c83e332f0f0cea5a96ae6d81d | 1,266 | py | Python | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | [
"MIT"
] | 1 | 2022-03-19T02:11:12.000Z | 2022-03-19T02:11:12.000Z | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | [
"MIT"
] | null | null | null | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | [
"MIT"
] | 1 | 2021-06-01T13:21:12.000Z | 2021-06-01T13:21:12.000Z | import os
from pygears.hdl.sv import SVModuleInst
from .ip_resolver import IPResolver
| 30.878049 | 59 | 0.553712 |
8d24383aba0b77760774f695ed82a4ade6ace738 | 1,841 | py | Python | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | [
"BSD-3-Clause"
] | 39 | 2019-12-17T13:40:19.000Z | 2021-12-31T08:22:52.000Z | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | [
"BSD-3-Clause"
] | 161 | 2020-02-14T18:32:49.000Z | 2022-03-25T09:23:35.000Z | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | [
"BSD-3-Clause"
] | 12 | 2019-12-18T15:43:09.000Z | 2021-06-28T11:51:59.000Z | import shutil
import tempfile
from pathlib import Path
from typing import Dict
import click
from commodore.config import Config
from .parameters import ClassNotFound, InventoryFactory, InventoryFacts
| 30.683333 | 86 | 0.674633 |
8d2771d9640e1def0fa9d63283dfdac05afbee62 | 25,468 | py | Python | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright (c) 2013 Intel, Inc.'
nl|'\n'
comment|'# Copyright (c) 2013 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LE'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'fields'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'pci_device_pool'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'pci'
name|'import'
name|'utils'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'pci'
name|'import'
name|'whitelist'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|PciDeviceStats
name|'class'
name|'PciDeviceStats'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
string|'"""PCI devices summary information.\n\n According to the PCI SR-IOV spec, a PCI physical function can have up to\n 256 PCI virtual functions, thus the number of assignable PCI functions in\n a cloud can be big. The scheduler needs to know all device availability\n information in order to determine which compute hosts can support a PCI\n request. Passing individual virtual device information to the scheduler\n does not scale, so we provide summary information.\n\n Usually the virtual functions provided by a host PCI device have the same\n value for most properties, like vendor_id, product_id and class type.\n The PCI stats class summarizes this information for the scheduler.\n\n The pci stats information is maintained exclusively by compute node\n resource tracker and updated to database. The scheduler fetches the\n information and selects the compute node accordingly. If a compute\n node is selected, the resource tracker allocates the devices to the\n instance and updates the pci stats information.\n\n This summary information will be helpful for cloud management also.\n """'
newline|'\n'
nl|'\n'
DECL|variable|pool_keys
name|'pool_keys'
op|'='
op|'['
string|"'product_id'"
op|','
string|"'vendor_id'"
op|','
string|"'numa_node'"
op|','
string|"'dev_type'"
op|']'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'stats'
op|'='
name|'None'
op|','
name|'dev_filter'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'PciDeviceStats'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|')'
newline|'\n'
comment|'# NOTE(sbauza): Stats are a PCIDevicePoolList object'
nl|'\n'
name|'self'
op|'.'
name|'pools'
op|'='
op|'['
name|'pci_pool'
op|'.'
name|'to_dict'
op|'('
op|')'
nl|'\n'
name|'for'
name|'pci_pool'
name|'in'
name|'stats'
op|']'
name|'if'
name|'stats'
name|'else'
op|'['
op|']'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'sort'
op|'('
name|'key'
op|'='
name|'lambda'
name|'item'
op|':'
name|'len'
op|'('
name|'item'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'dev_filter'
op|'='
name|'dev_filter'
name|'or'
name|'whitelist'
op|'.'
name|'Whitelist'
op|'('
nl|'\n'
name|'CONF'
op|'.'
name|'pci_passthrough_whitelist'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_equal_properties
dedent|''
name|'def'
name|'_equal_properties'
op|'('
name|'self'
op|','
name|'dev'
op|','
name|'entry'
op|','
name|'matching_keys'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'all'
op|'('
name|'dev'
op|'.'
name|'get'
op|'('
name|'prop'
op|')'
op|'=='
name|'entry'
op|'.'
name|'get'
op|'('
name|'prop'
op|')'
nl|'\n'
name|'for'
name|'prop'
name|'in'
name|'matching_keys'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_find_pool
dedent|''
name|'def'
name|'_find_pool'
op|'('
name|'self'
op|','
name|'dev_pool'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the first pool that matches dev."""'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'pool_keys'
op|'='
name|'pool'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'del'
name|'pool_keys'
op|'['
string|"'count'"
op|']'
newline|'\n'
name|'del'
name|'pool_keys'
op|'['
string|"'devices'"
op|']'
newline|'\n'
name|'if'
op|'('
name|'len'
op|'('
name|'pool_keys'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
op|'=='
name|'len'
op|'('
name|'dev_pool'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
name|'and'
nl|'\n'
name|'self'
op|'.'
name|'_equal_properties'
op|'('
name|'dev_pool'
op|','
name|'pool_keys'
op|','
name|'dev_pool'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'pool'
newline|'\n'
nl|'\n'
DECL|member|_create_pool_keys_from_dev
dedent|''
dedent|''
dedent|''
name|'def'
name|'_create_pool_keys_from_dev'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""create a stats pool dict that this dev is supposed to be part of\n\n Note that this pool dict contains the stats pool\'s keys and their\n values. \'count\' and \'devices\' are not included.\n """'
newline|'\n'
comment|"# Don't add a device that doesn't have a matching device spec."
nl|'\n'
comment|'# This can happen during initial sync up with the controller'
nl|'\n'
name|'devspec'
op|'='
name|'self'
op|'.'
name|'dev_filter'
op|'.'
name|'get_devspec'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'devspec'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
dedent|''
name|'tags'
op|'='
name|'devspec'
op|'.'
name|'get_tags'
op|'('
op|')'
newline|'\n'
name|'pool'
op|'='
op|'{'
name|'k'
op|':'
name|'getattr'
op|'('
name|'dev'
op|','
name|'k'
op|')'
name|'for'
name|'k'
name|'in'
name|'self'
op|'.'
name|'pool_keys'
op|'}'
newline|'\n'
name|'if'
name|'tags'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'.'
name|'update'
op|'('
name|'tags'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'pool'
newline|'\n'
nl|'\n'
DECL|member|add_device
dedent|''
name|'def'
name|'add_device'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Add a device to its matching pool."""'
newline|'\n'
name|'dev_pool'
op|'='
name|'self'
op|'.'
name|'_create_pool_keys_from_dev'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'dev_pool'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'='
name|'self'
op|'.'
name|'_find_pool'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'pool'
op|':'
newline|'\n'
indent|' '
name|'dev_pool'
op|'['
string|"'count'"
op|']'
op|'='
number|'0'
newline|'\n'
name|'dev_pool'
op|'['
string|"'devices'"
op|']'
op|'='
op|'['
op|']'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'append'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'sort'
op|'('
name|'key'
op|'='
name|'lambda'
name|'item'
op|':'
name|'len'
op|'('
name|'item'
op|')'
op|')'
newline|'\n'
name|'pool'
op|'='
name|'dev_pool'
newline|'\n'
dedent|''
name|'pool'
op|'['
string|"'count'"
op|']'
op|'+='
number|'1'
newline|'\n'
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'append'
op|'('
name|'dev'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_decrease_pool_count
name|'def'
name|'_decrease_pool_count'
op|'('
name|'pool_list'
op|','
name|'pool'
op|','
name|'count'
op|'='
number|'1'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Decrement pool\'s size by count.\n\n If pool becomes empty, remove pool from pool_list.\n """'
newline|'\n'
name|'if'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'>'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'['
string|"'count'"
op|']'
op|'-='
name|'count'
newline|'\n'
name|'count'
op|'='
number|'0'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'-='
name|'pool'
op|'['
string|"'count'"
op|']'
newline|'\n'
name|'pool_list'
op|'.'
name|'remove'
op|'('
name|'pool'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'count'
newline|'\n'
nl|'\n'
DECL|member|remove_device
dedent|''
name|'def'
name|'remove_device'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Remove one device from the first pool that it matches."""'
newline|'\n'
name|'dev_pool'
op|'='
name|'self'
op|'.'
name|'_create_pool_keys_from_dev'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'dev_pool'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'='
name|'self'
op|'.'
name|'_find_pool'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'pool'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDevicePoolEmpty'
op|'('
nl|'\n'
name|'compute_node_id'
op|'='
name|'dev'
op|'.'
name|'compute_node_id'
op|','
name|'address'
op|'='
name|'dev'
op|'.'
name|'address'
op|')'
newline|'\n'
dedent|''
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'remove'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_decrease_pool_count'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'pool'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_free_devs
dedent|''
dedent|''
name|'def'
name|'get_free_devs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'free_devs'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'free_devs'
op|'.'
name|'extend'
op|'('
name|'pool'
op|'['
string|"'devices'"
op|']'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'free_devs'
newline|'\n'
nl|'\n'
DECL|member|consume_requests
dedent|''
name|'def'
name|'consume_requests'
op|'('
name|'self'
op|','
name|'pci_requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'alloc_devices'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'request'
name|'in'
name|'pci_requests'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'='
name|'request'
op|'.'
name|'count'
newline|'\n'
name|'spec'
op|'='
name|'request'
op|'.'
name|'spec'
newline|'\n'
comment|'# For now, keep the same algorithm as during scheduling:'
nl|'\n'
comment|'# a spec may be able to match multiple pools.'
nl|'\n'
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_spec'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'spec'
op|')'
newline|'\n'
name|'if'
name|'numa_cells'
op|':'
newline|'\n'
indent|' '
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_numa_cells'
op|'('
name|'pools'
op|','
name|'numa_cells'
op|')'
newline|'\n'
dedent|''
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_non_requested_pfs'
op|'('
name|'request'
op|','
name|'pools'
op|')'
newline|'\n'
comment|'# Failed to allocate the required number of devices'
nl|'\n'
comment|'# Return the devices already allocated back to their pools'
nl|'\n'
name|'if'
name|'sum'
op|'('
op|'['
name|'pool'
op|'['
string|"'count'"
op|']'
name|'for'
name|'pool'
name|'in'
name|'pools'
op|']'
op|')'
op|'<'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'error'
op|'('
name|'_LE'
op|'('
string|'"Failed to allocate PCI devices for instance."'
nl|'\n'
string|'" Unassigning devices back to pools."'
nl|'\n'
string|'" This should not happen, since the scheduler"'
nl|'\n'
string|'" should have accurate information, and allocation"'
nl|'\n'
string|'" during claims is controlled via a hold"'
nl|'\n'
string|'" on the compute node semaphore"'
op|')'
op|')'
newline|'\n'
name|'for'
name|'d'
name|'in'
name|'range'
op|'('
name|'len'
op|'('
name|'alloc_devices'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'add_device'
op|'('
name|'alloc_devices'
op|'.'
name|'pop'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'None'
newline|'\n'
dedent|''
name|'for'
name|'pool'
name|'in'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'>='
name|'count'
op|':'
newline|'\n'
indent|' '
name|'num_alloc'
op|'='
name|'count'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'num_alloc'
op|'='
name|'pool'
op|'['
string|"'count'"
op|']'
newline|'\n'
dedent|''
name|'count'
op|'-='
name|'num_alloc'
newline|'\n'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'-='
name|'num_alloc'
newline|'\n'
name|'for'
name|'d'
name|'in'
name|'range'
op|'('
name|'num_alloc'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pci_dev'
op|'='
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'pop'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_handle_device_dependents'
op|'('
name|'pci_dev'
op|')'
newline|'\n'
name|'pci_dev'
op|'.'
name|'request_id'
op|'='
name|'request'
op|'.'
name|'request_id'
newline|'\n'
name|'alloc_devices'
op|'.'
name|'append'
op|'('
name|'pci_dev'
op|')'
newline|'\n'
dedent|''
name|'if'
name|'count'
op|'=='
number|'0'
op|':'
newline|'\n'
indent|' '
name|'break'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'alloc_devices'
newline|'\n'
nl|'\n'
DECL|member|_handle_device_dependents
dedent|''
name|'def'
name|'_handle_device_dependents'
op|'('
name|'self'
op|','
name|'pci_dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Remove device dependents or a parent from pools.\n\n In case the device is a PF, all of it\'s dependent VFs should\n be removed from pools count, if these are present.\n When the device is a VF, it\'s parent PF pool count should be\n decreased, unless it is no longer in a pool.\n """'
newline|'\n'
name|'if'
name|'pci_dev'
op|'.'
name|'dev_type'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
op|':'
newline|'\n'
indent|' '
name|'vfs_list'
op|'='
name|'objects'
op|'.'
name|'PciDeviceList'
op|'.'
name|'get_by_parent_address'
op|'('
nl|'\n'
name|'pci_dev'
op|'.'
name|'_context'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'compute_node_id'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'address'
op|')'
newline|'\n'
name|'if'
name|'vfs_list'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'vf'
name|'in'
name|'vfs_list'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_device'
op|'('
name|'vf'
op|')'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'elif'
name|'pci_dev'
op|'.'
name|'dev_type'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_VF'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'parent'
op|'='
name|'pci_dev'
op|'.'
name|'get_by_dev_addr'
op|'('
name|'pci_dev'
op|'.'
name|'_context'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'compute_node_id'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'parent_addr'
op|')'
newline|'\n'
comment|'# Make sure not to decrease PF pool count if this parent has'
nl|'\n'
comment|'# been already removed from pools'
nl|'\n'
name|'if'
name|'parent'
name|'in'
name|'self'
op|'.'
name|'get_free_devs'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_device'
op|'('
name|'parent'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'except'
name|'exception'
op|'.'
name|'PciDeviceNotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_spec
name|'def'
name|'_filter_pools_for_spec'
op|'('
name|'pools'
op|','
name|'request_specs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
nl|'\n'
name|'if'
name|'utils'
op|'.'
name|'pci_device_prop_match'
op|'('
name|'pool'
op|','
name|'request_specs'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_numa_cells
name|'def'
name|'_filter_pools_for_numa_cells'
op|'('
name|'pools'
op|','
name|'numa_cells'
op|')'
op|':'
newline|'\n'
comment|"# Some systems don't report numa node info for pci devices, in"
nl|'\n'
comment|'# that case None is reported in pci_device.numa_node, by adding None'
nl|'\n'
comment|'# to numa_cells we allow assigning those devices to instances with'
nl|'\n'
comment|'# numa topology'
nl|'\n'
indent|' '
name|'numa_cells'
op|'='
op|'['
name|'None'
op|']'
op|'+'
op|'['
name|'cell'
op|'.'
name|'id'
name|'for'
name|'cell'
name|'in'
name|'numa_cells'
op|']'
newline|'\n'
comment|'# filter out pools which numa_node is not included in numa_cells'
nl|'\n'
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
name|'if'
name|'any'
op|'('
name|'utils'
op|'.'
name|'pci_device_prop_match'
op|'('
nl|'\n'
name|'pool'
op|','
op|'['
op|'{'
string|"'numa_node'"
op|':'
name|'cell'
op|'}'
op|']'
op|')'
nl|'\n'
name|'for'
name|'cell'
name|'in'
name|'numa_cells'
op|')'
op|']'
newline|'\n'
nl|'\n'
DECL|member|_filter_non_requested_pfs
dedent|''
name|'def'
name|'_filter_non_requested_pfs'
op|'('
name|'self'
op|','
name|'request'
op|','
name|'matching_pools'
op|')'
op|':'
newline|'\n'
comment|'# Remove SRIOV_PFs from pools, unless it has been explicitly requested'
nl|'\n'
comment|'# This is especially needed in cases where PFs and VFs has the same'
nl|'\n'
comment|'# product_id.'
nl|'\n'
indent|' '
name|'if'
name|'all'
op|'('
name|'spec'
op|'.'
name|'get'
op|'('
string|"'dev_type'"
op|')'
op|'!='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
name|'for'
nl|'\n'
name|'spec'
name|'in'
name|'request'
op|'.'
name|'spec'
op|')'
op|':'
newline|'\n'
indent|' '
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_pfs'
op|'('
name|'matching_pools'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'matching_pools'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_pfs
name|'def'
name|'_filter_pools_for_pfs'
op|'('
name|'pools'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
nl|'\n'
name|'if'
name|'not'
name|'pool'
op|'.'
name|'get'
op|'('
string|"'dev_type'"
op|')'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
op|']'
newline|'\n'
nl|'\n'
DECL|member|_apply_request
dedent|''
name|'def'
name|'_apply_request'
op|'('
name|'self'
op|','
name|'pools'
op|','
name|'request'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
comment|'# NOTE(vladikr): This code maybe open to race conditions.'
nl|'\n'
comment|'# Two concurrent requests may succeed when called support_requests'
nl|'\n'
comment|'# because this method does not remove related devices from the pools'
nl|'\n'
indent|' '
name|'count'
op|'='
name|'request'
op|'.'
name|'count'
newline|'\n'
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_spec'
op|'('
name|'pools'
op|','
name|'request'
op|'.'
name|'spec'
op|')'
newline|'\n'
name|'if'
name|'numa_cells'
op|':'
newline|'\n'
indent|' '
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_numa_cells'
op|'('
name|'matching_pools'
op|','
nl|'\n'
name|'numa_cells'
op|')'
newline|'\n'
dedent|''
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_non_requested_pfs'
op|'('
name|'request'
op|','
nl|'\n'
name|'matching_pools'
op|')'
newline|'\n'
name|'if'
name|'sum'
op|'('
op|'['
name|'pool'
op|'['
string|"'count'"
op|']'
name|'for'
name|'pool'
name|'in'
name|'matching_pools'
op|']'
op|')'
op|'<'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'False'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'pool'
name|'in'
name|'matching_pools'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'='
name|'self'
op|'.'
name|'_decrease_pool_count'
op|'('
name|'pools'
op|','
name|'pool'
op|','
name|'count'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'break'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'True'
newline|'\n'
nl|'\n'
DECL|member|support_requests
dedent|''
name|'def'
name|'support_requests'
op|'('
name|'self'
op|','
name|'requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Check if the pci requests can be met.\n\n Scheduler checks compute node\'s PCI stats to decide if an\n instance can be scheduled into the node. Support does not\n mean real allocation.\n If numa_cells is provided then only devices contained in\n those nodes are considered.\n """'
newline|'\n'
comment|'# note (yjiang5): this function has high possibility to fail,'
nl|'\n'
comment|'# so no exception should be triggered for performance reason.'
nl|'\n'
name|'pools'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'self'
op|'.'
name|'pools'
op|')'
newline|'\n'
name|'return'
name|'all'
op|'('
op|'['
name|'self'
op|'.'
name|'_apply_request'
op|'('
name|'pools'
op|','
name|'r'
op|','
name|'numa_cells'
op|')'
nl|'\n'
name|'for'
name|'r'
name|'in'
name|'requests'
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|apply_requests
dedent|''
name|'def'
name|'apply_requests'
op|'('
name|'self'
op|','
name|'requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Apply PCI requests to the PCI stats.\n\n This is used in multiple instance creation, when the scheduler has to\n maintain how the resources are consumed by the instances.\n If numa_cells is provided then only devices contained in\n those nodes are considered.\n """'
newline|'\n'
name|'if'
name|'not'
name|'all'
op|'('
op|'['
name|'self'
op|'.'
name|'_apply_request'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'r'
op|','
name|'numa_cells'
op|')'
nl|'\n'
name|'for'
name|'r'
name|'in'
name|'requests'
op|']'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceRequestFailed'
op|'('
name|'requests'
op|'='
name|'requests'
op|')'
newline|'\n'
nl|'\n'
DECL|member|__iter__
dedent|''
dedent|''
name|'def'
name|'__iter__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|"# 'devices' shouldn't be part of stats"
nl|'\n'
indent|' '
name|'pools'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'tmp'
op|'='
op|'{'
name|'k'
op|':'
name|'v'
name|'for'
name|'k'
op|','
name|'v'
name|'in'
name|'six'
op|'.'
name|'iteritems'
op|'('
name|'pool'
op|')'
name|'if'
name|'k'
op|'!='
string|"'devices'"
op|'}'
newline|'\n'
name|'pools'
op|'.'
name|'append'
op|'('
name|'tmp'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'iter'
op|'('
name|'pools'
op|')'
newline|'\n'
nl|'\n'
DECL|member|clear
dedent|''
name|'def'
name|'clear'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Clear all the stats maintained."""'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'='
op|'['
op|']'
newline|'\n'
nl|'\n'
DECL|member|__eq__
dedent|''
name|'def'
name|'__eq__'
op|'('
name|'self'
op|','
name|'other'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cmp'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'other'
op|'.'
name|'pools'
op|')'
op|'=='
number|'0'
newline|'\n'
nl|'\n'
DECL|member|__ne__
dedent|''
name|'def'
name|'__ne__'
op|'('
name|'self'
op|','
name|'other'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'not'
op|'('
name|'self'
op|'=='
name|'other'
op|')'
newline|'\n'
nl|'\n'
DECL|member|to_device_pools_obj
dedent|''
name|'def'
name|'to_device_pools_obj'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the contents of the pools as a PciDevicePoolList object."""'
newline|'\n'
name|'stats'
op|'='
op|'['
name|'x'
name|'for'
name|'x'
name|'in'
name|'self'
op|']'
newline|'\n'
name|'return'
name|'pci_device_pool'
op|'.'
name|'from_pci_stats'
op|'('
name|'stats'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 14.37246 | 1,148 | 0.61167 |
8d29d50d0c950b859290e95b7cb057e02fb60ee8 | 4,045 | py | Python | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | null | null | null | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | 1 | 2021-09-15T13:13:12.000Z | 2021-09-15T13:13:12.000Z | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | null | null | null | """Variational autoencoder model."""
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
| 36.116071 | 90 | 0.634611 |
8d2ae38a47c725cb399a9f327008d51a718980eb | 2,037 | py | Python | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | [
"Apache-2.0"
] | null | null | null | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | [
"Apache-2.0"
] | 6 | 2020-06-05T23:05:14.000Z | 2022-02-10T10:42:31.000Z | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | [
"Apache-2.0"
] | null | null | null | from django.http.response import HttpResponse
from rest_framework import serializers, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from .excel import Excel
XLSX_MIME = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
| 32.333333 | 92 | 0.650957 |