hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a80b6a8d0bacba13b3fe61daf36962d8ad3001a4 | 8,892 | py | Python | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
] | null | null | null | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
] | null | null | null | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt
import numpy as np
import category_encoders as ce
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder, OrdinalEncoder
def load_tit(path):
    """Load the locally stored Kaggle Titanic data.

    Args:
        path: directory prefix (e.g. "../Data/") that the CSV file names
            are appended to directly, so include the trailing separator.

    Returns:
        (train, test) tuple of pandas DataFrames read from
        tit_train.csv and tit_test.csv respectively.
    """
    frames = tuple(pd.read_csv(path + name)
                   for name in ('tit_train.csv', 'tit_test.csv'))
    return frames
def gscv_results_terse(model, params, X_train, y_train, X_test, y_test):
    '''Grid-search `model` over `params` and print a terse CV summary.

    Args:
        model: scikit-learn estimator to tune.
        params: dict (or list of dicts) of hyper-parameter grids for GridSearchCV.
        X_train, y_train: data used for the grid search and CV scoring.
        X_test, y_test: unused; kept for signature compatibility with
            print_gscv_results.

    Returns:
        The fitted GridSearchCV object.
    '''
    # Distinct names for the metric list and the CV scores: the original
    # rebound `scores` (the list being iterated) inside the loop body.
    metrics = ["accuracy"]
    clf = None
    for metric in metrics:
        print("# Tuning hyper-parameters for %s" % metric)
        clf = GridSearchCV(model, params, cv=10,
                           scoring=metric)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set: \n{}".format(clf.best_params_))
        print('___________________________________')
        print('cv scores on the best estimator')
        # Re-score the best estimator with 10-fold CV for a summary.
        cv_scores = cross_val_score(clf.best_estimator_, X_train, y_train,
                                    scoring="accuracy", cv=10)
        print(cv_scores)
        print('the average cv score is {:.3} with a std of {:.3}'.format(
            np.mean(cv_scores), np.std(cv_scores)))
    return clf
def print_gscv_results(model, params, X_train, y_train, X_test, y_test):
    '''Grid-search `model` over `params` and print a detailed report.

    Args:
        model: scikit-learn estimator to tune.
        params: dict (or list of dicts) of hyper-parameter grids for GridSearchCV.
        X_train, y_train: data used for the grid search and CV scoring.
        X_test, y_test: held-out data for the classification report.

    Returns:
        The fitted GridSearchCV object.
    '''
    metrics = ["accuracy"]
    clf = None
    for metric in metrics:
        print("# Tuning hyper-parameters for %s" % metric)
        print()
        clf = GridSearchCV(model, params, cv=5,
                           scoring=metric)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        means = clf.cv_results_['mean_test_score']
        stds = clf.cv_results_['std_test_score']
        # Distinct loop name: the original clobbered the `params` argument here.
        for mean, std, grid_params in zip(means, stds, clf.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean, std * 2, grid_params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print('________________________________________________')
        print('best params for model are {}'.format(clf.best_params_))
        print('\n___________________________________\n')
        print('cv scores on the best estimator')
        # Distinct name: the original rebound `scores` mid-iteration.
        cv_scores = cross_val_score(clf.best_estimator_, X_train, y_train,
                                    scoring="accuracy", cv=10)
        print(cv_scores)
        print('the average cv score is {:.2}\n\n'.format(np.mean(cv_scores)))
    return clf
def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
    """Fit `model` on a 2-feature dataset and plot its decision regions.

    Args:
        model: classifier with fit/predict (scikit-learn style).
        X: two-column feature table; accessed via .iloc, so a pandas
            DataFrame is assumed -- TODO confirm callers pass one.
        y: class labels; used for point colors and color limits.
        ax: matplotlib axes to draw on; defaults to the current axes.
        cmap: matplotlib colormap name shared by points and regions.
    """
    ax = ax or plt.gca()
    # Plot the training points first so the axes auto-scale to the data.
    ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, s=30, cmap=cmap,
               clim=(y.min(), y.max()), zorder=3)
    ax.axis('tight')
    ax.axis('off')
    # Capture the data-driven limits before the contour plot changes them.
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # fit the estimator
    model.fit(X, y)
    # Evaluate the fitted model on a 200x200 grid over the visible area.
    xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
                         np.linspace(*ylim, num=200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Create a color plot with the results; levels at k - 0.5 put each
    # integer class label into its own color band.
    n_classes = len(np.unique(y))
    contours = ax.contourf(xx, yy, Z, alpha=0.3,
                           levels=np.arange(n_classes + 1) - 0.5,
                           cmap=cmap, clim=(y.min(), y.max()),
                           zorder=1)
    ax.set(xlim=xlim, ylim=ylim)
# this dataset has unique cols so we will go through one by one
def pp_Embarked(df):
    """Impute missing 'Embarked' values with 'C'.

    Args:
        df: DataFrame with an 'Embarked' column.

    Returns:
        The same DataFrame, with 'Embarked' NaNs replaced by 'C'.
    """
    # Plain column assignment instead of attribute-access fillna(inplace=True):
    # that chained-inplace pattern is deprecated under pandas copy-on-write.
    df['Embarked'] = df['Embarked'].fillna('C')
    return df
def pp_Name(df):
    """Derive a 'Title' column from the 'Name' column.

    The title is the token between the comma and the first period,
    e.g. "Braund, Mr. Owen Harris" -> "Mr".

    Returns:
        df with the new 'Title' column appended.
    """
    df['Title'] = (df.Name.str.split(',').str[1]
                   .str.split('.').str[0]
                   .str.strip())
    return df
def pp_Age(df):
    """Impute missing ages with group medians.

    Missing 'Age' values are filled with the median age of passengers
    sharing the same (Title, Pclass, Sex) group.

    Returns:
        df with the imputed ages appended as 'Age_nonull'.
    """
    grouped_ages = df.groupby(['Title', 'Pclass', 'Sex'])['Age']
    df['Age_nonull'] = grouped_ages.transform(
        lambda ages: ages.fillna(ages.median()))
    return df
def pp_Fare(df):
    '''Clip Fare outliers to the middle 98% of the observed range.

    Uses NaN-aware percentiles so a missing fare (the Kaggle Titanic
    test set contains one) does not poison the clip limits: plain
    np.percentile returns NaN when any input is NaN, which would turn
    the whole column into NaN after clipping.

    Returns:
        df with 'Fare' clipped to its [1st, 99th] percentile; NaNs stay NaN.
    '''
    fares = df['Fare'].copy()
    lo, hi = np.nanpercentile(fares, [1, 99])
    df['Fare'] = np.clip(fares, lo, hi)
    return df
def pp_AgeBin(df):
    """Bucket rounded ages into sixteen 5-year bins.

    Ages are rounded to whole years, floored at 1 (rounding can produce
    0, which would fall outside the first (0, 5] bin), and cut into 16
    equal-width bins spanning 0-80.

    Returns:
        df with integer bin labels appended as 'AgeBin'.
    """
    edges = np.linspace(0, 80, 17)
    ages = df.Age_nonull.round().clip(1, None).astype("int32")
    df['AgeBin'] = pd.cut(ages, bins=edges, labels=False)
    return df
def pp_Sex(df):
    """Encode sex as a binary column: male -> 0, anything else -> 1.

    Returns:
        df with 'is_Female' appended.
    """
    df['is_Female'] = (df.Sex != 'male').astype(int)
    return df
def pp_Cabin(df):
    """Extract the deck letter from 'Cabin' into a 'Deck' column.

    Mostly first-class passengers have cabin assignments; rows with no
    cabin get the sentinel "UNK". Leaves Deck as a plain string
    category that can be one-hot encoded later.

    Returns:
        df with 'Deck' appended.
    """
    # Label-based .loc keeps this correct for any index: the original
    # passed label indices into .iloc (positional), which is wrong for
    # frames whose index is not the default RangeIndex.
    df['Deck'] = 'UNK'
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Deck'] = df.loc[has_cabin, 'Cabin'].str[0]
    return df
def scaleNumeric(df, cols):
    """Standardize the given numeric columns (zero mean, unit variance).

    For each column name c in `cols`, a standardized copy is appended
    to the frame as "<c>_scaled"; the original columns are untouched.

    Returns:
        df with the scaled columns appended.
    """
    scaled = StandardScaler().fit_transform(df[cols].values)
    for idx, name in enumerate(cols):
        df[name + "_scaled"] = scaled[:, idx]
    return df
def chooseFeatures(df, alist):
    """Select a feature subset.

    Args:
        df: DataFrame holding all engineered features.
        alist: list of column names to keep.

    Returns:
        df restricted to the columns in `alist`.
    """
    return df.loc[:, alist]
def test_dtc(alist, df, labels):
    """Grid-search a DecisionTreeClassifier on the selected features.

    Args:
        alist: feature column names to use from df.
        df: full feature DataFrame.
        labels: target labels aligned with df.

    Returns:
        The fitted GridSearchCV classifier from gscv_results_terse.
    """
    features = df[alist]  # select columns
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42)
    dtc = DecisionTreeClassifier()
    # Single-element list of grids (GridSearchCV accepts either form).
    # The original's stray double assignment (dtc_dict = dt_dict = ...)
    # is removed.
    param_grid = [{"max_depth": [2, 5, 8, 12, 15],
                   "min_samples_leaf": [1, 2, 3],
                   "max_features": [None, 1.0, 2, 'sqrt', X_train.shape[1]]}]
    return gscv_results_terse(dtc, param_grid, X_train, y_train, X_test, y_test)
#########################################################
# some utilities functions to aid in ml in general
def lin_to_log_even(min_num, max_num, num_pts=10):
    """Return `num_pts` log-evenly spaced points from min_num to max_num.

    This really only matters when min_num << 1 and max_num >> 1; both
    bounds must be positive.
    """
    # np.logspace(a, b, n) is exactly np.power(10, np.linspace(a, b, n)).
    return np.logspace(np.log10(min_num), np.log10(max_num), num_pts)
def lin_to_log_random(num1, num2, num_pts=10):
    """Draw `num_pts` points uniformly at random in log space between num1 and num2.

    This really only matters when num1 << 1 and num2 >> 1; both bounds
    must be positive. The formatted draws are printed before returning.
    """
    hi = np.log10(num2)
    span = np.abs(hi - np.log10(num1))
    # Exponents fall in [hi - span, hi]; one rand() call of size num_pts.
    zz = np.power(10, hi - np.random.rand(num_pts) * span)
    print(["{:05f}".format(each) for each in zz])
    return zz
a80bd9815a0efacc56fe16adf0b6e490442b6851 | 161 | py | Python | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
] | 10 | 2019-04-09T17:33:52.000Z | 2021-05-10T04:58:59.000Z | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
] | null | null | null | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
] | null | null | null | name = "magic_markdown"
from magic_markdown.MagicMarkdown import MagicMarkdown
| 23 | 54 | 0.838509 |
a80cfdeae5dd9779dfdf75f7f464b230527883ae | 1,167 | py | Python | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
] | null | null | null | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
] | null | null | null | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
from house.production.solar_panel import SolarPanel
from house import House
from math import pi
from time import time
start_time = time()

# Two identical 285 W panels tilted 10 degrees from horizontal, one with
# azimuth -pi/2 (east) and one with +pi/2 (west).
solar_panel_east = SolarPanel(285.0, 10*pi/180, -pi/2, 0.87, 1.540539, 10)
solar_panel_west = SolarPanel(285.0, 10*pi/180, pi/2, 0.87, 1.540539, 10)

house = House([], solar_panel_tp=(solar_panel_east, solar_panel_west))

# Irradiance measurements indexed by timestamp.
# NOTE(review): hard-coded absolute Windows path -- not portable.
irradiance_df = pd.read_csv(filepath_or_buffer="C:\\Users\\Lander\\Documents\\KULeuven\\2e bachelor\\semester 1\\P&O 3\\P-O-3-Smart-Energy-Home\\data\\Irradiance.csv",
                            header=0,
                            index_col="Date/Time",
                            dtype={"watts-per-meter-sq": float},
                            parse_dates=["Date/Time"]
                            )

# Simulate a single day at 5-minute (300 s) resolution.
start = pd.Timestamp("2016-06-17 00:00:00")
# end = pd.Timestamp("2017-04-21 23:55:00")
end = pd.Timestamp("2016-06-17 23:55:00")
times = pd.date_range(start, end, freq="300S")

data = [house.power_production(t, irradiance_df) for t in pd.date_range(start, end, freq="300S")]
# print(data)
plt.plot(data)

# Report wall-clock runtime of the simulation.
print(time() - start_time)
plt.show()
| 33.342857 | 167 | 0.642674 |
a813a7003f5f5d2c9a1b282747c12188d836b770 | 2,468 | py | Python | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
] | 9 | 2021-11-01T06:06:33.000Z | 2022-02-07T12:21:18.000Z | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
] | null | null | null | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
] | 1 | 2022-03-06T07:38:32.000Z | 2022-03-06T07:38:32.000Z | from tensorflow.keras.layers import Layer, Conv1D, Input, Dropout, MaxPool1D, Masking
import tensorflow.keras.backend as K
from tensorflow.keras import Model
import tensorflow as tf
if __name__ == '__main__':
input_shape = (16, 5 * 256)
filters = [32, 64, 128, 256]
pooling_sizes = [2, 2, 2, 2]
inputs = Input(shape=input_shape)
x = CNN1D(filters=filters, pooling_sizes=pooling_sizes)(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
| 37.393939 | 112 | 0.573339 |
a81433a2173979769be9813ef0e72f88f835d3f9 | 339 | py | Python | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
] | null | null | null | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
] | null | null | null | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
] | null | null | null | """
Play with autoformatting on save
Ensure to pip install black within your environment
"""
# test linting with an unnecessary import
# it should complain and suggest a solution
import sys
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964,
"okay": "This is getting way too long",
}
| 16.142857 | 51 | 0.666667 |
a81435452d7a1fd0220c50904adbc5e774a45f27 | 931 | py | Python | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
] | null | null | null | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
] | null | null | null | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
] | null | null | null | import json
import os
import mock
def get_data_filepath(filename):
    """Construct the path of a file in the test/data directory.

    Args:
        filename: name of the file.

    Returns:
        Full path to the file, rooted next to this module.
    """
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return os.path.join(data_dir, filename)
def load_from_file(filename):
    """Load the contents of a file in the data directory.

    Args:
        filename: name of the file to load.

    Returns:
        Contents of the file as a string.
    """
    with open(get_data_filepath(filename)) as data_file:
        return data_file.read()
| 21.159091 | 68 | 0.651987 |
a81666f0e6701e07b7dd6f00c88fe2096ec32290 | 391 | py | Python | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
] | null | null | null | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
] | null | null | null | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
] | 1 | 2018-02-26T03:14:46.000Z | 2018-02-26T03:14:46.000Z | import sys
peak=[]
with open(sys.argv[1],'r') as f:
for line in f:
line=line.strip('\n').split('\t')
peak.append(int(line[3]))
f.close()
num=int(len(peak)/100.0)
bin=[]
for i in range(99):
bin.append(str(i+1)+'\t'+str(sum(peak[num*i:num*(i+1)])/(num*1.0))+'\n')
bin.append('100'+'\t'+str(sum(peak[num*99:])/(num*1.0))+'\n')
with open('bin.txt','w') as f:
f.writelines(bin)
f.close
| 20.578947 | 73 | 0.59335 |
a8172aac1601eb8a00a3b924aa63876138f48a83 | 1,347 | py | Python | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
] | null | null | null | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
] | null | null | null | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
] | null | null | null | import csv
from faker import Faker
fake = Faker()
for x in range(0, 10):
placa = fake.pystr(min_chars=3, max_chars=3).upper() + str(fake.pydecimal(left_digits=1, right_digits=1, positive=True)) + str(fake.pydecimal(left_digits=1, right_digits=1, positive=True))
placa = placa.replace(".","")
atualLat = str(fake.geo_coordinate(center=-8.059845, radius=0.001))
atualLon = str(fake.geo_coordinate(center=-34.905552, radius=0.001))
geo0Lat = str(fake.geo_coordinate(center=-8.021154, radius=0.001))
geo0Lon = str(fake.geo_coordinate(center=-34.933909, radius=0.001))
geo1Lat = str(fake.geo_coordinate(center=-8.027868, radius=0.001))
geo1Lon = str(fake.geo_coordinate(center=-34.852109, radius=0.001))
geo2Lat = str(fake.geo_coordinate(center=-8.122738, radius=0.001))
geo2Lon = str(fake.geo_coordinate(center=-34.874526, radius=0.001))
geo3Lat = str(fake.geo_coordinate(center=-8.052431, radius=0.001))
geo3Lon = str(fake.geo_coordinate(center=-34.959744, radius=0.001))
csvRow = [placa,atualLat,atualLon,geo0Lat,geo0Lon,geo1Lat,geo1Lon,geo2Lat,geo2Lon,geo3Lat,geo3Lon,"0","0"]
with open('cars.csv', 'a', newline='\n') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(csvRow)
| 51.807692 | 193 | 0.697847 |
a8178087a6d24532c3fa392eae43c6d6a8b30612 | 4,595 | py | Python | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
] | null | null | null | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
] | 2 | 2019-07-31T23:14:14.000Z | 2020-12-26T16:27:02.000Z | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
] | 2 | 2019-07-31T22:22:06.000Z | 2020-07-14T04:58:16.000Z | from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QLabel, QLineEdit, QVBoxLayout, QMessageBox, QCheckBox,\
QSpinBox, QComboBox, QListWidget, QDialog, QFileDialog, QProgressBar, QTableWidget, QTableWidgetItem,\
QAbstractItemView, QSpinBox, QSplitter, QSizePolicy, QAbstractScrollArea, QHBoxLayout, QTextEdit, QShortcut,\
QProgressDialog
from PyQt5.QtGui import QPalette, QKeySequence, QDoubleValidator, QIntValidator
from PyQt5.QtCore import Qt, QThread, QSignalMapper
import sys
import pyqtgraph as pg
if __name__=='__main__':
app = QApplication(sys.argv)
dlg = MultiInputDialog(inputs={'value':100,'value2':10.0,'fit':True,'func':['Lor','Gau']})
dlg.show()
sys.exit(app.exec_()) | 47.864583 | 120 | 0.654189 |
a81b25109e2c25d80338be4ee486823e581a2347 | 3,813 | py | Python | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
] | null | null | null | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
] | 1 | 2022-01-09T12:07:13.000Z | 2022-01-09T15:29:41.000Z | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
] | null | null | null | import os
import psutil
import json
import sqlite3
import threading
from datetime import datetime, timezone
from websocket import create_connection
| 32.87069 | 199 | 0.575924 |
a81d611063f78006b5948c72bc4dd6b96d015544 | 1,035 | py | Python | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | 2 | 2015-04-27T01:57:43.000Z | 2015-05-01T18:18:56.000Z | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | null | null | null | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
] | null | null | null | from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
from sys import stdout
import os
import time
import numpy as np
import argparse
from equil import setup_sim, dynamix
parser = argparse.ArgumentParser(description='equilibrate structures')
parser.add_argument('--sys', type=str, help='system pdb preface')
parser.add_argument('--pdb', type=str, help='IC pdb')
parser.add_argument('--nmin', type=int, help='number of minimization steps', default=50)
parser.add_argument('--nstep', type=int, help='number of steps')
args = parser.parse_args()
systm = args.sys
ns = args.nstep
# load initial parameters and geometry
prmtop = app.AmberPrmtopFile(systm + '.prmtop')
pdb = app.PDBFile(args.pdb)
# eq temp
temp = 300.0
# timestep
ts = 2.0
qs = pdb.positions
top = pdb.topology
unit_cell = top.getUnitCellDimensions()
box = unit_cell*np.eye(3)
# run it!
sim = setup_sim(prmtop, temp, ts, qs, 'gpu', top, box)
dynamix(systm, sim, ns, prmtop, temp, ts, 'gpu', min=args.nmin)
| 26.538462 | 88 | 0.745894 |
a81eba16cf9a55afaac7c0432d5bc776ba731b35 | 40,893 | py | Python | py/agentflow/preprocessors/observation_transforms_test.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
] | 128 | 2021-09-08T18:39:39.000Z | 2022-03-27T11:29:05.000Z | py/agentflow/preprocessors/observation_transforms_test.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
] | 7 | 2021-10-11T14:26:17.000Z | 2022-03-15T17:26:45.000Z | py/agentflow/preprocessors/observation_transforms_test.py | LaudateCorpus1/dm_robotics | 647bc810788c74972c1684a8d2e4d2dfd2791485 | [
"Apache-2.0"
] | 8 | 2021-09-08T18:25:49.000Z | 2022-02-21T23:45:16.000Z | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for observations_transforms."""
import copy
from typing import Mapping, Optional, Type
from absl.testing import absltest
from absl.testing import parameterized
import cv2
import dm_env
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.preprocessors import observation_transforms
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
from dm_robotics.transformations import transformations as tr
import numpy as np
_DEFAULT_TYPE = np.float64
def _build_unit_timestep_spec(
    observation_spec: Optional[Mapping[str, specs.Array]] = None,
    reward_spec: Optional[specs.Array] = None,
    discount_spec: Optional[specs.BoundedArray] = None):
  """Builds a TimeStepSpec, substituting simple defaults for absent specs.

  Defaults: a single observation 'foo' of shape (2,) in _DEFAULT_TYPE, and
  scalar reward/discount specs built by scalar_array_spec (defined elsewhere
  in this module, not visible here -- TODO confirm).
  """
  if observation_spec is None:
    name = 'foo'
    observation_spec = {
        name: specs.Array(shape=(2,), dtype=_DEFAULT_TYPE, name=name),
    }
  if reward_spec is None:
    reward_spec = scalar_array_spec(name='reward')
  if discount_spec is None:
    discount_spec = scalar_array_spec(name='discount')
  return spec_utils.TimeStepSpec(
      observation_spec=observation_spec,
      reward_spec=reward_spec,
      discount_spec=discount_spec)
if __name__ == '__main__':
absltest.main()
| 39.358037 | 80 | 0.706747 |
a81fa302f2ff4cbc6dc18bbb647920f29a503d5e | 1,897 | py | Python | 2017/23b.py | mcbor/advent_of_code_2016 | 14453b970d3e0f031ae6a66f2028652b6ed870dd | [
"MIT"
] | 1 | 2016-12-17T10:53:22.000Z | 2016-12-17T10:53:22.000Z | 2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | [
"MIT"
] | null | null | null | 2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
23b.py
~~~~~~
Advent of Code 2017 - Day 23: Coprocessor Conflagration
Part Two
Now, it's time to fix the problem.
The debug mode switch is wired directly to register a. You flip the switch,
which makes register a now start at 1 when the program is executed.
Immediately, the coprocessor begins to overheat. Whoever wrote this program
obviously didn't choose a very efficient implementation. You'll need to
optimize the program if it has any hope of completing before Santa needs
that printer working.
The coprocessor's ultimate goal is to determine the final value left in
register h once the program completes. Technically, if it had that... it
wouldn't even need to run the program.
After setting register a to 1, if the program were to run to completion,
what value would be left in register h?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
import math
def solve(instructions):
    """Return the final value of register h.

    Hand-optimized translation of the input assembly: the program counts
    the composite numbers in range(b - 17000, b + 1, 17), where b is
    seeded by the first instruction.
    """
    # Only the first line ("set b <val>") matters; the rest of the input
    # is the known counting loop this function replaces.
    instr, reg, val = instructions.split('\n')[0].split()
    assert instr == 'set'
    assert reg == 'b'
    # With debug mode off (a = 1), the program rescales b before looping.
    b = int(val) * 100 + 100000
    start = b - 17000
    end = b + 1
    # is_prime is expected to be defined elsewhere in this module
    # (not visible here) -- TODO confirm.
    return sum(not is_prime(x) for x in range(start, end, 17))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 24.012658 | 79 | 0.615709 |
a81fc289f1eb7f0a4f761bd960c55555bea22c98 | 4,456 | py | Python | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | [
"MIT"
] | null | null | null | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | [
"MIT"
] | null | null | null | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 11:59:50 2018
@author: klaus
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from argparse import ArgumentParser, RawTextHelpFormatter
if __name__ == "__main__":
argument_parser = ArgumentParser(description="""
Game of Life:
- Little python implementation of Conway's game of life.
- The game board will be visualized with matplotlib.
- See readme.md for more informations.""",
epilog="https://github.com/WinterWonderland/Game_of_Life",
formatter_class=RawTextHelpFormatter)
argument_parser.add_argument("--width",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--height",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--interval",
metavar="",
type=float,
default=0.3,
help="Interval time between each step (default=0.3)")
argument_parser.add_argument("--seed",
metavar="",
type=int,
default=None,
help="A seed for the random number generator to get identical play boards")
args = argument_parser.parse_args()
GameOfLife(width=args.width,
height=args.height,
interval=args.interval,
seed=args.seed).run()
input("press enter to quit")
| 41.64486 | 123 | 0.47711 |
a82054bcbbc93091d6cde0c3bba2fa420fc0e4b0 | 520 | py | Python | tests/mixins/back_tests.py | StuartMacKay/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | [
"MIT"
] | 9 | 2020-05-16T20:26:33.000Z | 2021-11-02T06:24:46.000Z | tests/mixins/back_tests.py | StuartMacKay/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | [
"MIT"
] | 17 | 2019-06-22T09:41:22.000Z | 2020-09-11T06:25:21.000Z | tests/mixins/back_tests.py | ProjectBabbler/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | [
"MIT"
] | null | null | null | from ebird.api.constants import DEFAULT_BACK
from tests.mixins.base import BaseMixin
| 30.588235 | 51 | 0.709615 |
a820c01ed9ab1a3512b23d858002b832b81b6f26 | 506 | py | Python | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | [
"Apache-2.0"
] | 6 | 2021-02-09T22:27:55.000Z | 2022-01-14T18:15:17.000Z | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | [
"Apache-2.0"
] | 34 | 2021-02-09T22:23:33.000Z | 2022-03-31T16:22:51.000Z | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | [
"Apache-2.0"
] | 12 | 2021-02-09T21:17:10.000Z | 2022-02-09T16:35:39.000Z | """
Export data from Tamr using df-connect. An example where everything is default in config file,
which implies exported data is written back to same database as ingested from.
"""
import tamr_toolbox as tbox
my_config = tbox.utils.config.from_yaml("examples/resources/conf/connect.config.yaml")
my_connect = tbox.data_io.df_connect.client.from_config(my_config)
tbox.data_io.df_connect.client.export_dataset(
my_connect, dataset_name="example_dataset", target_table_name="example_target_table",
)
| 36.142857 | 94 | 0.8083 |
a822bff3f043bc516ac3c82ab2394920c525256d | 1,700 | py | Python | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | [
"MIT"
] | null | null | null | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | [
"MIT"
] | null | null | null | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Length, EqualTo, URL
from project.auth.models import User
| 32.075472 | 72 | 0.629412 |
a82471d2b32cd5726156914bf073feb69a5965b8 | 10,619 | py | Python | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | [
"MIT"
] | null | null | null | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | [
"MIT"
] | null | null | null | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, jsonify
import sqlite3
import json
import re
import logging
from applicationInfo import ApplicationInfo
logging.basicConfig(filename='/var/www/SoftDev2/projectGo.log', level=logging.DEBUG)
app = Flask(__name__)
applicationInfo = ApplicationInfo()
row_pos_obligationid = 0
row_pos_userid = 1
row_pos_name = 2
row_pos_description = 3
row_pos_starttime = 4
row_pos_endtime = 5
row_pos_priority = 6
row_pos_status = 7
row_pos_category = 8
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int('5000'))
| 39.475836 | 297 | 0.617384 |
a8247bed0a1cb5051fa0d35c0fab64fca16aa20d | 1,396 | py | Python | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | [
"Apache-2.0"
] | null | null | null | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | [
"Apache-2.0"
] | null | null | null | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import DBSCAN as cuDBSCAN
from sklearn.cluster import DBSCAN as skDBSCAN
from test_utils import array_equal
import cudf
import numpy as np
| 32.465116 | 86 | 0.703438 |
a827531247ffd24ded530b9e0dea0c181d142c7b | 114 | py | Python | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | N = int(input())
# Each of the N input lines holds two integers p and q; accumulate q/p
# (true division) into the running total that is printed afterwards.
ans = 0
for _ in range(N):
    p, q = map(int, input().split())
    ans += (1 / p) * q
print(ans) | 19 | 36 | 0.5 |
a8276b0d3215a9fe2604eec700ad87c77dc2f29b | 769 | py | Python | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | [
"MIT"
] | 1 | 2020-05-13T19:16:23.000Z | 2020-05-13T19:16:23.000Z | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | [
"MIT"
] | null | null | null | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | [
"MIT"
] | null | null | null | from heapq import heappush, heappop
# Build three sorted singly linked lists. ListNode and mergeKLists are
# defined elsewhere in this file -- not visible here.
l1 = ListNode(1)
l1.next = ListNode(4)
l1.next.next = ListNode(5)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
l3 = ListNode(2)
l3.next = ListNode(6)
# Merge all three lists into one sorted list and print its values.
l3 = mergeKLists([l1, l2, l3])
p = l3
while p:
    print(p.val, end=" ") # 1 1 2 3 4 4 5 6
    p = p.next
print() | 17.477273 | 45 | 0.579974 |
a82a766dd5a8919e5aec354cbe63b71c9cd59549 | 2,297 | py | Python | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | [
"MIT"
] | 1 | 2021-07-09T11:51:04.000Z | 2021-07-09T11:51:04.000Z | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | [
"MIT"
] | null | null | null | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | [
"MIT"
] | null | null | null | import argparse
import glob
import numpy as np
import os
import skimage.io
import torch
import tifffile
from cellpose import models
def _parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input",
type=str,
default=None,
required=True,
help="Input image or folder with images to mask.",
)
parser.add_argument(
"-o",
"--output",
type=str,
default=None,
required=False,
help="Output folder, default mask within input folder",
)
parser.add_argument(
"-t",
"--target",
type=str,
default=None,
required=False,
help="Target channel tag, if provided, it will look for files with the tag.",
)
args = parser.parse_args()
return args
def main():
    """Create cell masks and save them into a mask folder.

    Collects the input .tif file(s), optionally filters them by the target
    channel tag, segments a middle slice of each image with Cellpose
    (model "cyto") and writes the mask under the original file name into
    the output folder (default: a "mask" folder next to the input images).
    """
    args = _parse_args()

    # Resolve the list of images to process.
    if os.path.isdir(args.input):
        inputs = glob.glob(f"{args.input}/*tif")
    elif os.path.isfile(args.input):
        inputs = [args.input]
    else:
        raise ValueError(f"Expected input folder or file. Provided {args.input}.")

    if args.target is not None:
        inputs = [x for x in inputs if args.target in x]

    # Default output folder: "<input folder>/mask". BUG FIX: for a single-file
    # input the original used the file path itself as the base, producing
    # "<file.tif>/mask", which is not a directory and made os.mkdir fail.
    output = args.output
    if output is None:
        if os.path.isdir(args.input):
            base_dir = os.path.abspath(args.input)
        else:
            base_dir = os.path.dirname(os.path.abspath(args.input))
        output = f"{base_dir}/mask"
    os.makedirs(output, exist_ok=True)

    cellpose_model = models.Cellpose(model_type="cyto", gpu=False)
    for input_file in inputs:
        img = skimage.io.imread(input_file)
        middle_slice = len(img) // 2
        # 4D stacks are assumed to be (z, channel, y, x): max-project over the
        # channel axis, then segment the middle z-slice. 3D stacks (z, y, x)
        # are segmented on the middle slice directly. -- TODO confirm axis order.
        if img.ndim == 4:
            mask_nucl, *_ = cellpose_model.eval(
                [np.max(img, axis=1)[middle_slice]],
                diameter=150,
                channels=[0, 0],
                min_size=15,
            )
        elif img.ndim == 3:
            mask_nucl, *_ = cellpose_model.eval(
                [img[middle_slice]],
                diameter=150,
                channels=[0, 0],
                min_size=15,
            )
        else:
            # BUG FIX: the original fell through here and silently re-saved the
            # previous image's mask (or raised NameError on the first image).
            raise ValueError(
                f"Expected a 3D or 4D image, got shape {img.shape} for {input_file}."
            )
        name = os.path.basename(input_file)
        out = f"{output}/{name}"
        tifffile.imsave(out, mask_nucl[0])


if __name__ == "__main__":
    main()
| 25.241758 | 85 | 0.562908 |
a82b6067d87e3c320c8e0fb55b9b998dccade592 | 14,134 | py | Python | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | [
"MIT"
] | null | null | null | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | [
"MIT"
] | 1 | 2021-06-08T02:43:08.000Z | 2021-06-08T03:05:21.000Z | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | [
"MIT"
] | null | null | null | # # Customer cliff dive data challenge
# 2020-02-17
# Leslie Emery
# ## Summary
# ### The problem
# The head of the Yammer product team has noticed a precipitous drop in weekly active users, which is one of the main KPIs for customer engagement. What has caused this drop?
# ### My approach and results
# I began by coming up with several questions to investigate:
# - Was there any change in the way that weekly active users is calculated?
# - This does not appear to be the case. To investigate this, I began by replicating the figure from the dashboard. I calculated a rolling 7-day count of engaged users, making sure to use the same method across the entire time frame covered by the dataset, and it still showed the same drop in engagement.
# - Was there a change in any one particular type of "engagement"?
# - I looked at a rolling 7-day count of each individual type of engagement action. From plotting all of these subplots, it looks to me like home_page, like_message, login, send_message, and view_inbox are all exhibiting a similar drop around the same time, so it's these underlying events that are driving the drop.
# - Could a change in the user interface be making it more difficult or less pleasant for users?
# - I couldn't find information in the available datasets to address this question. The `yammer_experiments` data set has information about experiments going on, presumably in the user interface. All of the listed experiments happened in June of 2014, though, which I think is too early to have caused the August drop in engagement.
# - Is this drop a seasonal change that happens around this time every year?
# - Because the data is only available for the period of time shown in the original dashboard, I can't investigate this question. I'd be very interested to see if there is a pattern of reduced engagement at the end of the summer, perhaps related to vacation or school schedules.
# - Are users visiting the site less because they're getting more content via email?
# - I calculated 7-day rolling counts of each type of email event, and all email events together. Email events overall went up during the time period immediately before the drop in user engagement. All four types of email events increased during the same period, indicating higher clickthroughs on emails, higher numbers of email open events, and more reengagement and weekly digest emails sent. It could be that the higher number of weekly digests sent out mean that users don't have to visit the site directly as much.
# - Are users disengaging from the site due to too many emails/notifications?
# - I calculated a rolling 7-day count of emails sent to each user and found that the number of emails sent to each user per 7-day period has increased from 5.4 emails (July 20) to 7.75 emails (August 11). This suggests that an increasing volume of emails sent to individual users could have driven them away from using the site. To investigate this further I would want to look into email unsubscribe rates. If unsubscribe rates have also gone up, then it seems that Yammer is sending too many emails to its users.
# - To investigate whether the number of emails sent per user is correlated with the number of engaged users, I used a Granger causality test to see if "emails sent per user" could be used to predict "number of engaged users". With a high enough lag, the test statistics might be starting to become significant, but I would want to investigate these test results further before making any recommendations based on them.
# - Is the drop in engagement due to a decrease in new activated users? e.g. they are reaching the end of potential customer base?
# - I calculated the cumulative number of newly activated users over time, using the activation time for each user in the users table. I wanted to see if customer growth had leveled off. However, I saw that customer growth was still increasing in the same pattern. This was true when using creating date rather than activation date as well.
# What is my recommendation to Yammer?
# I have a few recommendations to Yammer:
# - Try decreasing the number of emails sent to each individual user to see if this increases engagement. They could try this for a subset of users first.
# - Investigate email unsubscribe rates to see if they are going up. This would indicate that increased email volume might be making users unhappy.
# - Compare this data to a wider time range to see if the drop shown here is seasonal.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import plotly.express as px
import pandas as pd
from scipy import stats
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import grangercausalitytests
# -
# Load the five Yammer tables from TSV files; timestamp columns are parsed
# into datetimes up front so time arithmetic and rolling windows work later.
data_dir = '/Users/leslie/devel/insight-data-challenges/02-customer-cliff-dive/data'
benn_normal = pd.read_csv(os.path.join(data_dir, 'benn.normal_distribution - benn.normal_distribution.csv.tsv'), sep='\t')
rollup_periods = pd.read_csv(os.path.join(data_dir, 'dimension_rollup_periods - dimension_rollup_periods.csv.tsv'), sep='\t',
                             parse_dates=['time_id', 'pst_start', 'pst_end', 'utc_start', 'utc_end'])
yammer_emails = pd.read_csv(os.path.join(data_dir, 'yammer_emails - yammer_emails.csv.tsv'), sep='\t',
                            parse_dates=['occurred_at'])
yammer_events = pd.read_csv(os.path.join(data_dir, 'yammer_events - yammer_events.csv.tsv'), sep='\t',
                            parse_dates=['occurred_at'])
yammer_experiments = pd.read_csv(os.path.join(data_dir, 'yammer_experiments - yammer_experiments.csv.tsv'), sep='\t',
                                 parse_dates=['occurred_at'])
yammer_users = pd.read_csv(os.path.join(data_dir, 'yammer_users - yammer_users.csv.tsv'), sep='\t',
                           parse_dates=['created_at', 'activated_at'])
# +
# Quick structural EDA of every table. The bare expressions only display
# output in a notebook/REPL context (this file is a converted notebook).
benn_normal.info()
benn_normal.head()
benn_normal.describe()
rollup_periods.info()
rollup_periods.head()
rollup_periods.describe()
yammer_emails.info()
yammer_emails.head()
yammer_emails.describe()
yammer_emails['action'].value_counts(dropna=False)
yammer_emails['user_type'].value_counts(dropna=False)
yammer_events.info()
yammer_events.head()
yammer_events.describe()
yammer_events['occurred_at']
yammer_events['event_type'].value_counts(dropna=False)
yammer_events['event_name'].value_counts(dropna=False)
yammer_events['location'].value_counts(dropna=False)
yammer_events['device'].value_counts(dropna=False)
yammer_events['user_type'].value_counts(dropna=False)
yammer_events['user_type'].dtype
# user_type should be an int, but has many missing values, and NaN is a float.
# So convert it to the Pandas Int64 dtype which can accommodate NaNs and ints.
yammer_events = yammer_events.astype({'user_type': 'Int64'})
yammer_experiments.info()
yammer_experiments.head()
yammer_experiments.describe()
yammer_experiments['experiment'].value_counts(dropna=False)
yammer_experiments['experiment_group'].value_counts(dropna=False)
yammer_experiments['location'].value_counts(dropna=False)
yammer_experiments['device'].value_counts(dropna=False)
yammer_users.info()
yammer_users.head()
yammer_users.describe()
yammer_users['language'].value_counts(dropna=False)
yammer_users['state'].value_counts(dropna=False)
yammer_users['company_id'].value_counts(dropna=False)
# -
# ## Initial data investigation
# +
# How many days in the dataset?
yammer_events['occurred_at'].max() - yammer_events['occurred_at'].min()
# 122 days!
rollup_periods['pst_start'].max() - rollup_periods['pst_end'].min()
# 1094 days - way more intervals than needed to tile this events data!
# Sort events chronologically, then draw a small random sample (40 rows from
# the first 10% of events) to experiment with rolling windows cheaply.
yammer_events = yammer_events.sort_values(by='occurred_at', ascending=True)
small_events = yammer_events.head(int(yammer_events.shape[0]/10)).sample(n=40)
small_events = small_events.sort_values(by='occurred_at', ascending=True)
small_events['occurred_at'].max() - small_events['occurred_at'].min()
# period_id 1007 appears to select the weekly rollup windows -- TODO confirm
# against the dimension table's documentation.
weekly_rollup_periods = rollup_periods.loc[rollup_periods['period_id'] == 1007]
# -
# +
# 7-day rolling non-null counts per column; first on the small sample, then on
# the full engagement events (this replicates the dashboard metric with one
# consistent method across the whole time range).
small_rolling_engagement = small_events.loc[small_events['event_type'] == 'engagement'].rolling(
    '7D', on='occurred_at').count()
# I'm not sure whether rollup_periods are closed on right, left, or both...
# Calculate counts of engagement events in a 7-day rolling window
rolling_engagement_counts = yammer_events.loc[yammer_events['event_type'] == 'engagement'].sort_values(
    by='occurred_at', ascending=True # Have to sort by "on" column to use rolling()
).rolling('7D', on='occurred_at', min_periods=1).count()
# +
# Use a loop to aggregate on rollup periods
yammer_events['event_name'].unique()
# Restrict the weekly rollup windows to the span actually covered by events.
event_range = [min(yammer_events['occurred_at']), max(yammer_events['occurred_at'])]
covered_weekly_rollup_periods = weekly_rollup_periods.loc[(weekly_rollup_periods['pst_end'] <= event_range[1])
                                                          & (weekly_rollup_periods['pst_start'] >= event_range[0])]
# in interval --> start < occurred_at <= end
# For each weekly period, build one row with: per-event-name counts, the
# period bounds, distinct engaged users, and the total engagement events.
counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
    # row = covered_weekly_rollup_periods.iloc[0]
    # Get engagement events within the period (note: this mask is correctly
    # built from yammer_events itself).
    df = yammer_events.loc[(yammer_events['occurred_at'] > row['pst_start'])
                           & (yammer_events['occurred_at'] <= row['pst_end'])
                           & (yammer_events['event_type'] == 'engagement')]
    # Count user engagement events
    cbt = df.groupby('event_name').aggregate(event_count=('user_id', 'count')).transpose()
    cbt['pst_start'] = row['pst_start']
    cbt['pst_end'] = row['pst_end']
    cbt['engaged_users'] = df['user_id'].nunique()
    cbt['engagement_event_count'] = df.shape[0]
    if counts_by_type is None:
        counts_by_type = cbt
    else:
        counts_by_type = counts_by_type.append(cbt)
counts_by_type
# +
# Plot engaged users over time
fig = px.scatter(counts_by_type, x='pst_end', y='engaged_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of engagement_events over time
fig = px.scatter(counts_by_type, x='pst_end', y='engagement_event_count', template='plotly_white')
fig.show()
# Plot count of individual event types over time
# Melt to long format so each event_name becomes its own facet; independent
# y-axes (matches=None) because event volumes differ by orders of magnitude.
counts_melted = counts_by_type.melt(id_vars=['pst_start', 'pst_end', 'engaged_users', 'engagement_event_count'])
fig = px.scatter(counts_melted, x='pst_end', y='value', template='plotly_white',
                 facet_col='event_name', facet_col_wrap=3, height=1200)
fig.update_yaxes(matches=None)
fig.show()
# -
# Are there any "experiments" messing things up?
yammer_experiments['occurred_at'].describe()
# No, these are all before the issue shows up
# +
# Investigate the sending of emails to user in the same rollup periods
# For each weekly rollup period, count email events by action type plus
# distinct emailed users, total email events, and mean "sent_*" emails per user.
# BUG FIX: the period mask was built from yammer_events['occurred_at'] but used
# to index yammer_emails -- the two tables have different rows, so the boolean
# selection was misaligned. The mask must come from yammer_emails itself.
email_counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
    # Emails that occurred within this period (start exclusive, end inclusive,
    # matching the engagement rollup above).
    df = yammer_emails.loc[(yammer_emails['occurred_at'] > row['pst_start'])
                           & (yammer_emails['occurred_at'] <= row['pst_end'])]
    # One row per period, one column per email action type.
    cbt = df.groupby('action').aggregate(action_count=('user_id', 'count')).transpose()
    cbt['pst_start'] = row['pst_start']
    cbt['pst_end'] = row['pst_end']
    cbt['emailed_users'] = df['user_id'].nunique()
    cbt['email_event_count'] = df.shape[0]
    # Mean number of "sent_*" emails per user within the period.
    cbt['emails_sent_per_user'] = df.loc[df['action'].str.startswith('sent_')].groupby(
        'user_id').count().mean()['user_type']
    if email_counts_by_type is None:
        email_counts_by_type = cbt
    else:
        email_counts_by_type = email_counts_by_type.append(cbt)
email_counts_by_type
# +
# Plot emailed users over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='emailed_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of email events over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='email_event_count', template='plotly_white')
fig.show()
# Plot count of individual email types over time
# Melt to long format so each email action gets its own facet with an
# independent y-axis.
email_counts_melted = email_counts_by_type.melt(id_vars=[
    'pst_start', 'pst_end', 'emailed_users', 'email_event_count', 'emails_sent_per_user'])
fig = px.scatter(email_counts_melted, x='pst_end', y='value', template='plotly_white',
                 facet_col='action', facet_col_wrap=2)
fig.update_yaxes(matches=None)
fig.show()
# -
# +
# What is email engagement event count per user? Did that increase?
# +
# Plot the mean number of emails sent per user across weekly periods.
fig = px.scatter(email_counts_by_type, x='pst_start', y='emails_sent_per_user', template='plotly_white')
fig.show()
# BUG FIX: scipy.stats.pearsonr returns (correlation, p-value) in that order;
# the original unpacked the pair as `p, r`, swapping the two values.
r, p = stats.pearsonr(email_counts_by_type['emails_sent_per_user'].to_numpy(),
                      counts_by_type['engaged_users'].to_numpy())
# They do look moderately correlated, but how do I test that one has an effect on the other?
# -
# Autocorrelation / partial autocorrelation of the engaged-users series, to
# pick sensible lags before the Granger causality test below.
acf_50 = acf(counts_by_type['engaged_users'], nlags=50, fft=True)
pacf_50 = pacf(counts_by_type['engaged_users'], nlags=50)
fig, axes = plt.subplots(1, 2, figsize=(16, 3), dpi=200)
plot_acf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[0])
plot_pacf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[1])
plt.show()
# Does "emails sent per user" help predict "engaged users"? The test expects
# the candidate cause in the second column and the effect in the first.
test_df = pd.DataFrame({'emails_sent_per_user': email_counts_by_type['emails_sent_per_user'].to_numpy(),
                        'engaged_users': counts_by_type['engaged_users'].to_numpy()})
# NOTE(review): range(20) includes lag 0, which newer statsmodels versions
# reject for grangercausalitytests -- confirm the installed version accepts it.
lags = range(20)
caus_test = grangercausalitytests(test_df, maxlag=lags)
# Has there been a dropoff in new users?
# +
# Cumulative number of accounts over time, ordered by creation date.
yammer_users = yammer_users.sort_values(by='created_at', ascending=True)
# BUG FIX: assigning pd.Series(np.ones(n).cumsum()) after sort_values aligns
# on the shuffled index labels, scrambling the running count; assign a plain
# positional array instead.
yammer_users['cumulative_users'] = np.arange(1, yammer_users.shape[0] + 1)
fig = px.scatter(yammer_users, x='created_at', y='cumulative_users', template='plotly_white')
fig.show()
# Nope, growth is still practically exponential
# BUG FIX: the "activated" curve previously duplicated the creation-date curve
# (same cumsum, plotted against created_at). Compute it from activated_at,
# ignoring accounts that never activated, and plot against activation time.
activated_users = yammer_users.dropna(subset=['activated_at']).sort_values(by='activated_at')
activated_users['cumulative_activated_users'] = np.arange(1, activated_users.shape[0] + 1)
fig = px.scatter(activated_users, x='activated_at', y='cumulative_activated_users', template='plotly_white')
fig.show()
yammer_users['company_id'].nunique()
# -
| 51.963235 | 524 | 0.743809 |
a82ba74fc6af916b17675886faaf3aad6278c7c2 | 796 | py | Python | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | import socket
import threading
import random
# Connect to the chat server on localhost:9090.
sock = socket.socket()
sock.connect(('localhost', 9090))
# Pick a pseudo-random display name, e.g. "person123".
number = random.randint(0,1000)
name = "person" + str(number)
# send_message / receive_message are defined elsewhere in this file (not
# visible here); run them in separate threads so the client can send and
# receive concurrently.
threading.Thread(target=send_message).start()
threading.Thread(target=receive_message).start()
| 20.410256 | 56 | 0.548995 |
a82c200cd117a48cc9a2ebacd146f50b56baabcf | 23,587 | py | Python | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | 128 | 2016-05-10T01:38:27.000Z | 2022-02-04T07:14:12.000Z | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | 6 | 2016-07-19T09:27:47.000Z | 2021-07-08T21:22:32.000Z | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | 36 | 2016-05-11T08:57:26.000Z | 2021-07-07T02:37:07.000Z | from collections import defaultdict
import heapq
from itertools import chain, repeat
from feature_dict import FeatureDictionary
import json
import numpy as np
import scipy.sparse as sp
def __get_data_in_forward_format(self, names, code, name_cx_size):
    """
    Get the data in a "forward" model format.

    For every subtoken position j >= 1 of every name, emit one training
    sample: the target subtoken id, a fixed-size left context of the name
    (padded with NONE), and a bag-of-tokens sparse feature row built from
    the distinct code tokens of the same method. NOTE: Python 2 code
    (xrange); behaves per-sample because k advances once per target.
    :param data:
    :param name_cx_size:
    :return:
    """
    assert len(names) == len(code), (len(names), len(code), code.shape)
    # Keep only identifiers in code
    #code = self.keep_identifiers_only(code)
    name_targets = []
    name_contexts = []
    original_names_ids = []
    id_xs = []
    id_ys = []
    k = 0
    for i, name in enumerate(names):
        for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
            name_targets.append(self.name_dictionary.get_id_or_unk(name[j]))
            original_names_ids.append(i)
            # Left context of the name, left-padded (or truncated) to exactly
            # name_cx_size subtokens.
            context = name[:j]
            if len(context) < name_cx_size:
                context = [self.NONE] * (name_cx_size - len(context)) + context
            else:
                context = context[-name_cx_size:]
            assert len(context) == name_cx_size, (len(context), name_cx_size,)
            name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
            # Sparse binary features: the distinct in-vocabulary code tokens
            # of method i, one row per target sample k.
            for code_token in set(code[i]):
                token_id = self.all_tokens_dictionary.get_id_or_none(code_token)
                if token_id is not None:
                    id_xs.append(k)
                    id_ys.append(token_id)
            k += 1
    code_features = sp.csr_matrix((np.ones(len(id_xs)), (id_xs, id_ys)), shape=(k, len(self.all_tokens_dictionary)), dtype=np.int32)
    name_targets = np.array(name_targets, dtype=np.int32)
    name_contexts = np.array(name_contexts, dtype=np.int32)
    original_names_ids = np.array(original_names_ids, dtype=np.int32)
    return name_targets, name_contexts, code_features, original_names_ids
def get_data_in_convolution_format(self, input_file, name_cx_size, min_code_size):
    """Read *input_file* and return (convolution-format data, original names)."""
    subtoken_names, code_tokens, original_names = self.__get_file_data(input_file)
    formatted = self.get_data_for_convolution(subtoken_names, code_tokens, name_cx_size, min_code_size)
    return formatted, original_names
def get_data_in_copy_convolution_format(self, input_file, name_cx_size, min_code_size):
    """Read *input_file* and return (copy-convolution data, original names)."""
    subtoken_names, code_tokens, original_names = self.__get_file_data(input_file)
    formatted = self.get_data_for_copy_convolution(subtoken_names, code_tokens, name_cx_size, min_code_size)
    return formatted, original_names
def get_data_in_recurrent_convolution_format(self, input_file, min_code_size):
    """Read *input_file* and return (recurrent-convolution data, original names)."""
    subtoken_names, code_tokens, original_names = self.__get_file_data(input_file)
    formatted = self.get_data_for_recurrent_convolution(subtoken_names, code_tokens, min_code_size)
    return formatted, original_names
def get_data_in_recurrent_copy_convolution_format(self, input_file, min_code_size):
    """Read *input_file* and return (recurrent-copy-convolution data, original names)."""
    subtoken_names, code_tokens, original_names = self.__get_file_data(input_file)
    formatted = self.get_data_for_recurrent_copy_convolution(subtoken_names, code_tokens, min_code_size)
    return formatted, original_names
def get_data_for_convolution(self, names, code, name_cx_size, sentence_padding):
    # Build one sample per name-subtoken position: (target subtoken id,
    # fixed-size name context, symmetrically padded code sentence).
    # NOTE: Python 2 code -- xrange and integer division below.
    assert len(names) == len(code), (len(names), len(code), code.shape)
    name_targets = []
    name_contexts = []
    original_names_ids = []
    code_sentences = []
    padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
    for i, name in enumerate(names):
        code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
        # Pad the code sentence with NONE ids; odd padding puts the extra
        # element at the front (relies on Python 2 integer division).
        if sentence_padding % 2 == 0:
            code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
        else:
            code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
        for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
            name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
            original_names_ids.append(i)
            # Left context of the name, left-padded (or truncated) to exactly
            # name_cx_size subtokens.
            context = name[:j]
            if len(context) < name_cx_size:
                context = [self.NONE] * (name_cx_size - len(context)) + context
            else:
                context = context[-name_cx_size:]
            assert len(context) == name_cx_size, (len(context), name_cx_size,)
            name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
            code_sentences.append(np.array(code_sentence, dtype=np.int32))
    name_targets = np.array(name_targets, dtype=np.int32)
    name_contexts = np.array(name_contexts, dtype=np.int32)
    code_sentences = np.array(code_sentences, dtype=np.object)
    original_names_ids = np.array(original_names_ids, dtype=np.int32)
    return name_targets, name_contexts, code_sentences, original_names_ids
def get_data_for_recurrent_convolution(self, names, code, sentence_padding):
    # One sample per method: the full sequence of name subtoken ids and the
    # padded code sentence. NOTE: Python 2 integer division in the padding.
    assert len(names) == len(code), (len(names), len(code), code.shape)
    name_targets = []
    code_sentences = []
    padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
    for i, name in enumerate(names):
        code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
        # Same symmetric padding scheme as get_data_for_convolution.
        if sentence_padding % 2 == 0:
            code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
        else:
            code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
        name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
        name_targets.append(np.array(name_tokens, dtype=np.int32))
        code_sentences.append(np.array(code_sentence, dtype=np.int32))
    # Object arrays because the per-method sequences have different lengths.
    name_targets = np.array(name_targets, dtype=np.object)
    code_sentences = np.array(code_sentences, dtype=np.object)
    return name_targets, code_sentences
def get_data_for_recurrent_copy_convolution(self, names, code, sentence_padding):
    # Like get_data_for_recurrent_convolution, but additionally returns, per
    # name subtoken: whether it is UNK in the vocabulary and a binary matrix
    # marking which code positions hold an identical token (copy targets).
    # NOTE: Python 2 integer division in the padding.
    assert len(names) == len(code), (len(names), len(code), code.shape)
    name_targets = []
    target_is_unk = []
    copy_vectors = []
    code_sentences = []
    padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
    for i, name in enumerate(names):
        code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
        # Same symmetric padding scheme as get_data_for_convolution.
        if sentence_padding % 2 == 0:
            code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
        else:
            code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
        name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
        unk_tokens = [self.all_tokens_dictionary.is_unk(t) for t in name]
        # copy matrix: rows = name subtokens, columns = unpadded code tokens.
        target_can_be_copied = [[t == subtok for t in code[i]] for subtok in name]
        name_targets.append(np.array(name_tokens, dtype=np.int32))
        target_is_unk.append(np.array(unk_tokens, dtype=np.int32))
        copy_vectors.append(np.array(target_can_be_copied, dtype=np.int32))
        code_sentences.append(np.array(code_sentence, dtype=np.int32))
    # Object arrays because the per-method sequences have different lengths.
    name_targets = np.array(name_targets, dtype=np.object)
    code_sentences = np.array(code_sentences, dtype=np.object)
    code = np.array(code, dtype=np.object)
    target_is_unk = np.array(target_is_unk, dtype=np.object)
    copy_vectors = np.array(copy_vectors, dtype=np.object)
    return name_targets, code_sentences, code, target_is_unk, copy_vectors
def get_data_for_copy_convolution(self, names, code, name_cx_size, sentence_padding):
    # Per-subtoken variant of the copy mechanism: one sample per name
    # position j, with the raw target string, an UNK flag, the unpadded code
    # tokens, and a binary vector marking code positions equal to the target.
    # NOTE: Python 2 code -- xrange and integer division below.
    assert len(names) == len(code), (len(names), len(code), code.shape)
    name_targets = []
    original_targets = []
    name_contexts = []
    original_names_ids = []
    code_sentences = []
    original_code = []
    copy_vector = []
    target_is_unk = []
    padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
    for i, name in enumerate(names):
        code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
        # Same symmetric padding scheme as get_data_for_convolution.
        if sentence_padding % 2 == 0:
            code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
        else:
            code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
        for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
            name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
            original_targets.append(name[j])
            target_is_unk.append(self.all_tokens_dictionary.is_unk(name[j]))
            original_names_ids.append(i)
            # Left context of the name, left-padded (or truncated) to exactly
            # name_cx_size subtokens.
            context = name[:j]
            if len(context) < name_cx_size:
                context = [self.NONE] * (name_cx_size - len(context)) + context
            else:
                context = context[-name_cx_size:]
            assert len(context) == name_cx_size, (len(context), name_cx_size,)
            name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
            code_sentences.append(np.array(code_sentence, dtype=np.int32))
            original_code.append(code[i])
            # 1 where the (unpadded) code token equals the target subtoken.
            tokens_to_be_copied = [t == name[j] for t in code[i]]
            copy_vector.append(np.array(tokens_to_be_copied, dtype=np.int32))
    name_targets = np.array(name_targets, dtype=np.int32)
    name_contexts = np.array(name_contexts, dtype=np.int32)
    code_sentences = np.array(code_sentences, dtype=np.object)
    original_names_ids = np.array(original_names_ids, dtype=np.int32)
    copy_vector = np.array(copy_vector, dtype=np.object)
    target_is_unk = np.array(target_is_unk, dtype=np.int32)
    return name_targets, original_targets, name_contexts, code_sentences, original_code, copy_vector, target_is_unk, original_names_ids
def get_suggestions_given_name_prefix(self, next_name_log_probs, name_cx_size, max_predicted_identifier_size=5, max_steps=100):
    # Beam search over name subtokens: repeatedly expand partial suggestions
    # with the model's next-subtoken log-probabilities, keeping at most
    # max_size_to_keep candidates per step, until SUBTOKEN_END or the length/
    # step limits are reached. Returns [(subtoken tuple, probability), ...]
    # sorted by decreasing probability.
    # NOTE: Python 2 code -- xrange, list-returning filter, dict.items().
    # NOTE(review): get_possible_options (used below) is presumably a helper
    # defined elsewhere in this file and not visible here -- confirm.
    suggestions = defaultdict(lambda: float('-inf')) # A list of tuple of full suggestions (token, prob)
    # A stack of partial suggestion in the form ([subword1, subword2, ...], logprob)
    possible_suggestions_stack = [
        ([self.NONE] * (name_cx_size - 1) + [self.SUBTOKEN_START], [], 0)]
    # Keep the max_size_to_keep suggestion scores (sorted in the heap). Prune further exploration if something has already
    # lower score
    predictions_probs_heap = [float('-inf')]
    max_size_to_keep = 15
    nsteps = 0
    while True:
        scored_list = []
        while len(possible_suggestions_stack) > 0:
            subword_tokens = possible_suggestions_stack.pop()
            # If we're done, append to full suggestions
            if subword_tokens[0][-1] == self.SUBTOKEN_END:
                final_prediction = tuple(subword_tokens[1][:-1])
                if len(final_prediction) == 0:
                    continue
                # Merge the probability mass of duplicate predictions in log space.
                log_prob_of_suggestion = np.logaddexp(suggestions[final_prediction], subword_tokens[2])
                if log_prob_of_suggestion > predictions_probs_heap[0] and not log_prob_of_suggestion == float('-inf'):
                    # Push only if the score is better than the current minimum and > 0 and remove extraneous entries
                    suggestions[final_prediction] = log_prob_of_suggestion
                    heapq.heappush(predictions_probs_heap, log_prob_of_suggestion)
                    if len(predictions_probs_heap) > max_size_to_keep:
                        heapq.heappop(predictions_probs_heap)
                continue
            elif len(subword_tokens[1]) > max_predicted_identifier_size: # Stop recursion here
                continue
            # Convert subword context
            context = [self.name_dictionary.get_id_or_unk(k) for k in
                       subword_tokens[0][-name_cx_size:]]
            assert len(context) == name_cx_size
            context = np.array([context], dtype=np.int32)
            # Predict next subwords
            target_subword_logprobs = next_name_log_probs(context)
            top_indices = np.argsort(-target_subword_logprobs[0])
            possible_options = [get_possible_options(top_indices[i]) for i in xrange(max_size_to_keep)]
            # Disallow suggestions that contain duplicated subtokens.
            scored_list.extend(filter(lambda x: len(x[1])==1 or x[1][-1] != x[1][-2], possible_options))
        # Prune
        scored_list = filter(lambda suggestion: suggestion[2] >= predictions_probs_heap[0] and suggestion[2] >= float('-inf'), scored_list)
        scored_list.sort(key=lambda entry: entry[2], reverse=True)
        # Update
        possible_suggestions_stack = scored_list[:max_size_to_keep]
        nsteps += 1
        if nsteps >= max_steps:
            break
    # Sort and append to predictions
    suggestions = [(identifier, np.exp(logprob)) for identifier, logprob in suggestions.items()]
    suggestions.sort(key=lambda entry: entry[1], reverse=True)
    # print suggestions
    return suggestions
| 51.953744 | 143 | 0.644338 |
a82c44a1683f511d5f99fbda3a6f12bd84f86c4c | 550 | py | Python | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
] | null | null | null | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
] | 1 | 2021-02-27T06:12:07.000Z | 2021-03-01T14:32:39.000Z | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
] | 1 | 2021-02-27T06:14:55.000Z | 2021-02-27T06:14:55.000Z | words = open("words.txt", "r")
words = [x.rstrip("\n") for x in words.readlines()]
refwords = open("referencewords.txt", "r")
refwords = [x.strip("\n") for x in refwords.readlines()]
words_needed = []
main()
for i in words_needed:
print(i) | 20.37037 | 56 | 0.650909 |
a82e508670c379c3dbb7d2f2e849d1ec9ed6d7a8 | 2,736 | py | Python | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
] | null | null | null | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
] | 9 | 2021-07-18T17:16:42.000Z | 2022-03-31T00:19:14.000Z | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
] | null | null | null | # pylint: disable=too-few-public-methods,no-self-use
from django.utils.crypto import get_random_string
from django.utils.translation import gettext_lazy as _
from democrasite.users.forms import (
DisabledChangePasswordForm,
DisabledResetPasswordForm,
DisabledResetPasswordKeyForm,
DisabledSetPasswordForm,
UserCreationForm,
)
from democrasite.users.models import User
| 32.571429 | 87 | 0.623904 |
a82ef552d3bf70dc77e897c13a1b0f9b584ffa9d | 3,359 | py | Python | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
] | 1 | 2021-05-24T08:00:29.000Z | 2021-05-24T08:00:29.000Z | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
] | null | null | null | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
] | 1 | 2021-09-29T03:43:46.000Z | 2021-09-29T03:43:46.000Z | # -*- coding: utf-8 -*-
"""
@author: Terada
"""
from keras.models import Sequential, Model
from keras.layers import Dense, MaxPooling2D, Flatten, Dropout
from keras.layers import Conv2D, BatchNormalization, ZeroPadding2D, MaxPool2D
from keras.layers import Input, Convolution2D, AveragePooling2D, merge, Reshape, Activation, concatenate
from keras.regularizers import l2
#from keras.engine.topology import Container
| 42.518987 | 122 | 0.677583 |
a82fdc3cbf4660f5463187cd042910a00705a302 | 9,951 | py | Python | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
] | 19 | 2015-01-13T23:36:23.000Z | 2021-05-29T16:05:05.000Z | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
] | 1 | 2015-11-09T17:23:05.000Z | 2015-11-09T18:48:26.000Z | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
] | 3 | 2016-03-26T15:57:36.000Z | 2018-11-30T07:04:03.000Z | from contextlib import closing, contextmanager
import StringIO as s
import unittest as t
from lambdak import *
# A helper class to test attribute access.
# Helper functions for the tests.
def inc(x): return x + 1
if __name__ == "__main__":
t.main()
| 25.320611 | 162 | 0.633203 |
a830be9674eca4b0486b3f40d92cbb270322784c | 2,327 | py | Python | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
] | 7 | 2019-03-04T14:28:53.000Z | 2022-01-31T12:11:53.000Z | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
] | null | null | null | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
] | 4 | 2019-03-04T14:29:01.000Z | 2022-01-31T12:11:40.000Z | # Coded By : Ismael Al-safadi
from win32gui import GetWindowText, GetForegroundWindow
from pyperclip import copy
from re import findall
from win32clipboard import OpenClipboard , GetClipboardData , CloseClipboard
from time import sleep
a = BitcoinDroper()
while True:
if a.check_active_window() and a.check_bitcoin_wallet():
if not a.spoofing_done():
a.get_old_wallet()
a.spoof_wallet()
elif a.spoofing_done():
if a.check_bitcoin_wallet() and not a.check_active_window():
a.return_copied_wallet()
sleep(2)
| 31.026667 | 96 | 0.628277 |
a832641c2261a8791df173a07f00c6ea847b04f1 | 504 | py | Python | Exploits/Protostar/stack/stack2.py | SkyBulk/OSCE | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
] | 80 | 2018-07-12T04:58:02.000Z | 2022-03-18T11:31:49.000Z | Exploits/Protostar/stack/stack2.py | SunWare-shellcoder/OSCE-1 | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
] | null | null | null | Exploits/Protostar/stack/stack2.py | SunWare-shellcoder/OSCE-1 | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
] | 43 | 2018-07-12T19:48:15.000Z | 2021-11-01T13:16:25.000Z | #!/usr/bin/env python
# $Id: stack2.py,v 1.0 2018/06/21 23:12:02 dhn Exp $
from pwn import *
level = 2
host = "10.168.142.133"
user = "user"
chal = "stack%i" % level
password = "user"
binary = "/opt/protostar/bin/%s" % chal
shell = ssh(host=host, user=user, password=password)
padding = "A" * 64
addr = p32(0x0d0a0d0a)
payload = padding
payload += addr
r = shell.run("GREENIE=\"%s\" %s" % (payload, binary))
r.recvuntil("you have correctly modified the variable")
r.clean()
log.success("Done!")
| 20.16 | 55 | 0.656746 |
a8347276bdea4347d1187329f50e22db158c90b3 | 5,096 | py | Python | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
] | 4 | 2020-02-13T18:57:41.000Z | 2020-08-03T21:08:26.000Z | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
] | null | null | null | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
] | 1 | 2021-06-14T13:42:39.000Z | 2021-06-14T13:42:39.000Z | import urllib.parse, urllib.request,json
import time
import hmac, hashlib,random,base64
#yahoo stuff
#client ID dj0yJmk9S3owYWNNcm1jS3VIJmQ9WVdrOU1HMUZiMHh5TjJNbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD0xOQ--
#client secret ID fcde44eb1bf2a7ff474b9fd861a6fcf33be56d3f
##### ACTUAL FUNCTIONS
| 41.770492 | 116 | 0.724882 |
a8347a798c6edcafbe98def909244e3a366c1264 | 5,246 | py | Python | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | [
"MIT"
] | 9 | 2017-05-27T20:42:46.000Z | 2020-11-12T21:03:28.000Z | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | [
"MIT"
] | 30 | 2017-02-16T19:43:18.000Z | 2018-01-17T21:17:01.000Z | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | [
"MIT"
] | 6 | 2017-02-24T03:40:04.000Z | 2020-11-22T20:29:11.000Z |
import os, sys, logging, threading, tempfile, shutil, tarfile, inspect
from ConfigParser import RawConfigParser
import requests
from DXLiquidIntelApi import DXLiquidIntelApi
log = logging.getLogger(__name__)
| 51.940594 | 191 | 0.62276 |
a8347ba774f82ced779a3ceac5d45c914fbb1cf6 | 3,179 | py | Python | tests/test_derivatives.py | whalenpt/rkstiff | 9fbec7ddd123cc644d392933b518d342751b4cd8 | [
"MIT"
] | 4 | 2021-11-05T15:35:21.000Z | 2022-01-17T10:20:57.000Z | tests/test_derivatives.py | whalenpt/rkstiff | 9fbec7ddd123cc644d392933b518d342751b4cd8 | [
"MIT"
] | null | null | null | tests/test_derivatives.py | whalenpt/rkstiff | 9fbec7ddd123cc644d392933b518d342751b4cd8 | [
"MIT"
] | null | null | null |
from rkstiff.grids import construct_x_kx_rfft, construct_x_kx_fft
from rkstiff.grids import construct_x_Dx_cheb
from rkstiff.derivatives import dx_rfft, dx_fft
import numpy as np
| 26.057377 | 74 | 0.624096 |
a834a938200061353abd64e3aa79cc1eac77b3bf | 2,511 | py | Python | python/jinja2_template.py | bismog/leetcode | 13b8a77045f96e7c59ddfe287481f6aaa68e564d | [
"MIT"
] | null | null | null | python/jinja2_template.py | bismog/leetcode | 13b8a77045f96e7c59ddfe287481f6aaa68e564d | [
"MIT"
] | null | null | null | python/jinja2_template.py | bismog/leetcode | 13b8a77045f96e7c59ddfe287481f6aaa68e564d | [
"MIT"
] | 1 | 2018-08-17T07:07:15.000Z | 2018-08-17T07:07:15.000Z | #!/usr/bin/env python
import os
from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(os.path.join(PATH, 'templates')))
mac_addr = "01:23:45:67:89:01"
PXE_ROOT_DIR = "/data/tftpboot"
pxe_options = {
'os_distribution': 'centos7',
'path_to_vmlinuz': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'vmlinuz'),
'path_to_initrd': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'initrd.img'),
'path_to_kickstart_cfg': os.path.join(PXE_ROOT_DIR, 'node', mac_addr, 'ks.cfg'),
'pxe_server_ip': '128.0.0.1',
'protocol': 'nfs'
}
def build_pxe_config(ctxt, template):
"""Build the PXE boot configuration file.
This method builds the PXE boot configuration file by rendering the
template with the given parameters.
:param pxe_options: A dict of values to set on the configuration file.
:param template: The PXE configuration template.
:param root_tag: Root tag used in the PXE config file.
:param disk_ident_tag: Disk identifier tag used in the PXE config file.
:returns: A formatted string with the file content.
"""
tmpl_path, tmpl_file = os.path.split(template)
env = Environment(loader=FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render(ctxt)
def get_pxe_mac_path(mac, delimiter=None):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
:param delimiter: The MAC address delimiter. Defaults to dash ('-').
:returns: the path to the config file.
"""
if delimiter is None:
delimiter = '-'
mac_file_name = mac.replace(':', delimiter).lower()
mac_file_name = '01-' + mac_file_name
return os.path.join(PXE_ROOT_DIR, 'pxelinux.cfg', mac_file_name)
def get_teml_path():
"""
"""
return os.path.join(PXE_ROOT_DIR, 'template', '01-xx-xx-xx-xx-xx-xx.template')
#def render_template(template_filename, context):
# return env.get_template(template_filename).render(context)
########################################
if __name__ == "__main__":
create_pxe_config_file(pxe_options)
| 31 | 84 | 0.68419 |
a837db7dbbd9e3811093f9342986a637e65f9e07 | 1,101 | py | Python | school_system/users/admin.py | SanyaDeath/BIA-school-system | d07e4e86f91cf1e24c211cc9f5524c50da45b0e5 | [
"BSD-3-Clause"
] | null | null | null | school_system/users/admin.py | SanyaDeath/BIA-school-system | d07e4e86f91cf1e24c211cc9f5524c50da45b0e5 | [
"BSD-3-Clause"
] | null | null | null | school_system/users/admin.py | SanyaDeath/BIA-school-system | d07e4e86f91cf1e24c211cc9f5524c50da45b0e5 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import Student, User
admin.site.site_header = 'BIA SCHOOL SYSTEM'
admin.site.register(User, UserAdmin)
admin.site.register(Student, StudentUser)
| 28.230769 | 66 | 0.613079 |
b5179adb5c10e59288f470f8fa76ecec344ba97b | 1,111 | py | Python | converter.py | ownerofworld/TDroidDesk | 5c773f15d764e6cff468bb39ed40dca5ba07d902 | [
"MIT"
] | 20 | 2017-02-22T18:36:57.000Z | 2022-03-23T11:03:35.000Z | converter.py | extratone/TDroidDesk | e778463e996368374c856e6154dc0885df1f3c11 | [
"MIT"
] | 3 | 2017-02-23T03:51:07.000Z | 2017-03-26T15:06:35.000Z | converter.py | extratone/TDroidDesk | e778463e996368374c856e6154dc0885df1f3c11 | [
"MIT"
] | 9 | 2017-02-23T19:39:20.000Z | 2022-01-02T03:28:01.000Z | # coding: utf-8
"""Converter module."""
import util
THEME = 'theme'
BACKGROUND = 'background'
| 26.452381 | 74 | 0.621062 |
b517e64ad1c06cf00c0f78b0ee1fc02a33f3ce6e | 3,109 | py | Python | tools/serve/test_serve.py | mtrzos/wpt | 6e559a60ecfa38ad6cc434911dd0995a63900db6 | [
"BSD-3-Clause"
] | null | null | null | tools/serve/test_serve.py | mtrzos/wpt | 6e559a60ecfa38ad6cc434911dd0995a63900db6 | [
"BSD-3-Clause"
] | null | null | null | tools/serve/test_serve.py | mtrzos/wpt | 6e559a60ecfa38ad6cc434911dd0995a63900db6 | [
"BSD-3-Clause"
] | 1 | 2021-04-06T20:06:58.000Z | 2021-04-06T20:06:58.000Z | import pickle
import platform
import os
import pytest
import localpaths
from . import serve
from .serve import Config
def test_ws_doc_root_default():
c = Config()
assert c.ws_doc_root == os.path.join(localpaths.repo_root, "websockets", "handlers")
def test_init_ws_doc_root():
c = Config(ws_doc_root="/")
assert c.doc_root == localpaths.repo_root # check this hasn't changed
assert c._ws_doc_root == "/"
assert c.ws_doc_root == "/"
def test_set_ws_doc_root():
c = Config()
c.ws_doc_root = "/"
assert c.doc_root == localpaths.repo_root # check this hasn't changed
assert c._ws_doc_root == "/"
assert c.ws_doc_root == "/"
def test_pickle():
# Ensure that the config object can be pickled
pickle.dumps(Config())
| 38.8625 | 88 | 0.525571 |
b51830bb1dccb3fd7e3c8b9bb5061b4737e27584 | 8,440 | py | Python | webshell/preprocess.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | 1 | 2020-09-15T01:44:21.000Z | 2020-09-15T01:44:21.000Z | webshell/preprocess.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | null | null | null | webshell/preprocess.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | null | null | null | # -*-coding: utf-8 -*-
import os
import re
from sklearn.feature_extraction.text import CountVectorizer
import sys
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
import commands
import tflearn
import pickle
max_features=10000
max_document_length=100
min_opcode_count=2
webshell_dir="../Datasets/dataset_webshell/webshell/PHP/"
whitefile_dir="../Datasets/dataset_webshell/normal/php/"
white_count=0
black_count=0
php_bin="/usr/bin/php"
#php N-Gram + TF-IDF
#opcode N-Gram
#opcode
#php
#php
#opcode
| 26.049383 | 104 | 0.619313 |
b5195d6a3d0b3fd5a3b08706a1231fda25ed0eb8 | 2,252 | py | Python | py/DREAM/Settings/Equations/RunawayElectronDistribution.py | chalmersplasmatheory/DREAM | 715637ada94f5e35db16f23c2fd49bb7401f4a27 | [
"MIT"
] | 12 | 2020-09-07T11:19:10.000Z | 2022-02-17T17:40:19.000Z | py/DREAM/Settings/Equations/RunawayElectronDistribution.py | chalmersplasmatheory/DREAM | 715637ada94f5e35db16f23c2fd49bb7401f4a27 | [
"MIT"
] | 110 | 2020-09-02T15:29:24.000Z | 2022-03-09T09:50:01.000Z | py/DREAM/Settings/Equations/RunawayElectronDistribution.py | chalmersplasmatheory/DREAM | 715637ada94f5e35db16f23c2fd49bb7401f4a27 | [
"MIT"
] | 3 | 2021-05-21T13:24:31.000Z | 2022-02-11T14:43:12.000Z |
import numpy as np
from DREAM.Settings.Equations.EquationException import EquationException
from . import DistributionFunction as DistFunc
from . DistributionFunction import DistributionFunction
from .. TransportSettings import TransportSettings
INIT_FORWARD = 1
INIT_XI_NEGATIVE = 2
INIT_XI_POSITIVE = 3
INIT_ISOTROPIC = 4
| 30.026667 | 83 | 0.655861 |
b519b948a7702826eb1cadca71144eb49329174c | 8,907 | py | Python | chevah/compat/tests/normal/testing/test_assertion.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 5 | 2016-12-03T22:54:50.000Z | 2021-11-17T11:17:39.000Z | chevah/compat/tests/normal/testing/test_assertion.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 76 | 2015-01-22T16:00:31.000Z | 2022-02-09T22:13:34.000Z | chevah/compat/tests/normal/testing/test_assertion.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 1 | 2016-12-10T15:57:31.000Z | 2016-12-10T15:57:31.000Z | # Copyright (c) 2015 Adi Roiban.
# See LICENSE for details.
"""
Tests for the assertion helpers.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
from chevah.compat.exceptions import CompatError
from chevah.compat.testing import ChevahTestCase, mk
| 32.507299 | 78 | 0.615359 |
b51c24c2a8046cf56cf971ce8b89fbb099048127 | 2,253 | py | Python | tests/basic_test.py | patpizio/conformal_predictors | 80d46d8728af23cf4a412024f592c40b51d977c7 | [
"MIT"
] | null | null | null | tests/basic_test.py | patpizio/conformal_predictors | 80d46d8728af23cf4a412024f592c40b51d977c7 | [
"MIT"
] | 4 | 2021-08-04T15:11:33.000Z | 2021-08-04T22:52:03.000Z | tests/basic_test.py | patpizio/conformal_predictors | 80d46d8728af23cf4a412024f592c40b51d977c7 | [
"MIT"
] | null | null | null | import unittest
import sys
sys.path.insert(0, '../src/')
from conformal_predictors.icp import ConformalPredictor
from conformal_predictors.nc_measures import *
import conformal_predictors.calibrutils as cu
from sklearn.datasets import *
import numpy as np
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.base import clone
from sklearn.metrics import classification_report
from nonconformist.cp import IcpClassifier
from nonconformist.nc import NcFactory, InverseProbabilityErrFunc, MarginErrFunc
if __name__ == '__main__':
unittest.main() | 29.644737 | 112 | 0.699512 |
b51c95bad3faa026a48a62db4fc8bca989c644e2 | 7,561 | py | Python | data/unaligned_dataset.py | basicskywards/cyclegan-yolo | 536498706da30707facf1211355ff21df2e5b227 | [
"BSD-3-Clause"
] | null | null | null | data/unaligned_dataset.py | basicskywards/cyclegan-yolo | 536498706da30707facf1211355ff21df2e5b227 | [
"BSD-3-Clause"
] | null | null | null | data/unaligned_dataset.py | basicskywards/cyclegan-yolo | 536498706da30707facf1211355ff21df2e5b227 | [
"BSD-3-Clause"
] | null | null | null | import os.path
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import PIL
from pdb import set_trace as st
import torch
import numpy as np
#from yolo.utils.datasets import pad
#import torchvision.transforms as transforms
from yolo.utils.datasets import pad_to_square, resize, pad_to_square2
| 40.005291 | 206 | 0.562492 |
b51f90c659e185b69613117f368541efd8ec132f | 8,396 | py | Python | primare_control/primare_interface.py | ZenithDK/primare-control | 597a2dd15bedb511fab5cca8d01044692d1e2d96 | [
"Apache-2.0"
] | null | null | null | primare_control/primare_interface.py | ZenithDK/primare-control | 597a2dd15bedb511fab5cca8d01044692d1e2d96 | [
"Apache-2.0"
] | null | null | null | primare_control/primare_interface.py | ZenithDK/primare-control | 597a2dd15bedb511fab5cca8d01044692d1e2d96 | [
"Apache-2.0"
] | null | null | null | """Interface to Primare amplifiers using Twisted SerialPort.
This module allows you to control your Primare I22 and I32 amplifier from the
command line using Primare's binary protocol via the RS232 port on the
amplifier.
"""
import logging
import click
from contextlib import closing
from primare_control import PrimareController
# from twisted.logger import (
# FilteringLogObserver,
# globalLogBeginner,
# Logger,
# LogLevel,
# LogLevelFilterPredicate,
# textFileLogObserver
# )
# log = Logger()
# globalLogBeginner.beginLoggingTo([
# FilteringLogObserver(
# textFileLogObserver(sys.stdout),
# [LogLevelFilterPredicate(LogLevel.debug)]
# )
# ])
# Setup logging so that is available
FORMAT = '%(asctime)-15s %(name)s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
cli()
| 36.504348 | 79 | 0.47737 |
b51fa08d66290d275d2da9e4167fcbc0a1d4e931 | 382 | py | Python | sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py | SaronZhou/python | 40d73b49b9b17542c73a3c09d28e479d2fefcde3 | [
"MIT"
] | null | null | null | sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py | SaronZhou/python | 40d73b49b9b17542c73a3c09d28e479d2fefcde3 | [
"MIT"
] | null | null | null | sjfxjc/foundations-for-analytics-with-python-master/csv/2csv_reader_parsing_and_write.py | SaronZhou/python | 40d73b49b9b17542c73a3c09d28e479d2fefcde3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import csv
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
with open(input_file, 'r', newline='') as csv_in_file:
with open(output_file, 'w', newline='') as csv_out_file:
filereader = csv.reader(csv_in_file, delimiter=',')
filewriter = csv.writer(csv_out_file, delimiter=',')
for row_list in filereader:
filewriter.writerow(row_list) | 29.384615 | 57 | 0.730366 |
b520efe1b1cf2e4fbb13042874cc3d5189db3ae6 | 2,980 | py | Python | provdbconnector/tests/db_adapters/neo4j/test_neo4jadapter.py | Ama-Gi/prov-neo4j-covid19-track | 67a79694ad3b48c34dd263f1508c0bdfbc6702fb | [
"Apache-2.0"
] | 15 | 2016-09-21T22:27:45.000Z | 2022-01-17T15:44:42.000Z | provdbconnector/tests/db_adapters/neo4j/test_neo4jadapter.py | Ama-Gi/prov-neo4j-covid19-track | 67a79694ad3b48c34dd263f1508c0bdfbc6702fb | [
"Apache-2.0"
] | 87 | 2016-09-19T13:26:05.000Z | 2022-03-16T04:16:47.000Z | provdbconnector/tests/db_adapters/neo4j/test_neo4jadapter.py | Ama-Gi/prov-neo4j-covid19-track | 67a79694ad3b48c34dd263f1508c0bdfbc6702fb | [
"Apache-2.0"
] | 3 | 2016-10-17T19:25:10.000Z | 2020-06-26T12:38:34.000Z | import unittest
from provdbconnector.exceptions.database import InvalidOptionsException, AuthException
from provdbconnector import Neo4jAdapter, NEO4J_USER, NEO4J_PASS, NEO4J_HOST, NEO4J_BOLT_PORT
from provdbconnector.prov_db import ProvDb
from provdbconnector.tests import AdapterTestTemplate
from provdbconnector.tests import ProvDbTestTemplate
| 33.863636 | 124 | 0.601342 |
b521c6c0f419d3631f195792a8be1ffaddad4502 | 1,956 | py | Python | Python3-ThirdPartyLibrary/Chapter06_psutil.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-ThirdPartyLibrary/Chapter06_psutil.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-ThirdPartyLibrary/Chapter06_psutil.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import psutil
# CPU
print("CPU: ", psutil.cpu_count()) # CPU
print("CPU: ", psutil.cpu_count(logical=False)) # CPU
print("CPU: ", psutil.cpu_times()) # CPU
# for x in range(3):
# print(psutil.cpu_percent(interval=1, percpu=True)) # CPU
#
print("memory", psutil.virtual_memory()) # ,
print("memory", psutil.swap_memory()) #
#
print("disk: ", psutil.disk_partitions()) #
print("disk: ", psutil.disk_usage('/')) #
print("disk: ", psutil.disk_io_counters()) # IO
#
print("network: ", psutil.net_io_counters()) #
print("network: ", psutil.net_if_addrs()) #
print("network: ", psutil.net_if_stats()) #
print("network: ", psutil.net_connections()) #
#
print("process: ", psutil.pids()) # ID
p = psutil.Process(12052) #
print("process: ", p.name(), #
"\nprocess: ", p.status(), #
"\nprocess: ", p.exe(), # exe
"\nprocess: ", p.cwd(), #
"\nprocess: ", p.create_time(), #
"\nprocess: ", p.cmdline(), #
"\nprocess: ", p.ppid(), # ID
"\nprocess: ", p.parent(), #
"\nprocess: ", p.children(), #
"\nprocess: ", p.username(), #
"\nprocess: ", p.cpu_times(), # CPU
"\nprocess: ", p.memory_info(), #
"\nprocess: ", p.num_threads(), #
"\nprocess: ", p.threads(), #
"\nprocess: ", p.environ(), #
"\nprocess: ", p.open_files(), #
"\nprocess: ", p.connections() #
)
# p.terminate() #
psutil.test() # test()ps
# ### psutil
# - Cross-platform lib for process and system monitoring in Python.
# - Home Page: https://github.com/giampaolo/psutil
# - Documentation: http://psutil.readthedocs.io/en/latest/
| 36.222222 | 73 | 0.596626 |
b5220f9d88a447b033fc07fa837a16f3731fa688 | 1,971 | py | Python | ocrDA.py | it-pebune/ani-research-data-extraction | e8b0ffecb0835020ce7942223cf566dc45ccee35 | [
"MIT"
] | null | null | null | ocrDA.py | it-pebune/ani-research-data-extraction | e8b0ffecb0835020ce7942223cf566dc45ccee35 | [
"MIT"
] | 7 | 2022-01-29T22:19:55.000Z | 2022-03-28T18:18:19.000Z | ocrDA.py | it-pebune/ani-research-data-extraction | e8b0ffecb0835020ce7942223cf566dc45ccee35 | [
"MIT"
] | null | null | null |
import json
from NewDeclarationInQueue.formular_converter import FormularConverter
from NewDeclarationInQueue.preprocess_one_step import PreprocessOneStep
from NewDeclarationInQueue.preprocess_two_steps import PreProcessTwoSteps
from NewDeclarationInQueue.processfiles.customprocess.search_text_line_parameter import SearchTextLineParameter
from NewDeclarationInQueue.processfiles.customprocess.table_config_detail import TableConfigDetail
from NewDeclarationInQueue.processfiles.customprocess.text_with_special_ch import TextWithSpecialCharacters
from NewDeclarationInQueue.processfiles.ocr_worker import OcrWorker
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
#process_only_second_steps(r"test_url.json")
process_two_steps(r"test_url.json")
| 38.647059 | 112 | 0.811771 |
b522c08e48bccc21cac46f1faeda9461a2a4bfcf | 1,187 | py | Python | backend/model/migrate/versions/18632a2d5fc_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | backend/model/migrate/versions/18632a2d5fc_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | backend/model/migrate/versions/18632a2d5fc_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z | """Extend event column in account history
Revision ID: 18632a2d5fc
Revises: 3e19c50e864
Create Date: 2015-06-05 17:49:12.757269
"""
# revision identifiers, used by Alembic.
revision = '18632a2d5fc'
down_revision = '3e19c50e864'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
| 21.581818 | 63 | 0.670598 |
b522cbe5a92fd1c03c4cc0d0200215ca4a546a8f | 8,018 | py | Python | grid_user/models.py | topd333/Xlab | 28d89b3b18717957229ca52cb2cbbbc20bd31eae | [
"Unlicense"
] | null | null | null | grid_user/models.py | topd333/Xlab | 28d89b3b18717957229ca52cb2cbbbc20bd31eae | [
"Unlicense"
] | null | null | null | grid_user/models.py | topd333/Xlab | 28d89b3b18717957229ca52cb2cbbbc20bd31eae | [
"Unlicense"
] | null | null | null | import random
import datetime
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.utils import timezone
SECURTYQUESTION = (
('1', "What city were you born in?"),
('2', "What is your mother's maiden name?"),
('3', "What street did you grow up on?"),
('4', "What is the title of your favorite book?"),
('5', "What is your favorite vacation spot?"),
('6', "What is your pet's name?"),
)
| 31.077519 | 114 | 0.634697 |
b523c2ff097c63e33e8bee17d44fcc56243d89de | 1,729 | py | Python | keyboards/inline/in_processing/confirm_keyboard.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | keyboards/inline/in_processing/confirm_keyboard.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | keyboards/inline/in_processing/confirm_keyboard.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | from emoji import emojize
from data import all_emoji
from aiogram.types import InlineKeyboardMarkup
from aiogram.types import InlineKeyboardButton
from aiogram.utils.callback_data import CallbackData
cb_confirm_close = CallbackData('cb_cc', 'type_btn')
| 26.6 | 76 | 0.625795 |
b524a997831cceef37fb6ffcb9a5a1813e885500 | 9,076 | py | Python | utils/HCA/a_star.py | proroklab/magat_pathplanning | a2cab3b11abc46904bc45be1762a780becb1e8c7 | [
"MIT"
] | 40 | 2021-07-01T03:14:20.000Z | 2022-03-23T23:45:22.000Z | utils/HCA/a_star.py | QingbiaoLi/magat_pathplanning | f28429b1a2ab7866c3001b82e6ae9ca3f072c106 | [
"MIT"
] | null | null | null | utils/HCA/a_star.py | QingbiaoLi/magat_pathplanning | f28429b1a2ab7866c3001b82e6ae9ca3f072c106 | [
"MIT"
] | 13 | 2021-07-14T07:57:16.000Z | 2022-03-03T10:43:25.000Z | '''
This file contains utility of AStarSearch.
Thanks to Binyu Wang for providing the codes.
'''
from random import randint
import numpy as np
# img = np.zeros((20,20))
# source = (0,0)
# dest = (img.shape[0]-1, img.shape[1]-1)
# path = AStarSearch(img, source, dest)
| 35.592157 | 119 | 0.552336 |
b525a442d992316233f044f50e799f9a075c90fa | 1,270 | py | Python | app/users/tasks.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | 2 | 2020-08-02T10:06:19.000Z | 2022-03-29T06:10:57.000Z | app/users/tasks.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | null | null | null | app/users/tasks.py | atulmishra-one/dairy_management_portal | a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | [
"MIT"
] | 2 | 2019-02-03T15:44:02.000Z | 2021-03-09T07:30:28.000Z | import xlrd
from app.services.extension import task_server, sqlalchemy as db
from app.models.core.user import User
from app.application import initialize_app
try:
from app.config.production import ProductionConfig as config_object
except ImportError:
from app.config.local import LocalConfig as config_object
| 27.608696 | 71 | 0.607874 |
b526e227b8af6adb71768eb4900aaf57a69f1acb | 3,444 | py | Python | savenger.py | SlapBot/GodkillerArmor | 27058332cd94c4389b092a621eeedc834d8f5a15 | [
"MIT"
] | 3 | 2018-07-06T17:06:28.000Z | 2018-09-06T03:31:43.000Z | savenger.py | SlapBot/GodkillerArmor | 27058332cd94c4389b092a621eeedc834d8f5a15 | [
"MIT"
] | null | null | null | savenger.py | SlapBot/GodkillerArmor | 27058332cd94c4389b092a621eeedc834d8f5a15 | [
"MIT"
] | 1 | 2018-07-10T00:13:07.000Z | 2018-07-10T00:13:07.000Z | from praw import Reddit
import random
| 44.727273 | 108 | 0.626597 |
b52a4b91de40afb841386437bc92df7dcd61942d | 1,493 | py | Python | python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2017-08-13T14:09:32.000Z | 2018-07-16T23:39:00.000Z | python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | null | null | null | python-packages/pyRiemann-0.2.2/pyriemann/channelselection.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2018-04-02T06:45:11.000Z | 2018-07-16T23:39:02.000Z | from .utils.distance import distance
from .classification import MDM
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################
| 31.765957 | 62 | 0.464836 |
b52aa43dd118effc265e50061d8175f3814721d2 | 1,897 | py | Python | syncbase/user/urls.py | gkrnours/syncbase | 0e20d400fe83e2157ee6e893a105253d20634808 | [
"MIT"
] | null | null | null | syncbase/user/urls.py | gkrnours/syncbase | 0e20d400fe83e2157ee6e893a105253d20634808 | [
"MIT"
] | null | null | null | syncbase/user/urls.py | gkrnours/syncbase | 0e20d400fe83e2157ee6e893a105253d20634808 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from django.contrib.auth import views as auth
from user.forms import NewAccountForm
from user import views
app_name = 'user'
urlpatterns = [
# auth
url(r'^create/$', views.UserCreate.as_view(), name='create'),
url(r'^login/$', auth.login,
{'template_name':'user/login.html'},
name='login'),
url(r'^logout/$', auth.logout,
{'template_name':'user/logout.html'},
name='logout'),
url(r'^password_change/$', auth.password_change,
{'template_name':'user/password_change_form.html',
'post_change_redirect':'user:password_change_done'},
name='password_change'),
url(r'^password_change/done/$', auth.password_change_done,
{'template_name':'user/password_change_done.html'},
name='password_change_done'),
url(r'^password_reset/$', auth.password_reset,
{'post_reset_redirect': 'user:password_reset_done',
'template_name': 'user/password_reset_form.html',
'email_template_name': 'user/password_reset_email.html',
'subject_template_name': 'user/password_reset_subject.txt'},
name='password_reset'),
url(r'^password_reset/done/$', auth.password_reset_done,
{'template_name': 'user/password_reset_done.html'},
name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth.password_reset_confirm,
{'post_reset_redirect':'user:password_reset_complete',
'template_name': "user/password_reset_confirm.html"},
name='password_reset_confirm'),
url(r'^reset/done/$', auth.password_reset_complete,
{'template_name': 'user/password_reset_complete.html'},
name='password_reset_complete'),
# profile
url(r'^basic/$', views.BasicInfo.as_view(), name="basic"),
]
| 34.490909 | 95 | 0.655245 |
b52daf8a9a6916b3bc3be9fb6b077491427da67f | 1,728 | py | Python | mac_changer.py | xicoder96/luv-sic | 033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe | [
"MIT"
] | null | null | null | mac_changer.py | xicoder96/luv-sic | 033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe | [
"MIT"
] | null | null | null | mac_changer.py | xicoder96/luv-sic | 033527b558c3e4d7f254dca1e2f6f0ccf9ff78fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import re
import argparse
if __name__ == "__main__":
options = get_arguments()
current_mac = get_current_mac(options.interface)
print(f"Current Mac:{current_mac}")
change_mac(options.interface, options.new_mac)
current_mac = get_current_mac(options.interface)
if current_mac == options.new_mac:
print(f"[+] MAC address was successfully changed to {current_mac}")
else:
print("[-] MAC address did not change")
| 33.230769 | 79 | 0.65162 |
b53016b4f1a8a22aaafbf177615312636a59d031 | 1,916 | py | Python | training/model.py | J77M/stuffy-nose-recognition | e5d8957e2026e9046e6ffee69a60a11a686bc042 | [
"MIT"
] | null | null | null | training/model.py | J77M/stuffy-nose-recognition | e5d8957e2026e9046e6ffee69a60a11a686bc042 | [
"MIT"
] | null | null | null | training/model.py | J77M/stuffy-nose-recognition | e5d8957e2026e9046e6ffee69a60a11a686bc042 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import time
import utils
path = r'data/'
x, y = utils.reload_data(path)
inp_shape = (x[0].shape[0],1)
x = np.array(x).reshape(-1, 1000, 1)# change 1000 to your sample lenght if you changed frame (= CHUNK ) or RESOLUTION
# prepared for testing and evaluating. try other combinations of architecture
dense_layers = [1]
conv_sizes = [64]
conv_layers = [2]
dense_layer_sizes = [256]
kernel = 10
pool_size = 4
_batchs = 5
_epochs = 10
for dense_layer in dense_layers:
for conv_layer in conv_layers:
for dense_size in dense_layer_sizes:
for conv_size in conv_sizes:
NAME = '{}-conv_layers-{}-dense_layers-{}-conv_size-{}-dense_size-{}-kernel-{}'.format(conv_layer,dense_layer,conv_size, dense_size,kernel, int(time.time()))
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv1D(conv_size, kernel, activation='relu', input_shape = inp_shape))
model.add(tf.keras.layers.MaxPooling1D(pool_size))
for i in range(conv_layer-1):
model.add(tf.keras.layers.Conv1D(conv_size, kernel, activation='relu'))
model.add(tf.keras.layers.MaxPooling1D(pool_size))
model.add(tf.keras.layers.Flatten())
for _ in range(dense_layer):
model.add(tf.keras.layers.Dense(dense_size, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer='adam', metrics=['accuracy'])
tensorboard = tf.keras.callbacks.TensorBoard(log_dir='model_evaluate/{}'.format(NAME))
print(NAME)
model.fit(x,y, batch_size = _batchs, epochs=_epochs, validation_split = 0.2, callbacks=[tensorboard])
model.save('trained_models/{}.h5'.format(NAME)) | 39.102041 | 173 | 0.641441 |
b5317c9553e4578dd1313a39baa5b57770eff21b | 6,163 | py | Python | app/api_service/ice_creams/migrations/0001_initial.py | TheRayOfSeasons/worker-heavy-cicd | fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7 | [
"MIT"
] | null | null | null | app/api_service/ice_creams/migrations/0001_initial.py | TheRayOfSeasons/worker-heavy-cicd | fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7 | [
"MIT"
] | null | null | null | app/api_service/ice_creams/migrations/0001_initial.py | TheRayOfSeasons/worker-heavy-cicd | fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-12 07:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 66.268817 | 204 | 0.646438 |
b5325a85e324486debcb82eb330c6fd293cb8cf4 | 1,306 | py | Python | game/game/protocol.py | maosplx/L2py | 5d81b2ea150c0096cfce184706fa226950f7f583 | [
"MIT"
] | 7 | 2020-09-01T21:52:37.000Z | 2022-02-25T16:00:08.000Z | game/game/protocol.py | maosplx/L2py | 5d81b2ea150c0096cfce184706fa226950f7f583 | [
"MIT"
] | 4 | 2021-09-10T22:15:09.000Z | 2022-03-25T22:17:43.000Z | game/game/protocol.py | maosplx/L2py | 5d81b2ea150c0096cfce184706fa226950f7f583 | [
"MIT"
] | 9 | 2020-09-01T21:53:39.000Z | 2022-03-30T12:03:04.000Z | import logging
from common.api_handlers import handle_request
from common.packet import Packet
from common.response import Response
from common.transport.protocol import TCPProtocol
from game.models.world import WORLD
from game.session import GameSession
from game.states import Connected
LOG = logging.getLogger(f"l2py.{__name__}")
| 30.372093 | 79 | 0.658499 |
b532e08e69d241104c91b1c89e9d10205dab72ab | 292 | py | Python | day6.py | seblars/AdventOfCode2020 | dc41181caa50fe03645aa36d70fe0ebd76cd6e25 | [
"MIT"
] | 1 | 2020-12-16T09:37:01.000Z | 2020-12-16T09:37:01.000Z | day6.py | seblars/AdventOfCode2020 | dc41181caa50fe03645aa36d70fe0ebd76cd6e25 | [
"MIT"
] | null | null | null | day6.py | seblars/AdventOfCode2020 | dc41181caa50fe03645aa36d70fe0ebd76cd6e25 | [
"MIT"
] | null | null | null | import fileinput
# "day6.txt"
groups = [x.split() for x in ''.join(fileinput.input()).split('\n\n')]
# part 1
print(sum(len(set([j for sub in group for j in sub])) for group in groups))
# part 2
print(sum(len(set.intersection(*[set(list(j)) for j in group])) for group in groups))
| 29.2 | 86 | 0.64726 |
b53308bf2a97499e9f1e960c2ded7b7ca3598996 | 370 | py | Python | Conditional/Extras/Everton/02.py | Vitor-ORB/algorithms-and-programming-1-ufms | 10821e9b580b78b7f78c27e740f3ead9c6b9f0bd | [
"MIT"
] | 7 | 2021-05-25T16:49:20.000Z | 2022-02-17T11:57:32.000Z | Conditional/Extras/Everton/02.py | Vitor-ORB/algorithms-and-programming-1-ufms | 10821e9b580b78b7f78c27e740f3ead9c6b9f0bd | [
"MIT"
] | null | null | null | Conditional/Extras/Everton/02.py | Vitor-ORB/algorithms-and-programming-1-ufms | 10821e9b580b78b7f78c27e740f3ead9c6b9f0bd | [
"MIT"
] | 8 | 2021-05-25T16:49:39.000Z | 2021-09-30T18:02:07.000Z | '''Considere o problema de computar o valor absoluto de um nmero real.
O valor absoluto de um nmero real x dado por f(x) = x se x >= 0 ou f(x) = -x se x < 0.
Projete e implemente um programa em Python que lei um nmero de ponto flutuante x, calcule e imprima o valor absoluto de x.'''
x = float(input())
y = (x**2)**(1/2)
print("|{:.2f}| = {:.2f}".format(x,y))
| 33.636364 | 126 | 0.651351 |
b536ac94f02abdab43e5ca604aa965f6ad2715d0 | 1,394 | py | Python | pyoptmat/solvers.py | Argonne-National-Laboratory/pyoptmat | a6e5e8d0b93c77374d4ccbc65a86262eec5df77b | [
"MIT"
] | null | null | null | pyoptmat/solvers.py | Argonne-National-Laboratory/pyoptmat | a6e5e8d0b93c77374d4ccbc65a86262eec5df77b | [
"MIT"
] | 1 | 2022-03-30T22:20:38.000Z | 2022-03-31T15:02:22.000Z | pyoptmat/solvers.py | Argonne-National-Laboratory/pyoptmat | a6e5e8d0b93c77374d4ccbc65a86262eec5df77b | [
"MIT"
] | 2 | 2021-11-16T15:13:54.000Z | 2022-01-06T21:35:42.000Z | import torch
import warnings
def newton_raphson(fn, x0, linsolver = "lu", rtol = 1e-6, atol = 1e-10,
miter = 100):
"""
Solve a nonlinear system with Newton's method. Return the
solution and the last Jacobian
Args:
fn: function that returns the residual and Jacobian
x0: starting point
linsolver (optional): method to use to solve the linear system
rtol (optional): nonlinear relative tolerance
atol (optional): nonlinear absolute tolerance
miter (optional): maximum number of nonlinear iterations
"""
x = x0
R, J = fn(x)
nR = torch.norm(R, dim = -1)
nR0 = nR
i = 0
while (i < miter) and torch.any(nR > atol) and torch.any(nR / nR0 > rtol):
x -= solve_linear_system(J, R)
R, J = fn(x)
nR = torch.norm(R, dim = -1)
i += 1
if i == miter:
warnings.warn("Implicit solve did not succeed. Results may be inaccurate...")
return x, J
def solve_linear_system(A, b, method = "lu"):
"""
Solve or iterate on a linear system of equations
Args:
A: block matrix
b: block RHS
method (optional):
"""
if method == "diag":
return b / torch.diagonal(A, dim1=-2, dim2=-1)
elif method == "lu":
return torch.linalg.solve(A, b)
else:
raise ValueError("Unknown solver method!")
| 27.333333 | 82 | 0.585366 |
b5373a616def2b1d58dca3805f309b56a4c149e0 | 323 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/number-of-substrings-with-only-1s.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(1)
| 20.1875 | 46 | 0.436533 |
b537ff6eac7f94b76cf8db09b3957cee998efb52 | 4,531 | py | Python | usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py | edgefarm/edgefarm-demos | 6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34 | [
"MIT"
] | null | null | null | usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py | edgefarm/edgefarm-demos | 6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34 | [
"MIT"
] | 9 | 2021-04-21T10:37:45.000Z | 2021-07-28T05:56:50.000Z | usecase-2/monitoring/fleet-seat-info-monitor/src/seat_res_train_monitor.py | edgefarm/train-simulation | 6381d4a2f7f9c1d0632ab8123fed2bd0763d3b34 | [
"MIT"
] | null | null | null | import logging
import datetime
import asyncio
from edgefarm_application.base.application_module import application_module_network_nats
from edgefarm_application.base.avro import schemaless_decode
from run_task import run_task
from state_tracker import StateTracker
from schema_loader import schema_load
_logger = logging.getLogger(__name__)
_state_report_subject = "public.seatres.status"
| 31.685315 | 96 | 0.608034 |
b53809b9629204fc4062a7f81e9f288aeb36cec8 | 302 | py | Python | modulo 03/script_03.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 10 | 2020-02-13T03:14:29.000Z | 2021-09-16T04:32:40.000Z | modulo 03/script_03.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | null | null | null | modulo 03/script_03.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 4 | 2020-02-18T23:42:23.000Z | 2021-09-10T05:52:09.000Z | from termcolor import colored, cprint
import sys
text = colored('Hello, World!', 'red', attrs=['reverse', 'blink'])
print(text)
cprint('Hello, World!', 'green', 'on_red')
for i in range(10):
cprint(i, 'magenta', end=' ')
cprint("Attention!",'red', attrs=['bold'], file=sys.stdout)
| 33.555556 | 68 | 0.622517 |
b538595bde41c89c5a8fbdc33e2ae560a927b953 | 1,597 | py | Python | src/AML/run_training.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 2 | 2021-07-12T02:37:46.000Z | 2021-12-28T23:03:20.000Z | src/AML/run_training.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 28 | 2021-12-29T00:51:24.000Z | 2022-03-24T08:03:59.000Z | src/AML/run_training.py | monkeypants/CartridgeOCR | a2cdaa72e3839a881118b85f5ff7b4515579004b | [
"MIT"
] | 4 | 2021-09-24T16:13:43.000Z | 2022-03-09T17:52:35.000Z | import sys
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from shutil import copy
ws = Workspace.from_config()
# Choose a name for your CPU cluster
# cpu_cluster_name = "cpucluster"
cpu_cluster_name = "gpucompute"
experiment_name = "main"
src_dir = "model"
script = "train.py"
# Verify that cluster does not exist already
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_DS12_v2',
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
experiment = Experiment(workspace=ws, name=experiment_name)
copy('./config.json', 'model/config.json')
myenv = Environment.from_pip_requirements(name="myenv",
file_path="requirements.txt")
myenv.environment_variables['PYTHONPATH'] = './model'
myenv.environment_variables['RUNINAZURE'] = 'true'
config = ScriptRunConfig(source_directory=src_dir,
script="./training/train.py",
arguments=sys.argv[1:] if len(sys.argv) > 1 else None,
compute_target=cpu_cluster_name, environment=myenv)
run = experiment.submit(config)
aml_url = run.get_portal_url()
print(aml_url)
| 35.488889 | 86 | 0.708203 |
b538fc619dc6adad01e93a8132a517e7cc8b2d80 | 818 | py | Python | tests/conftest.py | cielavenir/pyppmd-py2 | c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8 | [
"BSD-3-Clause"
] | 3 | 2021-05-04T13:20:39.000Z | 2021-11-03T12:43:02.000Z | tests/conftest.py | cielavenir/pyppmd-py2 | c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8 | [
"BSD-3-Clause"
] | 39 | 2021-04-16T02:55:28.000Z | 2022-03-30T14:23:50.000Z | tests/conftest.py | cielavenir/pyppmd-py2 | c148b8fbe7cb0c0e9f68fdf9a1c3599325f0e4c8 | [
"BSD-3-Clause"
] | 3 | 2021-07-07T17:39:30.000Z | 2022-03-30T15:15:44.000Z | import cpuinfo
def pytest_benchmark_update_json(config, benchmarks, output_json):
"""Calculate compression/decompression speed and add as extra_info"""
for benchmark in output_json["benchmarks"]:
if "data_size" in benchmark["extra_info"]:
rate = benchmark["extra_info"].get("data_size", 0.0) / benchmark["stats"]["mean"]
benchmark["extra_info"]["rate"] = rate
| 43.052632 | 110 | 0.684597 |
b53920dd20dbdafabadb24be44f2a512437147fb | 331 | py | Python | examples/test_gcld3.py | lbp0200/EasyNMT | d253e9346996a47aa989bb33aed72e531528dc27 | [
"Apache-2.0"
] | null | null | null | examples/test_gcld3.py | lbp0200/EasyNMT | d253e9346996a47aa989bb33aed72e531528dc27 | [
"Apache-2.0"
] | null | null | null | examples/test_gcld3.py | lbp0200/EasyNMT | d253e9346996a47aa989bb33aed72e531528dc27 | [
"Apache-2.0"
] | null | null | null | import time
import gcld3
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0,
max_num_bytes=1000)
# text = "This text is written in English"
text = ""
while True:
result = detector.FindLanguage(text=text)
print(text, result.probability, result.language)
time.sleep(0.01)
| 25.461538 | 59 | 0.65861 |
b539e3fd28c31f9e28937feef603fdbd7a3fc98e | 1,593 | py | Python | src/0075下一个排列/index.py | zzh2036/OneDayOneLeetcode | 1198692e68f8f0dbf15555e45969122e1a92840a | [
"MIT"
] | null | null | null | src/0075下一个排列/index.py | zzh2036/OneDayOneLeetcode | 1198692e68f8f0dbf15555e45969122e1a92840a | [
"MIT"
] | null | null | null | src/0075下一个排列/index.py | zzh2036/OneDayOneLeetcode | 1198692e68f8f0dbf15555e45969122e1a92840a | [
"MIT"
] | null | null | null | '''
1
nums = [1,2,3]
[1,3,2]
2
nums = [3,2,1]
[1,2,3]
3
nums = [1,1,5]
[1,5,1]
4
nums = [1]
[1]
1 <= nums.length <= 100
0 <= nums[i] <= 100
'''
if __name__ == '__main__':
points = [1, 2, 3]
ins = Solution()
ins.nextPermutation(points)
print(points)
| 22.125 | 61 | 0.468927 |
b53a0396d1bab4e9ce336103d0380f331ae41db6 | 7,906 | py | Python | scripts/structural_make_html_report.py | vogelbac/LAB-QA2GO- | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
] | 14 | 2019-02-07T10:50:58.000Z | 2021-09-03T16:11:00.000Z | scripts/structural_make_html_report.py | vogelbac/LAB-QA2GO- | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
] | 6 | 2019-01-28T09:19:27.000Z | 2021-09-09T06:56:42.000Z | scripts/structural_make_html_report.py | vogelbac/LAB-QA2GO | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
] | 4 | 2019-01-28T09:00:58.000Z | 2021-05-25T13:54:40.000Z | # script to generate the overview and individual html report website.
import os
import numpy
| 41.610526 | 193 | 0.68214 |
b53dea5224a5f4701caa31694ad6c985328e3868 | 12,226 | py | Python | pynetdicom3/sop_class.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | null | null | null | pynetdicom3/sop_class.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | null | null | null | pynetdicom3/sop_class.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | 2 | 2020-09-27T06:41:41.000Z | 2021-02-07T06:53:02.000Z | """Generates the supported SOP Classes."""
from collections import namedtuple
import inspect
import logging
import sys
from pydicom.uid import UID
from pynetdicom3.service_class import (
VerificationServiceClass,
StorageServiceClass,
QueryRetrieveServiceClass,
BasicWorklistManagementServiceClass,
)
LOGGER = logging.getLogger('pynetdicom3.sop')
def uid_to_service_class(uid):
"""Return the ServiceClass object corresponding to `uid`.
Parameters
----------
uid : pydicom.uid.UID
The SOP Class UID to find the corresponding Service Class.
Returns
-------
service_class.ServiceClass
The Service Class corresponding to the SOP Class UID.
Raises
------
NotImplementedError
If the Service Class corresponding to the SOP Class `uid` hasn't been
implemented.
"""
if uid in _VERIFICATION_CLASSES.values():
return VerificationServiceClass
elif uid in _STORAGE_CLASSES.values():
return StorageServiceClass
elif uid in _QR_CLASSES.values():
return QueryRetrieveServiceClass
elif uid in _BASIC_WORKLIST_CLASSES.values():
return BasicWorklistManagementServiceClass
else:
raise NotImplementedError(
"The Service Class for the SOP Class with UID '{}' has not "
"been implemented".format(uid)
)
SOPClass = namedtuple("SOPClass", ['uid', 'UID', 'service_class'])
def _generate_sop_classes(sop_class_dict):
"""Generate the SOP Classes."""
for name in sop_class_dict:
globals()[name] = SOPClass(
UID(sop_class_dict[name]),
UID(sop_class_dict[name]),
uid_to_service_class(sop_class_dict[name])
)
# Generate the various SOP classes
_VERIFICATION_CLASSES = {
'VerificationSOPClass' : '1.2.840.10008.1.1',
}
# pylint: disable=line-too-long
_STORAGE_CLASSES = {
'ComputedRadiographyImageStorage' : '1.2.840.10008.5.1.4.1.1.1',
'DigitalXRayImagePresentationStorage' : '1.2.840.10008.5.1.4.1.1.1.1',
'DigitalXRayImageProcessingStorage' : '1.2.840.10008.5.1.4.1.1.1.1.1.1',
'DigitalMammographyXRayImagePresentationStorage' : '1.2.840.10008.5.1.4.1.1.1.2',
'DigitalMammographyXRayImageProcessingStorage' : '1.2.840.10008.5.1.4.1.1.1.2.1',
'DigitalIntraOralXRayImagePresentationStorage' : '1.2.840.10008.5.1.4.1.1.1.3',
'DigitalIntraOralXRayImageProcessingStorage' : '1.2.840.10008.5.1.1.4.1.1.3.1',
'CTImageStorage' : '1.2.840.10008.5.1.4.1.1.2',
'EnhancedCTImageStorage' : '1.2.840.10008.5.1.4.1.1.2.1',
'LegacyConvertedEnhancedCTImageStorage' : '1.2.840.10008.5.1.4.1.1.2.2',
'UltrasoundMultiframeImageStorage' : '1.2.840.10008.5.1.4.1.1.3.1',
'MRImageStorage' : '1.2.840.10008.5.1.4.1.1.4',
'EnhancedMRImageStorage' : '1.2.840.10008.5.1.4.1.1.4.1',
'MRSpectroscopyStorage' : '1.2.840.10008.5.1.4.1.1.4.2',
'EnhancedMRColorImageStorage' : '1.2.840.10008.5.1.4.1.1.4.3',
'LegacyConvertedEnhancedMRImageStorage' : '1.2.840.10008.5.1.4.1.1.4.4',
'UltrasoundImageStorage' : '1.2.840.10008.5.1.4.1.1.6.1',
'EnhancedUSVolumeStorage' : '1.2.840.10008.5.1.4.1.1.6.2',
'SecondaryCaptureImageStorage' : '1.2.840.10008.5.1.4.1.1.7',
'MultiframeSingleBitSecondaryCaptureImageStorage' : '1.2.840.10008.5.1.4.1.1.7.1',
'MultiframeGrayscaleByteSecondaryCaptureImageStorage' : '1.2.840.10008.5.1.4.1.1.7.2',
'MultiframeGrayscaleWordSecondaryCaptureImageStorage' : '1.2.840.10008.5.1.4.1.1.7.3',
'MultiframeTrueColorSecondaryCaptureImageStorage' : '1.2.840.10008.5.1.4.1.1.7.4',
'TwelveLeadECGWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.1.1',
'GeneralECGWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.1.2',
'AmbulatoryECGWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.1.3',
'HemodynamicWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.2.1',
'CardiacElectrophysiologyWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.3.1',
'BasicVoiceAudioWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.4.1',
'GeneralAudioWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.4.2',
'ArterialPulseWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.5.1',
'RespiratoryWaveformStorage' : '1.2.840.10008.5.1.4.1.1.9.6.1',
'GrayscaleSoftcopyPresentationStateStorage' : '1.2.840.10008.5.1.4.1.1.11.1',
'ColorSoftcopyPresentationStateStorage' : '1.2.840.10008.5.1.4.1.1.11.2',
'PseudocolorSoftcopyPresentationStageStorage' : '1.2.840.10008.5.1.4.1.1.11.3',
'BlendingSoftcopyPresentationStateStorage' : '1.2.840.10008.5.1.4.1.1.11.4',
'XAXRFGrayscaleSoftcopyPresentationStateStorage' : '1.2.840.10008.5.1.4.1.1.11.5',
'XRayAngiographicImageStorage' : '1.2.840.10008.5.1.4.1.1.12.1',
'EnhancedXAImageStorage' : '1.2.840.10008.5.1.4.1.1.12.1.1',
'XRayRadiofluoroscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.12.2',
'EnhancedXRFImageStorage' : '1.2.840.10008.5.1.4.1.1.12.2.1',
'XRay3DAngiographicImageStorage' : '1.2.840.10008.5.1.4.1.1.13.1.1',
'XRay3DCraniofacialImageStorage' : '1.2.840.10008.5.1.4.1.1.13.1.2',
'BreastTomosynthesisImageStorage' : '1.2.840.10008.5.1.4.1.1.13.1.3',
'BreastProjectionXRayImagePresentationStorage' : '1.2.840.10008.5.1.4.1.1.13.1.4',
'BreastProjectionXRayImageProcessingStorage' : '1.2.840.10008.5.1.4.1.1.13.1.5',
'IntravascularOpticalCoherenceTomographyImagePresentationStorage' : '1.2.840.10008.5.1.4.1.1.14.1',
'IntravascularOpticalCoherenceTomographyImageProcessingStorage' : '1.2.840.10008.5.1.4.1.1.14.2',
'NuclearMedicineImageStorage' : '1.2.840.10008.5.1.4.1.1.20',
'ParametricMapStorage' : '1.2.840.10008.5.1.4.1.1.30',
'RawDataStorage' : '1.2.840.10008.5.1.4.1.1.66',
'SpatialRegistrationStorage' : '1.2.840.10008.5.1.4.1.1.66.1',
'SpatialFiducialsStorage' : '1.2.840.10008.5.1.4.1.1.66.2',
'DeformableSpatialRegistrationStorage' : '1.2.840.10008.5.1.4.1.1.66.3',
'SegmentationStorage' : '1.2.840.10008.5.1.4.1.1.66.4',
'SurfaceSegmentationStorage' : '1.2.840.10008.5.1.4.1.1.66.5',
'RealWorldValueMappingStorage' : '1.2.840.10008.5.1.4.1.1.67',
'SurfaceScanMeshStorage' : '1.2.840.10008.5.1.4.1.1.68.1',
'SurfaceScanPointCloudStorage' : '1.2.840.10008.5.1.4.1.1.68.2',
'VLEndoscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.1',
'VideoEndoscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.1.1',
'VLMicroscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.2',
'VideoMicroscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.2.1',
'VLSlideCoordinatesMicroscopicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.3',
'VLPhotographicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.4',
'VideoPhotographicImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.4.1',
'OphthalmicPhotography8BitImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.1',
'OphthalmicPhotography16BitImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.2',
'StereometricRelationshipStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.3',
'OpthalmicTomographyImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.4',
'WideFieldOpthalmicPhotographyStereographicProjectionImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.5',
'WideFieldOpthalmicPhotography3DCoordinatesImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.5.6',
'VLWholeSlideMicroscopyImageStorage' : '1.2.840.10008.5.1.4.1.1.77.1.6',
'LensometryMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.1',
'AutorefractionMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.2',
'KeratometryMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.3',
'SubjectiveRefractionMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.4',
'VisualAcuityMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.5',
'SpectaclePrescriptionReportStorage' : '1.2.840.10008.5.1.4.1.1.78.6',
'OpthalmicAxialMeasurementsStorage' : '1.2.840.10008.5.1.4.1.1.78.7',
'IntraocularLensCalculationsStorage' : '1.2.840.10008.5.1.4.1.1.78.8',
'MacularGridThicknessAndVolumeReport' : '1.2.840.10008.5.1.4.1.1.79.1',
'OpthalmicVisualFieldStaticPerimetryMeasurementsStorag' : '1.2.840.10008.5.1.4.1.1.80.1',
'OpthalmicThicknessMapStorage' : '1.2.840.10008.5.1.4.1.1.81.1',
'CornealTopographyMapStorage' : '1.2.840.10008.5.1.4.1.1.82.1',
'BasicTextSRStorage' : '1.2.840.10008.5.1.4.1.1.88.11',
'EnhancedSRStorage' : '1.2.840.10008.5.1.4.1.1.88.22',
'ComprehensiveSRStorage' : '1.2.840.10008.5.1.4.1.1.88.33',
'Comprehenseice3DSRStorage' : '1.2.840.10008.5.1.4.1.1.88.34',
'ExtensibleSRStorage' : '1.2.840.10008.5.1.4.1.1.88.35',
'ProcedureSRStorage' : '1.2.840.10008.5.1.4.1.1.88.40',
'MammographyCADSRStorage' : '1.2.840.10008.5.1.4.1.1.88.50',
'KeyObjectSelectionStorage' : '1.2.840.10008.5.1.4.1.1.88.59',
'ChestCADSRStorage' : '1.2.840.10008.5.1.4.1.1.88.65',
'XRayRadiationDoseSRStorage' : '1.2.840.10008.5.1.4.1.1.88.67',
'RadiopharmaceuticalRadiationDoseSRStorage' : '1.2.840.10008.5.1.4.1.1.88.68',
'ColonCADSRStorage' : '1.2.840.10008.5.1.4.1.1.88.69',
'ImplantationPlanSRDocumentStorage' : '1.2.840.10008.5.1.4.1.1.88.70',
'EncapsulatedPDFStorage' : '1.2.840.10008.5.1.4.1.1.104.1',
'EncapsulatedCDAStorage' : '1.2.840.10008.5.1.4.1.1.104.2',
'PositronEmissionTomographyImageStorage' : '1.2.840.10008.5.1.4.1.1.128',
'EnhancedPETImageStorage' : '1.2.840.10008.5.1.4.1.1.130',
'LegacyConvertedEnhancedPETImageStorage' : '1.2.840.10008.5.1.4.1.1.128.1',
'BasicStructuredDisplayStorage' : '1.2.840.10008.5.1.4.1.1.131',
'RTImageStorage' : '1.2.840.10008.5.1.4.1.1.481.1',
'RTDoseStorage' : '1.2.840.10008.5.1.4.1.1.481.2',
'RTStructureSetStorage' : '1.2.840.10008.5.1.4.1.1.481.3',
'RTBeamsTreatmentRecordStorage' : '1.2.840.10008.5.1.4.1.1.481.4',
'RTPlanStorage' : '1.2.840.10008.5.1.4.1.1.481.5',
'RTBrachyTreatmentRecordStorage' : '1.2.840.10008.5.1.4.1.1.481.6',
'RTTreatmentSummaryRecordStorage' : '1.2.840.10008.5.1.4.1.1.481.7',
'RTIonPlanStorage' : '1.2.840.10008.5.1.4.1.1.481.8',
'RTIonBeamsTreatmentRecordStorage' : '1.2.840.10008.5.1.4.1.1.481.9',
'RTBeamsDeliveryInstructionStorage' : '1.2.840.10008.5.1.4.34.7',
'GenericImplantTemplateStorage' : '1.2.840.10008.5.1.4.43.1',
'ImplantAssemblyTemplateStorage' : '1.2.840.10008.5.1.4.44.1',
'ImplantTemplateGroupStorage' : '1.2.840.10008.5.1.4.45.1'
}
_QR_CLASSES = {
'PatientRootQueryRetrieveInformationModelFind' : '1.2.840.10008.5.1.4.1.2.1.1',
'PatientRootQueryRetrieveInformationModelMove' : '1.2.840.10008.5.1.4.1.2.1.2',
'PatientRootQueryRetrieveInformationModelGet' : '1.2.840.10008.5.1.4.1.2.1.3',
'StudyRootQueryRetrieveInformationModelFind' : '1.2.840.10008.5.1.4.1.2.2.1',
'StudyRootQueryRetrieveInformationModelMove' : '1.2.840.10008.5.1.4.1.2.2.2',
'StudyRootQueryRetrieveInformationModelGet' : '1.2.840.10008.5.1.4.1.2.2.3',
'PatientStudyOnlyQueryRetrieveInformationModelFind' : '1.2.840.10008.5.1.4.1.2.3.1',
'PatientStudyOnlyQueryRetrieveInformationModelMove' : '1.2.840.10008.5.1.4.1.2.3.2',
'PatientStudyOnlyQueryRetrieveInformationModelGet' : '1.2.840.10008.5.1.4.1.2.3.3',
}
_BASIC_WORKLIST_CLASSES = {
'ModalityWorklistInformationFind' : '1.2.840.10008.5.1.4.31',
}
# pylint: enable=line-too-long
_generate_sop_classes(_VERIFICATION_CLASSES)
_generate_sop_classes(_STORAGE_CLASSES)
_generate_sop_classes(_QR_CLASSES)
_generate_sop_classes(_BASIC_WORKLIST_CLASSES)
def uid_to_sop_class(uid):
"""Given a `uid` return the corresponding SOPClass.
Parameters
----------
uid : pydicom.uid.UID
Returns
-------
sop_class.SOPClass subclass
The SOP class corresponding to `uid`.
Raises
------
NotImplementedError
If the SOP Class corresponding to the given UID has not been
implemented.
"""
# Get a list of all the class members of the current module
members = inspect.getmembers(
sys.modules[__name__],
lambda mbr: isinstance(mbr, tuple)
)
for obj in members:
if hasattr(obj[1], 'uid') and obj[1].uid == uid:
return obj[1]
raise NotImplementedError("The SOP Class for UID '{}' has not been " \
"implemented".format(uid))
| 49.699187 | 108 | 0.679699 |
b53df049332ea39e2f7827214e41edfb7e42ca6c | 7,885 | py | Python | feed_forward_model.py | karlschrader/deepPD | 678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5 | [
"MIT"
] | null | null | null | feed_forward_model.py | karlschrader/deepPD | 678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5 | [
"MIT"
] | null | null | null | feed_forward_model.py | karlschrader/deepPD | 678793c9026eab2681d2d0a3b7e7f9f91c0f3bc5 | [
"MIT"
] | null | null | null | import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages
TF_DTYPE = tf.float64
MOMENTUM = 0.99
EPSILON = 1e-6
DELTA_CLIP = 50.0
| 38.842365 | 95 | 0.557261 |
b53f8bde038bcadd2b2dcbf0be630c6ab3f7dcdd | 122 | py | Python | semg_bss/hyser/__init__.py | nihil21/semg-bss | c8abd8aad6ed35e22a31d238adafe28f22bbcb3f | [
"MIT"
] | 2 | 2021-12-09T15:27:42.000Z | 2021-12-16T12:48:54.000Z | semg_bss/hyser/__init__.py | nihil21/semg-bss | c8abd8aad6ed35e22a31d238adafe28f22bbcb3f | [
"MIT"
] | null | null | null | semg_bss/hyser/__init__.py | nihil21/semg-bss | c8abd8aad6ed35e22a31d238adafe28f22bbcb3f | [
"MIT"
] | null | null | null | from .dataset import load_pr, load_1dof, load_mvc, load_ndof
__all__ = ["load_pr", "load_1dof", "load_mvc", "load_ndof"]
| 30.5 | 60 | 0.737705 |
b540b40d9aaf331bef2f785083b2bbd7ed30bfe6 | 619 | py | Python | Fibonacci/Python/fibonacci.py | IanDoarn/LearningRepo | 4c5906b3c1f497a979c3fce89a66d1e571cd6b42 | [
"MIT"
] | null | null | null | Fibonacci/Python/fibonacci.py | IanDoarn/LearningRepo | 4c5906b3c1f497a979c3fce89a66d1e571cd6b42 | [
"MIT"
] | null | null | null | Fibonacci/Python/fibonacci.py | IanDoarn/LearningRepo | 4c5906b3c1f497a979c3fce89a66d1e571cd6b42 | [
"MIT"
] | null | null | null | """
Fibonacci sequence using python
generators
Written by: Ian Doarn
"""
if __name__ == '__main__':
# Maximum fib numbers to print
max_i = 20
for i, fib_n in enumerate(fib()):
#Print each yielded fib number
print('{i:3}: {f:3}'.format(i=i, f=fib_n))
# Break when we hit max_i value
if i == max_i:
break
| 23.807692 | 75 | 0.55412 |
b543980e156f1837cc8c91284aa02b3f5bbf8218 | 974 | py | Python | Chapter 6/glossary_2.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | null | null | null | Chapter 6/glossary_2.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | null | null | null | Chapter 6/glossary_2.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | 5 | 2021-09-22T16:53:47.000Z | 2022-03-24T00:56:49.000Z | glossary = {
'intger': 'is colloquially defined as a number that can be written without a fractional component.\n',
'iterate': 'is the repetition of a process in order to generate a sequence of outcomes.\n',
'indentation': 'is an empty space at the beginning of a line that groups particular blocks of code.\n',
'concatinate': 'is the operation of joining character strings end-to-end.\n',
'boolean': 'is a logical data type that can have only the values True or False.\n',
'loop': 'for loop iterates over an object until that object is complete.\n',
'tuple': 'is a immutable data structure that store an ordered sequence of values.\n',
'dictionary': 'is an unordered and mutable Python container that stores mappings of unique keys to values.\n',
'parse': 'is a command for dividing the given program code into a small piece of code for analyzing the correct syntax.',
}
for k, v in glossary.items():
print(f'{k.title()}: {v}')
| 64.933333 | 125 | 0.710472 |
b543f58cf6e8b8dc209086801165057172e20d3f | 1,711 | py | Python | scripts/test_spider_roundtrip.py | mattr1/seq2struct_forPRs | cdc9e3c94380fb479ed3e3c77f370038d27cf2d6 | [
"MIT"
] | 25 | 2019-07-16T22:32:44.000Z | 2022-01-25T05:23:07.000Z | scripts/test_spider_roundtrip.py | mattr1/seq2struct_forPRs | cdc9e3c94380fb479ed3e3c77f370038d27cf2d6 | [
"MIT"
] | 19 | 2018-12-17T20:42:11.000Z | 2020-02-12T21:29:51.000Z | scripts/test_spider_roundtrip.py | mattr1/seq2struct_forPRs | cdc9e3c94380fb479ed3e3c77f370038d27cf2d6 | [
"MIT"
] | 22 | 2019-03-16T05:57:27.000Z | 2020-10-25T04:34:54.000Z | import ast
import argparse
import json
import os
import pprint
import astor
import tqdm
import _jsonnet
from seq2struct import datasets
from seq2struct import grammars
from seq2struct.utils import registry
from third_party.spider import evaluation
if __name__ == '__main__':
main()
| 30.553571 | 102 | 0.663939 |
b5441f2ff301c902adbb89a228a3e18af8032444 | 1,632 | py | Python | tune/noniterative/objective.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 14 | 2021-03-03T20:02:09.000Z | 2021-11-10T20:32:22.000Z | tune/noniterative/objective.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 26 | 2021-04-30T19:56:06.000Z | 2022-01-18T04:40:00.000Z | tune/noniterative/objective.py | fugue-project/tune | bf2288ddcb29c8345d996a9b22c0910da9002da1 | [
"Apache-2.0"
] | 2 | 2021-04-30T03:12:21.000Z | 2022-02-05T12:13:37.000Z | from tune.constants import TUNE_STOPPER_DEFAULT_CHECK_INTERVAL
from typing import Any, Callable, Optional
from tune._utils import run_monitored_process
from tune.concepts.flow import Trial, TrialReport
def validate_noniterative_objective(
    func: NonIterativeObjectiveFunc,
    trial: Trial,
    validator: Callable[[TrialReport], None],
    optimizer: Optional[NonIterativeObjectiveLocalOptimizer] = None,
) -> None:
    """Run ``func`` once on ``trial`` and pass the resulting report to ``validator``.

    :param func: the non-iterative objective function to evaluate
    :param trial: the trial (parameter set) to evaluate the objective on
    :param validator: callback receiving the resulting ``TrialReport``; it
        should raise (e.g. via assertions) when the report is unacceptable
    :param optimizer: optional local optimizer used to run the objective;
        when ``None`` a fresh ``NonIterativeObjectiveLocalOptimizer`` is used
    """
    # Fall back to a default local optimizer when none is supplied.
    _optimizer = optimizer or NonIterativeObjectiveLocalOptimizer()
    # Run the objective in a monitored fashion with a never-stop flag
    # (lambda: False) and a "1sec" check interval, then hand the report to
    # the caller's validator.
    # NOTE(review): assumes the optimizer exposes run_monitored_process()
    # with this (func, trial, stop_fn, interval) signature -- confirm; the
    # module also imports a free function of the same name from tune._utils.
    validator(_optimizer.run_monitored_process(func, trial, lambda: False, "1sec"))
| 33.306122 | 85 | 0.717525 |
b545de61a4d0708ce0bd62bccdbaa4f9ddf7238d | 371 | py | Python | ThinkPython/chap10/ex7.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | ThinkPython/chap10/ex7.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | ThinkPython/chap10/ex7.py | sokolowskik/Tutorials | d2681d4f18b03e00f90f9132c77f0b23b74d2629 | [
"MIT"
] | null | null | null | a1 = 'mary'
b1 = 'army'
a2 = 'mary'
b2 = 'mark'
def is_anagram(a, b):
    """
    Return True if words a and b are anagrams of each other,
    False otherwise.
    """
    # Two words are anagrams exactly when they contain the same letters,
    # i.e. when their sorted character sequences are equal.
    if sorted(a) == sorted(b):
        return True
    else:
        return False
print is_anagram(a1, b1)
print is_anagram(a2, b2)
| 16.130435 | 46 | 0.574124 |
b54662251afe3923c7f22e2ffa35d6fb5d4cc63b | 459 | py | Python | pymotion/models/notification.py | LeResKP/motion | 4a7c6200ca6fd20edb3b98c6ea1215c90d988a78 | [
"MIT"
] | null | null | null | pymotion/models/notification.py | LeResKP/motion | 4a7c6200ca6fd20edb3b98c6ea1215c90d988a78 | [
"MIT"
] | null | null | null | pymotion/models/notification.py | LeResKP/motion | 4a7c6200ca6fd20edb3b98c6ea1215c90d988a78 | [
"MIT"
] | null | null | null | from sqlalchemy import (
Column,
ForeignKey,
Integer,
Text,
)
from sqlalchemy.orm import relationship
from .meta import Base
| 21.857143 | 77 | 0.714597 |
b5473421d6c0b8e5ed5978ee678700c80296d6a9 | 1,340 | py | Python | utils/model_helper.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | utils/model_helper.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | utils/model_helper.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
model helper
~~~~~~~~~~~~
:Created: 2016-8-5
:Copyright: (c) 2016<smileboywtu@gmail.com>
"""
from customer_exceptions import OffsetOutOfRangeException
| 23.103448 | 60 | 0.568657 |
b54756c2e6e68e661aab45de212b547f340ad633 | 2,603 | py | Python | djadmin2/templatetags/admin2_tags.py | beezz/django-admin2 | 4aec1a3836011cd46e5eb8b6375590bf5a76c044 | [
"BSD-3-Clause"
] | 1 | 2015-04-30T13:34:03.000Z | 2015-04-30T13:34:03.000Z | djadmin2/templatetags/admin2_tags.py | taxido/django-admin2 | 6a6b3d5f790b8289b0dd0f9194d80799af8804dc | [
"BSD-3-Clause"
] | 1 | 2021-03-19T23:57:09.000Z | 2021-03-19T23:57:09.000Z | djadmin2/templatetags/admin2_tags.py | RyanBalfanz/django-admin2 | e7f0611eea22370bb3418e25e9cd10ddbac4fd6d | [
"BSD-3-Clause"
] | null | null | null | from django import template
register = template.Library()
from .. import utils
| 26.835052 | 84 | 0.702651 |
b5484bee48cb34153d413c1639f3e4d36037235a | 2,323 | py | Python | tests/test_filters/test_edges.py | luluricketts/biothings_explorer | ae2009ff285f96a08e0145f242846ca613b5069c | [
"Apache-2.0"
] | null | null | null | tests/test_filters/test_edges.py | luluricketts/biothings_explorer | ae2009ff285f96a08e0145f242846ca613b5069c | [
"Apache-2.0"
] | null | null | null | tests/test_filters/test_edges.py | luluricketts/biothings_explorer | ae2009ff285f96a08e0145f242846ca613b5069c | [
"Apache-2.0"
] | null | null | null | """
Tests for edges.py
"""
import unittest
import pandas as pd
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from biothings_explorer.filters.edges import filter_node_degree
if __name__ == '__main__':
unittest.main()
| 40.754386 | 106 | 0.54025 |
b54ed986a0849287fd62118ba89a87ae8732ba9e | 974 | py | Python | get_data.py | ryanw3bb/fpl | a06fbf8ada5f549f0750ed9af46f53b3a1a0149e | [
"MIT"
] | 1 | 2018-08-15T02:52:52.000Z | 2018-08-15T02:52:52.000Z | get_data.py | ryanw3bb/fpl | a06fbf8ada5f549f0750ed9af46f53b3a1a0149e | [
"MIT"
] | null | null | null | get_data.py | ryanw3bb/fpl | a06fbf8ada5f549f0750ed9af46f53b3a1a0149e | [
"MIT"
] | null | null | null | """
Retrieves data as json files from fantasy.premierleague.com
"""
import json
import requests
LAST_SEASON_DATA_FILENAME = "data/player_data_20_21.json"
DATA_URL = "https://fantasy.premierleague.com/api/bootstrap-static/"
DATA_FILENAME = "data/player_data_21_22.json"
FIXTURES_URL = "https://fantasy.premierleague.com/api/fixtures/"
FIXTURES_FILENAME = "data/fixtures_data_21_22.json"
# Download all player data and write file
# Download all fixtures data and write file
| 24.974359 | 68 | 0.74846 |
b54f720607fa63d495bc79cd36045e62028217a1 | 5,587 | py | Python | examples/spawning5.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | 1 | 2021-08-10T19:29:52.000Z | 2021-08-10T19:29:52.000Z | examples/spawning5.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | null | null | null | examples/spawning5.py | MissMeriel/BeamNGpy | a8467c57537441802bc5b56f0012dfee2b5f5af0 | [
"MIT"
] | null | null | null | from beamngpy import BeamNGpy, Vehicle, Scenario, ScenarioObject
from beamngpy import setup_logging, Config
from beamngpy.sensors import Camera, GForces, Lidar, Electrics, Damage, Timer
import beamngpy
import time, random
# globals
default_model = 'pickup'
default_scenario = 'west_coast_usa' #'cliff' # smallgrid
dt = 20
if __name__ == "__main__":
main() | 36.51634 | 133 | 0.583139 |
b55212239eec52a949a21799291c19c62811b1b2 | 17,827 | py | Python | steam/check_acf.py | DarkStarSword/junk | f0de649a3ef92715fbc6aeff7599ad843763c32b | [
"MIT"
] | 19 | 2015-02-02T16:49:32.000Z | 2021-12-04T02:33:10.000Z | steam/check_acf.py | DarkStarSword/junk | f0de649a3ef92715fbc6aeff7599ad843763c32b | [
"MIT"
] | 1 | 2015-07-11T13:57:34.000Z | 2015-11-16T02:36:23.000Z | steam/check_acf.py | DarkStarSword/junk | f0de649a3ef92715fbc6aeff7599ad843763c32b | [
"MIT"
] | 4 | 2017-02-06T21:11:17.000Z | 2019-04-04T15:11:50.000Z | #!/usr/bin/env python
from __future__ import print_function
import os, optparse, glob
import depotcache, acf
from ui import ui_tty as ui
import hashlib
import sys
g_indent = ' '
colours = {
False: 'back_red black',
True: ''
}
if __name__ == '__main__':
main()
# vi:noet:ts=8:sw=8
| 35.941532 | 388 | 0.710664 |
b5523d39a4d4c8cb3b8be163ac345c9888bb29a1 | 178 | py | Python | reference/old/distance-simple.py | Art31/trekking-pro-cefetrj | 37ab58759b42978cbd8d950bd75c487e1292cb2b | [
"Apache-1.1"
] | null | null | null | reference/old/distance-simple.py | Art31/trekking-pro-cefetrj | 37ab58759b42978cbd8d950bd75c487e1292cb2b | [
"Apache-1.1"
] | null | null | null | reference/old/distance-simple.py | Art31/trekking-pro-cefetrj | 37ab58759b42978cbd8d950bd75c487e1292cb2b | [
"Apache-1.1"
] | null | null | null | from gpiozero import DistanceSensor
from time import sleep

# Ultrasonic ranger wired to BCM pin 23 (echo) and BCM pin 22 (trigger).
sensor = DistanceSensor(echo=23, trigger=22)

# Poll once per second and report the reading scaled by 100 -- presumably
# converting gpiozero's metre reading to centimetres; TODO confirm units.
while True:
    print('Distance: ', sensor.distance * 100)
    sleep(1)
| 19.777778 | 46 | 0.735955 |
b5526b9490a6617e9343309ab67db978943793e5 | 1,070 | py | Python | SmallTips/RemoveDuplication.py | Akasan/PythonTips | eee85c35fd25576c7b2b01af838749608bf8989c | [
"MIT"
] | null | null | null | SmallTips/RemoveDuplication.py | Akasan/PythonTips | eee85c35fd25576c7b2b01af838749608bf8989c | [
"MIT"
] | null | null | null | SmallTips/RemoveDuplication.py | Akasan/PythonTips | eee85c35fd25576c7b2b01af838749608bf8989c | [
"MIT"
] | null | null | null | import pickle
def remove_duplicate_from_list(data):
    """Remove duplicated entries from a list, keeping first occurrences.

    Works even when the elements are unhashable (lists, dicts, ...): each
    element is serialized with ``pickle.dumps`` and the resulting bytes are
    used as a hashable identity key.

    Note: because "equality" here is byte-equality of the pickled form, two
    equal objects that pickle differently (e.g. dicts with the same items
    inserted in a different order) are treated as distinct.

    Arguments:
        data {list(any)} -- list that may contain any type of picklable data

    Returns:
        {list(any)} -- the input elements without duplicates, in first-seen
        order (the previous ``list(set(...))`` implementation returned them
        in arbitrary, hash-seed-dependent order, so results differed between
        runs)
    """
    # A dict preserves insertion order (Python 3.7+), which yields a
    # deterministic first-occurrence ordering.  Keeping the original objects
    # as values also avoids the extra pickle round-trip the old code did.
    unique = {}
    for item in data:
        key = pickle.dumps(item)
        if key not in unique:
            unique[key] = item
    return list(unique.values())
if __name__ == "__main__":
    # Quick demo: dedupe a homogeneous int list, then a mixed-type list.
    data = [1, 2, 2, 3, 2, 2, 2, 6]
    print(remove_duplicate_from_list(data))
    data = ["hoge", 1, "hdf", 3.4, "hoge", 2, 2, 2]
    print(remove_duplicate_from_list(data))
| 36.896552 | 89 | 0.66729 |
b5533e6640dc60d29a04f82e1a7722aa55036807 | 7,226 | py | Python | ultraviolet_cli/commands/fixtures.py | mnyrop/ultraviolet-cli | f177adde71a899ca6775bd4673d30e19ccdb2a30 | [
"MIT"
] | 1 | 2022-02-08T18:28:30.000Z | 2022-02-08T18:28:30.000Z | ultraviolet_cli/commands/fixtures.py | mnyrop/ultraviolet-cli | f177adde71a899ca6775bd4673d30e19ccdb2a30 | [
"MIT"
] | null | null | null | ultraviolet_cli/commands/fixtures.py | mnyrop/ultraviolet-cli | f177adde71a899ca6775bd4673d30e19ccdb2a30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2022 NYU Libraries.
#
# ultraviolet-cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module for custom UltraViolet commands."""
import click
import glob
import json
import os
import requests
import sys
from jsonschema import Draft4Validator
from time import sleep
from urllib3.exceptions import InsecureRequestWarning
from .. import config, utils
# Suppress InsecureRequestWarning warnings from urllib3.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
| 33.146789 | 128 | 0.63313 |
b553c83a33407279287a7fa673284b12f5a363b1 | 1,320 | py | Python | consts.py | twyair/hebrew-verb-inflection | a85773a008ea3fc1b8c36f92ac3100315edee6b6 | [
"MIT"
] | null | null | null | consts.py | twyair/hebrew-verb-inflection | a85773a008ea3fc1b8c36f92ac3100315edee6b6 | [
"MIT"
] | null | null | null | consts.py | twyair/hebrew-verb-inflection | a85773a008ea3fc1b8c36f92ac3100315edee6b6 | [
"MIT"
] | null | null | null | from __future__ import annotations
from enum import Enum, auto
# TODO: rename
| 22 | 88 | 0.559848 |
b55d244aa62443aced945674009694fb76ee238b | 1,834 | py | Python | src/function_manager/function_manager.py | lzjzx1122/FaaSFlow | c4a32a04797770c21fe6a0dcacd85ac27a3d29ec | [
"Apache-2.0"
] | 24 | 2021-12-02T01:00:54.000Z | 2022-03-27T00:50:28.000Z | src/function_manager/function_manager.py | lzjzx1122/FaaSFlow | c4a32a04797770c21fe6a0dcacd85ac27a3d29ec | [
"Apache-2.0"
] | null | null | null | src/function_manager/function_manager.py | lzjzx1122/FaaSFlow | c4a32a04797770c21fe6a0dcacd85ac27a3d29ec | [
"Apache-2.0"
] | 3 | 2021-12-02T01:00:47.000Z | 2022-03-04T07:33:09.000Z | import gevent
import docker
import os
from function_info import parse
from port_controller import PortController
from function import Function
import random
repack_clean_interval = 5.000 # repack and clean every 5 seconds
dispatch_interval = 0.005 # 200 qps at most
# the class for scheduling functions' inter-operations
| 37.428571 | 104 | 0.681025 |
b55f0296b5c70a3898760614021a220983b1ac36 | 333 | py | Python | getall.py | bransorem/Magic-Scraper | f68ae25122391efe440c8652119d146a75be4cea | [
"Unlicense"
] | 2 | 2018-07-16T21:24:34.000Z | 2020-12-19T21:34:00.000Z | getall.py | bransorem/Magic-Scraper | f68ae25122391efe440c8652119d146a75be4cea | [
"Unlicense"
] | null | null | null | getall.py | bransorem/Magic-Scraper | f68ae25122391efe440c8652119d146a75be4cea | [
"Unlicense"
] | null | null | null | import sets
import scan_set
import os
path = 'ids/'
setlist = os.listdir(path)
for set in sets.set_info:
s = set + '.txt'
if s not in setlist:
print "Getting " + set
getall(set)
print "\n\nCompletely Finished........"
| 15.857143 | 39 | 0.615616 |
b55f0ffd6458d9da1434363a2f94293d840e899b | 6,717 | py | Python | MalmoEnv/run.py | chemgymrl/malmo | 207e2530ec94af46450ba6d0e62d691ade91e282 | [
"MIT"
] | 1 | 2022-02-17T07:58:06.000Z | 2022-02-17T07:58:06.000Z | MalmoEnv/run.py | chemgymrl/malmo | 207e2530ec94af46450ba6d0e62d691ade91e282 | [
"MIT"
] | null | null | null | MalmoEnv/run.py | chemgymrl/malmo | 207e2530ec94af46450ba6d0e62d691ade91e282 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import malmoenv
import argparse
from pathlib import Path
import time
from PIL import Image
from stable_baselines3.common import results_plotter
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_checker import check_env
from stable_baselines3 import PPO
log_dir = "tmp/"
os.makedirs(log_dir, exist_ok=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='malmovnv test')
parser.add_argument('--mission', type=str, default='missions/jumping.xml', help='the mission xml')
parser.add_argument('--port', type=int, default=9000, help='the mission server port')
parser.add_argument('--server', type=str, default='127.0.0.1', help='the mission server DNS or IP address')
parser.add_argument('--port2', type=int, default=None, help="(Multi-agent) role N's mission port. Defaults to server port.")
parser.add_argument('--server2', type=str, default=None, help="(Multi-agent) role N's server DNS or IP")
parser.add_argument('--episodes', type=int, default=100, help='the number of resets to perform - default is 1')
parser.add_argument('--episode', type=int, default=0, help='the start episode - default is 0')
parser.add_argument('--role', type=int, default=0, help='the agent role - defaults to 0')
parser.add_argument('--episodemaxsteps', type=int, default=100, help='max number of steps per episode')
parser.add_argument('--saveimagesteps', type=int, default=0, help='save an image every N steps')
parser.add_argument('--resync', type=int, default=0, help='exit and re-sync every N resets'
' - default is 0 meaning never.')
parser.add_argument('--experimentUniqueId', type=str, default='test1', help="the experiment's unique id.")
args = parser.parse_args()
if args.server2 is None:
args.server2 = args.server
xml = Path(args.mission).read_text()
env = malmoenv.make()
env.init(xml, args.port,
server=args.server,
server2=args.server2, port2=args.port2,
role=args.role,
exp_uid=args.experimentUniqueId,
episode=args.episode, resync=args.resync)
env = Monitor(env, log_dir)
# print("checking env")
check_env(env, True)
s = SaveOnBestTrainingRewardCallback(2000, log_dir)
# print("checked env")
model = PPO("MlpPolicy", env, verbose=1, tensorboard_log="./ppo_test_tensorboard/")
#model.load("tmp/best_model.zip")
model.learn(total_timesteps=100000, callback=s, reset_num_timesteps=False)
# print("trained and saved model")
# for i in range(args.episodes):
# print("reset " + str(i))
# obs = env.reset()
# steps = 0
# done = False
# while not done and (args.episodemaxsteps <= 0 or steps < args.episodemaxsteps):
# # h, w, d = env.observation_space.shape
# # print(done)
# action, _states = model.predict(obs, deterministic=True)
# # action = env.action_space.sample()
# obs, reward, done, info = env.step(action)
# steps += 1
# # print("reward: " + str(reward))
# # print(obs)
# time.sleep(.05)
env.close()
| 46.645833 | 128 | 0.650737 |
b55f2629add10c43d98efae9012f1f13e3691bd5 | 1,172 | py | Python | example/wrapper/common/5001-get_tgpio_digital.py | krasin/xArm-Python-SDK-ssh | 9c854e8bfa78d0e91b67efbab79f733ddf19e916 | [
"BSD-3-Clause"
] | 62 | 2018-11-30T05:53:32.000Z | 2022-03-20T13:15:22.000Z | example/wrapper/common/5001-get_tgpio_digital.py | krasin/xArm-Python-SDK-ssh | 9c854e8bfa78d0e91b67efbab79f733ddf19e916 | [
"BSD-3-Clause"
] | 25 | 2019-08-12T18:53:41.000Z | 2021-12-28T10:17:39.000Z | example/wrapper/common/5001-get_tgpio_digital.py | krasin/xArm-Python-SDK-ssh | 9c854e8bfa78d0e91b67efbab79f733ddf19e916 | [
"BSD-3-Clause"
] | 43 | 2019-01-03T04:47:13.000Z | 2022-03-18T06:40:59.000Z | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
"""
Example: Get GPIO Digital
"""
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
from configparser import ConfigParser
# Read the arm's IP from the shared robot config; fall back to prompting the
# user (empty answer -> default 192.168.1.194) when the file/key is missing.
parser = ConfigParser()
parser.read('../robot.conf')
try:
    ip = parser.get('xArm', 'ip')
except:  # NOTE(review): bare except also hides unrelated errors -- consider catching configparser.Error
    ip = input('Please input the xArm ip address[192.168.1.194]:')
    if not ip:
        ip = '192.168.1.194'

# Connect and clear any stale warning/error state before polling.
arm = XArmAPI(ip)
time.sleep(0.5)
if arm.warn_code != 0:
    arm.clean_warn()
if arm.error_code != 0:
    arm.clean_error()

# Poll the two tool-GPIO digital inputs at ~10 Hz and report transitions to
# high level.  Error codes 19 and 28 presumably mark fatal controller states
# that should end the loop -- TODO confirm against the xArm SDK error table.
last_digitals = [-1, -1]
while arm.connected and arm.error_code != 19 and arm.error_code != 28:
    code, digitals = arm.get_tgpio_digital()
    if code == 0:  # only act on a successful read (0 presumably == success)
        if digitals[0] == 1 and digitals[0] != last_digitals[0]:
            print('IO0 input high level')
        if digitals[1] == 1 and digitals[1] != last_digitals[1]:
            print('IO1 input high level')
        last_digitals = digitals
    time.sleep(0.1)
| 23.44 | 70 | 0.648464 |
b560237f424501f39681590b33c61e9846076455 | 2,429 | py | Python | sdk/python/pulumi_lxd/__init__.py | soupdiver/pulumi-lxd | 258395aefd6a4cf138d470d7de70babed3310063 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_lxd/__init__.py | soupdiver/pulumi-lxd | 258395aefd6a4cf138d470d7de70babed3310063 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_lxd/__init__.py | soupdiver/pulumi-lxd | 258395aefd6a4cf138d470d7de70babed3310063 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .cached_image import *
from .container import *
from .container_file import *
from .network import *
from .profile import *
from .provider import *
from .publish_image import *
from .snapshot import *
from .storage_pool import *
from .volume import *
from .volume_container_attach import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_lxd.config as config
else:
config = _utilities.lazy_import('pulumi_lxd.config')
_utilities.register(
resource_modules="""
[
{
"pkg": "lxd",
"mod": "index/profile",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/profile:Profile": "Profile"
}
},
{
"pkg": "lxd",
"mod": "index/storagePool",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/storagePool:StoragePool": "StoragePool"
}
},
{
"pkg": "lxd",
"mod": "index/volumeContainerAttach",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/volumeContainerAttach:VolumeContainerAttach": "VolumeContainerAttach"
}
},
{
"pkg": "lxd",
"mod": "index/cachedImage",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/cachedImage:CachedImage": "CachedImage"
}
},
{
"pkg": "lxd",
"mod": "index/container",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/container:Container": "Container"
}
},
{
"pkg": "lxd",
"mod": "index/network",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/network:Network": "Network"
}
},
{
"pkg": "lxd",
"mod": "index/volume",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/volume:Volume": "Volume"
}
},
{
"pkg": "lxd",
"mod": "index/containerFile",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/containerFile:ContainerFile": "ContainerFile"
}
},
{
"pkg": "lxd",
"mod": "index/publishImage",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/publishImage:PublishImage": "PublishImage"
}
},
{
"pkg": "lxd",
"mod": "index/snapshot",
"fqn": "pulumi_lxd",
"classes": {
"lxd:index/snapshot:Snapshot": "Snapshot"
}
}
]
""",
resource_packages="""
[
{
"pkg": "lxd",
"token": "pulumi:providers:lxd",
"fqn": "pulumi_lxd",
"class": "Provider"
}
]
"""
)
| 19.58871 | 87 | 0.627419 |
b56057ff5dbd4cdc1d25d244ff87b18b26455492 | 544 | py | Python | 49-group anagrams/main.py | ytong82/leetcode | 34e08c430d654b14b1608211f74702f57e507189 | [
"Apache-2.0"
] | null | null | null | 49-group anagrams/main.py | ytong82/leetcode | 34e08c430d654b14b1608211f74702f57e507189 | [
"Apache-2.0"
] | null | null | null | 49-group anagrams/main.py | ytong82/leetcode | 34e08c430d654b14b1608211f74702f57e507189 | [
"Apache-2.0"
] | null | null | null |
strs = ["eat", "tea", "tan", "ate", "nat", "bat"]
sol = Solution()
print(sol.groupAnagrams(strs)) | 22.666667 | 51 | 0.443015 |
b561af012e5087c35cc2997a33fe02fbbdb5ae5d | 2,429 | py | Python | vending.py | mit-dci/litvending | 28f8f2b51691eac7c69de153aafbe72663d9892c | [
"MIT"
] | 1 | 2018-06-20T01:42:54.000Z | 2018-06-20T01:42:54.000Z | vending.py | mit-dci/litvending | 28f8f2b51691eac7c69de153aafbe72663d9892c | [
"MIT"
] | null | null | null | vending.py | mit-dci/litvending | 28f8f2b51691eac7c69de153aafbe72663d9892c | [
"MIT"
] | 1 | 2022-02-15T06:48:15.000Z | 2022-02-15T06:48:15.000Z | #!/usr/bin/env python3
import os
import time
import sys
gpio = None
try:
import RPi.GPIO
gpio = RPi.GPIO
except:
print('RPi library not found. We\'re probably on a dev machine. Moving on...')
import lvconfig
import litrpc
# This could be more efficient, we're making a lot more requests than we need to.
if __name__ == '__main__':
main(lvconfig.load_config())
| 26.11828 | 136 | 0.669823 |
b563b116c6a5501886a56db83d13d435b9bc033e | 3,070 | py | Python | python-study/2.py | yu1k/python-study | 0bf2e2927696f58f36c05602446ecd71e31596e3 | [
"MIT"
] | null | null | null | python-study/2.py | yu1k/python-study | 0bf2e2927696f58f36c05602446ecd71e31596e3 | [
"MIT"
] | null | null | null | python-study/2.py | yu1k/python-study | 0bf2e2927696f58f36c05602446ecd71e31596e3 | [
"MIT"
] | null | null | null | #*** ***
#Python, ' (), " ().
print('some eggs')
print("some eggs")
print('some eggs\nsome eggs')
#a == b abTrue, False
print('some eggs' == "some eggs") #True
#'...' ' , "..." " ,
# \ () .
print("I don't Know him") #I don't know him
print('"Python"') #"Python"
print("I don\'t know him") #I don't know him
print("\"Python\"") #"Python"
#\n. \n.
print("\n")
#
#
#\n,
#+n\\n,
#r.
print("\\n")
#\n
print(r"\n")
#\n
#,
#"""...""" ''' ... '''.
#, \
print("""
\
""")
#
#
#.
#+1.
print("a lot of" + " eggs")
#a lot of eggs
#*
print("Python" * 3)
#PythonPythonPython
#.
first_name = ""
last_name = ""
print(first_name + last_name)
#
#*** , ***
#,
#(, , ).
# 0.
word = "Python"
print(word)
#Python
#[].
#1(0)
print(word[0])
#P
#5(4)
print(word[4])
#o
#,
#. , 0-0, -1.
#
print(word[-1])
#n
#2
print(word[-2])
#o
#.
# P y t h o n
# 0 1 2 3 4 5
# -0 -5 -4 -3 -2 -1
#, o4, , -2.
#ij. .
#, 01.
#1.
#, (1)
print(word[0:2])
#Py
#0.
#2
print(word[:3])
#Pyt
#.
#3
print(word[3:])
#hon
print(word[:3] + word[3:])
#Python
#.
#print(word[42])
#len().
print("length:", len(word))
#length: 6
#, .
print(word[4:42])
#on
#Python.
#word[0] = "J"
#, .
#1J, word[1:]
word = "J" + word[1:]
print(word)
#Jython
#*** Format ***
#.
#print().
#Python,
#(f-string).
#, fF.
#{}.
#{word}word.
word = "Python"
print(f"Hello {word}")
#Hello Python
#{}Python.
print(f"length: {len(word)}")
#length: 6
print(f"slice: {word[:2]}")
#slice: Py
#, 0,
#.
pi = 3.14159265359
#
print(f"{pi}")
#3.14159265359
#2
print(f"{pi:.2f}")
#3.14
#10
print(f"{pi:10.2f}")
# 3.14
#50
print(f"{pi:05.2f}")
#03.14
#
print(f"'{word:>10s}'")
#' Python'
#
print(f"'{word:^10s}'")
#' Python ' | 17.150838 | 59 | 0.70684 |
b5656eebed6c2a64ded792a644ba9d21bbe79fe1 | 570 | py | Python | tests/test_delete_query.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | 4 | 2020-04-15T10:50:03.000Z | 2021-07-22T12:23:50.000Z | tests/test_delete_query.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | 2 | 2020-05-24T08:54:56.000Z | 2020-05-24T09:04:31.000Z | tests/test_delete_query.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | null | null | null | import pytest
from fluentql import GenericSQLDialect, Q
from fluentql.types import Table
test_table = Table("test_table")
| 21.111111 | 73 | 0.652632 |
b56b02915f5cdfb61babcb70fc1c32bc2970b2fa | 597 | py | Python | Section02/ParsingChart.py | fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python | f2cac32c02d0632fb89f32446388ef15d9926bbc | [
"MIT"
] | 67 | 2017-11-23T18:48:47.000Z | 2022-03-29T08:03:25.000Z | Section02/ParsingChart.py | fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python | f2cac32c02d0632fb89f32446388ef15d9926bbc | [
"MIT"
] | null | null | null | Section02/ParsingChart.py | fosterleejoe/Developing-NLP-Applications-Using-NLTK-in-Python | f2cac32c02d0632fb89f32446388ef15d9926bbc | [
"MIT"
] | 49 | 2017-12-06T16:10:14.000Z | 2021-11-25T09:02:49.000Z | from nltk.grammar import CFG
from nltk.parse.chart import ChartParser, BU_LC_STRATEGY
grammar = CFG.fromstring("""
S -> T1 T4
T1 -> NNP VBZ
T2 -> DT NN
T3 -> IN NNP
T4 -> T3 | T2 T3
NNP -> 'Tajmahal' | 'Agra' | 'Bangalore' | 'Karnataka'
VBZ -> 'is'
IN -> 'in' | 'of'
DT -> 'the'
NN -> 'capital'
""")
cp = ChartParser(grammar, BU_LC_STRATEGY, trace=True)
sentence = "Bangalore is the capital of Karnataka"
tokens = sentence.split()
chart = cp.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
print("Total Edges :", len(chart.edges()))
# Print every parse tree and render it in NLTK's tree viewer window.
# Fix: the stored form `for tree in parses: print(tree)` followed by an
# indented `tree.draw()` is a SyntaxError (an inline loop body cannot be
# followed by an indented block); the indentation indicates draw() was
# intended to run inside the loop.
for tree in parses:
    print(tree)
    tree.draw()
| 22.961538 | 56 | 0.676717 |