hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ca4fb77d1058786e6c3813cfbd46b9161c2b28a | 3,473 | py | Python | lagom/core/es/base_es_master.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | lagom/core/es/base_es_master.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | lagom/core/es/base_es_master.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | from lagom.core.multiprocessing import BaseIterativeMaster
| 37.344086 | 98 | 0.600921 |
3ca513ca1cc8091c31b7381ae44ccedd1283fc01 | 1,096 | py | Python | Roman_Morozov_dz_3/task_5.py | Wern-rm/2074_GB_Python | f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646 | [
"MIT"
] | null | null | null | Roman_Morozov_dz_3/task_5.py | Wern-rm/2074_GB_Python | f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646 | [
"MIT"
] | null | null | null | Roman_Morozov_dz_3/task_5.py | Wern-rm/2074_GB_Python | f0b7a7f4ed993a007c1aef6ec9ce266adb5a3646 | [
"MIT"
] | null | null | null | """
get_jokes(), n , , ( ):
"""
import random
nouns = ["", "", "", "", ""]
adverbs = ["", "", "", "", ""]
adjectives = ["", "", "", "", ""]
if __name__ == '__main__':
print(get_jokes(count=1, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=3, repeat=False, nouns=nouns, adverbs=adverbs, adjectives=adjectives))
print(get_jokes(count=5, repeat=True, nouns=nouns, adverbs=adverbs, adjectives=adjectives)) | 40.592593 | 140 | 0.666058 |
3ca67e9442436a3a4c05f92ccc99c1b4150df427 | 11,217 | py | Python | tools.py | akerestely/nonlinearBestFit | e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4 | [
"MIT"
] | 1 | 2019-10-09T07:39:55.000Z | 2019-10-09T07:39:55.000Z | tools.py | akerestely/nonlinearBestFit | e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4 | [
"MIT"
] | null | null | null | tools.py | akerestely/nonlinearBestFit | e45b5e33dd8fdfc2f9bd19b48523b1759e694fc4 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
np.random.seed(421)
def gen_rand_points(n: int, A: float = 1000, B: float = 3, alpha: float = 0.01, noise: float = 2, consecutive: bool = False):
    """
    Generate a noisy sample of the hCG curve for testing the fitting routines.

    :param n: number of points to generate
    :param A, B, alpha: parameters to hCG function
    :param noise: randomly add this much to the result of the hCG function
    :param consecutive: if True, x is a regular grid 0..n-1; otherwise x is
        drawn uniformly at random from [0, n) and sorted
    :return: tuple (x, y) of sample coordinates and noisy function values
    """
    from numpy.random import random
    # Spacing multiplier for the x axis; 1 keeps x within [0, n).
    sparsity = 1
    if consecutive is False:
        x = random(n) * n * sparsity
        x.sort() # just for plot visual effect; does not change results
    else :
        x = np.linspace(0, n-1, n) * sparsity
    y = hCG(x, A, B, alpha)
    # Uniform noise in [-noise/2, noise/2).
    ynoise = random(n) * noise - noise / 2
    y += ynoise
    return x, y
def print_rmse_methods(x: np.ndarray, y: np.ndarray, paramsList: list):
    """Print the RMSE of each fitted parameter set against the observed data.

    param paramsList: array of tuples, where tuple contains A, B and alpha
    """
    from math import sqrt
    from sklearn.metrics import mean_squared_error
    for i, fit_params in enumerate(paramsList):
        predicted = hCG(x, *fit_params)
        rmse = sqrt(mean_squared_error(y, predicted))
        print(f"Method {i} RMSE: {rmse}")
def plot_methods(x: np.ndarray, y: np.ndarray, paramsList: list, paramsNames: list = None, data_id: str = "", showPlot: bool = True):
    """
    Plot the observed data together with the fitted hCG curve for each
    parameter set, labelling each curve with its parameters and RMSE.

    :param paramsList: array of tuples, where tuple contains A, B and alpha
    :param paramsNames: array of strings, where each string represents the name
        of the corresponding param tuple. The names will appear on the plot.
        Optional, in which case the name will be the index in the array.
    :param data_id: label appended to the raw-data series in the legend
    :param showPlot: if True, display the figure with plt.show()
    """
    from sklearn.metrics import mean_squared_error
    from math import sqrt
    import matplotlib.pyplot as plt
    # Use a None sentinel rather than a mutable default argument.
    if paramsNames is None:
        paramsNames = []
    plt.xlabel(r"$time$")
    plt.ylabel(r"$hCG(time)$")
    plt.plot(x, y, 'bo', label=f"data {data_id}")
    for i, params in enumerate(paramsList):
        # Evaluate the fitted curve once and reuse it for both RMSE and plot.
        fitted = hCG(x, *params)
        rmse = sqrt(mean_squared_error(y, fitted))
        name = paramsNames[i] if i < len(paramsNames) else ("Method " + str(i))
        plt.plot(x, fitted,
                 label=f'{name}: A=%5.2f, B=%5.2f, alpha=%5.2f, rmse=%5.2f' % (*params, rmse))
    plt.legend()
    if showPlot:
        plt.show()
def plot_results(x: np.ndarray, y: np.ndarray, ptsStart: int = 0, ptsEnd: int = None, ptsTrain: int = None, data_id: str="", showPlot:bool = True, allAlgorithms:bool = True):
    """
    Fit the hCG curve to a slice of the data and plot the result.

    :param ptsStart: use x, y values starting from this point
    :param ptsEnd: use x, y values ending at this point
    :param ptsTrain: use this much x, y values for training starting from ptsStart
    :param data_id: label passed through to the plot legend
    :param showPlot: if True, display the figure immediately
    :param allAlgorithms: if True, also try the iterative (curve_fit) and
        PseLogLin fitters in addition to BestFit
    """
    # NOTE: `or` treats 0 as "unset" here, so ptsEnd=0/ptsTrain=0 fall back
    # to the defaults.
    ptsEnd = ptsEnd or len(x)
    ptsTrain = ptsTrain or (ptsEnd - ptsStart)
    if ptsStart + ptsTrain > ptsEnd:
        raise ValueError("Invalid interval for points")
    x_train = x[ptsStart : ptsStart + ptsTrain]
    y_train = y[ptsStart : ptsStart + ptsTrain]
    paramsList = []
    paramsNames = []
    # Each fitter is tried best-effort: if it is unavailable or fails to
    # converge, it is silently omitted from the plot.
    if allAlgorithms:
        try:
            from scipy.optimize import curve_fit
            popt, _ = curve_fit(hCG, x_train, y_train) # uses Levenberg-Marquardt iterative method
            paramsList.append(tuple(popt))
            paramsNames.append("Iterative")
        except:
            pass
    try:
        from bestfitte import best_fit
        paramsList.append(best_fit(x_train, y_train))
        paramsNames.append("BestFit")
    except:
        pass
    if allAlgorithms:
        try:
            from pseloglin import fit
            paramsList.append(fit(x_train, y_train))
            paramsNames.append("PseLogLin")
        except:
            pass
    # Plot over the full requested interval, even if trained on a subset.
    plot_methods(x[ptsStart:ptsEnd], y[ptsStart:ptsEnd], paramsList, paramsNames, data_id, showPlot)
def compare_results_on_datasets(datasets: list):
    """Fit and plot every dataset side by side, one subplot per dataset.

    datasets parameter is a list of datasets which contain
    (x_data, y_data, dataset_name) tuples.
    """
    import matplotlib.pyplot as plt
    n_sets = len(datasets)
    plt.figure(figsize=(9 * n_sets, 5))
    for idx, (x, y, name) in enumerate(datasets, start=1):
        plt.subplot(1, n_sets, idx)
        plot_results(x, y, data_id=name, showPlot=False)
def compare_time_on_datasets(datasets: list = None):
    """Benchmark the three fitting algorithms on a list of datasets.

    :param datasets: list of (x_data, y_data, dataset_name) tuples;
        if omitted, 10 random datasets will be generated.
    :return: pandas DataFrame with one row per dataset plus a final "Mean"
        row; each algorithm column holds the wall-clock fit time in seconds,
        or NaN where the algorithm failed.
    """
    if datasets is None:
        # generate 10 random datasets
        paramsList = []
        for _ in range(10):
            paramsList.append((
                # randint's upper bound is exclusive, so 21 reproduces the
                # inclusive [3, 20] range of the removed random_integers.
                np.random.randint(3, 21),  # n
                np.random.random() * 1e3,  # A
                np.random.random() * 1e1,  # B
                np.random.random() * 1e1,  # alpha
                np.random.random() * 1     # noise
            ))
        datasets = []
        for params in paramsList:
            datasets.append(gen_rand_points(*params) +
                            (f'n=%d, A=%5.2f, B=%5.2f, alpha=%5.2f, noise=%5.2f' % params,))
    from scipy.optimize import curve_fit
    from bestfitte import best_fit
    from pseloglin import fit
    from time import perf_counter

    def _timed(func, *args):
        # Wall-clock seconds for one call of func(*args); NaN if it raises.
        start = perf_counter()
        try:
            func(*args)
        except Exception:
            return np.nan
        return perf_counter() - start

    rows = []
    for dataset in datasets:
        x, y, name = dataset
        rows.append({'Dataset': name,
                     'Iterative': _timed(curve_fit, hCG, x, y),
                     'BestFit': _timed(best_fit, x, y),
                     'PseLogLin': _timed(fit, x, y)})
    import pandas as pd
    df = pd.DataFrame(rows, columns=["Dataset", "Iterative", "BestFit", "PseLogLin"])
    # numeric_only keeps modern pandas from trying to average the name column.
    df.loc['mean'] = df.mean(numeric_only=True)
    df["Dataset"].values[-1] = "Mean"
    #print(df.to_latex(index=False))
    return df
def compare_with_less_trained(x: np.ndarray, y: np.ndarray, trainPoints):
    """
    Show how the fit degrades when trained on fewer points.

    The top row shows the fit trained on all points; the bottom row shows one
    fit per entry of trainPoints, with the points excluded from training
    highlighted in orange.

    :param trainPoints: array with the number of points to use for train on each subplot
    """
    import matplotlib.pyplot as plt
    nplots = len(trainPoints)
    plt.figure(figsize=(9 * nplots, 10))
    # Integer division: matplotlib requires integer subplot indices
    # (len(trainPoints) / 2 + 1 is a float on python 3).
    plt.subplot(2, nplots, nplots // 2 + 1)
    plot_results(x, y, showPlot=False, allAlgorithms=False, data_id="All")
    for i, ptsTrain in enumerate(trainPoints):
        plt.subplot(2, nplots, nplots + i + 1)
        plot_results(x, y, ptsTrain=ptsTrain, showPlot=False, allAlgorithms=False, data_id=str(ptsTrain) + " points")
        # Mark the points that the fit did not see.
        plt.plot(x[ptsTrain:], y[ptsTrain:], "o", color="orange")
3ca799dcd7f204dd2b5700a464c22a2701817676 | 925 | py | Python | Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | 10 | 2020-02-14T14:28:15.000Z | 2022-02-02T18:44:11.000Z | Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | null | null | null | Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.2 - ATM/convert pseudo-code solution.py | gitjot/python-for-lccs | a8a4ae8847abbc33361f80183c06d57b20523382 | [
"CC0-1.0"
] | 8 | 2020-03-25T09:27:42.000Z | 2021-11-03T15:24:38.000Z | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Solution to Breakout 2.2 (ATM)
# Display a welcome message
# Simple ATM simulation: start from a fixed balance, apply one lodgement and
# one withdrawal, printing the balance after each step.
# NOTE(review): floats are used for currency here, which can accumulate
# rounding error — acceptable for a teaching example.
# Display a welcome message
print("Welcome to LCCS Bank Ltd.")
print("=========================")
# Initialise a variable called balance to 123.45
balance = 123.45
# Display the value of balance
print("Your balance is:", balance)
# Prompt the user to enter the amount to lodge
amount = float(input("Enter amount to lodge: "))
# Increase the balance by the amount entered
balance = balance + amount
# Display the value of balance
print("Your balance is:", balance)
# Prompt the user to enter the amount to withdraw
amount = float(input("Enter amount to withdraw: "))
# Decrease the balance by the amount entered
balance = balance - amount
# Display the value of balance (rounded to 2 decimal places for display)
print("Your balance is:", round(balance,2) )
| 27.205882 | 52 | 0.692973 |
3ca93bc9e19f578ac6c9e0e416c1d3d6ec54c6d4 | 460 | py | Python | src/unit6/user/user_datastore.py | cdoremus/udacity-python_web_development-cs253 | 87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b | [
"Apache-2.0"
] | null | null | null | src/unit6/user/user_datastore.py | cdoremus/udacity-python_web_development-cs253 | 87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b | [
"Apache-2.0"
] | null | null | null | src/unit6/user/user_datastore.py | cdoremus/udacity-python_web_development-cs253 | 87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b | [
"Apache-2.0"
] | null | null | null | '''
Created on Apr 30, 2012
@author: h87966
''' | 12.777778 | 40 | 0.43913 |
3ca9eb97e4365037a9faa4fd695283f51ac6d5a4 | 3,870 | py | Python | sciflo/utils/mail.py | hysds/sciflo | f706288405c8eee59a2f883bab3dcb5229615367 | [
"Apache-2.0"
] | null | null | null | sciflo/utils/mail.py | hysds/sciflo | f706288405c8eee59a2f883bab3dcb5229615367 | [
"Apache-2.0"
] | null | null | null | sciflo/utils/mail.py | hysds/sciflo | f706288405c8eee59a2f883bab3dcb5229615367 | [
"Apache-2.0"
] | 1 | 2019-02-07T01:08:34.000Z | 2019-02-07T01:08:34.000Z | from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
from email.utils import parseaddr, formataddr, COMMASPACE, formatdate
from email.encoders import encode_base64
def send_email(sender, cc_recipients, bcc_recipients, subject, body, attachments=[]):
"""Send an email.
All arguments should be Unicode strings (plain ASCII works as well).
Only the real name part of sender and recipient addresses may contain
non-ASCII characters.
The email will be properly MIME encoded and delivered though SMTP to
localhost port 25. This is easy to change if you want something different.
The charset of the email will be the first one out of US-ASCII, ISO-8859-1
and UTF-8 that can represent all the characters occurring in the email.
"""
# combined recipients
recipients = cc_recipients + bcc_recipients
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = 'ISO-8859-1'
# We must choose the body charset manually
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
parsed_cc_recipients = [parseaddr(rec) for rec in cc_recipients]
parsed_bcc_recipients = [parseaddr(rec) for rec in bcc_recipients]
#recipient_name, recipient_addr = parseaddr(recipient)
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
sender_name = str(Header(str(sender_name), header_charset))
unicode_parsed_cc_recipients = []
for recipient_name, recipient_addr in parsed_cc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii')
unicode_parsed_cc_recipients.append((recipient_name, recipient_addr))
unicode_parsed_bcc_recipients = []
for recipient_name, recipient_addr in parsed_bcc_recipients:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode('ascii')
unicode_parsed_bcc_recipients.append((recipient_name, recipient_addr))
# Make sure email addresses do not contain non-ASCII characters
sender_addr = sender_addr.encode('ascii')
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEMultipart()
msg['CC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_cc_recipients])
msg['BCC'] = COMMASPACE.join([formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_bcc_recipients])
msg['Subject'] = Header(str(subject), header_charset)
msg.attach(MIMEText(body.encode(body_charset), 'plain', body_charset))
# Add attachments
for attachment in attachments:
part = MIMEBase('application', "octet-stream")
part.set_payload(attachment.file.read())
encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % attachment.filename)
msg.attach(part)
# print "#" * 80
# print msg.as_string()
# Send the message via SMTP to localhost:25
smtp = SMTP("localhost")
smtp.sendmail(sender, recipients, msg.as_string())
smtp.quit()
| 42.527473 | 103 | 0.708527 |
3caa5d8aa46dcaada0dadcfe04d781f5ae6b979d | 496 | py | Python | my-ml-api/api/schemas.py | ballcarsen/MyMlTool | eb476e21799ec773fa816f63693e6de4c52d0094 | [
"MIT"
] | null | null | null | my-ml-api/api/schemas.py | ballcarsen/MyMlTool | eb476e21799ec773fa816f63693e6de4c52d0094 | [
"MIT"
] | null | null | null | my-ml-api/api/schemas.py | ballcarsen/MyMlTool | eb476e21799ec773fa816f63693e6de4c52d0094 | [
"MIT"
] | null | null | null | from typing import List, Optional
from pydantic import BaseModel
| 13.777778 | 33 | 0.677419 |
3caab00869605f81530d9a70561508995ff52b3b | 2,467 | py | Python | apps/extention/views/tool.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/extention/views/tool.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/extention/views/tool.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | from flask import Blueprint
from apps.extention.business.tool import ToolBusiness
from apps.extention.extentions import validation, parse_json_form
from library.api.render import json_detail_render
tool = Blueprint('tool', __name__)
| 29.369048 | 115 | 0.614512 |
3cab08629b30111114e01484ab49b594bbdb9dd0 | 3,948 | py | Python | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | 1 | 2017-07-01T21:46:40.000Z | 2017-07-01T21:46:40.000Z | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | null | null | null | apt_repoman/connection.py | memory/repoman | 4c5cdfba85afcab5a1219fa5629abc457de27ed5 | [
"Apache-2.0"
] | 6 | 2017-07-13T21:41:14.000Z | 2020-08-07T19:40:25.000Z |
# stdlib imports
import logging
import time
# pypi imports
from boto3 import Session
LOG = logging.getLogger(__name__)
| 31.584 | 76 | 0.563323 |
3cabc6bebd08e9407e6c12b5afc414ea98b75d01 | 1,412 | py | Python | setup.py | squidfarts/py-program | 98c3694ffa90b5969eafe1093def9097dfd0d62c | [
"Apache-2.0"
] | null | null | null | setup.py | squidfarts/py-program | 98c3694ffa90b5969eafe1093def9097dfd0d62c | [
"Apache-2.0"
] | null | null | null | setup.py | squidfarts/py-program | 98c3694ffa90b5969eafe1093def9097dfd0d62c | [
"Apache-2.0"
] | 1 | 2021-02-19T20:32:33.000Z | 2021-02-19T20:32:33.000Z | #!/user/bin/env python3
###################################################################################
# #
# NAME: setup.py #
# #
# AUTHOR: Michael Brockus. #
# #
# CONTACT: <mailto:michaelbrockus@squidfarts.com> #
# #
# NOTICES: #
# #
# License: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0 #
# #
###################################################################################
# `import setuptools, setup` imported this file as a module and then tried to
# call the module object, which raises TypeError; import the real entry point.
from setuptools import setup

setup(
    name='py-program',
    version='0.1.0',
    description='Python program',
    author='Michael Brockus',
    author_email='michaelbrockus@squidfarts.com',
    license='Apache-2.0',
    include_package_data=True,
    # Modules shipped with the program.
    packages=['src.main', 'src.main.module']
)
3cac0aa35252a097de5d59a421a354021c1ccdfa | 21,267 | py | Python | paul_analysis/Python/labird/fieldize.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | paul_analysis/Python/labird/fieldize.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | paul_analysis/Python/labird/fieldize.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Methods for interpolating particle lists onto a grid. There are three classic methods:
ngp - Nearest grid point (point interpolation)
cic - Cloud in Cell (linear interpolation)
tsc - Triangular Shaped Cloud (quadratic interpolation)
Each function takes inputs:
Values - list of field values to interpolate, centered on the grid center.
Points - coordinates of the field values
Field - grid to add interpolated points onto
There are also helper functions (convert and convert_centered) to rescale arrays to grid units.
"""
import math
import numpy as np
#Try to import scipy.weave. If we can't, don't worry, we just use the unaccelerated versions
try :
import scipy.weave
except ImportError :
scipy=None
def convert(pos, ngrid, box):
    """Rescales coordinates to grid units.
    (0,0) is the lower corner of the grid.
    Inputs:
    pos - coord array to rescale
    ngrid - dimension of grid
    box - Size of the grid in units of pos
    """
    scale = (ngrid - 1) / float(box)
    return pos * scale
def convert_centered(pos, ngrid, box):
    """Rescales coordinates to grid units.
    (0,0) is the center of the grid
    Inputs:
    pos - coord array to rescale
    ngrid - dimension of grid
    box - Size of the grid in units of pos
    """
    half_grid = (ngrid - 1.) / 2.
    return pos * (ngrid - 1.) / float(box) + half_grid
def check_input(pos, field):
    """Checks the position and field values for consistency.
    Avoids segfaults in the C code.

    Returns 0 for an empty particle list, 1 if the input is usable, and
    raises ValueError for out-of-range positions or a too-narrow pos array.
    """
    # Nothing to interpolate.
    if np.size(pos) == 0:
        return 0
    ndim = np.size(np.shape(field))
    if np.min(pos) < 0 or np.max(pos) > np.shape(field)[0]:
        raise ValueError("Positions outside grid")
    if np.shape(pos)[1] < ndim:
        raise ValueError("Position array not wide enough for field")
    return 1
def ngp(pos, values, field):
    """Nearest-grid-point (point) interpolation onto a grid of any dimension.

    Inputs:
    values - list of field values to interpolate
    pos - coordinates of the field values, in grid units
    field - grid to add interpolated points onto

    Returns field with each value added at its nearest grid point.

    The old scipy.weave acceleration (removed from scipy long ago) and the
    python-2-only xrange fallback are replaced by np.add.at, which runs at
    C speed for any grid dimensionality.
    """
    if not check_input(pos, field):
        return field
    dims = np.size(np.shape(field))
    # Index of the nearest grid point for each sample.
    # (np.int was removed from recent numpy; the builtin is equivalent.)
    ind = np.array(np.rint(pos), dtype=int)
    # np.add.at performs unbuffered in-place addition, so samples that map
    # to the same grid point accumulate correctly (field[ind] += values
    # would silently drop duplicates).
    np.add.at(field, tuple(ind[:, 0:dims].T), np.asarray(values))
    return field
def cic(pos, value, field, totweight=None, periodic=False):
    """Cloud-in-Cell (linear) interpolation of samples onto a grid.

    Inputs:
    pos - coordinates of the samples, in grid units (0 to nx-1); if field
          has fewer dimensions than pos, trailing coordinates are ignored
    value - list of field values to interpolate
    field - grid (1, 2 or 3D) to add interpolated points onto
    totweight - if not None, the interpolation weights are also accumulated
                into this grid so averages can be computed later
    periodic - treat the grid as periodic at the boundaries

    Returns field, or (field, totweight) when totweight is given.
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    #-----------------------
    # Calculate CIC weights.
    #-----------------------
    # Coordinates of nearest grid point (ngp).
    # (np.int was removed from recent numpy; the builtin is equivalent.)
    ng = np.array(np.rint(pos[:, 0:dim]), dtype=int)
    # Distance from sample to ngp.
    dng = ng - pos[:, 0:dim]
    # kk holds the candidate grid indices, ww the matching weights.
    kk = np.empty([2, nval, dim])
    ww = np.empty([2, nval, dim])
    # Index of ngp.
    kk[1] = ng
    # Weight of ngp.
    ww[1] = 0.5 + np.abs(dng)
    # Point before ngp.
    kk[0] = kk[1] - 1  # Index.
    ww[0] = 0.5 - np.abs(dng)
    # Take care of the points at the boundaries.
    tscedge(kk, ww, nx, periodic)
    #-----------------------------
    # Interpolate samples to grid.
    #-----------------------------
    # totweight adds up all weights allocated to a grid point; we need to
    # keep track of this in order to compute averaged quantities later.
    extraind = np.zeros(dim - 1, dtype=int)
    # Perform y=0, z=0 addition.
    tsc_xind(field, value, totweight, kk, ww, extraind)
    if dim > 1:
        # Perform z=0 addition.
        extraind[0] = 1
        tsc_xind(field, value, totweight, kk, ww, extraind)
        if dim > 2:
            extraind[1] = 1
            # Perform the rest of the addition.
            for yy in range(0, 2):
                extraind[0] = yy
                tsc_xind(field, value, totweight, kk, ww, extraind)
    # "totweight == None" is ambiguous once totweight is an ndarray; use
    # an identity check.
    if totweight is None:
        return field
    else:
        return (field, totweight)
def tsc(pos, value, field, totweight=None, periodic=False):
    """Triangular-Shaped-Cloud (quadratic) interpolation of samples onto a grid.

    The nearest grid point gets weight 0.75-dx**2 and the points either side
    get weight 0.5*(1.5-dx)**2, where dx is the distance from the sample to
    the grid point in units of the cell size.

    Inputs:
    pos - coordinates of the samples, in grid units from 0 to nx-1; if field
          has fewer dimensions than pos, trailing coordinates are ignored
    value - sample weights (field values); for an intensive quantity such as
            temperature also accumulate totweight and average afterwards
    field - grid (1, 2 or 3D) of size nx per side to interpolate onto
    totweight - if not None, the interpolation weights are also accumulated
                into this grid so averages can be computed later
    periodic - treat the grid as periodic; if False, weight is not conserved
               (some falls off the edges)

    Returns field (and modifies it in place), or (field, totweight) when
    totweight is given.

    Reference: R.W. Hockney and J.W. Eastwood, Computer Simulations Using
    Particles (New York: McGraw-Hill, 1981).
    Written by Joop Schaye, Feb 1999; ported to python by Simeon Bird, Feb 2012.
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    #-----------------------
    # Calculate TSC weights.
    #-----------------------
    # Coordinates of nearest grid point (ngp).
    # (np.int was removed from recent numpy; the builtin is equivalent.)
    ng = np.array(np.rint(pos[:, 0:dim]), dtype=int)
    # Distance from sample to ngp.
    dng = ng - pos[:, 0:dim]
    # kk holds the candidate grid indices, ww the matching TSC weights.
    kk = np.empty([3, nval, dim])
    ww = np.empty([3, nval, dim])
    # Index of ngp.
    kk[1, :, :] = ng
    # Weight of ngp.
    ww[1, :, :] = 0.75 - dng ** 2
    # Point before ngp.
    kk[0, :, :] = kk[1, :, :] - 1   # Index.
    dd = 1.0 - dng                  # Distance to sample.
    ww[0] = 0.5 * (1.5 - dd) ** 2   # TSC-weight.
    # Point after ngp.
    kk[2, :, :] = kk[1, :, :] + 1   # Index.
    dd = 1.0 + dng                  # Distance to sample.
    ww[2] = 0.5 * (1.5 - dd) ** 2   # TSC-weight.
    # Take care of the points at the boundaries.
    tscedge(kk, ww, nx, periodic)
    #-----------------------------
    # Interpolate samples to grid.
    #-----------------------------
    # totweight adds up all tsc weights allocated to a grid point; we need
    # to keep track of this in order to compute averaged quantities later.
    extraind = np.zeros(dim - 1, dtype=int)
    # Perform y=0, z=0 addition.
    tsc_xind(field, value, totweight, kk, ww, extraind)
    if dim > 1:
        # Perform z=0 addition.
        for yy in range(1, 3):
            extraind[0] = yy
            tsc_xind(field, value, totweight, kk, ww, extraind)
    if dim > 2:
        # Perform the rest of the addition.
        for zz in range(1, 3):
            for yy in range(0, 3):
                extraind[0] = yy
                extraind[1] = zz
                tsc_xind(field, value, totweight, kk, ww, extraind)
    # "totweight == None" is ambiguous once totweight is an ndarray; use
    # an identity check.
    if totweight is None:
        return field
    else:
        return (field, totweight)
def cic_str(pos, value, field, in_radii, periodic=False):
    """Cloud-in-cell with stretched (cubic) clouds.

    This is exactly the same as the cic() routine, above, except that
    instead of each particle being stretched over one grid point, it is
    stretched over a cubic region with some radius.
    Field must be 2d.
    Extra arguments:
    in_radii - Array of particle radii in grid units.

    (Bug fix: the pure-python fallback used python-2-only xrange, so on
    python 3 — where scipy.weave is unavailable — it always crashed.)
    """
    # Some error handling.
    if not check_input(pos, field):
        return field
    nval = np.size(value)
    dim = np.shape(field)
    nx = dim[0]
    dim = np.size(dim)
    if dim != 2:
        raise ValueError("Non 2D grid not supported!")
    # Use a grid cell radius of 2/3 (4 \pi /3 )**(1/3) s.
    # This means that l^3 = cell volume for AREPO (so it should be more or
    # less exact) and is close to the l = 0.5 (4\pi/3)**(1/3) s
    # cic interpolation that Nagamine, Springel & Hernquist used
    # to approximate their SPH smoothing.
    corr = 2. / 3. * (4 * math.pi / 3.) ** 0.3333333333
    radii = np.array(corr * in_radii)
    # If the smoothing length is below a single grid cell, stretch it.
    ind = np.where(radii < 0.5)
    radii[ind] = 0.5
    # Weight of each cell.
    weight = value / (2 * radii) ** dim
    # Upper and lower bounds of each cloud.
    # NOTE(review): unlike cic(), this reads pos columns 1..dim — presumably
    # column 0 is the projection axis; confirm against callers.
    up = pos[:, 1:dim + 1] + np.repeat(np.transpose([radii, ]), dim, axis=1)
    low = pos[:, 1:dim + 1] - np.repeat(np.transpose([radii, ]), dim, axis=1)
    # Upper and lower grid cells to add to.
    upg = np.array(np.floor(up), dtype=int)
    lowg = np.array(np.floor(low), dtype=int)
    # Deal with the edges by clamping clouds to the grid.
    if periodic:
        raise ValueError("Periodic grid not supported")
    else:
        ind = np.where(up > nx - 1)
        up[ind] = nx
        upg[ind] = nx - 1
        ind = np.where(low < 0)
        low[ind] = 0
        lowg[ind] = 0
    expr="""for(int p=0;p<nval;p++){
        //Temp variables
        double wght = weight(p);
        int ilx=lowg(p,0);
        int ily=lowg(p,1);
        int iux=upg(p,0);
        int iuy=upg(p,1);
        double lx=low(p,0);
        double ly=low(p,1);
        double ux=up(p,0);
        double uy=up(p,1);
        //Deal with corner values
        field(ilx,ily)+=(ilx+1-lx)*(ily+1-ly)*wght;
        field(iux,ily)+=(ux-iux)*(ily+1-ly)*wght;
        field(ilx,iuy)+=(ilx+1-lx)*(uy-iuy)*wght;
        field(iux,iuy)+=(ux-iux)*(uy-iuy)*wght;
        //Edges in y
        for(int gx=ilx+1;gx<iux;gx++){
            field(gx,ily)+=(ily+1-ly)*wght;
            field(gx,iuy)+=(uy-iuy)*wght;
        }
        //Central region
        for(int gy=ily+1;gy< iuy;gy++){
            //Edges.
            field(ilx,gy)+=(ilx+1-lx)*wght;
            field(iux,gy)+=(ux-iux)*wght;
            //x-values
            for(int gx=ilx+1;gx<iux;gx++){
                field(gx,gy)+=wght;
            }
        }
    }
    """
    try:
        scipy.weave.inline(expr, ['nval', 'upg', 'lowg', 'field', 'up', 'low', 'weight'], type_converters=scipy.weave.converters.blitz)
    except Exception:
        # Pure-python fallback when scipy.weave is not available.
        for p in range(0, nval):
            # Deal with corner values.
            field[lowg[p, 0], lowg[p, 1]] += (lowg[p, 0] + 1 - low[p, 0]) * (lowg[p, 1] + 1 - low[p, 1]) * weight[p]
            field[upg[p, 0], lowg[p, 1]] += (up[p, 0] - upg[p, 0]) * (lowg[p, 1] + 1 - low[p, 1]) * weight[p]
            field[lowg[p, 0], upg[p, 1]] += (lowg[p, 0] + 1 - low[p, 0]) * (up[p, 1] - upg[p, 1]) * weight[p]
            field[upg[p, 0], upg[p, 1]] += (up[p, 0] - upg[p, 0]) * (up[p, 1] - upg[p, 1]) * weight[p]
            # Edges in y.
            for gx in range(lowg[p, 0] + 1, upg[p, 0]):
                field[gx, lowg[p, 1]] += (lowg[p, 1] + 1 - low[p, 1]) * weight[p]
                field[gx, upg[p, 1]] += (up[p, 1] - upg[p, 1]) * weight[p]
            # Central region.
            for gy in range(lowg[p, 1] + 1, upg[p, 1]):
                # Edges in x.
                field[lowg[p, 0], gy] += (lowg[p, 0] + 1 - low[p, 0]) * weight[p]
                field[upg[p, 0], gy] += (up[p, 0] - upg[p, 0]) * weight[p]
                # x-values.
                for gx in range(lowg[p, 0] + 1, upg[p, 0]):
                    field[gx, gy] += weight[p]
    return field
from _fieldize_priv import _SPH_Fieldize
# this takes forever!!!!a
# Typical call: fieldize.sph_str(coords,mHI,sub_nHI_grid[ii],ismooth,weights=weights, periodic=True)
def sph_str(pos, value, field, radii, weights=None, periodic=False):
    """Interpolate particles onto a grid using an SPH kernel.

    This is similar to the cic_str() routine, but spherical.
    Field must be 2d.
    Extra arguments:
    radii - Array of particle radii in grid units.
    weights - Weights to divide each contribution by.

    Returns field (also modified in place).
    """
    # Some error handling.
    if np.size(pos) == 0:
        return field
    dim = np.shape(field)
    if np.size(dim) != 2:
        raise ValueError("Non 2D grid not supported!")
    # Bug fix: "weights == None" is an elementwise comparison when weights
    # is an ndarray and raises in boolean context; use an identity check.
    if weights is None:
        weights = np.array([0.])
    # Cast inputs to float32 (the explicit casts suggest the C helper
    # requires that dtype).
    if pos.dtype != np.float32:
        pos = np.array(pos, dtype=np.float32)
    if radii.dtype != np.float32:
        radii = np.array(radii, dtype=np.float32)
    if value.dtype != np.float32:
        value = np.array(value, dtype=np.float32)
    field += _SPH_Fieldize(pos, radii, value, weights, periodic, dim[0])
    # Return the grid for consistency with the other fieldize routines.
    return field
import scipy.integrate as integ
def integrate_sph_kernel(h,gx,gy):
    """Compute the integrated sph kernel for a particle with
    smoothing length h for the grid-cell at (gx, gy).

    Fast method: evaluates the kernel at the grid cell centre and
    integrates only along z. Bad if h < grid cell radius.
    Returns 0 when the cell centre lies outside the kernel support.
    """
    # Distance from the particle to the centre of the grid cell.
    r0 = np.sqrt((gx+0.5)**2+(gy+0.5)**2)
    if r0 > h:
        return 0
    h2 = h*h
    #Do the z integration with Simpson's rule,
    #evaluated at some fixed (well-chosen) abcissae.
    # zc is where |r| drops to h/2 (kernel changes branch), zm where it
    # reaches h (kernel support ends).
    zc=0
    if h/2 > r0:
        zc=np.sqrt(h2/4-r0**2)
    zm = np.sqrt(h2-r0**2)
    # Five evenly spaced abscissae between zc and zm.
    # (The fourth point was previously (zc+3*zm)/2., which lies outside
    # [zc, zm] and broke the monotone ordering simps() requires.)
    zz=np.array([zc,(3*zc+zm)/4.,(zc+zm)/2.,(zc+3*zm)/4.,zm])
    kern = sph_kern2(np.sqrt(zz**2+r0**2),h)
    total= 2*integ.simps(kern,zz)
    if h/2 > r0:
        # Inner-branch contribution for |z| < zc.
        # NOTE(review): the spacing jumps from 3*zc/4 to zc (no 7/8
        # point); simps() handles uneven spacing, so this only costs a
        # little accuracy.
        # NOTE(review): scipy >= 1.14 removed simps(); use simpson() there.
        zz=np.array([0,zc/8.,zc/4.,3*zc/8,zc/2.,5/8.*zc,3*zc/4.,zc])
        kern = sph_kern1(np.sqrt(zz**2+r0**2),h)
        total+= 2*integ.simps(kern,zz)
    return total
def do_slow_sph_integral(h,gx,gy):
    """Evaluate the very slow triple integral to find kernel contribution. Only do it when we must."""
    # Integrate the kernel over z in [-h, h] (for simplicity) and over
    # the unit grid cell [gx, gx+1] x [gy, gy+1]. tplquad also returns
    # an error estimate, which we discard.
    result, _abserr = integ.tplquad(
        sph_cart_wrap,
        -h, h,
        lambda x: gx, lambda x: gx + 1,
        lambda x, y: gy, lambda x, y: gy + 1,
        args=(h,),
        epsabs=5e-3,
    )
    return result
def sph_cart_wrap(z,y,x,h):
    """Cartesian wrapper around sph_kernel"""
    # Convert Cartesian coordinates to a radius, then evaluate the kernel.
    radius = np.sqrt(x**2 + y**2 + z**2)
    return sph_kernel(radius, h)
def sph_kern1(r,h):
    """SPH kernel for 0 < r < h/2"""
    # Inner branch of the cubic spline, normalised by 8/(pi h^3).
    q = r/h
    return 8/math.pi/h**3*(1 - 6*q**2 + 6*q**3)
def sph_kern2(r,h):
    """SPH kernel for h/2 < r < h"""
    # Outer branch of the cubic spline: 2*(1 - r/h)^3, normalised by 8/(pi h^3).
    q = 1 - r/h
    return 2*q**3*8/math.pi/h**3
def sph_kernel(r,h):
    """Evaluates the sph kernel used in gadget."""
    # Outside the kernel support.
    if r > h:
        return 0
    q = r/h
    if r > h/2:
        # Outer branch of the cubic spline.
        return 2*(1-q)**3*8/math.pi/h**3
    # Inner branch.
    return 8/math.pi/h**3*(1 - 6*q**2 + 6*q**3)
def tscedge(kk,ww,ngrid,periodic):
    """This function takes care of the points at the grid boundaries,
    either by wrapping them around the grid (the Julie Andrews sense)
    or by throwing them over the side (the Al Pacino sense).
    Both kk and ww are modified in place; nothing is returned.
    Arguments are:
    kk - the grid indices
    ww - the grid weights
    ngrid - the number of grid points
    periodic - Julie or Al?
    """
    if periodic:
        #If periodic, the nearest grid indices need to wrap around
        #Note python has a sensible remainder operator
        #which always returns > 0 , unlike C
        # Wrap in place: "kk = kk % ngrid" would rebind the local name
        # to a new array and leave the caller's kk unchanged.
        kk %= ngrid
    else:
        #Find points outside the grid
        ind=np.where(np.logical_or((kk < 0),(kk > ngrid-1)))
        #Set the weights of these points to zero
        ww[ind]=0
        #Indices of these points now do not matter, so set to zero also
        kk[ind]=0
def tscadd(field,index,weight,value,totweight):
    """This function is a helper for the tsc and cic routines. It adds
    the weighted value to the field and optionally calculates the total weight.
    Arguments:
        field - 2d or 3d grid, modified in place.
        index - (n, dims) array of grid indices, one row per particle.
        weight - interpolation weight per particle.
        value - value carried by each particle.
        totweight - optional grid accumulating the total weight per cell
                    (modified in place), or None to skip it.
    Returns nothing, but alters field (and totweight if given).
    """
    nx=np.size(value)
    dims=np.size(np.shape(field))
    # "is not None": comparing an array with != None is element-wise in
    # numpy and does not produce a single truth value.
    total = totweight is not None
    #Faster C version of this function: this is getting a little out of hand.
    expr="""for(int j=0;j<nx;j++){
            int ind1=index(j,0);
            int ind2=index(j,1);
            """
    if dims == 3:
        expr+="""int ind3=index(j,2);
              field(ind1,ind2,ind3)+=weight(j)*value(j);
              """
        if total:
            expr+=" totweight(ind1,ind2,ind3) +=weight(j);"
    if dims == 2:
        expr+="""field(ind1,ind2)+=weight(j)*value(j);
              """
        if total:
            expr+=" totweight(ind1,ind2) +=weight(j);"
    expr+="}"
    try:
        if dims==2 or dims == 3:
            if total:
                scipy.weave.inline(expr,['nx','index','value','field','weight','totweight'],type_converters=scipy.weave.converters.blitz)
            else:
                scipy.weave.inline(expr,['nx','index','value','field','weight'],type_converters=scipy.weave.converters.blitz)
        else:
            raise ValueError
    except Exception:
        # Pure-python fallback when weave compilation is unavailable.
        # range (not xrange) so the fallback also works under Python 3.
        wwval=weight*value
        for j in range(0,nx):
            ind=tuple(index[j,:])
            field[ind]+=wwval[j]
            if total:
                totweight[ind]+=weight[j]
    return
def get_tscweight(ww,ii):
    """Calculates the TSC weight for a particular set of axes.
    ii should be a vector of length dims having values 0,1,2.
    (for CIC a similar thing but ii has values 0,1)
    eg, call as:
    get_tscweight(ww,[0,0,0])
    Returns the element-wise product of the per-axis weight slices.
    """
    tscweight=1.
    #tscweight = \Pi ww[1]*ww[2]*ww[3]
    # range (not xrange) so this also works under Python 3; under
    # Python 2, range is equivalent here.
    for j in range(0,np.size(ii)):
        tscweight*=ww[ii[j],:,j]
    return tscweight
def tsc_xind(field,value,totweight,kk,ww,extraind):
    """Perform the interpolation along the x-axis.
    extraind argument contains the y and z indices, if needed.
    So for a 1d interpolation, extraind=[], for 2d,
    extraind=[y,], for 3d, extraind=[y,z]
    Returns nothing, but alters field
    """
    dims=np.size(extraind)+1
    dim_list=np.zeros(dims,dtype=int)
    dim_list[1:dims]=extraind
    # Work on a copy: "index = kk[0]" would be a view into kk, so
    # filling in the y/z columns below would silently overwrite the
    # caller's kk array as a side effect.
    index = np.array(kk[0])
    #Set up the index to have the right kk values depending on the y,z axes
    for i in range(1,dims):
        index[:,i]=kk[extraind[i-1],:,i]
    #Do the addition for each value of x
    for i in range(0,np.shape(kk)[0]):
        dim_list[0]=i
        tscweight=get_tscweight(ww,dim_list)
        index[:,0]=kk[i,:,0]
        tscadd(field,index,tscweight,value,totweight)
    return
| 34.246377 | 137 | 0.590022 |
3cad04b55e10337da5937edce699d46c3369e96d | 1,607 | py | Python | epytope/test/DummyAdapter.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/test/DummyAdapter.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/test/DummyAdapter.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | # This code is part of the epytope distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
.. module:: DummyAdaper
:synopsis: Contains a pseudo data base adapter for testing purposes.
.. moduleauthor:: schubert, brachvogel
"""
import copy
from epytope.IO.ADBAdapter import ADBAdapter, EAdapterFields
| 31.509804 | 73 | 0.6229 |
3cad775a80e54adc9a4854ed12070f7e895a7dd6 | 2,819 | py | Python | backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py | marksweb/django-cms-60min-demo-2021 | d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4 | [
"MIT"
] | null | null | null | backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py | marksweb/django-cms-60min-demo-2021 | d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4 | [
"MIT"
] | 1 | 2022-01-15T11:29:16.000Z | 2022-01-15T22:11:45.000Z | backend/plugins/nav_bar/migrations/0008_migrate_to_link_all_base.py | marksweb/django-cms-60min-demo-2021 | d9ca83538d6c5c7a0b0e1a18ae1a15bda4c296e4 | [
"MIT"
] | 3 | 2022-01-14T15:55:00.000Z | 2022-01-23T23:46:56.000Z | # Generated by Django 2.2.16 on 2020-09-17 16:00
from django.db import migrations, models
import django.db.models.deletion
import enumfields.fields
import link_all.models
| 38.616438 | 171 | 0.630011 |
3cadd23dc28e0931be3476bf361e1ba65acc6956 | 4,187 | py | Python | test/unit/utils/test_expiration_queue.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | test/unit/utils/test_expiration_queue.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | test/unit/utils/test_expiration_queue.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | import time
import unittest
from mock import MagicMock
from bxcommon.utils.expiration_queue import ExpirationQueue
| 34.319672 | 106 | 0.662288 |
3caefd3f5a8bfe14855d5ea0372e3bc9a9317bc4 | 480 | py | Python | legacy-code/pailindrome.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | 1 | 2019-06-13T15:33:57.000Z | 2019-06-13T15:33:57.000Z | legacy-code/pailindrome.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | null | null | null | legacy-code/pailindrome.py | developbiao/pythonbasics | a7549786629e820646dcde5bb9f1aad4331de9be | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Test
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
print('!')
else:
print('!')
| 25.263158 | 163 | 0.55 |
3cafbcdeecba4bc828647c5d5e2a12435c74df80 | 776 | py | Python | spotify_search/search.py | MiltonLn/spotify-tracks-pyconco2020 | 4a75b15852344f7dac066bea3c3e3abb1157d198 | [
"MIT"
] | 1 | 2021-07-29T16:09:30.000Z | 2021-07-29T16:09:30.000Z | spotify_search/search.py | MiltonLn/spotify-tracks-pyconco2020 | 4a75b15852344f7dac066bea3c3e3abb1157d198 | [
"MIT"
] | null | null | null | spotify_search/search.py | MiltonLn/spotify-tracks-pyconco2020 | 4a75b15852344f7dac066bea3c3e3abb1157d198 | [
"MIT"
] | null | null | null | from importlib import import_module
from flask import Flask, request, jsonify
from .spotify_api import get_spotify_response
app = Flask(__name__)
app.config.from_object("spotify_search.settings")
| 26.758621 | 65 | 0.719072 |
3cb1615543f6a7b7ba1580acd4a1477cfa004ce2 | 3,940 | py | Python | Python/src/controllers/MainController.py | Jictyvoo/EXA868--PathFinder | 1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a | [
"MIT"
] | null | null | null | Python/src/controllers/MainController.py | Jictyvoo/EXA868--PathFinder | 1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a | [
"MIT"
] | null | null | null | Python/src/controllers/MainController.py | Jictyvoo/EXA868--PathFinder | 1fe839e0d3c14f36a4a2187cc8bc00c19f3bda4a | [
"MIT"
] | null | null | null | import math
from models.business.OrganismController import OrganismController
from models.value.Finder import Finder
from models.value.Labyrinth import Labyrinth
| 39.4 | 103 | 0.628173 |
3cb181b4a78692a5068ea6ba57d0e24bbe0db8c2 | 3,386 | py | Python | accounts/views.py | callmewind/billdev | fcd53cb98284677fb619abeafb17a88035aabfd6 | [
"MIT"
] | null | null | null | accounts/views.py | callmewind/billdev | fcd53cb98284677fb619abeafb17a88035aabfd6 | [
"MIT"
] | null | null | null | accounts/views.py | callmewind/billdev | fcd53cb98284677fb619abeafb17a88035aabfd6 | [
"MIT"
] | null | null | null | from django.views.generic.edit import CreateView
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import RedirectView
from django.conf import settings
from .forms import *
| 36.804348 | 208 | 0.646486 |
3cb5796f6762e147de6c1a95dfd1c12f82cf44f8 | 241 | py | Python | hw-2/useful_modules.py | Atlasshrugs00/astr-119 | be30734d2580acd947e5b2e22e3039d0d42419f3 | [
"MIT"
] | null | null | null | hw-2/useful_modules.py | Atlasshrugs00/astr-119 | be30734d2580acd947e5b2e22e3039d0d42419f3 | [
"MIT"
] | 8 | 2021-09-24T04:02:52.000Z | 2021-12-09T05:45:22.000Z | hw-2/useful_modules.py | Atlasshrugs00/astr-119 | be30734d2580acd947e5b2e22e3039d0d42419f3 | [
"MIT"
] | null | null | null | import numpy as np #numpy library
import matplotlib.pyplot as plt #matplotlib pyplot
import sys #acces to c-like sys library
import os #gives access to operating system
print(sys.argv) #prints any command line arguments
print(os.getcwd()) | 26.777778 | 50 | 0.792531 |
3cb70deff93c19ea3ca28c0dcdec1ef4bed01acf | 3,532 | py | Python | Custom/text.py | SemLaan/Hotel-review-sentiment-analysis | b7fd22dcea63bab1c7fe666a7f4912931de1f4dc | [
"Apache-2.0"
] | null | null | null | Custom/text.py | SemLaan/Hotel-review-sentiment-analysis | b7fd22dcea63bab1c7fe666a7f4912931de1f4dc | [
"Apache-2.0"
] | null | null | null | Custom/text.py | SemLaan/Hotel-review-sentiment-analysis | b7fd22dcea63bab1c7fe666a7f4912931de1f4dc | [
"Apache-2.0"
] | null | null | null |
import pandas as pd
from nltk import tokenize as tokenizers
from nltk.stem import PorterStemmer, WordNetLemmatizer
appos = {
"aren t" : "are not",
"can t" : "cannot",
"couldn t" : "could not",
"didn t" : "did not",
"doesn t" : "does not",
"don t" : "do not",
"hadn t" : "had not",
"hasn t" : "has not",
"haven t" : "have not",
"he d" : "he would",
"he ll" : "he will",
"he s" : "he is",
"i d" : "I would",
"i ll" : "I will",
"i m" : "I am",
"isn t" : "is not",
"it s" : "it is",
"it ll":"it will",
"i ve" : "I have",
"let s" : "let us",
"mightn t" : "might not",
"mustn t" : "must not",
"shan t" : "shall not",
"she d" : "she would",
"she ll" : "she will",
"she s" : "she is",
"shouldn t" : "should not",
"that s" : "that is",
"there s" : "there is",
"they d" : "they would",
"they ll" : "they will",
"they re" : "they are",
"they ve" : "they have",
"we d" : "we would",
"we re" : "we are",
"weren t" : "were not",
"we ve" : "we have",
"what ll" : "what will",
"what re" : "what are",
"what s" : "what is",
"what ve" : "what have",
"where s" : "where is",
"who d" : "who would",
"who ll" : "who will",
"who re" : "who are",
"who s" : "who is",
"who ve" : "who have",
"won t" : "will not",
"wouldn t" : "would not",
"you d" : "you would",
"you ll" : "you will",
"you re" : "you are",
"you ve" : "you have",
" re": " are",
"wasn t": "was not",
"we ll":" will",
}
| 22.213836 | 76 | 0.51812 |
3cb8b156ffda90f3a147616840973c64a0b81e50 | 546 | py | Python | kolibri/plugins/user_auth/root_urls.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 545 | 2016-01-19T19:26:55.000Z | 2022-03-20T00:13:04.000Z | kolibri/plugins/user_auth/root_urls.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 8,329 | 2016-01-19T19:32:02.000Z | 2022-03-31T21:23:12.000Z | kolibri/plugins/user_auth/root_urls.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 493 | 2016-01-19T19:26:48.000Z | 2022-03-28T14:35:05.000Z | """
This is here to enable redirects from the old /user endpoint to /auth
"""
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic.base import RedirectView
from kolibri.core.device.translation import i18n_patterns
redirect_patterns = [
url(
r"^user/$",
RedirectView.as_view(
pattern_name="kolibri:kolibri.plugins.user_auth:user_auth", permanent=True
),
name="redirect_user",
),
]
urlpatterns = [url(r"", include(i18n_patterns(redirect_patterns)))]
| 26 | 86 | 0.705128 |
3cb8db111fef337bf519873d89b2fd5a45a81770 | 250 | py | Python | Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py | aliasfoxkde/snippets | bb6dcc6597316ef9c88611f526935059451c3b5a | [
"MIT"
] | null | null | null | Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py | aliasfoxkde/snippets | bb6dcc6597316ef9c88611f526935059451c3b5a | [
"MIT"
] | null | null | null | Learning/CodeWars/Python/7 kyu_Sum_of_numbers_from_0_to_N.py | aliasfoxkde/snippets | bb6dcc6597316ef9c88611f526935059451c3b5a | [
"MIT"
] | null | null | null | # See: https://www.codewars.com/kata/56e9e4f516bcaa8d4f001763
| 27.777778 | 83 | 0.54 |
3cb8ec1381ca6215654d8b8a9da92a3ab2726159 | 4,685 | py | Python | Script.py | harisqazi1/Automated_Script | 6680e0604db55297fad2ab2f99ea61324ca88048 | [
"MIT"
] | null | null | null | Script.py | harisqazi1/Automated_Script | 6680e0604db55297fad2ab2f99ea61324ca88048 | [
"MIT"
] | null | null | null | Script.py | harisqazi1/Automated_Script | 6680e0604db55297fad2ab2f99ea61324ca88048 | [
"MIT"
] | null | null | null | """
Title: Automated Script for Data Scraping
Creator: Haris "5w464l1c10u5"
Purpose: This was made in order to make it easier to get data from online, all through one python script
Usage:
python3 Automated_Script.py
Resources:
https://www.digitalocean.com/community/tutorials/how-to-scrape-web-pages-with-beautiful-soup-and-python-3
https://www.guru99.com/reading-and-writing-files-in-python.html
https://www.dataquest.io/blog/web-scraping-tutorial-python/
https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998
https://pythonspot.com/http-download-file-with-python/
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import urllib.request, urllib.error, urllib.parse
from datetime import date, datetime
import io
import codecs
Code_Version = 3
#Time in H:M:S format
now = datetime.now()
Time = now.strftime("%I:%M:%S:%p")
#Date
Today_Date = date.today()
Date = Today_Date.strftime("(%A) %B %d, %Y")
try:
#Weather
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=42.00900000000007&lon=-87.69495999999998')
soup = BeautifulSoup(page.text, 'html.parser')
except:
print("Weather.gov is not available")
try:
#Weather Type
weathertype = soup.find(class_='myforecast-current')
type = weathertype.contents[0]
type = type.encode('utf-8')
except:
type = "N/A"
try:
#Fahrenheit
weather = soup.find(class_='myforecast-current-lrg')
w = weather.contents[0]
w = w.encode('utf-8')
except:
w = "N/A"
try:
#Humidity
Humidity = soup.find_all('td')[0].get_text()
Hum_percent = soup.find_all('td')[1].get_text()
except:
Humidity = "N/A"
Hum_percent = "N/A"
try:
#Wind_Speed
W_Speed = soup.find_all('td')[2].get_text()
W_S = soup.find_all('td')[3].get_text()
except:
W_Speed = "N/A"
W_S = "N/A"
try:
#Wind_Chill
Wind_Chill = soup.find_all('td')[10].get_text()
Wind_Chill_num = soup.find_all('td')[11].get_text()
Wind_Chill = Wind_Chill.encode('utf-8')
Wind_Chill_num = Wind_Chill_num.encode('utf-8')
except:
Wind_Chill = "N/A"
Wind_Chill_num = "N/A"
try:
#Last_Update
Last_Update = soup.find_all('td')[12].get_text()
Last_Update_num = soup.find_all('td')[13].get_text()
except:
Last_Update = "N/A"
Last_Update_num = "N/A"
html_file = """
<h1 style="text-align: center;"><span style="text-decoration: underline;">Good Morning, Haris!</span></h1>
<h4 style="text-align: left;">Time:</h4>
<h4 style="text-align: left;">Date:</h4>
<h4>Code Version:</h4>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline;"><span style="background-color: #00ccff;">Weather</span></span></h3>
<table style="margin-left: auto; margin-right: auto; height: 195px;" width="238">
<tbody>
<tr style="height: 7px;">
<td style="width: 228px; height: 7px;">Current Weather:</td>
</tr>
<tr style="height: 1px;">
<td style="width: 228px; height: 1px;">Weather Type:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Humidity:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Speed:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Wind Chill:</td>
</tr>
<tr style="height: 2px;">
<td style="width: 228px; height: 2px;">Last Update:</td>
</tr>
</tbody>
</table>
<p style="font-size: 1.5em;"> </p>
<hr />
<h3 style="font-size: 1.5em; text-align: center;"><span style="text-decoration: underline; background-color: #cc99ff;">News</span></h3>
"""
html_file = html_file.replace('Time:','Current Time: ' + Time)
html_file = html_file.replace('Date:','Today\'s Date: ' + Date)
html_file = html_file.replace('Code Version:', 'Code Version: #' + str(Code_Version))
html_file = html_file.replace('Current Weather:','Current Weather: ' + w.decode('utf8'))
html_file = html_file.replace('Weather Type:','Weather Type: ' + type.decode('utf8'))
html_file = html_file.replace('Humidity:','Humidity: ' + Hum_percent)
html_file = html_file.replace('Wind Speed:','Wind Speed: ' + W_S)
html_file = html_file.replace('Wind Chill:','Wind Chill: ' + Wind_Chill_num.decode('utf-8'))
html_file = html_file.replace('Last Update:','Last Update: ' + Last_Update_num)
try:
response = urllib.request.urlopen('https://allinfosecnews.com/')
html = response.read()
except:
print("https://allinfosecnews.com/ is not available")
with io.open("website.html", 'w', encoding='utf8') as f:
f.write(html_file)
f.write(html.decode('utf-8'))
f.close()
print(w)
print(type)
print(Hum_percent)
print(W_Speed)
print(W_S)
print(Wind_Chill_num)
print(Last_Update_num)
| 28.919753 | 159 | 0.683458 |
3cb91fcc9d369715e263d80560e5e0440993f481 | 144 | py | Python | pnbp/helpers/__init__.py | prettynb/pnbp | 1be54a2217a85675ec4a14a1c8a1d2501be88404 | [
"MIT"
] | 1 | 2021-07-30T02:00:29.000Z | 2021-07-30T02:00:29.000Z | pnbp/helpers/__init__.py | prettynb/pnbp | 1be54a2217a85675ec4a14a1c8a1d2501be88404 | [
"MIT"
] | null | null | null | pnbp/helpers/__init__.py | prettynb/pnbp | 1be54a2217a85675ec4a14a1c8a1d2501be88404 | [
"MIT"
] | null | null | null | from .base import _convert_datetime
from .codeblock import CodeBlock
from .link import Link
from .tag import Tag
from .url import Url
| 9 | 35 | 0.756944 |
3cb929d8fa24f1122564db813af9ab0475a425f5 | 838 | py | Python | tests/elections/test_police_and_crime_commissioner.py | DemocracyClub/uk-election-timetables | 2541f9e5050a393906bafa2b70709fe650de3f32 | [
"MIT"
] | 2 | 2020-11-14T15:56:56.000Z | 2021-01-11T11:11:09.000Z | tests/elections/test_police_and_crime_commissioner.py | DemocracyClub/uk-election-timetables | 2541f9e5050a393906bafa2b70709fe650de3f32 | [
"MIT"
] | 12 | 2020-11-18T20:27:43.000Z | 2021-12-15T10:47:01.000Z | tests/elections/test_police_and_crime_commissioner.py | DemocracyClub/uk-election-timetables | 2541f9e5050a393906bafa2b70709fe650de3f32 | [
"MIT"
] | null | null | null | from datetime import date
from uk_election_timetables.elections import PoliceAndCrimeCommissionerElection
# Reference election: pcc.avon-and-somerset.2016-05-05
# Reference election: pcc.2021-05-06
# Reference election: pcc.2021-05-06
| 33.52 | 79 | 0.805489 |
3cb98b826371f4dfda09a39ed9c09c8f6ab7451b | 847 | py | Python | LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py | SanaaCHAOU/laureat_management_ENSAT | d769714f9f8cb9ebf90e02577547ec348c011461 | [
"MIT"
] | null | null | null | LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py | SanaaCHAOU/laureat_management_ENSAT | d769714f9f8cb9ebf90e02577547ec348c011461 | [
"MIT"
] | null | null | null | LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py | SanaaCHAOU/laureat_management_ENSAT | d769714f9f8cb9ebf90e02577547ec348c011461 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-01-11 14:25
from django.db import migrations, models
import django.db.models.deletion
| 28.233333 | 132 | 0.570248 |
3cbc5cfef3c4ee6f751fd3f8b8b9e741e7ebbbd4 | 1,952 | py | Python | python/250.count-univalue-subtrees.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 10 | 2019-09-15T00:23:57.000Z | 2022-01-05T12:53:42.000Z | python/250.count-univalue-subtrees.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 3 | 2021-06-30T00:39:26.000Z | 2021-08-01T07:13:59.000Z | python/250.count-univalue-subtrees.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 6 | 2020-02-08T02:55:22.000Z | 2022-01-02T22:48:18.000Z | # [250] Count Univalue Subtrees
# Description
# Given a binary tree, count the number of uni-value subtrees.
# A Uni-value subtree means all nodes of the subtree have the same value.
# Example
# Example 1
# Input: root = {5,1,5,5,5,#,5}
# Output: 4
# Explanation:
# 5
# / \
# 1 5
# / \ \
# 5 5 5
# Example 2
# Input: root = {1,3,2,4,5,#,6}
# Output: 3
# Explanation:
# 1
# / \
# 3 2
# / \ \
# 4 5 6
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
| 22.436782 | 103 | 0.518955 |
3cbd5fce78146aae7cbddda0c039ec527c342db9 | 5,752 | py | Python | apis.py | teemuja/ndp_app3 | 8a9517b2e2385640dc1a2c1baf0ae07cf630c89c | [
"MIT"
] | null | null | null | apis.py | teemuja/ndp_app3 | 8a9517b2e2385640dc1a2c1baf0ae07cf630c89c | [
"MIT"
] | null | null | null | apis.py | teemuja/ndp_app3 | 8a9517b2e2385640dc1a2c1baf0ae07cf630c89c | [
"MIT"
] | null | null | null | # apis for ndp_d3
from owslib.wfs import WebFeatureService
import pandas as pd
import geopandas as gpd
import momepy
import streamlit as st
| 52.770642 | 134 | 0.685327 |
3cbec5b44846435b33e0ef20ab76a5f6a4ef6c68 | 6,471 | py | Python | test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py | nrc-cnrc/Portage-SMT-TAS | 73f5a65de4adfa13008ea9a01758385c97526059 | [
"MIT"
] | null | null | null | test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py | nrc-cnrc/Portage-SMT-TAS | 73f5a65de4adfa13008ea9a01758385c97526059 | [
"MIT"
] | null | null | null | test-suite/unit-testing/PortageLive.soap/tests/testIncrAddSentence.py | nrc-cnrc/Portage-SMT-TAS | 73f5a65de4adfa13008ea9a01758385c97526059 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# vim:expandtab:ts=3:sw=3
# @file testIncrStatus.py
# @brief Test SOAP calls to incrAddSentence using a deployed PortageLive web server.
#
# @author Samuel Larkin
#
# Traitement multilingue de textes / Multilingual Text Processing
# Tech. de l'information et des communications / Information and Communications Tech.
# Conseil national de recherches Canada / National Research Council Canada
# Copyright 2016, Sa Majeste la Reine du Chef du Canada /
# Copyright 2016, Her Majesty in Right of Canada
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
#import zeep
#client = zeep.Client(wsdl=url)
from suds.cache import DocumentCache
from suds.client import Client
from suds import WebFault
import unittest
import logging
import requests
import time
import random
import os
import sys
import shutil
logging.basicConfig(level=logging.CRITICAL)
# If you need to debug what is happening, uncomment the following line
#logging.basicConfig(level=logging.DEBUG)
url = 'http://127.0.0.1'
if __name__ == '__main__':
unittest.main()
| 36.767045 | 118 | 0.637923 |
3cbf25669395a89790375a19545ba5be63026880 | 1,919 | py | Python | Cryptography/Caesar_Cipher.py | hari40009/learnpython | b75e700f62f49ab9d8fef607ebd87a34c5cb6530 | [
"MIT"
] | 1 | 2018-11-07T04:13:52.000Z | 2018-11-07T04:13:52.000Z | Cryptography/Caesar_Cipher.py | engineerprogrammer/learnpython | 140acfd8fc6345745a9b274baaa1e58ea3217f9f | [
"MIT"
] | null | null | null | Cryptography/Caesar_Cipher.py | engineerprogrammer/learnpython | 140acfd8fc6345745a9b274baaa1e58ea3217f9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" A program to use a Caesar cipher based on user input for the shift value """
MAX_SHIFT = 26
def whatMode():
    """ Finds out what the user wants to do """
    # Accept full words or their single-letter abbreviations.
    accepted = "encrypt e decrypt d brute b".split()
    while True:
        print("Do you wish to encrypt, decrypt or brute force a message: ")
        mode = input().lower()
        if mode in accepted:
            # Return only the first letter: 'e', 'd' or 'b'.
            return mode[0]
        print("Enter '[E]ncrypt', '[D]ecrypt' or [B]rute")
def plainMessage():
    """ Gets a string from the user """
    # Prompt, then read a single line of text.
    print("Message: ")
    text = input()
    return text
def getKey():
    """ Gets a shift value from the user """
    # Keep asking until the user supplies a value in range.
    while True:
        print("Enter shift key (1-%s) " % (MAX_SHIFT))
        key = int(input())
        if 1 <= key <= MAX_SHIFT:
            return key
def cryptMessage(mode, message, shiftKey):
    """ The encryption / decryption action is here """
    # Decryption is just encryption with the shift reversed.
    shift = -shiftKey if mode[0] == 'd' else shiftKey
    result = []
    for ch in message:
        # Non-letters pass through unchanged.
        if not ch.isalpha():
            result.append(ch)
            continue
        code = ord(ch) + shift
        # Wrap around within the upper- or lower-case alphabet.
        if ch.isupper():
            if code > ord('Z'):
                code -= 26
            elif code < ord('A'):
                code += 26
        elif ch.islower():
            if code > ord('z'):
                code -= 26
            elif code < ord('a'):
                code += 26
        result.append(chr(code))
    return ''.join(result)
# --- Main script: gather inputs, then run the chosen mode ---
mode = whatMode()
message = plainMessage()
# Brute force needs no key: every shift is tried below.
if mode[0] != 'b':
    shiftKey = getKey()
print('Your translated text is:')
if mode[0] != 'b': #Brute force settings
    print(cryptMessage(mode, message, shiftKey))
else:
    # Try every possible shift and print each candidate decryption.
    for shiftKey in range(1, MAX_SHIFT + 1):
        print(shiftKey, cryptMessage('decrypt', message, shiftKey))
3cc3cc243655d3b808c34d010f7d4b9e190e610a | 494 | py | Python | leetcode/python/medium/p046_permute.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/medium/p046_permute.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/medium/p046_permute.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null |
slu = Solution()
print(slu.permute([1]))
| 22.454545 | 39 | 0.506073 |
3cc75769cc0430a3c58ed37733ff77e1117674ee | 83 | py | Python | bemy/apps.py | foropolo/profiles-rest-api | f35cbb5727204bf4419c6b0a9797d7c624773219 | [
"MIT"
] | null | null | null | bemy/apps.py | foropolo/profiles-rest-api | f35cbb5727204bf4419c6b0a9797d7c624773219 | [
"MIT"
] | 6 | 2019-12-05T00:35:40.000Z | 2022-02-10T08:29:56.000Z | bemy/apps.py | foropolo/profiles-rest-api | f35cbb5727204bf4419c6b0a9797d7c624773219 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 13.833333 | 33 | 0.73494 |
3cc9578bf937313ea3ce810099e43cb50d90651a | 634 | py | Python | ribosome/compute/ribosome.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/compute/ribosome.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/compute/ribosome.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Generic, TypeVar, Type
from lenses import UnboundLens
from amino import Dat
from ribosome.data.plugin_state import PluginState
D = TypeVar('D')
CC = TypeVar('CC')
C = TypeVar('C')
__all__ = ('Ribosome',)
| 21.862069 | 85 | 0.621451 |
3cc96d6bfddb10586b88d9ad0d7b86bd5ca4e9aa | 1,431 | py | Python | pythonstartup.py | avisilver/util_scripts | ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508 | [
"MIT"
] | null | null | null | pythonstartup.py | avisilver/util_scripts | ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508 | [
"MIT"
] | null | null | null | pythonstartup.py | avisilver/util_scripts | ffe4ee4b7a7b907b7d93bef5ec96151d2cbf8508 | [
"MIT"
] | null | null | null | # Add auto-completion and a stored history file of commands to your Python
# interactive interpreter. Requires Python 2.0+, readline. Autocomplete is
# bound to the Esc key by default (you can change it - see readline docs).
#
# Store the file in ~/.pystartup, and set an environment variable to point
# to it: "export PYTHONSTARTUP=/home/user/.pystartup" in bash.
#
# Note that PYTHONSTARTUP does *not* expand "~", so you have to put in the
# full path to your home directory.
import atexit
import os
import readline
import rlcompleter
historyPath = os.path.expanduser("~/.pyhistory")
if os.path.exists(historyPath):
readline.read_history_file(historyPath)
atexit.register(save_history)
readline.parse_and_bind('tab: complete')
del os, atexit, readline, rlcompleter, save_history, historyPath
def dirp(object_or_module):
    """dirp(object_or_module) -> string

    Print the object's or currently imported module's attributes as shown
    in dir() on separate lines with docstrings"""
    # 80-dash divider printed before every attribute entry.
    divider = "-" * 80
    for name in dir(object_or_module):
        # Fall back to an empty string when the attribute has no docstring.
        docstring = object_or_module.__getattribute__(name).__doc__ or ""
        print("\n{line}\n{attr}\n{doc}".format(line=divider, attr=name, doc=docstring))
| 31.108696 | 74 | 0.709294 |
3ccccac3d5c3d7c8d168081f420c8dfcbee68843 | 761 | py | Python | NhMedicalSite/panel/models.py | Dogruyer/ecommerce | aa505b401e42882a96e6ef6375bd1a1ed95c5b85 | [
"Apache-2.0"
] | null | null | null | NhMedicalSite/panel/models.py | Dogruyer/ecommerce | aa505b401e42882a96e6ef6375bd1a1ed95c5b85 | [
"Apache-2.0"
] | null | null | null | NhMedicalSite/panel/models.py | Dogruyer/ecommerce | aa505b401e42882a96e6ef6375bd1a1ed95c5b85 | [
"Apache-2.0"
] | 1 | 2018-11-01T11:10:58.000Z | 2018-11-01T11:10:58.000Z | from __future__ import unicode_literals
from django.db import models
# Create your models here.
| 21.138889 | 75 | 0.730618 |
3cccd58c207124db8b9a503a7ea72e1986e27cb3 | 459 | py | Python | voxelcut/initial.py | JohnyEngine/CNC | e4c77250ab2b749d3014022cbb5eb9924e939993 | [
"Apache-2.0"
] | null | null | null | voxelcut/initial.py | JohnyEngine/CNC | e4c77250ab2b749d3014022cbb5eb9924e939993 | [
"Apache-2.0"
] | null | null | null | voxelcut/initial.py | JohnyEngine/CNC | e4c77250ab2b749d3014022cbb5eb9924e939993 | [
"Apache-2.0"
] | null | null | null | toolpath.coords = Coords(-100, -100, -5, 100, 100, 5)
voxelcut.set_current_color(12566512)
toolpath.coords.add_block(0.150768, 0, -5, 9.69846, 9.84808, 10)
GRAY = 0x505050
RED = 0x600000
BLUE = 0x000050
toolpath.tools[2] = Tool([[Span(Point(3, 0), Vertex(0, Point(3, 20), Point(0, 0)), False), GRAY], [Span(Point(3, 20), Vertex(0, Point(3, 40), Point(0, 0)), False), RED]])
#toolpath.load('C:/Users/Dan/AppData/Local/Temp/test.tap')
toolpath.load('test.tap')
| 45.9 | 170 | 0.681917 |
3ccda61294b042b9301d3115e54f9eaad129e0a8 | 2,200 | py | Python | core/cliqueIntersectionGraph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | 3 | 2018-03-16T17:25:23.000Z | 2021-04-27T21:42:31.000Z | core/cliqueIntersectionGraph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | null | null | null | core/cliqueIntersectionGraph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | null | null | null | import sys
from clique import Clique
from cvxopt import spmatrix, amd
from collections import defaultdict as dd
import chompack as cp
from util.graph import Graph
LARGEST_CLIQUE_SIZE = 24
#
# A CliqueIntersectionGraph is a graph (V,E), where V is a set of cliques, each
# bag containing a clique, and (i,j) in E if clique i and clique j have a non
# empty sepset
#
# @param I,J,W (I[i],J[i]) is an edge in the original graph with weight
# W[i]. We require I > J
#
| 33.333333 | 79 | 0.678636 |
3ccdd8c975b584a486aac3e7fbb9b1d2ae39487f | 4,586 | py | Python | backend/src/baserow/contrib/database/airtable/tasks.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/airtable/tasks.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/airtable/tasks.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | import logging
from django.conf import settings
from baserow.config.celery import app
logger = logging.getLogger(__name__)
| 35.276923 | 86 | 0.657872 |
3cd0a4bbec748d6e33fb26e96ae01249982c0522 | 7,439 | py | Python | d2lbook/notebook.py | naoufelito/d2l-book | bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a | [
"Apache-2.0"
] | null | null | null | d2lbook/notebook.py | naoufelito/d2l-book | bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a | [
"Apache-2.0"
] | 1 | 2020-06-06T06:34:03.000Z | 2020-06-06T07:01:56.000Z | d2lbook/notebook.py | naoufelito/d2l-book | bb281e1640aaf039b4d2d69bb9c8d6334a7cb98a | [
"Apache-2.0"
] | null | null | null | """utilities to handle notebooks"""
from typing import Union, List, Optional
import copy
import notedown
import nbformat
import nbconvert
from nbformat import notebooknode
from d2lbook import markdown
from d2lbook import common
def create_new_notebook(nb: notebooknode.NotebookNode,
                        cells: List[notebooknode.NotebookNode]
                        ) -> notebooknode.NotebookNode:
    """Clone ``nb`` (metadata and all) and swap in ``cells`` as its cell list."""
    clone = copy.deepcopy(nb)
    clone.cells = cells
    return clone
def read_markdown(source: Union[str, List[str]]) -> notebooknode.NotebookNode:
    """Parse markdown (a string, or a list of lines) into a notebook."""
    text = source if isinstance(source, str) else '\n'.join(source)
    return notedown.MarkdownReader(match='strict').reads(text)
def split_markdown_cell(nb: notebooknode.NotebookNode) -> notebooknode.NotebookNode:
    """Split markdown cells that contain tab blocks.

    Consecutive markdown cells are first merged, then re-split on tab
    boundaries; each resulting tab cell gets a `tab` entry in its metadata
    (derived from the markdown block's `class` attribute).
    """
    # merge continous markdown cells
    grouped_cells = common.group_list(nb.cells,
                                      lambda cell, _: cell.cell_type=='markdown')
    new_cells = []
    for is_md, group in grouped_cells:
        if not is_md:
            # Non-markdown cells (code, raw) pass through untouched.
            new_cells.extend(group)
        else:
            # Re-parse the merged markdown so tab blocks become standalone cells.
            src = '\n\n'.join(cell.source for cell in group)
            md_cells = markdown.split_markdown(src)
            # A tab cell is a markdown fragment carrying a `class` attribute.
            is_tab_cell = lambda cell, _: cell['type']=='markdown' and 'class' in cell
            grouped_md_cells = common.group_list(md_cells, is_tab_cell)
            for is_tab, md_group in grouped_md_cells:
                new_cell = nbformat.v4.new_markdown_cell(
                    markdown.join_markdown_cells(md_group))
                if is_tab:
                    # The class value is expected to be backtick-quoted,
                    # e.g. `mxnet`; store it unquoted in the metadata.
                    tab = md_group[0]['class']
                    assert tab.startswith('`') and tab.endswith('`'), tab
                    new_cell.metadata['tab'] = tab[1:-1]
                new_cells.append(new_cell)
    # Drop cells whose source ended up empty after the split.
    new_cells = [cell for cell in new_cells if cell.source]
    return create_new_notebook(nb, new_cells)
def _get_cell_tab(cell: notebooknode.NotebookNode, default_tab: str='') -> Optional[str]:
    """Return the tab a cell belongs to.

    Explicit metadata wins; non-code cells carry no tab; otherwise a tab
    marker in the source (matched by ``common.source_tab_pattern``) is
    used, falling back to ``default_tab``.
    """
    if 'tab' in cell.metadata:
        return cell.metadata['tab']
    if cell.cell_type != 'code':
        return None
    marker = common.source_tab_pattern.search(cell.source)
    return marker[1] if marker else default_tab
def get_tab_notebook(nb: notebooknode.NotebookNode, tab: str, default_tab: str
                    ) -> notebooknode.NotebookNode:
    """Returns a notebook with code/markdown cells that doesn't match tab
    removed.

    Return None if no cell matched the tab and nb contains code blocks.

    A `origin_pos` property is added to the metadata for each cell, which
    records its position in the original notebook `nb`.
    """
    matched_tab = False
    new_cells = []
    for i, cell in enumerate(nb.cells):
        new_cell = copy.deepcopy(cell)
        # Remember the cell's original index so merge_tab_notebooks can
        # reassemble the notebooks later.
        new_cell.metadata['origin_pos'] = i
        cell_tab = _get_cell_tab(new_cell, default_tab)
        if not cell_tab:
            # Tab-less cells are shared content: keep them for every tab.
            new_cells.append(new_cell)
        else:
            if cell_tab == tab:
                new_cell.metadata['tab'] = cell_tab
                matched_tab = True
                # remove the tab from source
                lines = new_cell.source.split('\n')
                for j, line in enumerate(lines):
                    src_tab = common.source_tab_pattern.search(line)
                    text_tab = common.md_mark_pattern.search(line)
                    # NOTE(review): deleting from `lines` while enumerating
                    # skips the line following each deletion, so consecutive
                    # marker lines may survive — confirm this is intended.
                    if src_tab or (text_tab and (
                        text_tab[1]=='begin_tab' or text_tab[1]=='end_tab')):
                        del lines[j]
                new_cell.source = '\n'.join(lines)
                new_cells.append(new_cell)
    # A notebook with code cells but no cell for this tab yields None so the
    # caller can skip generating that tab entirely.
    if not matched_tab and any([cell.cell_type=='code' for cell in nb.cells]):
        return None
    return create_new_notebook(nb, new_cells)
def merge_tab_notebooks(src_notebooks: List[notebooknode.NotebookNode]
                       ) -> notebooknode.NotebookNode:
    """Merge per-tab notebooks back into a single notebook.

    The reverse operation of get_tab_notebook: cells are placed back at the
    `origin_pos` recorded in their metadata. Later notebooks in the list
    overwrite earlier ones at the same position.

    NOTE(review): positions that appear in no source notebook remain None
    in the merged cell list — confirm callers tolerate that.
    """
    n = max([max([cell.metadata['origin_pos'] for cell in nb.cells])
             for nb in src_notebooks])
    new_cells = [None] * (n+1)
    for nb in src_notebooks:
        for cell in nb.cells:
            new_cells[cell.metadata['origin_pos']] = copy.deepcopy(cell)
    # Metadata for the merged notebook is taken from the first source.
    return create_new_notebook(src_notebooks[0], new_cells)
def _merge_tabs(nb: notebooknode.NotebookNode):
    """Merge side-by-side tab cells into single multi-tab groups.

    Returns a list of (in_tab, payload) pairs: payload is the raw cell list
    when in_tab is False, and a list of per-position cell tuples (one cell
    per tab) when in_tab is True.
    """
    # Group consecutive cells by tab membership (_tab_status is defined
    # elsewhere in this module).
    cell_groups = common.group_list(nb.cells, _tab_status)
    # For each group, record the ordered tab names it covers (None if the
    # group is not tabbed); equal tab lists mark mergeable adjacent groups.
    meta = [(in_tab, [cell.metadata['tab'] for cell in group] if in_tab else None
            ) for in_tab, group in cell_groups]
    new_cells = []
    i = 0
    while i < len(meta):
        in_tab, tabs = meta[i]
        if not in_tab:
            new_cells.append((False, cell_groups[i][1]))
            i += 1
        else:
            # Extend j over every following group with the same tab list,
            # then zip those groups so cells at the same position across
            # tabs end up in one tuple.
            j = i + 1
            while j < len(meta):
                if meta[j][1] != tabs:
                    break
                j += 1
            groups = [group for _, group in cell_groups[i:j]]
            new_cells.append((True, [x for x in zip(*groups)]))
            i = j
    return new_cells
def add_html_tab(nb: notebooknode.NotebookNode, default_tab: str) -> notebooknode.NotebookNode:
    """Insert HTML tab-bar/tab-panel scaffolding around grouped tab cells.

    Returns ``nb`` unchanged when no group has more than one tab; otherwise
    returns a new notebook in which each multi-tab group is preceded by a
    tab bar, each tab gets its own panel, and the group is closed with a
    raw-HTML ``</div>`` cell.
    """
    cell_groups = _merge_tabs(nb)
    tabs = [len(group) for in_tab, group in cell_groups if in_tab]
    if not tabs or max(tabs) <= 1:
        # Nothing to render: at most one tab per group.
        return nb
    new_cells = []
    for i, (in_tab, group) in enumerate(cell_groups):
        if not in_tab:
            new_cells.extend(group)
        else:
            tabs = [cells[0].metadata['tab'] for cells in group]
            # BUG FIX: the original wrote `cell_type == 'code' == 2`, a
            # chained comparison containing `'code' == 2` (always False),
            # so every tab bar was rendered with the "text" class.
            div_class = "code" if group[0][0].cell_type == 'code' else "text"
            new_cells.append(_get_tab_bar(tabs, i, default_tab, div_class))
            for j, (tab, cells) in enumerate(zip(tabs, group)):
                new_cells.extend(_get_tab_panel(cells, tab, f'{i}-{j}', default_tab))
            # Close the wrapping <div> opened by the tab bar.
            new_cells.append(nbformat.v4.new_markdown_cell(
                "```eval_rst\n.. raw:: html\n\n </div>\n```"))
    return create_new_notebook(nb, new_cells)
| 41.099448 | 152 | 0.609894 |
3cd1756adb8c57eb1928457d00bc92c25a43ba4c | 1,204 | py | Python | myamiweb/imcache/imcacheconfig.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | myamiweb/imcache/imcacheconfig.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | myamiweb/imcache/imcacheconfig.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | # config file for imcached
# camera name pattern to cache. For example 'GatanK2' will restrict it
# only to camera name containing the string
camera_name_pattern = ''
# time in seconds to wait between consecutive queries
query_interval = 5
# limit query to later than this timestamp (mysql style: yyyymmddhhmmss)
min_timestamp = '20130126000000'
# limit query to start at this image id
start_id = 0
# root dir of cache. session subdirs will be added automatically
cache_path = '/srv/cache/dbem'
# maximum image dimension after conversion
redux_maxsize1 = 4096
redux_maxsize2 = 1024
# initial redux read and resize before calculating power and final
redux_args1 = {
'pipes': 'read:Read,shape:Shape',
'cache': False,
}
# redux to create final image for cache
redux_args_jpg = {
'cache': False,
'pipes': 'shape:Shape,scale:Scale,format:Format',
'scaletype': 'stdev',
'scalemin': -5,
'scalemax': 5,
'oformat': 'JPEG',
}
# redux to create final power image for cache
redux_args_pow = {
'cache': False,
'pipes': 'power:Power,shape:Shape,mask:Mask,scale:Scale,format:Format',
'power': True,
'maskradius': 10,
'scaletype': 'stdev',
'scalemin': -5,
'scalemax': 5,
'oformat': 'JPEG',
}
| 23.607843 | 72 | 0.724252 |
3cd1a6c109376dfdc24ad44b61222972d5c24dd2 | 3,737 | py | Python | graphs/graphgenerator.py | andrew-lockwood/lab-project | e39a0f21966cdee519942cf2f94b7bab6ed2196e | [
"MIT"
] | 1 | 2017-08-30T15:21:31.000Z | 2017-08-30T15:21:31.000Z | graphs/graphgenerator.py | andrew-lockwood/lab-project-summer2016 | e39a0f21966cdee519942cf2f94b7bab6ed2196e | [
"MIT"
] | null | null | null | graphs/graphgenerator.py | andrew-lockwood/lab-project-summer2016 | e39a0f21966cdee519942cf2f94b7bab6ed2196e | [
"MIT"
] | 1 | 2017-06-15T20:44:59.000Z | 2017-06-15T20:44:59.000Z |
import sqlite3
import matplotlib.pyplot as plt
import re
from collections import Counter
db = "C:\\Users\\Andrew\\lab-project\\data\\frontiers_corpus.db"
if __name__ == "__main__":
conn = sqlite3.connect(db)
curr = conn.cursor()
kwd_frequency()
| 24.585526 | 85 | 0.54616 |
3cd1de8fe3c2b6efa630c25b86bb05e41fab354a | 5,612 | py | Python | peering_manager/constants.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 127 | 2017-10-12T00:27:45.000Z | 2020-08-07T11:13:55.000Z | peering_manager/constants.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 247 | 2017-12-26T12:55:34.000Z | 2020-08-08T11:57:35.000Z | peering_manager/constants.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 63 | 2017-10-13T06:46:05.000Z | 2020-08-08T00:41:57.000Z | from collections import OrderedDict
from devices.filters import ConfigurationFilterSet
from devices.models import Configuration
from devices.tables import ConfigurationTable
from messaging.filters import ContactFilterSet, EmailFilterSet
from messaging.models import Contact, ContactAssignment, Email
from messaging.tables import ContactTable, EmailTable
from net.filters import ConnectionFilterSet
from net.models import Connection
from net.tables import ConnectionTable
from peering.filters import (
AutonomousSystemFilterSet,
BGPGroupFilterSet,
CommunityFilterSet,
DirectPeeringSessionFilterSet,
InternetExchangeFilterSet,
InternetExchangePeeringSessionFilterSet,
RouterFilterSet,
RoutingPolicyFilterSet,
)
from peering.models import (
AutonomousSystem,
BGPGroup,
Community,
DirectPeeringSession,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
from peering.tables import (
AutonomousSystemTable,
BGPGroupTable,
CommunityTable,
DirectPeeringSessionTable,
InternetExchangePeeringSessionTable,
InternetExchangeTable,
RouterTable,
RoutingPolicyTable,
)
from utils.functions import count_related
__all__ = ("SEARCH_MAX_RESULTS", "SEARCH_TYPES")
SEARCH_MAX_RESULTS = 15
SEARCH_TYPES = OrderedDict(
(
# devices
(
"configuration",
{
"queryset": Configuration.objects.all(),
"filterset": ConfigurationFilterSet,
"table": ConfigurationTable,
"url": "devices:configuration_list",
},
),
# messaging
(
"contact",
{
"queryset": Contact.objects.prefetch_related("assignments").annotate(
assignment_count=count_related(ContactAssignment, "contact")
),
"filterset": ContactFilterSet,
"table": ContactTable,
"url": "messaging:contact_list",
},
),
(
"email",
{
"queryset": Email.objects.all(),
"filterset": EmailFilterSet,
"table": EmailTable,
"url": "messaging:email_list",
},
),
# net
(
"connection",
{
"queryset": Connection.objects.prefetch_related(
"internet_exchange_point", "router"
),
"filterset": ConnectionFilterSet,
"table": ConnectionTable,
"url": "net:connection_list",
},
),
# peering
(
"autonomousystem",
{
"queryset": AutonomousSystem.objects.defer("prefixes"),
"filterset": AutonomousSystemFilterSet,
"table": AutonomousSystemTable,
"url": "peering:autonomoussystem_list",
},
),
(
"bgpgroup",
{
"queryset": BGPGroup.objects.all(),
"filterset": BGPGroupFilterSet,
"table": BGPGroupTable,
"url": "peering:bgpgroup_list",
},
),
(
"community",
{
"queryset": Community.objects.all(),
"filterset": CommunityFilterSet,
"table": CommunityTable,
"url": "peering:community_list",
},
),
(
"directpeeringsession",
{
"queryset": DirectPeeringSession.objects.prefetch_related(
"autonomous_system", "bgp_group", "router"
),
"filterset": DirectPeeringSessionFilterSet,
"table": DirectPeeringSessionTable,
"url": "peering:directpeeringsession_list",
},
),
(
"internetexchange",
{
"queryset": InternetExchange.objects.prefetch_related(
"local_autonomous_system"
).annotate(
connection_count=count_related(
Connection, "internet_exchange_point"
)
),
"filterset": InternetExchangeFilterSet,
"table": InternetExchangeTable,
"url": "peering:internetexchange_list",
},
),
(
"internetexchangepeeringsession",
{
"queryset": InternetExchangePeeringSession.objects.prefetch_related(
"autonomous_system", "ixp_connection"
),
"filterset": InternetExchangePeeringSessionFilterSet,
"table": InternetExchangePeeringSessionTable,
"url": "peering:internetexchangepeeringsession_list",
},
),
(
"router",
{
"queryset": Router.objects.prefetch_related("platform").annotate(
connection_count=count_related(Connection, "router")
),
"filterset": RouterFilterSet,
"table": RouterTable,
"url": "peering:router_list",
},
),
(
"routingpolicy",
{
"queryset": RoutingPolicy.objects.all(),
"filterset": RoutingPolicyFilterSet,
"table": RoutingPolicyTable,
"url": "peering:routingpolicy_list",
},
),
),
)
| 31.351955 | 85 | 0.533678 |
3cd24bc69492048a6c6dccda50896c121dfcd5b5 | 1,453 | py | Python | alexhart/day1-2.py | chadnetzer/advent2020 | b992eb202ff9dd5cc353914a136337412c8bd074 | [
"MIT"
] | null | null | null | alexhart/day1-2.py | chadnetzer/advent2020 | b992eb202ff9dd5cc353914a136337412c8bd074 | [
"MIT"
] | 1 | 2020-12-06T07:51:48.000Z | 2020-12-08T05:03:11.000Z | alexhart/day1-2.py | chadnetzer/advent2020 | b992eb202ff9dd5cc353914a136337412c8bd074 | [
"MIT"
] | 8 | 2020-12-01T21:29:21.000Z | 2020-12-09T23:55:15.000Z | chalenge_input = '''1956
1994
457
1654
2003
1902
1741
1494
1597
1129
1146
1589
1989
1093
1881
1288
1848
1371
1508
1035
1813
1335
1634
1102
1262
1637
1048
1807
1270
1528
1670
1803
1202
1294
1570
1640
1484
1872
1140
1207
1485
1781
1778
1772
1334
1267
1045
1194
1873
1441
1557
1414
1123
1980
1527
1591
1665
1916
1662
1139
1973
1258
1041
1134
1609
1554
1455
1124
1478
1938
1759
1281
1410
1511
930
1319
1302
1827
1216
1404
1460
2002
1590
1817
1341
1631
1608
1382
1158
1594
1049
1804
1555
1753
447
1021
1079
609
1766
1327
1851
1052
1737
1175
1043
1945
1573
1113
1724
1203
1856
1682
1623
1135
1015
1423
1412
1315
1375
1895
1351
1530
1758
1445
1518
1819
1567
1305
1919
1952
1432
1099
1476
1883
1871
1900
1442
1393
1214
1283
1538
1391
1008
1109
1621
1876
1998
1032
1324
1927
481
1732
1370
1683
1199
1465
1882
1293
1671
1456
1197
1506
1381
1469
1830
1957
1850
1184
1564
1170
1943
1131
1867
1208
1788
1337
1722
1760
1651
1069
1574
1959
1770
66
1190
1606
1899
1054
980
1693
1173
1479
1333
1579
1720
1782
1971
1438
1178
1306'''
test_input = '''1721
979
366
299
675
1456'''
print(sum_check(test_input))
print(sum_check(chalenge_input)) | 6.634703 | 70 | 0.751549 |
3cd2638aee801c7efa156f6936b153c75c517e46 | 465 | py | Python | e2e_graphsage/utils/logging.py | mingruimingrui/E2EGraphSage | 90de7befd3a8ced514697c073b4c64e96b63bdb0 | [
"MIT"
] | null | null | null | e2e_graphsage/utils/logging.py | mingruimingrui/E2EGraphSage | 90de7befd3a8ced514697c073b4c64e96b63bdb0 | [
"MIT"
] | null | null | null | e2e_graphsage/utils/logging.py | mingruimingrui/E2EGraphSage | 90de7befd3a8ced514697c073b4c64e96b63bdb0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import logging
| 24.473684 | 78 | 0.647312 |
3cd2949cb17d74dce66873599c286cade86072c8 | 3,486 | py | Python | dmipy/distributions/tests/test_bingham.py | AthenaEPI/mipy | dbbca4066a6c162dcb05865df5ff666af0e4020a | [
"MIT"
] | 59 | 2018-02-22T19:14:19.000Z | 2022-02-22T05:40:27.000Z | dmipy/distributions/tests/test_bingham.py | AthenaEPI/mipy | dbbca4066a6c162dcb05865df5ff666af0e4020a | [
"MIT"
] | 95 | 2018-02-03T11:55:30.000Z | 2022-03-31T15:10:39.000Z | dmipy/distributions/tests/test_bingham.py | AthenaEPI/mipy | dbbca4066a6c162dcb05865df5ff666af0e4020a | [
"MIT"
] | 23 | 2018-02-13T07:21:01.000Z | 2022-02-22T20:12:08.000Z | from numpy.testing import assert_almost_equal, assert_equal
from dmipy.utils import utils
import numpy as np
from dmipy.utils.utils import (
rotation_matrix_100_to_theta_phi, rotation_matrix_around_100,
rotation_matrix_100_to_theta_phi_psi
)
from dmipy.distributions import distributions
| 35.938144 | 77 | 0.676133 |
3cd3066a814fddcf19dac7173c44fed139f2e632 | 669 | py | Python | head_first_design_patterns/hofs/duck_dispenser.py | incolumepy-cursos/poop | e4ac26b8d2a8c263a93fd9642fab52aafda53d80 | [
"MIT"
] | null | null | null | head_first_design_patterns/hofs/duck_dispenser.py | incolumepy-cursos/poop | e4ac26b8d2a8c263a93fd9642fab52aafda53d80 | [
"MIT"
] | null | null | null | head_first_design_patterns/hofs/duck_dispenser.py | incolumepy-cursos/poop | e4ac26b8d2a8c263a93fd9642fab52aafda53d80 | [
"MIT"
] | null | null | null | __author__ = '@britodfbr'
from head_first_design_patterns.hofs import duck
from head_first_design_patterns.hofs import fly_behaviors
from head_first_design_patterns.hofs import quack_behaviors
| 27.875 | 59 | 0.715994 |
3cd5abf591689acf3071f0da912c722b5ef681bb | 1,279 | py | Python | tests/test_zones_json.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 143 | 2022-01-01T10:56:58.000Z | 2022-03-31T11:25:47.000Z | tests/test_zones_json.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 276 | 2021-12-30T15:57:15.000Z | 2022-03-31T14:57:16.000Z | tests/test_zones_json.py | electricitymap/electricitymap-contrib | 6572b12d1cef72c734b80273598e156ebe3c22ea | [
"MIT"
] | 44 | 2021-12-30T19:48:42.000Z | 2022-03-29T22:46:16.000Z | import json
import unittest
from electricitymap.contrib.config import ZONES_CONFIG
ZONE_KEYS = ZONES_CONFIG.keys()
if __name__ == "__main__":
unittest.main(buffer=True)
| 34.567568 | 89 | 0.656763 |
3cd609e71dc0ee42d0acf42ff022c5f15ae9992d | 3,483 | py | Python | app/bda_core/entities/training/word2vec_trainer.py | bda-19fs/bda-chatbot | 4fcbda813ff5d3854a4c2e12413775676bcba9e2 | [
"MIT"
] | 1 | 2019-05-25T12:12:39.000Z | 2019-05-25T12:12:39.000Z | app/bda_core/entities/training/word2vec_trainer.py | bda-19fs/bda-chatbot | 4fcbda813ff5d3854a4c2e12413775676bcba9e2 | [
"MIT"
] | null | null | null | app/bda_core/entities/training/word2vec_trainer.py | bda-19fs/bda-chatbot | 4fcbda813ff5d3854a4c2e12413775676bcba9e2 | [
"MIT"
] | null | null | null | import gensim
import numpy as np
def fit_model(sentences, config):
    '''
    Fits the Word2Vec model with the given sentences. The vectors were normalized after the training.
    A further training of the model is not possible.

    :param sentences: A python list of sentences (each a list of tokens)
    :param config: The config for the model (dimension, hierarchical_softmax,
                   window_size, workers, use_skip_gram, epochs)
    :return: The trained Word2Vec model
    '''
    # min_count=2: tokens seen fewer than twice are dropped from the vocabulary.
    model = gensim.models.Word2Vec(size=config.dimension, hs=config.hierarchical_softmax, window=config.window_size,
                                   workers=config.workers, sg=config.use_skip_gram, min_count=2)
    # Vocabulary must be built before training can run.
    model.build_vocab(sentences)
    model.train(sentences, total_examples=len(sentences), epochs=config.epochs)
    # Normalize the vectors in place and discard the raw weights; after this
    # the model can no longer be trained further (as the docstring states).
    model.init_sims(replace=True)
    return model
def avg_word_vector(model, word_list):
    '''
    Compute the mean of the Word2Vec vectors of the given words.

    Words absent from the model's vocabulary are ignored.

    :param model: The trained Word2Vec model
    :param word_list: A python list of words
    :return: The average vector
    '''
    in_vocab = [w for w in word_list if w in model.wv.vocab]
    vectors = model.wv[in_vocab]
    return np.mean(vectors, axis=0)
def transpose_vector(vec):
    '''
    Return a view of the vector with one extra leading axis, turning a
    length-n vector into a 1 x n row vector.

    :param vec: The vector to transpose
    :return: The transposition vector (a view, not a copy)
    '''
    row_view = vec[None, ...]
    return row_view
def create_sentence_vectors(model, questions):
    '''
    Calculate the average word vector for every question. The output order
    matches the order of the input list.

    :param model: The trained Word2Vec model
    :param questions: A python list of word lists
    :return: A numpy array of average vectors; positions whose question has
             no in-vocabulary word hold None so alignment is preserved
    '''
    vectors = []
    # Idiom fix: iterate the questions directly instead of indexing via
    # range(len(...)); behavior is unchanged.
    for question in questions:
        in_vocab = [word for word in question if word in model.wv.vocab]
        vectors.append(avg_word_vector(model, in_vocab) if in_vocab else None)
    return np.array(vectors)
def create_matrix_from_vectors(vectors):
    '''
    Stack the given vectors as the rows of a single matrix.

    None entries (e.g. questions with no in-vocabulary words) are skipped.
    The original implementation concatenated one row at a time — O(n^2) in
    the number of vectors — and crashed when the *first* entry was None
    while silently skipping None at later positions; this version skips
    None uniformly and concatenates once.

    :param vectors: A list of equal-length vectors (None entries allowed)
    :return: The concatenation matrix of the given vectors
    :raises Exception: if the given list of vectors is empty
    '''
    if len(vectors) == 0:
        raise Exception('the given list of vectors is empty')
    rows = [vec[np.newaxis] for vec in vectors if vec is not None]
    return np.concatenate(rows, axis=0)
3cd825fe40c8c6d189d67799fba8e31f6ba53c8a | 642 | py | Python | polls/migrations/0008_auto_20150918_1715.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | polls/migrations/0008_auto_20150918_1715.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | polls/migrations/0008_auto_20150918_1715.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| 23.777778 | 76 | 0.605919 |
3cd8375d5dea7465c5253237889db106c353b42a | 4,342 | py | Python | src/main/python/bktools/framework/money/currency.py | bspa10/bktools | 8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734 | [
"MIT"
] | null | null | null | src/main/python/bktools/framework/money/currency.py | bspa10/bktools | 8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734 | [
"MIT"
] | null | null | null | src/main/python/bktools/framework/money/currency.py | bspa10/bktools | 8ddff2bb325df6c4c2bb5cadd3029c0e11ba0734 | [
"MIT"
] | null | null | null | # encoding: utf-8
# Standard Library
from os import path
from threading import Lock
from typing import Set
from typing import Optional
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import Element
# 3rd Party Library
# Current Folder
# Current Application
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#: -=-=-=-=-=-=-=-=-=-=-=
#: Utility Functions
#: -=-=-=-=-=-=-=-=-=-=-=
def __hash__(self):
return hash(self.number)
def __repr__(self):
return f'{self.__class__.__name__} {self.code}'
class Currencies(object):
"""
Factory of ISO 4217 - Currency Code.
"""
__slots__ = '_'
__guard = Lock()
__entries: Set[Currency] = set()
__BASE_DIR = path.abspath(path.dirname(__file__))
| 25.692308 | 120 | 0.52211 |
3cd8a7fa6829673461545374eeacd667661ea155 | 4,863 | py | Python | DemoFinal.py | sohinim006/Heroku-App-demo | 875b894b48e8544f6dbe629635f195ccd97ba201 | [
"MIT"
] | null | null | null | DemoFinal.py | sohinim006/Heroku-App-demo | 875b894b48e8544f6dbe629635f195ccd97ba201 | [
"MIT"
] | 1 | 2020-06-02T02:53:57.000Z | 2020-06-02T02:53:57.000Z | DemoFinal.py | sohinim006/Heroku-App-demo | 875b894b48e8544f6dbe629635f195ccd97ba201 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import pickle
# In[2]:
data=pd.read_csv("wd.csv",encoding="ISO-8859-1")
# In[3]:
data
# In[4]:
data.fillna(0,inplace=True) #it fills NaN with O's
# In[5]:
data
# In[6]:
data.dtypes
# In[7]:
#conversion
data['Temp']=pd.to_numeric(data['Temp'],errors='coerce')
data['D.O. (mg/l)']=pd.to_numeric(data['D.O. (mg/l)'],errors='coerce')
data['PH']=pd.to_numeric(data['PH'],errors='coerce')
data['B.O.D. (mg/l)']=pd.to_numeric(data['B.O.D. (mg/l)'],errors='coerce')
data['CONDUCTIVITY (mhos/cm)']=pd.to_numeric(data['CONDUCTIVITY (mhos/cm)'],errors='coerce')
data['NITRATENAN N+ NITRITENANN (mg/l)']=pd.to_numeric(data['NITRATENAN N+ NITRITENANN (mg/l)'],errors='coerce')
data['TOTAL COLIFORM (MPN/100ml)Mean']=pd.to_numeric(data['TOTAL COLIFORM (MPN/100ml)Mean'],errors='coerce')
data.dtypes
# In[8]:
#initialization
start=2
end=1779
station=data.iloc [start:end ,0]
location=data.iloc [start:end ,1]
state=data.iloc [start:end ,2]
do= data.iloc [start:end ,4].astype(np.float64)
value=0
ph = data.iloc[ start:end,5]
co = data.iloc [start:end ,6].astype(np.float64)
year=data.iloc[start:end,11]
tc=data.iloc [2:end ,10].astype(np.float64)
bod = data.iloc [start:end ,7].astype(np.float64)
na= data.iloc [start:end ,8].astype(np.float64)
na.dtype
# In[9]:
data=pd.concat([station,location,state,do,ph,co,bod,na,tc,year],axis=1)
data. columns = ['station','location','state','do','ph','co','bod','na','tc','year']
# In[10]:
data
# In[11]:
#calulation of Ph
data['npH']=data.ph.apply(lambda x: (100 if (8.5>=x>=7)
else(80 if (8.6>=x>=8.5) or (6.9>=x>=6.8)
else(60 if (8.8>=x>=8.6) or (6.8>=x>=6.7)
else(40 if (9>=x>=8.8) or (6.7>=x>=6.5)
else 0)))))
# In[12]:
#calculation of dissolved oxygen
data['ndo']=data.do.apply(lambda x:(100 if (x>=6)
else(80 if (6>=x>=5.1)
else(60 if (5>=x>=4.1)
else(40 if (4>=x>=3)
else 0)))))
# In[13]:
#calculation of total coliform
data['nco']=data.tc.apply(lambda x:(100 if (5>=x>=0)
else(80 if (50>=x>=5)
else(60 if (500>=x>=50)
else(40 if (10000>=x>=500)
else 0)))))
#calculation of electrical conductivity
data['nec']=data.co.apply(lambda x:(100 if (75>=x>=0)
else(80 if (150>=x>=75)
else(60 if (225>=x>=150)
else(40 if (300>=x>=225)
else 0)))))
# In[14]:
#calc of B.D.O
data['nbdo']=data.bod.apply(lambda x:(100 if (3>=x>=0)
else(80 if (6>=x>=3)
else(60 if (80>=x>=6)
else(40 if (125>=x>=80)
else 0)))))
# In[15]:
data
# In[16]:
#Calulation of nitrate
data['nna']=data.na.apply(lambda x:(100 if (20>=x>=0)
else(80 if (50>=x>=20)
else(60 if (100>=x>=50)
else(40 if (200>=x>=100)
else 0)))))
data.head()
data.dtypes
# In[17]:
data
# In[18]:
from sklearn.model_selection import train_test_split
# In[19]:
data=data.drop(['station','location'],axis=1)
# In[20]:
data
# In[21]:
data=data.drop(['do','ph','co','bod','na','tc'],axis=1)
# In[22]:
data
# In[24]:
yt=data['nco']
# In[25]:
yt
# In[26]:
data=data.drop(['nco'],axis=1)
# In[27]:
data
# In[28]:
x_t,x_tt,y_t,y_tt=train_test_split(data,yt,test_size=0.2,random_state=4)
# In[29]:
#reg2.fit(x_t,y_t)
# In[30]:
#a2=reg2.predict(x_tt)
#a2
#randomforest
# In[39]:
from sklearn.ensemble import RandomForestRegressor
# In[40]:
rfr=RandomForestRegressor(n_estimators=1000,random_state=42)
# In[41]:
rfr.fit(x_t,y_t)
pickle.dump(rfr,open('model.pkl','wb'))
# In[42]:
model = pickle.load(open('model.pkl','rb'))
yrfr=rfr.predict(x_tt)
# In[43]:
from sklearn.metrics import mean_squared_error
print('mse:%.2f'%mean_squared_error(y_tt,yrfr))
# In[44]:
y_tt
# In[45]:
yrfr
# In[47]:
dtrfr = pd.DataFrame({'Actual': y_tt, 'Predicted': yrfr})
dtrfr.head(20)
# In[48]:
from sklearn.metrics import r2_score
# In[49]:
print(r2_score(y_tt,yrfr))
# In[ ]:
| 15.438095 | 112 | 0.499897 |
3cd8ed3786032ec99ff11bc34e84132d3b428b08 | 1,926 | py | Python | Classes/gaussian.py | sankarebarri/Python | 0c39da1df74d74b7b0a3724e57b5205a7d88537f | [
"MIT"
] | null | null | null | Classes/gaussian.py | sankarebarri/Python | 0c39da1df74d74b7b0a3724e57b5205a7d88537f | [
"MIT"
] | null | null | null | Classes/gaussian.py | sankarebarri/Python | 0c39da1df74d74b7b0a3724e57b5205a7d88537f | [
"MIT"
] | null | null | null | import numpy as np
import math
data = [9, 2, 5, 4, 12, 7]
gaussian = Gaussian()
gaussian.data = data
print(gaussian.calculate_mean())
print(gaussian.calculate_stdev(sample=True))
gaussian_one = Gaussian(5, 2)
gaussian_two = Gaussian(7, 3)
gaussian_sum = gaussian_one + gaussian_two
print(gaussian_sum)
print(gaussian_sum.stdev)
print(gaussian_sum.mean)
| 27.126761 | 81 | 0.574247 |
3cda167a85c43c6395a461abd5b9210a39f3e5bb | 987 | py | Python | setup.py | datagovau/ckanext-datagovau | 902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9 | [
"Apache-2.0"
] | 1 | 2019-07-22T08:02:11.000Z | 2019-07-22T08:02:11.000Z | setup.py | datagovau/ckanext-datagovau | 902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9 | [
"Apache-2.0"
] | null | null | null | setup.py | datagovau/ckanext-datagovau | 902c80a9c3a07ad6bbd52a4b19dac8a3ec2686b9 | [
"Apache-2.0"
] | 6 | 2015-01-23T16:32:18.000Z | 2021-06-27T03:42:18.000Z | from setuptools import find_packages, setup
version = "1.0.0a1"
# Keep in case we still need pylons...Just use the line below in place
# of the install_requires argument in the call to setup().
# install_requires=['requests', 'feedparser', 'pylons', 'python-dateutil'],
setup(
name="ckanext-datagovau",
version=version,
description="Extension for customising CKAN for data.gov.au",
long_description="",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Greg von Nessi",
author_email="greg.vonnessi@linkdigital.com.au",
url="",
license="",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
namespace_packages=["ckanext", "ckanext.datagovau"],
include_package_data=True,
zip_safe=False,
install_requires=[
"typing_extensions",
],
entry_points="""
[ckan.plugins]
datagovau = ckanext.datagovau.plugin:DataGovAuPlugin
""",
)
| 32.9 | 94 | 0.690983 |
3ce1874797f955e0861f0ec1dfc943c5714b8253 | 6,192 | py | Python | utils.py | kalpetros/greek-dictionary | 962f36c299cbb46ffce9c7f78db7c9e513269499 | [
"MIT"
] | 3 | 2021-04-27T16:39:12.000Z | 2021-11-17T02:15:13.000Z | utils.py | kalpetros/greek-dictionary | 962f36c299cbb46ffce9c7f78db7c9e513269499 | [
"MIT"
] | null | null | null | utils.py | kalpetros/greek-dictionary | 962f36c299cbb46ffce9c7f78db7c9e513269499 | [
"MIT"
] | 1 | 2021-06-15T23:57:44.000Z | 2021-06-15T23:57:44.000Z | import click
import os
import requests
import shutil
import sys
import time
from bs4 import BeautifulSoup
# Lower-case Greek letters paired with the index of the last paginated
# result page (pagination step is 10) on www.greek-language.gr for words
# starting with that letter.
# NOTE(review): the letter values were lost to an encoding issue in a
# previous revision (every entry showed ''); they are restored here in
# standard Greek alphabet order -- please re-verify the 'pages' pairing
# against the site.
alphabet = [
    {
        'letter': 'α',
        'pages': 31660
    },
    {
        'letter': 'β',
        'pages': 5050
    },
    {
        'letter': 'γ',
        'pages': 5890
    },
    {
        'letter': 'δ',
        'pages': 7130
    },
    {
        'letter': 'ε',
        'pages': 12530
    },
    {
        'letter': 'ζ',
        'pages': 1500
    },
    {
        'letter': 'η',
        'pages': 1310
    },
    {
        'letter': 'θ',
        'pages': 2300
    },
    {
        'letter': 'ι',
        'pages': 1720
    },
    {
        'letter': 'κ',
        'pages': 17700
    },
    {
        'letter': 'λ',
        'pages': 4740
    },
    {
        'letter': 'μ',
        'pages': 13020
    },
    {
        'letter': 'ν',
        'pages': 3790
    },
    {
        'letter': 'ξ',
        'pages': 5250
    },
    {
        'letter': 'ο',
        'pages': 4970
    },
    {
        'letter': 'π',
        'pages': 18560
    },
    {
        'letter': 'ρ',
        'pages': 2720
    },
    {
        'letter': 'σ',
        'pages': 14340
    },
    {
        'letter': 'τ',
        'pages': 7680
    },
    {
        'letter': 'υ',
        'pages': 3170
    },
    {
        'letter': 'φ',
        'pages': 5640
    },
    {
        'letter': 'χ',
        'pages': 5370
    },
    {
        'letter': 'ψ',
        'pages': 2080
    },
    {
        'letter': 'ω',
        'pages': 470
    }
]
def is_clean(word):
    """
    Check the word against the profanity blocklist.

    :param word: string to check
    :returns: True when the word is not blocklisted
    """
    profane_words = []
    return word not in profane_words
def get_source(url):
    """
    Fetch the given url and return its parsed HTML document.

    :param url: address to request
    :returns source: BeautifulSoup tree built from the response body
    """
    rs = requests.get(url)
    source = BeautifulSoup(rs.content, 'html.parser')
    return source
def parse(source):
    """
    Return the list of head-words found on a search-results page.

    :param source: BeautifulSoup tree of a results page (see get_source)
    :returns words: list of strings
    """
    children = source.find(id='lemmas').children
    words = []
    for node in children:
        # `children` mixes Tags and NavigableStrings; on a string, .find()
        # is str.find() and returns -1, so the comparison below skips the
        # plain-text nodes while keeping Tag results.
        # NOTE(review): a Tag without a <dt> would yield None here and the
        # next line would raise -- presumably every tag child has one.
        dt = node.find('dt')
        if dt != -1:
            word = dt.find('b').text.strip(',')
            words.append(word)
    return words
def scrape(letter: str, pages: int):
    """
    Scrapes www.greek-language.gr to build
    a full list of modern Greek words
    https://www.greek-language.gr/greekLang/index.html

    :param letter: letter whose words should be collected
    :param pages: index of the last result page (pagination step is 10)
    :returns results: list of scraped words for the letter
    """
    log(f'Getting letter {letter} words...', 'info')
    start = time.time()
    url = 'https://www.greek-language.gr/greekLang/modern_greek/tools/lexica/reverse/search.html'
    results = []
    page = 0
    while page <= int(pages):
        # Small delay between requests to be polite to the server.
        time.sleep(0.1)
        endpoint = f'{url}?start={page}&lq={letter}*'
        source = get_source(endpoint)
        words = parse(source)
        # The site paginates results in steps of 10.
        page = page + 10
        for word in words:
            results.append(word)
    end = time.time()
    total = end - start
    log(f'Got {letter} in {total}', 'success')
    return results
def get_data(file_name):
    """
    Return the stripped words contained in the given file, one per line.

    Missing files yield an empty list; read errors are logged and whatever
    was collected up to that point is returned.
    """
    collected = []
    if not os.path.isfile(file_name):
        return collected
    try:
        with open(file_name, 'r') as handle:
            for line in handle:
                collected.append(line.strip())
    except Exception as e:
        log(f'Could not get data {str(e)}', 'warning')
    return collected
def check():
    """
    Verify that the files/folders the tool depends on exist.

    Exits with status 2 when files/el.txt is missing; creates the output
    folder when it is absent.
    """
    if not os.path.isfile('files/el.txt'):
        log('el.txt is missing from files. Please restore the repository.', 'warning')
        sys.exit(2)
    if not os.path.isdir('output'):
        log('Output folder is missing. Creating folder...', 'warning')
        os.mkdir('output')
def clean_output():
    """
    Delete the output folder and everything inside it (no-op when absent).
    """
    if os.path.isdir('output'):
        shutil.rmtree('output')
        log('Working directory clean', 'success')
    else:
        log('Working directory already clean...', 'info')
    return
def romanize_words(words):
    """
    Transliterate Greek words into their Latin (romanized) spelling.

    Each character is lower-cased and looked up in a Greek-to-Latin table;
    unmapped characters are skipped (and logged).

    :param words: iterable of words to transliterate
    :returns results: list of romanized words (empty when no input given)
    """
    # NOTE(review): the Greek keys of this table were lost to an encoding
    # issue in a previous revision (every key showed '', collapsing the
    # dict).  They are restored here; accented vowels map to the same Latin
    # letter as their plain forms.  Please double-check against the
    # original intent.
    mappings = {
        'α': 'a',
        'ά': 'a',
        'β': 'v',
        'γ': 'g',
        'δ': 'd',
        'ε': 'e',
        'έ': 'e',
        'ζ': 'z',
        'η': 'i',
        'ή': 'i',
        'θ': 'th',
        'ι': 'i',
        'ί': 'i',
        'ϊ': 'i',
        'ΐ': 'i',
        'κ': 'k',
        'λ': 'l',
        'μ': 'm',
        'ν': 'n',
        'ξ': 'ks',
        'ο': 'o',
        'ό': 'o',
        'π': 'p',
        'ρ': 'r',
        'σ': 's',
        'ς': 's',
        'τ': 't',
        'υ': 'y',
        'ύ': 'y',
        'ϋ': 'y',
        'ΰ': 'y',
        'φ': 'f',
        'χ': 'h',
        'x': 'h',
        'ψ': 'ps',
        'ω': 'o',
        'ώ': 'o',
        '-': '-',
        '!': '!',
        '.': '.',
        ',': ',',
        "'": "'"
    }
    results = []
    if not words:
        log('No data provided', 'info')
        return results
    for word in words:
        result = []
        chars = list(word.strip())
        for char in chars:
            try:
                char = char.lower()
                result.append(mappings[char])
            except Exception as e:
                log(f'Could not map {str(e)}', 'warning')
        word = ''.join(result)
        results.append(word)
    log('Romanized all words', 'success')
    return results
def export(file_name, words, file_type='txt'):
    """
    Write *words* to output/<file_name>.<file_type>.

    'txt' writes one stripped word per line; 'json' writes a JSON array of
    strings.  Emits only a warning (and writes nothing) when *words* is
    empty.  Unknown file types create an empty file, as before.
    """
    if not words:
        log('No data provided', 'warning')
        return
    check()
    log(f'Creating file {file_name}.{file_type}...', 'info')
    # Context manager guarantees the file is closed even if writing fails.
    with open(f'output/{file_name}.{file_type}', 'w') as output:
        if file_type == 'txt':
            for word in words:
                output.write(f'{word.strip()}\n')
        elif file_type == 'json':
            # json.dump produces valid JSON; the previous hand-rolled writer
            # left a trailing comma before the closing bracket, which is not
            # parseable JSON.
            import json
            json.dump([word.strip() for word in words], output,
                      ensure_ascii=False)
    log(f'Created {file_name}.{file_type}', 'success')
| 18.211765 | 97 | 0.439599 |
3ce716ac3e56a4c2bf161beb78851142feb3c86b | 1,585 | py | Python | pysanejs/api.py | Lookyloo/PySaneJS | 99615608222d7386e74472bcc052f40b05916b2a | [
"BSD-2-Clause"
] | 1 | 2019-01-30T16:12:32.000Z | 2019-01-30T16:12:32.000Z | pysanejs/api.py | CIRCL/PySaneJS | 501f22d0d22d6361bb71a8bf0bbb2e14d3c0f9f1 | [
"BSD-2-Clause"
] | 36 | 2021-06-09T17:34:05.000Z | 2022-03-28T09:04:37.000Z | pysanejs/api.py | Lookyloo/PySaneJS | 99615608222d7386e74472bcc052f40b05916b2a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from urllib.parse import urljoin
from typing import Union, Dict, List, Optional
| 28.303571 | 129 | 0.477603 |
3ce959e8fac079b9e0e0bacc34e00bde93edb83c | 1,937 | py | Python | Log1/HiPyQt3/HiPyQt38QTableWidget.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | Log1/HiPyQt3/HiPyQt38QTableWidget.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | Log1/HiPyQt3/HiPyQt38QTableWidget.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | # HiPyQt version 3.8
# use QTableWidget
# use QCheckBox
# use QPushButton
import sys
from PyQt5.QtWidgets import *
# Entry point: create the Qt application, show the main window (MyWindow is
# defined elsewhere in this module) and enter the event loop.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWindow = MyWindow()
    myWindow.show()
    app.exec()
| 32.283333 | 97 | 0.674239 |
3cea6fdbaa10d4f4a87f24213944a946b586b65c | 1,346 | py | Python | predictor.py | abhayraw1/crnn.pytorch | 307f2dbf8163148d165ef15cdd522c7c137041e4 | [
"MIT"
] | null | null | null | predictor.py | abhayraw1/crnn.pytorch | 307f2dbf8163148d165ef15cdd522c7c137041e4 | [
"MIT"
] | null | null | null | predictor.py | abhayraw1/crnn.pytorch | 307f2dbf8163148d165ef15cdd522c7c137041e4 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
from . import utils
from . import dataset
from PIL import Image
from pathlib import Path
from . import crnn
# Bundled pre-trained CRNN weights and the recognisable character set.
model_path = Path(__file__).parent/'data/crnn.pth'
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
# CRNN(32, 1, 37, 256): 37 output classes = len(alphabet) + 1, presumably
# the extra class being the CTC blank -- confirm against crnn.CRNN.
model = crnn.CRNN(32, 1, 37, 256)
if torch.cuda.is_available():
    model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path))
# Converts between label strings and the model's class indices.
converter = utils.strLabelConverter(alphabet)
# Input images are resized to 100x32 and normalised before inference.
transformer = dataset.resizeNormalize((100, 32))
| 28.041667 | 71 | 0.696137 |
3ced3da168b0c4d5fb8345ab35a6e8f79cade777 | 2,951 | py | Python | src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | 1 | 2018-07-26T13:52:21.000Z | 2018-07-26T13:52:21.000Z | src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | from typing import List, Sequence
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgl.attributes.channel_mode import ChannelMode, ChannelModeEnum
from webdnn.backend.webgl.generator import WebGLDescriptorGenerator
from webdnn.backend.webgl.kernel import Kernel
from webdnn.backend.webgl.kernels.util import FragmentShaderPreamble, texture_stride, texture_shape
from webdnn.backend.webgl.uniform_injector import UniformInjector
from webdnn.graph.operators.split_axis import SplitAxis
template = FragmentShaderPreamble + """
%%UNIFORM(sampler2D, sampler_x)%%;
%%UNIFORM(vec2, texture_stride_y)%%;
%%UNIFORM(vec4, variable_shape_y)%%;
%%UNIFORM(vec4, variable_stride_y)%%;
%%UNIFORM(vec4, variable_shape_x)%%;
%%UNIFORM(vec4, variable_stride_x)%%;
%%UNIFORM(vec2, texture_stride_x)%%;
%%UNIFORM(vec2, texture_shape_x)%%;
%%UNIFORM(vec4, offset)%%;
void main() {
vec4 variable_position_y = convert_position(gl_FragCoord.xy, texture_stride_y, variable_stride_y, variable_shape_y);
vec4 variable_position_x = variable_position_y + offset;
float x = texture2D(sampler_x, convert_coord(variable_position_x, variable_stride_x, texture_stride_x, texture_shape_x)).r;
gl_FragColor = vec4(x, 0, 0, 0);
}
"""
| 32.788889 | 127 | 0.686208 |
3ced6fbe48c455d53e5baee0065fd6577be73a4b | 35 | py | Python | __init__.py | chunlaw/GeoNews | 836547a51a0ed177f04135979e0a0f5212e88ed7 | [
"MIT"
] | 3 | 2016-09-05T13:43:59.000Z | 2016-09-05T15:36:12.000Z | __init__.py | chunlaw/GeoNews | 836547a51a0ed177f04135979e0a0f5212e88ed7 | [
"MIT"
] | null | null | null | __init__.py | chunlaw/GeoNews | 836547a51a0ed177f04135979e0a0f5212e88ed7 | [
"MIT"
] | null | null | null | __all__ = ['models']
import models
| 11.666667 | 20 | 0.714286 |
3cedde962258fae75ef3400a99dada61c8a82bd1 | 1,244 | py | Python | systemstat.py | asl97/asl97-i3bar-status-spacer | 83245582cf8973b0d128b5ed806e776e00960c5e | [
"MIT"
] | null | null | null | systemstat.py | asl97/asl97-i3bar-status-spacer | 83245582cf8973b0d128b5ed806e776e00960c5e | [
"MIT"
] | null | null | null | systemstat.py | asl97/asl97-i3bar-status-spacer | 83245582cf8973b0d128b5ed806e776e00960c5e | [
"MIT"
] | null | null | null | import time
import psutil
netlink = _netlink().get_status
| 28.272727 | 73 | 0.549035 |
3cefbde68b0741c1883ec538b390be6d177b8949 | 18,044 | py | Python | tests/test_net.py | ciubecca/kalasanty | df99f6814f073f2fb0fbd271d2fbfccb209c4b45 | [
"BSD-3-Clause"
] | 1 | 2021-10-19T16:59:31.000Z | 2021-10-19T16:59:31.000Z | tests/test_net.py | ciubecca/kalasanty | df99f6814f073f2fb0fbd271d2fbfccb209c4b45 | [
"BSD-3-Clause"
] | null | null | null | tests/test_net.py | ciubecca/kalasanty | df99f6814f073f2fb0fbd271d2fbfccb209c4b45 | [
"BSD-3-Clause"
] | 1 | 2021-10-20T13:05:56.000Z | 2021-10-20T13:05:56.000Z | import os
import numpy as np
import h5py
import tempfile
import pytest
from keras import backend as K
from keras.layers import Input, Convolution3D, concatenate
from keras.models import Model
from keras.optimizers import Adam
import pybel
from tfbio.data import Featurizer
from kalasanty.net import dice_np, dice, dice_loss, ovl_np, ovl, ovl_loss, DataWrapper, UNet
path = os.path.dirname(os.path.realpath(__file__))
test_dataset = os.path.join(path, 'test_data.hdf')
protein_file = os.path.join(path, 'datasets', 'scpdb', '2qfo_1', 'protein.mol2')
featurizer = Featurizer(save_molecule_codes=False)
num_features = len(featurizer.FEATURE_NAMES)
input_shape = (1, 4, 2, 3, 1)
arr_zeros = np.zeros(input_shape)
arr_ones = np.ones(input_shape)
def test_unet_from_data_handle(data):
    """Constructing UNet from a DataWrapper fixture: argument validation,
    and that grid parameters and I/O tensor channel counts are inherited
    from the handle."""
    with pytest.raises(ValueError, match='you must either provide'):
        UNet()
    with pytest.raises(TypeError, match='data_handle should be a DataWrapper'):
        UNet(data_handle='10gs')
    model = UNet(data_handle=data)
    assert model.data_handle == data
    assert model.scale == data.scale
    assert model.max_dist == data.max_dist
    assert len(model.inputs) == 1
    assert model.inputs[0].shape[-1] == data.x_channels
    assert len(model.outputs) == 1
    assert model.outputs[0].shape[-1] == data.y_channels
def test_get_pockets_segmentation(data):
    """Pocket segmentation from a predicted density grid: required model
    state (data_handle, scale), input validation, and shape/label sanity
    of the resulting segmentation."""
    with pytest.raises(ValueError, match='data_handle must be set'):
        model = UNet(box_size=data.box_size,
                     input_channels=data.x_channels,
                     output_channels=data.y_channels,
                     l2_lambda=1e-7)
        model.pocket_density_from_grid('10gs')
    with pytest.raises(ValueError, match='scale must be set'):
        model = UNet(box_size=data.box_size,
                     input_channels=data.x_channels,
                     output_channels=data.y_channels,
                     l2_lambda=1e-7, data_handle=data)
        model.scale = None
        model.pocket_density_from_grid('10gs')
    # Seed so the (untrained) prediction is reproducible.
    np.random.seed(42)
    model = UNet(box_size=data.box_size,
                 input_channels=data.x_channels,
                 output_channels=data.y_channels,
                 l2_lambda=1e-7, data_handle=data)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
    density, *_ = model.pocket_density_from_grid('10gs')
    # Segmentation accepts a single prediction only.
    with pytest.raises(ValueError, match='not supported'):
        model.get_pockets_segmentation(np.array([density] * 2), 0.6)
    pocket = model.get_pockets_segmentation(density, 0.6)
    assert pocket.shape == (data.box_size,) * 3
    assert pocket.max() > 0
    # Label 0 is background; remaining labels are positive pocket ids.
    assert len(np.unique(pocket)) - 1 <= pocket.max()
def test_save_pockets_cmap(data):
    """Saving a predicted density as a Chimera .cmap (HDF5) file: one
    'imageN/data_zyx' dataset per output channel, stored transposed
    (z, y, x) relative to the in-memory (x, y, z) grid."""
    model = UNet(data_handle=data, l2_lambda=1e-7)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
    density, origin, step = model.pocket_density_from_grid('10gs')
    # Only a single prediction can be written at a time.
    with pytest.raises(ValueError, match='saving more than one prediction'):
        model.save_density_as_cmap(np.concatenate((density, density)), origin,
                                   step)
    with tempfile.NamedTemporaryFile(suffix='.cmap') as cmap_file:
        fname = cmap_file.name
        model.save_density_as_cmap(density, origin, step, fname=fname)
        with h5py.File(fname, 'r') as f:
            assert 'Chimera' in f
            group = f['Chimera']
            assert len(group.keys()) == data.y_channels
            for i in range(data.y_channels):
                key = 'image%s' % (i + 1)
                assert key in group
                assert 'data_zyx' in group[key]
                dataset = group[key]['data_zyx'][:]
                assert np.allclose(density[0, ..., i].transpose([2, 1, 0]),
                                   dataset[:])
def test_save_pockets_cube(data):
    """Saving a predicted density as a Gaussian .cube file: single-channel
    only, single prediction only; the written values must round-trip to the
    in-memory density (after the 7-line cube header)."""
    model = UNet(data_handle=data, l2_lambda=1e-7)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
    density, origin, step = model.pocket_density_from_grid('10gs')
    with pytest.raises(ValueError, match='saving more than one prediction'):
        model.save_density_as_cube(np.concatenate((density, density)), origin,
                                   step)
    with pytest.raises(NotImplementedError, match='saving multichannel'):
        model.save_density_as_cube(density, origin, step)
    # Keep just the first channel so the density can be written.
    density = density[..., [0]]
    with tempfile.NamedTemporaryFile(suffix='.cube') as cmap_file:
        fname = cmap_file.name
        model.save_density_as_cube(density, origin, step, fname=fname)
        with open(fname, 'r') as f:
            # skip header
            for _ in range(7):
                f.readline()
            values = np.array(f.read().split()).reshape(density.shape)
            assert np.allclose(density, values.astype(float))
| 39.483589 | 92 | 0.63323 |
3cf130cd62278bdee384dab7ff29ec047f8b848a | 2,256 | py | Python | tests/test_bash_runner.py | rtmigo/svet | 06f9c5be7706351c2ef93fae0f9fa97ee69593f7 | [
"BSD-3-Clause"
] | 5 | 2021-05-18T19:55:22.000Z | 2022-03-07T20:52:19.000Z | tests/test_bash_runner.py | rtmigo/vien | 06f9c5be7706351c2ef93fae0f9fa97ee69593f7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_bash_runner.py | rtmigo/vien | 06f9c5be7706351c2ef93fae0f9fa97ee69593f7 | [
"BSD-3-Clause"
] | 1 | 2021-05-23T04:04:29.000Z | 2021-05-23T04:04:29.000Z | # SPDX-FileCopyrightText: (c) 2021 Artm IG <github.com/rtmigo>
# SPDX-License-Identifier: BSD-3-Clause
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from timeit import default_timer as timer
from tests.common import is_posix
from vien._bash_runner import *
from tests.time_limited import TimeLimited
| 37.6 | 79 | 0.624113 |
3cf1aac57cec16e9686acb6784d6d3e00f8dc890 | 8,825 | py | Python | adversarial/train_adversarial.py | liguge/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator | e0f2cd042e2c124e73d2982af28fa270263180d8 | [
"MIT"
] | 1 | 2022-01-16T03:21:18.000Z | 2022-01-16T03:21:18.000Z | adversarial/train_adversarial.py | liguge/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator | e0f2cd042e2c124e73d2982af28fa270263180d8 | [
"MIT"
] | 1 | 2022-03-29T10:50:48.000Z | 2022-03-30T07:14:56.000Z | adversarial/train_adversarial.py | hectorLop/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator | e0f2cd042e2c124e73d2982af28fa270263180d8 | [
"MIT"
] | 2 | 2022-01-16T03:21:54.000Z | 2022-03-10T01:17:12.000Z | from typing import Dict, List, Tuple
import torch
import numpy as np
import argparse
from torch import nn
import yaml
import pandas as pd
from sklearn.metrics import roc_auc_score
from adversarial.adversarial import AdversarialNetwork, Classifier, Discriminator
from adversarial.dataset import (
AdversarialDataset,
get_transforms
)
from adversarial.config import Config
from adversarial.utils import (
fix_all_seeds,
freeze_unfreeze,
get_ground_truth_vector
)
from torch.utils.data import DataLoader
if __name__ == '__main__':
    # CLI entry point: read the experiment parameters from the required
    # YAML config file and hand them to train() (defined above).
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', required=True, help='Config YAML file')
    args = parser.parse_args()
    with open(args.config) as file:
        params = yaml.load(file, Loader=yaml.FullLoader)
train(params) | 33.942308 | 136 | 0.566799 |
3cf1f4f9c94b916e1af4be610a5cfc8f880bc37a | 18,425 | py | Python | generate_md.py | wzyjerry/EPO-patent-process | 686c0ea6d9122436071c809a238b8348cdf65120 | [
"MIT"
] | null | null | null | generate_md.py | wzyjerry/EPO-patent-process | 686c0ea6d9122436071c809a238b8348cdf65120 | [
"MIT"
] | null | null | null | generate_md.py | wzyjerry/EPO-patent-process | 686c0ea6d9122436071c809a238b8348cdf65120 | [
"MIT"
# Multilingual (de/en/fr) labels for the INID bibliographic-data codes
# (WIPO Standard ST.9) printed on EPO patent front pages.  A value is
# either a plain string, a list of alternatives, or a dict keyed by the
# publication kind code (A1, B1, ...).  The German/French diacritics were
# lost to an encoding issue in a previous revision and are restored here.
labels = {
    15: {
        'de': [
            'Korrekturinformation',
            'Korrigierte Fassung Nr.',
            'Korrekturen, siehe'
        ],
        'en': [
            'Correction information',
            'Corrected version no',
            'Corrections, see'
        ],
        'fr': [
            'Information de correction',
            'Version corrigée no',
            'Corrections, voir'
        ]
    },
    21: {
        'de': 'Anmeldenummer',
        'en': 'Application number',
        'fr': 'Numéro de dépôt'
    },
    22: {
        'de': 'Anmeldetag',
        'en': 'Date of filing',
        'fr': 'Date de dépôt'
    },
    30: {
        'de': 'Priorität',
        'en': 'Priority',
        'fr': 'Priorité'
    },
    43: {
        'de': {
            'A1': 'Veröffentlichungstag',
            'A3': 'Veröffentlichungstag A2',
            'A8': 'Veröffentlichungstag',
            'A9': 'Veröffentlichungstag',
            'B1': 'Veröffentlichungstag der Anmeldung',
            'B2': 'Veröffentlichungstag der Anmeldung',
            'B3': 'Veröffentlichungstag der Anmeldung',
            'B9': 'Veröffentlichungstag der Anmeldung'
        },
        'en': {
            'A1': 'Date of publication',
            'A3': 'Date of publication A2',
            'A8': 'Date of publication',
            'A9': 'Date of publication',
            'B1': 'Date of publication of application',
            'B2': 'Date of publication of application',
            'B3': 'Date of publication of application',
            'B9': 'Date of publication of application'
        },
        'fr': {
            'A1': 'Date de publication',
            'A3': 'Date de publication A2',
            'A8': 'Date de publication',
            'A9': 'Date de publication',
            'B1': 'Date de publication de la demande',
            'B2': 'Date de publication de la demande',
            'B3': 'Date de publication de la demande',
            'B9': 'Date de publication de la demande'
        }
    },
    45: {
        'de': {
            'B1': 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Patenterteilung',
            'B2': {
                45: 'Hinweis auf die Patenterteilung',
                47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
            },
            'B9': {
                45: 'Hinweis auf die Patenterteilung',
                47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
            }
        },
        'en': {
            'B1': 'Date of publication and mention of the grant of the patent',
            'B2': {
                45: 'Mention of the grant of the patent',
                47: 'Date of publication and mention of the opposition decision:'
            },
            'B9': {
                45: 'Mention of the grant of the patent',
                47: 'Date of publication and mention of the opposition decision:'
            }
        },
        'fr': {
            'B1': 'Date de publication et mention de la délivrance du brevet',
            'B2': {
                45: 'Mention de la délivrance du brevet',
                47: 'Date de publication et mention de la décision concernant l’opposition'
            },
            'B9': {
                45: 'Mention de la délivrance du brevet',
                47: 'Date de publication et mention de la décision concernant l’opposition'
            }
        }
    },
    48: {
        'de': 'Corrigendum ausgegeben am',
        'en': 'Corrigendum issued on',
        'fr': 'Corrigendum publié le'
    },
    51: {
        'de': 'Int Cl.',
        'en': 'Int Cl.',
        'fr': 'Int Cl.',
    },
    56: {
        'de': 'Entgegenhaltungen',
        'en': 'References cited',
        'fr': 'Documents cités'
    },
    60: {
        'de': 'Teilanmeldung',
        'en': 'Divisional application',
        'fr': 'Demande divisionnaire'
    },
    71: {
        'de': 'Anmelder',
        'en': 'Applicant',
        'fr': 'Demandeur'
    },
    72: {
        'de': 'Erfinder',
        'en': 'Inventor',
        'fr': 'Inventeur'
    },
    73: {
        'de': 'Patentinhaber',
        'en': 'Proprietor',
        'fr': 'Titulaire'
    },
    74: {
        'de': 'Vertreter',
        'en': 'Representative',
        'fr': 'Mandataire'
    },
    84: {
        'de': [
            'Benannte Vertragsstaaten',
            'Benannte Erstreckungsstaaten',
            'Benannte Validierungsstaaten'
        ],
        'en': [
            'Designated Contracting States',
            'Designated Extension States',
            'Designated Validation States'
        ],
        'fr': [
            'Etats contractants désignés',
            'Etats d’extension désignés',
            'Etats de validation désignés'
        ]
    },
    86: {
        'de': 'Internationale Anmeldenummer',
        'en': 'International application number',
        'fr': 'Numéro de dépôt international'
    },
    87: {
        'de': 'Internationale Veröffentlichungsnummer',
        'en': 'International publication number',
        'fr': 'Numéro de publication internationale'
    },
    88: {
        'de': 'Veröffentlichungstag A3',
        'en': 'Date of publication A3',
        'fr': 'Date de publication A3'
    },
    'bulletin': {
        'de': 'Patentblatt',
        'en': 'Bulletin',
        'fr': 'Bulletin'
    },
    'description': {
        'de': 'Beschreibung',
        'en': 'Description',
        'fr': 'Description'
    },
    # NOTE(review): 'remarks' had no 'fr' entry in the original; kept as-is.
    'remarks': {
        'de': 'Bemerkungen',
        'en': 'Remarks'
    }
}
| 39.623656 | 153 | 0.48711 |
3cf38cae0f2a545ab33232a28befeb4c8470d502 | 1,103 | py | Python | tests/test_http_basic_auth.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | [
"MIT"
] | 1 | 2020-12-04T03:15:47.000Z | 2020-12-04T03:15:47.000Z | tests/test_http_basic_auth.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | [
"MIT"
] | 1 | 2021-06-01T22:24:27.000Z | 2021-06-01T22:24:27.000Z | tests/test_http_basic_auth.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | [
"MIT"
] | 2 | 2020-12-04T03:16:18.000Z | 2021-09-04T14:10:12.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: test_http_basic_auth.py
@time: 2018-06-21 11:17
"""
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import requests
from requests.auth import HTTPBasicAuth
if __name__ == '__main__':
unittest.main()
| 20.425926 | 73 | 0.635539 |
3cf5781010a796345729a2c7347029eba43ec197 | 1,696 | py | Python | snomed_parent_cat_mapper.py | vickysam/pyHealth | 5660afd385a0342aa2039b42af5f208c672bfdeb | [
"Apache-2.0"
] | 7 | 2017-04-30T15:12:33.000Z | 2021-11-21T01:39:04.000Z | snomed_parent_cat_mapper.py | vickysam/pyHealth | 5660afd385a0342aa2039b42af5f208c672bfdeb | [
"Apache-2.0"
] | null | null | null | snomed_parent_cat_mapper.py | vickysam/pyHealth | 5660afd385a0342aa2039b42af5f208c672bfdeb | [
"Apache-2.0"
] | 2 | 2018-08-07T14:38:14.000Z | 2021-04-09T05:41:08.000Z | import csv
import pymedtermino
from pymedtermino.snomedct import *
# NOTE: Python 2 script (print statements, csv files opened in 'rb').
pymedtermino.LANGUAGE = "en"
pymedtermino.REMOVE_SUPPRESSED_CONCEPTS = False
# RF2 concept delta release to annotate, and the annotated output file.
input_delta_file = 'sct2_Concept_Delta_INT_20160131.csv'
output_delta_file = 'sct2_Concept_Delta_INT_20160131_Top_Category_Mapped.csv'
data = []
snomed_data = []
# User-supplied overrides: [top_concept_id, top_category_code] pairs.
with open('top_parent_cat.csv', 'rb') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
    for row in reader:
        data.append([row['top_concept_id'],row['top_category_code']])
print "Supplied : ", data
# Concept rows padded with two placeholder columns (0, 0) that will become
# topCategoryCode / topCategoryId in the output.
# NOTE(review): RF2 release files are normally tab-delimited -- confirm the
# delimiter below survived copy/paste intact.
with open(input_delta_file, 'rb') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=' ', quotechar='"')
    for row in reader:
        snomed_data.append([row['id'],row['effectiveTime'],row['active'],row['moduleId'],row['definitionStatusId'],0,0])
csvfile = open(output_delta_file, 'w')
writer = csv.DictWriter(csvfile, fieldnames=['id','effectiveTime','active','moduleId','definitionStatusId','topCategoryCode','topCategoryId'])
writer.writeheader()
i = 0
for concept in snomed_data:
    # The code below appears to assume ancestors() is ordered from the
    # concept up to the SNOMED CT root (138875005), making [-2] the
    # top-level hierarchy directly below the root -- TODO confirm against
    # pymedtermino.
    ancestors = list(SNOMEDCT[concept[0]].ancestors())
    category = SNOMEDCT[138875005]
    if len(ancestors) >= 2:
        category = ancestors[-2]
    if len(ancestors) >= 3:
        # Two hierarchies are mapped one level deeper; any other concept
        # with three or more ancestors falls back to the root concept.
        if ancestors[-3].code == '406455002' or ancestors[-3].code == '116273005':
            category = ancestors[-3]
        else:
            category = SNOMEDCT[138875005]
    term = category.term
    # Replace the term with the user-supplied category code when available.
    for item in data:
        if item[0] == str(category.code):
            term=item[1]
    writer.writerow({'id': str(concept[0]), 'effectiveTime': concept[1],'active': concept[2],'moduleId': str(concept[3]),'definitionStatusId': str(concept[4]) , 'topCategoryCode': term,'topCategoryId': str(category.code)})
    # Row counter (currently unused beyond the increment).
    i = i + 1
csvfile.close()
print "Completed...."
| 32.615385 | 219 | 0.722877 |
3cf5831f266719f857798ff19bb7f65e432caf03 | 710 | py | Python | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | [
"MIT"
] | 263 | 2020-10-05T18:47:29.000Z | 2022-03-31T19:44:46.000Z | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | [
"MIT"
] | 1,264 | 2020-10-05T18:13:05.000Z | 2022-03-31T23:16:35.000Z | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | [
"MIT"
] | 760 | 2020-10-05T18:22:51.000Z | 2022-03-29T06:06:20.000Z | #Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
#There is only one duplicate number in nums, return this duplicate number.
| 37.368421 | 111 | 0.539437 |
3cf74e26261f13d85a64a42ef32a7fccd8ef0a55 | 2,484 | py | Python | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.metrics import cohen_kappa_score, confusion_matrix
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
dirname = os.path.dirname(__file__)
def extract_annotations(files):
    '''Collect the 'Aggression' labels for every vim annotated by both
    annotators.
    :param files: list with the two annotation file names
    :returns annotations_ann1: list of labels from the first annotator
    :returns annotations_ann2: list of labels from the second annotator'''
    path_ann1 = dirname + '/annotations/' + files[0]
    path_ann2 = dirname + '/annotations/' + files[1]
    ann1 = pd.read_excel(path_ann1, index_col=1).T.to_dict()
    ann2 = pd.read_excel(path_ann2, index_col=1).T.to_dict()
    annotations_ann1 = []
    annotations_ann2 = []
    # Iterate the second annotator's vims and pair each with the first
    # annotator's label for the same vim.
    for vim, row in ann2.items():
        annotations_ann2.append(row['Aggression'])
        annotations_ann1.append(ann1.get(vim).get('Aggression'))
    return annotations_ann1, annotations_ann2
def calculate_score(ann1, ann2):
    """Compute inter-annotator agreement (raw percentage and Cohen's
    Kappa), print both scores, and display the confusion matrix as an
    annotated heatmap.
    :param ann1: list of annotation labels ('pos'/'neg')
    :param ann2: list of annotation labels ('pos'/'neg') """
    agreement = [anno1 == anno2 for anno1, anno2 in zip(ann1, ann2)]
    percentage = sum(agreement) / len(agreement)
    print("Percentage Agreement: %.2f" % percentage)
    termlabels = ['pos', 'neg']
    kappa = cohen_kappa_score(ann1, ann2, labels=termlabels)
    print("Cohen's Kappa: %.2f" % kappa)
    confusions = confusion_matrix(ann1, ann2, labels=termlabels)
    pandas_table = pd.DataFrame(confusions, index=termlabels, columns = ['pos', 'neg'])
    # Annotate each confusion-matrix cell with its quadrant name and count.
    group_names = ["True Pos", "False Neg", "False Pos", "True Neg"]
    group_counts = ["{0: 0.0f}".format(value) for value in confusions.flatten()]
    labels = [f"{v1} {v2}" for v1, v2 in zip(group_names, group_counts)]
    labels = np.asarray(labels).reshape(2, 2)
    sns.heatmap(pandas_table, annot=labels, fmt='', cmap = 'Blues')
    plt.title("Confusion matrix annotations", size=12)
    plt.show()
    print(pandas_table)
print(pandas_table)
if __name__ == '__main__':
main() | 39.428571 | 119 | 0.686393 |
3cf83d68c033ebd1a763e8c4a9ee5516e254ffd0 | 1,068 | py | Python | cogs/Events.py | popop098/Teasia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | 1 | 2020-12-21T12:05:25.000Z | 2020-12-21T12:05:25.000Z | cogs/Events.py | popop098/Taesia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | null | null | null | cogs/Events.py | popop098/Taesia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | 1 | 2021-10-30T03:45:42.000Z | 2021-10-30T03:45:42.000Z | import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions, CommandNotFound, BucketType, cooldown, CommandOnCooldown
from discord import Webhook, RequestsWebhookAdapter
from time import gmtime, strftime
from discord.utils import get
import youtube_dl
import logging
import random
import praw
import time
import json
import sys
import os
from random import randint
| 30.514286 | 126 | 0.729401 |
3cf96ed28f3d03023b6eb089f792b8961163dffe | 1,927 | py | Python | panopto_client/access.py | uw-it-cte/django-panopto-client | cdfc22e1a7c1e06de62477c30681da0755238152 | [
"Apache-2.0"
] | 4 | 2017-12-29T19:15:37.000Z | 2019-11-18T18:32:39.000Z | panopto_client/access.py | uw-it-cte/django-panopto-client | cdfc22e1a7c1e06de62477c30681da0755238152 | [
"Apache-2.0"
] | 2 | 2017-09-07T23:27:52.000Z | 2019-04-10T20:27:22.000Z | panopto_client/access.py | uw-it-cte/django-panopto-client | cdfc22e1a7c1e06de62477c30681da0755238152 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module exposes Panopto "AccessManagement" Service methods
"""
from panopto_client import PanoptoAPI, PanoptoAPIException
| 34.410714 | 76 | 0.637779 |
3cf9d103d47dd847c7bbdc09c8f10bae634a2961 | 20,459 | py | Python | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | [
"MIT"
] | 3 | 2021-07-27T14:45:58.000Z | 2022-01-31T21:09:46.000Z | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | [
"MIT"
] | 1 | 2021-11-03T10:47:45.000Z | 2021-11-03T10:47:45.000Z | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | [
"MIT"
] | 1 | 2021-11-03T10:17:34.000Z | 2021-11-03T10:17:34.000Z | import os
from gc import collect
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
from importlib import import_module
import yaml
import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree
#from halotools.mock_observables import tpcf_multipole
from astrild.particles.ecosmog import Ecosmog
from astrild.particles.hutils import SubFind
from astrild.particles.hutils import Rockstar
#from astrild.particles.utils import TPCF
from astrild.utils import read_hdf5
from astrild.io import IO
dir_src = Path(__file__).parent.absolute()
default_halo_stats_config = dir_src / "configs/halo_stats.yaml"
dm_particle_mass = 7.98408e10 #[Msun/h]
def filter_resolved_subfind_halos(
    self, snapshot: read_hdf5.snapshot, nr_particles: int,
) -> read_hdf5.snapshot:
    """
    Keep only fof-groups and subhalos resolved with more than
    `nr_particles` dark-matter particles (mass > nr_particles times the
    DM particle mass, both in Msun/h).

    Args:
        snapshot: open SubFind snapshot whose catalogue will be filtered.
        nr_particles: minimum number of DM particles per (sub)halo.
    Return:
        The snapshot with its `cat` entries filtered in place.
    """
    min_mass = dm_particle_mass * nr_particles
    mass = snapshot.cat["Group_M_Crit200"][:] * snapshot.header.hubble # [Msun/h]
    idx_groups = mass > min_mass
    mass = snapshot.cat["SubhaloMass"][:] * snapshot.header.hubble # [Msun/h]
    idx_subhalos = mass > min_mass
    # idx = snapshot.cat["GroupLenType"][:, 1] > nr_particles
    # idx = snapshot.cat["Group_M_Crit200"][:] > \
    #    100*(snapshot.header.massarr[1] * 1e10 / snapshot.header.hubble)
    return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_nonzero_subfind_halos_size(
    self, snapshot: read_hdf5.snapshot,
) -> read_hdf5.snapshot:
    """
    Keep only fof-groups and subhalos with a non-zero size (radius).

    Args:
        snapshot: open SubFind snapshot whose catalogue will be filtered.
    Return:
        The snapshot with its `cat` entries filtered in place.
    """
    rad = snapshot.cat["Group_R_Crit200"][:] # [ckpc/h]
    idx_groups = rad > 0
    rad = snapshot.cat["SubhaloHalfmassRad"][:] # [ckpc/h]
    idx_subhalos = rad > 0
    return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_subfind_and_fof_halos(
    self,
    snapshot: read_hdf5.snapshot,
    idx_groups: np.ndarray,
    idx_subhalos: np.ndarray,
) -> read_hdf5.snapshot:
    """ Filter sub- and fof-halos by boolean index arrays.

    Every "Group*" catalogue entry is masked with `idx_groups`, every
    "Subhalo*" entry of subhalo length with `idx_subhalos`; the snapshot
    catalogue is modified in place and the snapshot returned.
    """
    for key, value in snapshot.cat.items():
        if "Group" in key:
            idx = idx_groups
        # Subhalo entries are only masked when their length matches the
        # subhalo catalogue (longer than the group catalogue).
        elif ("Subhalo" in key) and (len(snapshot.cat[key]) > len(idx_groups)):
            idx = idx_subhalos
        else:
            # NOTE(review): this only constructs the warning object; it is
            # never raised or emitted -- probably meant to be `raise ...`
            # or `warnings.warn(...)`.
            HalosWarning(f"The key is {key} is a problem")
            continue
        # Scalars are left untouched; 1-D and 2-D arrays are masked along
        # their first axis; anything else is unexpected.
        if len(value.shape) == 0:
            continue
        elif len(value.shape) == 1:
            snapshot.cat.update({key: value[idx]})
        elif len(value.shape) == 2:
            snapshot.cat.update({key: value[idx, :]})
        else:
            raise HalosWarning(
                f"The group data {key} has weird dimensions: {value.shape}."
            )
    return snapshot
#def get_subfind_tpcf(
# self,
# subfind_type: str,
# config: dict,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# subfind_type: ["Group", "Subhalo"]
# config:
# save:
# wether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
# for snap_nr in self.sim.dir_nrs:
# snapshot = self.get_subfind_halo_data(snap_nr)
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_subfind_halos(snapshot, 100)
#
# if subfind_type == "group":
# halo_pos = snapshot.cat["GroupPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# scale_factor = 1 / (1 + snapshot.header.redshift)
# print("test a -------", scale_factor)
# halo_vel = snapshot.cat["GroupVel"][:] / scale_factor #[km/s]
# if subfind_type == "subhalo":
# halo_pos = snapshot.cat["SubhaloPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# halo_vel = snapshot.cat["SubhaloVel"][:] #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# multipole=l,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
# print(l, "!!!!!!!!!!!! snap_%d" % snap_nr, _tpcf)
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "subfind",
# "_"+subfind_type,
# tpcf,
# )
# else:
# self.tpcf = tpcf
def get_rockstar_stats(
    self,
    config_file: str = default_halo_stats_config,
    snap_nrs: Optional[List[int]] = None,
    save: bool = True,
):
    """
    Compute statistics of halos identified with Rockstar from one or a
    collection of simulations.
    rockstar:
        https://bitbucket.org/gfcstanford/rockstar/src/main/
        https://github.com/yt-project/rockstar
        https://www.cosmosim.org/cms/documentation/database-structure/tables/rockstar/
    Args:
        config_file:
            yaml file containing info on what statistics to
            compute and their settings.
        snap_nrs:
            snapshot numbers to process; defaults to every snapshot of
            the simulation.
        save:
            whether to save results to file (otherwise results are kept
            on ``self.statistics``).
    """
    # load settings (stg)
    with open(config_file) as f:
        statistics = yaml.load(f, Loader=yaml.FullLoader)
    # attach an empty results container to every configured statistic
    for name in statistics.keys():
        statistics[name]["results"] = {"bins": {}, "values": {}}
    # load particles/utils/stats.py package for dynamic function call
    module = import_module("astrild.particles.hutils")
    # sort statistics according to required halo resolutions (low -> high)
    stat_names_ord = self._sort_statistics(statistics)
    if snap_nrs is None:
        snap_nrs = self.sim.dir_nrs
    for snap_nr in snap_nrs:
        snapshot = self.get_rockstar_halo_data(
            self.sim.files["halos"][str(snap_nr)]
        )
        if len(snapshot.index.values) == 0:
            print(f"No sub- & halos found for snapshot {snap_nr}")
            continue
        resolution = 0
        for stat_name in stat_names_ord:
            # re-filter the catalogue only when the required resolution
            # actually changes (names are ordered by resolution above)
            if statistics[stat_name]["resolution"] != resolution:
                resolution = int(statistics[stat_name]["resolution"])
                snapshot = self.filter_resolved_rockstar_halos(
                    snapshot, resolution
                )
            print(f" Compute {stat_name}")
            # dynamic dispatch: each statistic is a method of hutils.Rockstar
            clas = getattr(module, "Rockstar")
            fct = getattr(clas, stat_name)
            if stat_name != "histograms":
                bins, values = fct(snapshot, **statistics[stat_name]["args"])
                if (bins is not None) and (values is not None):
                    statistics[stat_name]["results"]["bins"]["snap_%d" % snap_nr] = bins
                    statistics[stat_name]["results"]["values"][
                        "snap_%d" % snap_nr
                    ] = values
            else:
                # histograms return a single object instead of (bins, values)
                hist = fct(snapshot, **statistics[stat_name]["args"])
                statistics[stat_name]["results"]["values"]["snap_%d" % snap_nr] = hist
    if save:
        self._save_results("rockstar", statistics)
    else:
        self.statistics = statistics
#def get_rockstar_tpcf(
# self,
# config: dict,
# snap_nrs: Optional[List[int]] = None,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# config:
# save:
# wether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
#
# if snap_nrs is None:
# snap_nrs = self.sim.dir_nrs
# for snap_nr in snap_nrs:
# snapshot = self.get_rockstar_halo_data(
# self.sim.files["halos"][str(snap_nr)]
# )
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_rockstar_halos(snapshot, 100)
#
# halo_pos = snapshot[["x", "y", "z"]].values #[Mpc/h]
# halo_vel = snapshot[["vx", "vy", "vz"]].values #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "rockstar",
# "",
# tpcf,
# )
# else:
# self.tpcf = tpcf
def filter_resolved_rockstar_halos(
    self, snapshot: pd.DataFrame, nr_particles: int,
) -> pd.DataFrame:
    """Keep only halos resolved by more than ``nr_particles`` DM particles.

    Resolution is judged on the "m200c" mass column against the threshold
    ``dm_particle_mass * nr_particles``.
    """
    mass_threshold = nr_particles * dm_particle_mass
    resolved = snapshot["m200c"] > mass_threshold
    return snapshot.loc[resolved]
def _sort_statistics(self, statistics: dict) -> List[str]:
"""
Sort statistics by their required particle resolution
(low -to-> high).
"""
resolutions = np.zeros(len(list(statistics.keys())))
for idx, (_, stg) in enumerate(statistics.items()):
resolutions[idx] = int(stg["resolution"])
idxs = np.argsort(resolutions)
return [list(statistics.keys())[idx] for idx in idxs]
def _save_results(self, halofinder: str, methods: dict):
    """
    Save results of each statistic of each simulations snapshot
    for Rockstar and SubFind.

    Args:
        halofinder:
            tag used in the output file name ("rockstar" or "subfind").
        methods:
            statistics dictionary as filled by get_*_stats(); each entry
            carries "args" and "results" ({"bins": ..., "values": ...}).
    """
    for method, stg in methods.items():
        if method != "histograms":
            columns = list(stg["results"]["bins"].keys())
            if len(self.sim.dir_nrs) > 1:
                # sanity check: all snapshots must share the same binning
                assert np.sum(stg["results"]["bins"][columns[0]]) == np.sum(
                    stg["results"]["bins"][columns[1]]
                )
            df = pd.DataFrame(
                data=stg["results"]["values"], index=stg["results"]["bins"][columns[0]],
            )
            if "seperate" in list(stg["args"].keys()):
                # encode which halo populations were compared in the name
                compare = np.sum(stg["args"]["seperate"]["compare"])
                if compare == 2:
                    compare = "11"
                if compare == 3:
                    compare = "12"
                if compare == 4:
                    compare = "22"
            else:
                compare = "00"
            file_out = f"{self.sim.dirs['out']}{halofinder}_{method}_{compare}.h5"
            if os.path.exists(file_out):
                os.remove(file_out)
            print(f"Saving results to -> {file_out}")
            df.to_hdf(file_out, key="df", mode="w")
        else:
            # histograms: one output file per snapshot
            for snap_nr, stg_in_snap in stg["results"]["values"].items():
                data = np.asarray(list(stg_in_snap.values())).T
                columns = list(stg_in_snap.keys())
                df = pd.DataFrame(data=data, columns=columns)
                # BUGFIX: the snapshot part of the name used to be the
                # literal "{snap_nr}" (missing f-prefix on the second
                # string), so every snapshot overwrote the same file.
                file_out = f"{self.sim.dirs['out']}{halofinder}_{method}_{snap_nr}.h5"
                if os.path.exists(file_out):
                    os.remove(file_out)
                print(f"Saving results to -> {file_out}")
                df.to_hdf(file_out, key="df", mode="w")
def _create_filename(self, file_in: str, quantity: str):
""" Create file-name for merged snapshots"""
quantity = quantity.replace("_", "")
file_out = file_in.split("/")[-1].replace("Ray", quantity)
file_out = file_out.replace(".h5", "_lt.fits")
if ("_lc" not in file_in) or ("zrange" not in file_in):
file_out = file_out.split("_")
box_string = [string for string in file_in.split("/") if "box" in string][0]
idx, string = [
(idx, "%s_" % box_string + string)
for idx, string in enumerate(file_out)
if "output" in string
][0]
file_out[idx] = string
file_out = "_".join(file_out)
return self.sim.dirs["out"] + file_out
| 34.853492 | 92 | 0.525783 |
3cfb5d1a0f1982dc0361736334993c9728647d4a | 367 | py | Python | webapi.py | Netherdrake/steemdata-webapi | 02b443b6e7292577dfcca1a7fcc55329b1b70fb9 | [
"MIT"
] | 1 | 2017-04-20T04:22:07.000Z | 2017-04-20T04:22:07.000Z | webapi.py | Netherdrake/steemdata-webapi | 02b443b6e7292577dfcca1a7fcc55329b1b70fb9 | [
"MIT"
] | 1 | 2017-06-07T13:08:32.000Z | 2017-06-07T13:08:32.000Z | webapi.py | Netherdrake/steemdata-webapi | 02b443b6e7292577dfcca1a7fcc55329b1b70fb9 | [
"MIT"
] | null | null | null | import os
from eve import Eve
from eve_docs import eve_docs
from flask_bootstrap import Bootstrap
# Build the Eve REST application; resource definitions come from settings.py.
app = Eve(settings='settings.py')
# Attach Bootstrap assets and mount the auto-generated API docs under /docs.
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
if __name__ == '__main__':
    # Bind address comes from FLASK_HOST (default: localhost only).
    # NOTE(review): os.getenv returns a string, so ANY non-empty PRODUCTION
    # value (even "0") disables debug mode -- confirm that is intended.
    app.run(host=os.getenv('FLASK_HOST', '127.0.0.1'),
            debug=not os.getenv('PRODUCTION', False))
| 21.588235 | 54 | 0.719346 |
3cfcd1fb4a8c9717754df6618804de4a66eaa349 | 5,475 | py | Python | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassanobeid1994/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | [
"MIT"
] | null | null | null | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassanobeid1994/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | [
"MIT"
] | 89 | 2020-02-10T02:52:11.000Z | 2020-06-23T03:50:27.000Z | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassan-obeid/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Purpose
# The point of this notebook is to demonstrate how to perform at least one type of visual, marginal independence test.
#
# In particular, the notebook will show how to visually test the following implication<br>
# $
# \begin{aligned}
# P \left( X_1 \mid X_2 \right) &= P \left( X_1 \right) \\
# \int x_1 P \left( X_1 \mid X_2 \right) \partial{x_1} &= \int x_1 P \left( X_1 \right) \partial{x_1} \\
# E \left[ X_1 \mid X_2 \right] &= E \left[ X_1 \right]
# \end{aligned}
# $
#
# In other words, if $X_1$ is marginally independent of $X_2$, then the expectation of $X_1$ conditional on $X_2$ is equal to the marginal expectation of $X_1$. This implies that shuffling / permuting the $X_2$ columns should make no difference to predicting $X_1$, beyond predicting the mean of $X_1$.
# +
# Declare hyperparameters for testing
NUM_PERMUTATIONS = 100  # shuffles drawn by each permutation test
# Declare the columns to be used for testing
# (marginal independence is tested between x1 and x2)
x1_col = "num_licensed_drivers"
x2_col = "num_cars"
mode_id_col = "mode_id"  # used below to select drive-alone rows (== 1)
# Set the colors for plotting
permuted_color = "#a6bddb"  # color of the permuted / null distribution
# Declare paths to data
DATA_PATH = "../../data/raw/spring_2016_all_bay_area_long_format_plus_cross_bay_col.csv"
# +
import sys # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
import numpy as np # noqa: E402
import pandas as pd # noqa: E402
import seaborn as sbn # noqa: E402
from scipy.stats import multinomial # noqa: E402
from tqdm.notebook import tqdm # noqa: E402
# %matplotlib inline
sys.path.insert(0, "../../src/")
import testing.observable_independence as oi # noqa: E402
# -
# Load the raw data
df = pd.read_csv(DATA_PATH)
# +
# Announce which pair of columns is being tested.
title_str = "{} vs {}"
print(title_str.format(x1_col, x2_col))
# Restrict the test to drive-alone observations (mode_id == 1).
drive_alone_filter = df[mode_id_col] == 1
license_array = df.loc[drive_alone_filter, x1_col].values
num_cars_array = df.loc[drive_alone_filter, x2_col].values
# Visual permutation test of independence between the two real columns.
oi.visual_permutation_test(
    license_array,
    num_cars_array,
    z_array=None,  # presumably no conditioning set -> marginal test; verify
    seed=1038,  # fixed seed for reproducibility
    num_permutations=NUM_PERMUTATIONS,
    permutation_color=permuted_color,
)
# -
# ## Test `visual_permutation_test`
# +
# Figure out how many observations to simulate, based on real data
num_drive_alone_obs = (df.mode_id == 1).sum()
# Determine how many simulations to carry out
NUM_TEST_SIM = 200
# Initialize an array to store the simulated p-values
test_p_vals = np.empty((NUM_TEST_SIM,), dtype=float)
# Set a random seed for reproducibility
np.random.seed(340)
# Compute the p-values of the visual permutation test when the
# null-hypothesis is true.
for i in tqdm(range(NUM_TEST_SIM)):
    # Simulate data that, by construction, satisfies x2 indep x1
    # (independent draws, sized like the real drive-alone sample)
    sim_x1 = 0.2 + 0.5 * np.random.normal(size=num_drive_alone_obs)
    sim_x2 = -0.1 - 0.01 * np.random.uniform(size=num_drive_alone_obs)
    # Determine which simulations to plot.
    # Just plot 1 simulation for visual comparison with real data
    current_close = True if i != 0 else False
    # Carry out the permutation test
    current_p = oi.visual_permutation_test(
        sim_x1,
        sim_x2,
        z_array=None,
        seed=None,
        progress=False,
        verbose=False,
        show=False,
        close=current_close,  # keep only the first simulation's figure open
    )
    # Store the resulting p-values
    test_p_vals[i] = current_p
# +
# Sample the per-bin counts that a 10-bin histogram of NUM_TEST_SIM
# p-values would show if the p-values were exactly uniformly distributed.
null_histogram_dist = multinomial(NUM_TEST_SIM, [0.1 for x in range(10)])
null_hist_samples = null_histogram_dist.rvs(100)
null_hist_mean = null_histogram_dist.mean()
# Pointwise 5th / 95th percentile envelope of the null histogram counts.
null_hist_upper_bound = np.percentile(null_hist_samples, 95, axis=0)
null_hist_lower_bound = np.percentile(null_hist_samples, 5, axis=0)
# Plot the distribution of our test p-values versus the p-values from
# a uniform distribution
fig, ax = plt.subplots(figsize=(10, 6))
plot_categories = [0.05 + 0.1 * x for x in range(10)]  # histogram bin midpoints
ax.fill_between(
    plot_categories,
    null_hist_upper_bound,
    null_hist_lower_bound,
    color=permuted_color,
    label="Null 95% Distribution",
    alpha=0.5,
    zorder=2,
)
ax.hlines(null_hist_mean, 0, 1, label="Null Mean")
ax.hist(test_p_vals, bins=10, label="Observed", zorder=0)
ax.scatter(
    plot_categories,
    null_hist_upper_bound,
    label="Null 95% Upper Bound",
    color=permuted_color,
    marker="+",
    zorder=1,
)
ax.scatter(
    plot_categories,
    null_hist_lower_bound,
    label="Null 5% Lower Bound",
    color=permuted_color,
    marker="*",
    zorder=1,
)
ax.legend(loc=(1.05, 0.75))
ax.set_xlabel("p-values", fontsize=13)
ax.set_ylabel("Num Observations", rotation=0, labelpad=70, fontsize=13)
sbn.despine()
fig.show()
# -
# ## Conclusions
# - From the last plot, we can see that under the null hypothesis of $X_1$ independent of $X_2$, we get p-values that close to uniformly distributed.<br>
# This means the permutation p-values in `visual_permutation_test` are unlikely to be overly-optimistic.<br>
# In other words, we can feel safe(r) about relying on this test to distinguish conditional dependence from independence.
# - From the first two plots of this notebook, we can see from applying the `visual_permutation_test` that the number of licensed drivers per household and number of automobiles per household are not marginally independent.
| 30.586592 | 302 | 0.715982 |
3cfd1eff7aa3274bf5ba215dcc74c84bcd761113 | 1,799 | py | Python | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | [
"MIT"
] | 4 | 2018-04-28T13:43:20.000Z | 2021-03-11T16:10:35.000Z | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | [
"MIT"
] | null | null | null | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | [
"MIT"
] | null | null | null | # import the network module
# This module provides access to various network related functions and classes.
# https://github.com/loboris/MicroPython_ESP32_psRAM_LoBo/wiki/network
import network,utime #pylint: disable=import-error
# ----------------------------------------------------------
# Define callback function used for monitoring wifi activity
# ----------------------------------------------------------
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
'''
# Enable callbacks
# NOTE(review): `wifi_cb` (the monitoring callback announced above) is not
# defined in this excerpt -- confirm it exists earlier in the file.
network.WLANcallback(wifi_cb)
# ----------------------------------------------------------
# create station interface - Standard WiFi client
wlan = network.WLAN(network.STA_IF)
wlan.active(False)  # reset the interface before (re)activating it
# activate the interface
wlan.active(True)
# connect to a known WiFi
wlan.connect('IOTBOOTCAMP', 'MicroPython')
# Note that this may take some time, so we need to wait
# Wait 5 sec or until connected
tmo = 50  # 50 polls x 100 ms = 5 s timeout
while not wlan.isconnected():
    utime.sleep_ms(100)
    tmo -= 1
    if tmo == 0:
        break
# check if the station is connected to an AP
if wlan.isconnected():
    print("=== Station Connected to WiFi \n")
else:
    print("!!! Not able to connect to WiFi")
# gets or sets the interface's IP/netmask/gw/DNS addresses
# 'Raw'
print( wlan.ifconfig() )
#pretty
c = wlan.ifconfig()  # (ip, netmask, gateway, dns), per the print below
print("IP:{0}, Network mask:{1}, Router:{2}, DNS: {3}".format( *c ))
| 24.986111 | 81 | 0.568093 |
3cfd92551f129b14e3271b5e4699d932dae50065 | 681 | py | Python | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | [
"MIT"
] | null | null | null | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | [
"MIT"
] | null | null | null | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | [
"MIT"
] | null | null | null | from typing import List
groupSizes = [3,3,3,3,4,4,2,2,4,3,4,3,1]
print(Solution().groupThePeople(groupSizes)) | 32.428571 | 71 | 0.565345 |
3cff24ff2a3befb7112dd8c73ae11e32acd5099b | 1,576 | py | Python | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | [
"MIT"
] | 1 | 2022-01-31T11:31:04.000Z | 2022-01-31T11:31:04.000Z | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | [
"MIT"
] | null | null | null | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | [
"MIT"
] | null | null | null | # Import spiders
from .spiders.bons_empregos import BonsEmpregosSpider
from .spiders.cargadetrabalhos import CargaDeTrabalhosSpider
from .spiders.emprego_org import EmpregoOrgSpider
from .spiders.emprego_xl import EmpregoXlSpider
from .spiders.net_empregos import NetEmpregosSpider
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
# Make sure to be in the Data Collection directory
FOLDER_PATH = "/Users/gilnr/OneDrive - NOVASBE/Work Project/Code/Data/"
if __name__ == '__main__':
main() | 34.26087 | 84 | 0.741751 |
a70095a05438f3493dabb7b856707d3589d2cc37 | 2,302 | py | Python | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | [
"MIT"
] | null | null | null | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | [
"MIT"
] | null | null | null | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | [
"MIT"
] | null | null | null | import configparser
import csv
from django.core.management.base import BaseCommand
import logging
import os
from ....common.catalog.sentiment_type import SentimentType
from ....common.catalog.source import Source
| 34.878788 | 93 | 0.591659 |
a7024ecc7fc28ff6673f46a13ae3e63f8ae5b339 | 114 | py | Python | tests/demo/demoproject/urls.py | saxix/django-mb | 3700c05b45854a28bd23368c4e4971ae54c18cad | [
"BSD-3-Clause"
] | 2 | 2017-03-20T12:26:02.000Z | 2017-04-22T11:46:17.000Z | tests/demo/demoproject/urls.py | saxix/django-mb | 3700c05b45854a28bd23368c4e4971ae54c18cad | [
"BSD-3-Clause"
] | null | null | null | tests/demo/demoproject/urls.py | saxix/django-mb | 3700c05b45854a28bd23368c4e4971ae54c18cad | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.contrib import admin
# Register each installed app's admin.py with the Django admin site.
admin.autodiscover()
# The demo project deliberately exposes no URL patterns of its own.
urlpatterns = (
)
| 12.666667 | 38 | 0.798246 |
a70361c3e3b8431100d15650b5da10d40acb287d | 504 | py | Python | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | 5 | 2020-11-05T12:13:45.000Z | 2021-11-19T12:26:49.000Z | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | null | null | null | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | [
"MIT"
] | 3 | 2020-11-23T23:06:34.000Z | 2021-04-18T02:12:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : __init__.py
# @Time : 2019-12-10 17:24
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from loguru import logger
# File sink with a time-stamped name; rotate at 100 MB, keep logs 10 days.
# `trace` is the sink id returned by logger.add (usable with logger.remove).
trace = logger.add('runtime_{time}.log', rotation="100 MB", retention='10 days')
logger.debug('this is a debug message')
if __name__ == '__main__':
print(f()) | 21.913043 | 80 | 0.571429 |
a704ebb77dcf3890670eefaa40d9424024056adf | 1,850 | py | Python | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | [
"BSD-3-Clause"
] | 21 | 2017-03-18T13:46:06.000Z | 2022-02-21T16:02:10.000Z | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | [
"BSD-3-Clause"
] | 673 | 2017-03-12T23:39:28.000Z | 2022-03-17T14:07:38.000Z | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | [
"BSD-3-Clause"
] | 36 | 2017-03-18T18:00:35.000Z | 2021-09-22T06:35:55.000Z | # other imports
from multiprocessing import Pool
def subcatalog_fname(full_cat_fname, source_density, sub_source_density):
    """
    Return the name of a sub-catalog

    Parameters
    ----------
    full_cat_fname : string
        name of the photometry catalog
    source_density : string
        the current source density bin (underscores are rendered as dashes)
    sub_source_density : string
        the current sub-file for the source density bin

    Returns
    -------
    string
        the file name of the sub-catalog
    """
    sd_tag = source_density.replace("_", "-")
    suffix = "_SD{}_sub{}.fits".format(sd_tag, sub_source_density)
    return full_cat_fname.replace(".fits", suffix)
def parallel_wrapper(function, arg_tuples, nprocs=1):
    """
    A wrapper to automatically either run the function as-is or run it with
    parallel processes

    Parameters
    ----------
    function : function
        the function to be evaluated
    arg_tuples : list of tuples
        each tuple is unpacked into one call of `function`
    nprocs : int (default=1)
        number of parallel processes (no parallelization if nprocs=1)

    Returns
    -------
    nothing; the return value of each call is printed
    """
    if nprocs > 1:
        # context manager terminates the worker processes even if a call
        # raises (the original created the Pool and never closed it)
        with Pool(nprocs) as p:
            for r in p.starmap(function, arg_tuples):
                print(r)
    else:
        for a in arg_tuples:
            r = function(*a)
            print(r)
def get_modelsubgridfiles(subgrid_names_file):
    """
    Read in the file that has the list of subgridded physicsmodel files

    Parameters
    ----------
    subgrid_names_file : string
        name of the file with the list of names (one per line)

    Returns
    -------
    list of strings
        the names of the subgridded physicsmodel files

    Notes
    -----
    Anything after the final newline is discarded, so the file is expected
    to end with a newline character.
    """
    with open(subgrid_names_file) as name_file:
        contents = name_file.read()
    return contents.split("\n")[:-1]
| 21.511628 | 94 | 0.621081 |
a7054f9458e6b8299d380a912e48321581ca4d88 | 67 | py | Python | patan/exceptions.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | null | null | null | patan/exceptions.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | null | null | null | patan/exceptions.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | 1 | 2021-03-01T08:35:34.000Z | 2021-03-01T08:35:34.000Z | # _*_ coding: utf-8 _*_
| 11.166667 | 31 | 0.671642 |
a70572ac4f62a9762d70dcd70a9fd3e4dc437ab3 | 2,621 | py | Python | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | [
"MIT"
] | 1 | 2021-09-22T23:25:25.000Z | 2021-09-22T23:25:25.000Z | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | [
"MIT"
] | null | null | null | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | [
"MIT"
] | 1 | 2021-09-08T13:25:15.000Z | 2021-09-08T13:25:15.000Z | from matplotlib import rc
# rc("text", usetex=True)
import matplotlib
# font = {"size": 14}
# matplotlib.rc("font", **font)
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
import time
import matplotlib.colors as mcolors
# Collect every pickled experiment result of the sparsity-fixed run.
dataset_files = glob.glob("./experiments/results/sparsity_fixed/*.pkl")
from collections import defaultdict
# Runtimes and co-clustering ARI scores, grouped by network size (n1, n2).
time_results_sparse = defaultdict(list)
time_results_not_sparse = defaultdict(list)
cari_results_sparse = defaultdict(list)
cari_results_not_sparse = defaultdict(list)
# Generative-model parameters of the experiment.  NOTE(review):
# `connection_probabilities` is not used below; kept for reference only.
e = 0.25
exponent = 5
connection_probabilities = (
    np.array([[4 * e, e, e, e * 2], [e, e, e, e], [2 * e, e, 2 * e, 2 * e]])
    / 2 ** exponent
)
for file in dataset_files:
    # Context manager closes the handle deterministically (the original
    # `pickle.load(open(...))` relied on the garbage collector).
    with open(file, "rb") as result_file:
        results = pickle.load(result_file)
    n1 = results["model"]["tau_1"].shape[0]
    n2 = results["model"]["tau_2"].shape[0]
    time_results_sparse[(n1, n2)].append(results["end_time"])
    cari_results_sparse[(n1, n2)].append(results["co_ari"])
    if results["end_time_not_sparse"]:
        cari_results_not_sparse[(n1, n2)].append(results["co_ari_not_sparse"])
        time_results_not_sparse[(n1, n2)].append(
            results["end_time_not_sparse"]
        )
# Sort the network sizes by n1 so the curve is drawn left to right.
xs = sorted(list(time_results_sparse.keys()), key=lambda x: x[0])
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
# x-axis is the network size n1 * n2; the a*a/2 form assumes n2 == n1 / 2
# -- TODO confirm against the experiment generation script.
xs_values = [a * a / 2 for a in np.array([a[0] for a in xs])]
ax.plot(
    xs_values,
    [np.median(time_results_sparse[x]) for x in xs],
    marker="^",
    markersize=7,
    linewidth=0.5,
    color=mcolors.TABLEAU_COLORS["tab:green"],
)
# Same curve for the non-sparse runs (only sizes that did not run out
# of memory have entries).
xs_value_not_sparse = [
    a * a / 2
    for a in np.array(
        [a[0] for a in sorted(list(time_results_not_sparse.keys()))]
    )
]
ax.plot(
    xs_value_not_sparse,
    [
        np.median(time_results_not_sparse[x])
        for x in sorted(list(time_results_not_sparse.keys()))
    ],
    marker="*",
    markersize=7,
    linewidth=0.5,
    color=mcolors.TABLEAU_COLORS["tab:blue"],
)
# ax.annotate(
#     "OOM",
#     (
#         xs_value_not_sparse[-1],
#         20
#         + np.median(
#             time_results_not_sparse[
#                 sorted(list(time_results_not_sparse.keys()))[-1]
#             ]
#         ),
#     ),
#     color=mcolors.TABLEAU_COLORS["tab:blue"],
# )
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel("Execution time (sec.)")
ax.set_xlabel("Network size $(n_1 \cdot n_2)$")
# ax.ticklabel_format(style="sci", axis="x")
plt.show()
fig.savefig("experiments/results/sparsity_fixed.png")
print("Figure saved in " + "experiments/results/sparsity_fixed.png")
| 26.474747 | 78 | 0.649752 |
a70af31dd713880205073e138c1e10e6d9d8591d | 4,236 | py | Python | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | [
"MIT"
] | 3 | 2021-04-23T06:30:36.000Z | 2022-01-04T09:10:25.000Z | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | [
"MIT"
] | 1 | 2022-01-04T06:33:11.000Z | 2022-01-04T06:33:11.000Z | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | [
"MIT"
] | 6 | 2021-10-03T05:42:50.000Z | 2022-03-15T00:29:09.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import datetime
import os
import numpy as np
from logging import getLogger, DEBUG, NullHandler
| 33.09375 | 77 | 0.553824 |
a70b86cdb095113c2f13cde684b541b11f3759d8 | 4,975 | py | Python | my_tagger.py | jndevanshu/tagger | 51181d3ac9b0959ba507ee0c06c28bed55b51c76 | [
"Apache-2.0"
] | null | null | null | my_tagger.py | jndevanshu/tagger | 51181d3ac9b0959ba507ee0c06c28bed55b51c76 | [
"Apache-2.0"
] | null | null | null | my_tagger.py | jndevanshu/tagger | 51181d3ac9b0959ba507ee0c06c28bed55b51c76 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import time
import codecs
import optparse
import sys
import json
import numpy as np
from my_loader import prepare_sentence
from utils import create_input, iobes_iob, iob_ranges, zero_digits
from model import Model
from ccg_nlpy.core.text_annotation import TextAnnotation
from ccg_nlpy.core.view import View
# Command-line options (optparse, since this script targets Python 2).
optparser = optparse.OptionParser()
optparser.add_option(
    "-m", "--model", default="",
    help="Model location"
)
optparser.add_option(
    "-i", "--input", default="",
    help="Input file location"
)
optparser.add_option(
    "-o", "--output", default="",
    help="Output file location"
)
optparser.add_option(
    "-d", "--delimiter", default="__",
    help="Delimiter to separate words from their tags"
)
optparser.add_option(
    "--outputFormat", default="",
    help="Output file format"
)
opts = optparser.parse_args()[0]
# Check parameters validity
# NOTE(review): --delimiter and --outputFormat are parsed but never used in
# the visible portion of this script -- confirm they are still needed.
assert opts.delimiter
assert os.path.isdir(opts.model)
# assert os.path.isfile(opts.input)
# Load existing model
print "Loading model..."
model = Model(model_path=opts.model)
parameters = model.parameters
l1_model = None
l1_f_eval = None
if 'l1_model' in parameters:
    # Optionally stack a pre-trained L1 model whose path is stored in the
    # saved parameters.
    print("Building L1 model:")
    # NOTE(review): the self-assignment below is a no-op -- was a path
    # rewrite intended here?
    parameters['l1_model'] = parameters['l1_model']
    assert os.path.isdir(parameters['l1_model'])
    l1_model = Model(model_path=parameters['l1_model'])
    l1_parameters = l1_model.parameters
    _, l1_f_eval = l1_model.build(training=False, **l1_parameters)
    l1_model.reload()
    print("Done building l1 model")
# Load reverse mappings
# (the saved model stores id -> token; invert to token -> id for encoding)
word_to_id, char_to_id, tag_to_id = [
    {v: k for k, v in x.items()}
    for x in [model.id_to_word, model.id_to_char, model.id_to_tag]
]
# Load the model
_, f_eval = model.build(training=False, **parameters)
model.reload()
# f_output = codecs.open(opts.output, 'w', 'utf-8')
start = time.time()
print 'Tagging...'
file_list = os.listdir(opts.input)
count = 0
for doc in file_list:
document = TextAnnotation(json_str=open(os.path.join(opts.input, doc)).read())
token_list = document.tokens
start = 0
view_as_json = {}
cons_list = []
if 'NER_CONLL' in document.view_dictionary:
del document.view_dictionary['NER_CONLL']
for sent_end_offset in document.sentences['sentenceEndPositions']:
words_ini = token_list[start:sent_end_offset]
line = " ".join(words_ini)
if line:
# Lowercase sentence
if parameters['lower']:
line = line.lower()
# Replace all digits with zeros
if parameters['zeros']:
line = zero_digits(line)
words = line.rstrip().split()
# Prepare input
sentence = prepare_sentence(words, word_to_id, char_to_id, l1_model=l1_model, l1_f_eval=l1_f_eval, lower=parameters['lower'])
print(sentence)
input = create_input(sentence, parameters, False)
# Decoding
try:
if parameters['crf']:
y_preds = np.array(f_eval(*input))[1:-1]
else:
y_preds = f_eval(*input).argmax(axis=1)
y_preds = [model.id_to_tag[y_pred] for y_pred in y_preds]
except Exception as e:
y_preds = ["O"] * len(words)
# Output tags in the IOB2 format
if parameters['tag_scheme'] == 'iobes':
y_preds = iobes_iob(y_preds)
# Write tags
assert len(y_preds) == len(words)
assert len(y_preds) == len(words_ini)
print(y_preds)
idx = 0
while idx < len(y_preds):
if y_preds[idx] == "O":
idx += 1
elif y_preds[idx].startswith("B-"):
curr_label = y_preds[idx][2:]
st = idx
idx += 1
while idx < len(y_preds) and y_preds[idx].startswith("I-"):
idx += 1
cons_list.append({'start': start + st, 'end': start + idx, 'score': 1.0, 'label': curr_label})
else:
y_preds[idx] = "B-" + y_preds[idx][2:]
print("something wrong....")
# sys.exit(1)
count += 1
start = sent_end_offset + 1
if count % 100 == 0:
print count
view_as_json['viewName'] = 'NER_CONLL'
view_as_json['viewData'] = [{'viewType': 'edu.illinois.cs.cogcomp.core.datastructures.textannotation.View', 'viewName': 'NER_CONLL', 'generator': 'my-lstm-crf-tagger', 'score': 1.0, 'constituents': cons_list}]
view_obj = View(view_as_json, document.get_tokens)
document.view_dictionary['NER_CONLL'] = view_obj
document_json = document.as_json
json.dump(document_json, open(os.path.join(opts.output, doc), "w"), indent=True)
print '---- %i lines tagged in %.4fs ----' % (count, time.time() - start)
# f_output.close()
| 30.521472 | 213 | 0.605427 |
a70d45fc226ab2dd59c5db64dd9ed218486ffae6 | 4,691 | py | Python | Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | 3 | 2019-03-08T23:32:29.000Z | 2019-05-11T23:53:46.000Z | Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | null | null | null | Inkscape-OUTPUT-PRO-master/outputpro/cutmarks.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess #re, subprocess, simplestyle, os#inkex, os, random, sys, subprocess, shutil
| 56.518072 | 248 | 0.5534 |
a70ebc7cdf0e76c3a3a02437342d60d6be4b5d1f | 4,513 | py | Python | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | [
"MIT"
] | null | null | null | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | [
"MIT"
] | 2 | 2021-02-05T18:58:23.000Z | 2021-02-14T15:23:46.000Z | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from unittest.mock import patch
from click.testing import CliRunner
import pytest
from datateer.upload_agent.main import cli
from datateer.upload_agent.config import load_config, save_config, save_feed
import datateer.upload_agent.constants as constants
def test_command_upload_handles_feed_key_and_path_arguments(runner):
    """The upload command accepts a feed key and a path, and exits with an
    error when the feed key is unknown."""
    result = runner.invoke(cli, ['upload', 'FEED-KEY', 'PATH'])
    print(result.output)
    assert result.exit_code == 1
    # Bug fix: the original line asserted a bare non-empty string, which is
    # always truthy. The intent is to check the CLI's error message.
    assert 'Feed with key FEED-KEY does not exist' in result.output
def test_config_feed_key_defaults_to_feed_code(runner):
    """When the user accepts the prompt default, the feed key falls back to
    the feed code."""
    args = ['config', 'feed', '--provider', 'PROVIDER',
            '--source', 'SOURCE', '--feed', 'FEED']
    result = runner.invoke(cli, args)
    # The prompt offers the feed code as the default value...
    assert 'Feed key [FEED]:' in result.output
    # ...and the user did not type a value of their own
    assert 'Feed key [FEED]: FEED' not in result.output
| 41.40367 | 132 | 0.734766 |
a70f8fbd9aef0f039b565e8b5e5bf81d26036760 | 14,899 | py | Python | modron/characters.py | WardLT/play-by-post-helper | 26df681f2a28510f88e552be628910e4e5fe57bb | [
"MIT"
] | null | null | null | modron/characters.py | WardLT/play-by-post-helper | 26df681f2a28510f88e552be628910e4e5fe57bb | [
"MIT"
] | 13 | 2020-04-08T02:56:58.000Z | 2020-10-04T21:52:43.000Z | modron/characters.py | WardLT/play-by-post-helper | 26df681f2a28510f88e552be628910e4e5fe57bb | [
"MIT"
] | null | null | null | """Saving and using information about characters"""
import json
import os
from enum import Enum
from typing import Dict, List, Optional, Tuple
import yaml
from pydantic import BaseModel, Field, validator
from modron.config import get_config
_config = get_config()
def _compute_mod(score: int) -> int:
"""Compute a mod given an ability score
Args:
score (int): Ability score
Returns:
(int) Modifier for that score
"""
return score // 2 - 5
# Standard 5e skill list mapped to the ability that backs each check.
# Keys are lowercase; callers lowercase the skill name before lookup.
_5e_skills = {
    'acrobatics': Ability.DEX, 'animal handling': Ability.WIS, 'arcana': Ability.INT, 'athletics': Ability.STR,
    'deception': Ability.CHA, 'history': Ability.INT, 'insight': Ability.WIS, 'intimidation': Ability.CHA,
    'investigation': Ability.INT, 'medicine': Ability.WIS, 'nature': Ability.INT, 'perception': Ability.WIS,
    'performance': Ability.CHA, 'persuasion': Ability.CHA, 'religion': Ability.INT, 'sleight of hand': Ability.DEX,
    'stealth': Ability.DEX, 'survival': Ability.WIS
}
# Number of sides on the hit die for each 5e class (e.g. 'wizard' -> d6)
_class_hit_die = {
    'artificer': 8, 'barbarian': 12, 'bard': 8, 'cleric': 8, 'druid': 8, 'fighter': 10, 'monk': 8, 'paladin': 10,
    'ranger': 10, 'rogue': 8, 'sorcerer': 6, 'warlock': 8, 'wizard': 6
}
"""Hit die for each 5E class"""
def harm(self, amount: int):
    """Apply damage to this character.

    Temporary hit points absorb damage first; the base pool never drops
    below zero.

    Args:
        amount (int): Amount of damage (must be non-negative)
    """
    assert amount >= 0, "Damage must be nonnegative"
    # An uninitialized HP pool is treated as a fully healed character
    if self.current_hit_points is None:
        self.full_heal()
    # Temporary hit points soak up as much of the damage as they can
    absorbed = min(self.temporary_hit_points, amount)
    self.temporary_hit_points -= absorbed
    # Whatever remains comes out of the base pool, floored at zero
    remaining = amount - absorbed
    self.current_hit_points = max(0, self.current_hit_points - remaining)
def full_heal(self):
    """Restore current hit points to the (possibly adjusted) maximum.

    Note: only the base pool is touched; temporary hit points are not
    modified here.
    """
    self.current_hit_points = self.current_hit_point_maximum
def grant_temporary_hit_points(self, amount: int):
    """Give the character temporary hit points.

    Args:
        amount: Number of temporary HP to add (must be positive)
    """
    assert amount > 0, "Amount must be positive"
    # Temporary HP stack on top of whatever is already there
    self.temporary_hit_points = self.temporary_hit_points + amount
def remove_temporary_hit_points(self):
    """Remove all temporary hit points (resets the temporary pool to zero)."""
    self.temporary_hit_points = 0
def adjust_hit_point_maximum(self, amount: int):
    """Apply a change (positive or negative) to the hit point maximum.

    The cumulative adjustment is clamped so the effective maximum never
    drops below zero, and current hit points are capped at the new
    effective maximum.

    Args:
        amount: Amount to change the HP maximum by
    """
    self.hit_points_adjustment += amount
    # The adjustment may not push the effective maximum below zero
    if self.hit_points_adjustment < -self.hit_points:
        self.hit_points_adjustment = -self.hit_points
    # Current HP may not exceed the (possibly reduced) maximum
    if self.current_hit_points > self.current_hit_point_maximum:
        self.current_hit_points = self.current_hit_point_maximum
def reset_hit_point_maximum(self):
    """Clear any adjustments so the maximum reverts to its base value."""
    self.hit_points_adjustment = 0
def get_hit_die(self) -> Dict[str, int]:
    """Tally the character's hit dice across all of their classes.

    Returns:
        (dict) Mapping of hit die name (e.g. 'd8') to the number of dice
    """
    tally = {}
    for cls_name, levels in self.classes.items():
        die = f'd{_class_hit_die[cls_name]}'
        # Multiclassing can repeat a die size; accumulate instead of overwrite
        tally[die] = tally.get(die, 0) + levels
    return tally
# Skills and checks
def save_modifier(self, ability: str) -> int:
    """Get the modifier for a saving throw with the given ability.

    Args:
        ability (str): Ability to check. You can use the full name or
            the first three letters. Not case-sensitive.

    Returns:
        (int) Modifier for the roll, including the proficiency bonus if
        the character is proficient in that saving throw
    """
    modifier = self.ability_modifier(ability)
    # Normalize the name so it can be compared against the proficiency list
    matched = Ability.match(ability)
    if matched.lower() in self.saving_throws:
        modifier += self.proficiency_bonus
    return modifier
def ability_modifier(self, ability: str) -> int:
    """Get the modifier for a certain ability.

    Args:
        ability (str): Ability to check. You can use the full name or
            the first three letters. Not case-sensitive.

    Returns:
        (int) Modifier for the roll
    """
    # Normalize, then match against the known ability list
    matched = Ability.match(ability.lower())
    # Modifiers are stored as attributes named e.g. "str_mod"
    return getattr(self, f'{matched}_mod')
def skill_modifier(self, name: str) -> int:
    """Get the modifier for a certain skill.

    Custom skills take precedence over the standard 5e list, so a
    character can back a skill with a non-standard ability (as Monks
    do for some Wisdom-based checks).

    Args:
        name (str): Name of the skill. Not case sensitive.

    Raises:
        ValueError: If the skill is neither a custom nor a standard 5e skill
    """
    key = name.lower()
    # Custom skills override the standard 5e ability assignment
    if key in self.custom_skills:
        backing_ability = self.custom_skills[key]
    elif key in _5e_skills:
        backing_ability = _5e_skills[key]
    else:
        raise ValueError(f'Unrecognized skill: {name}')
    base = getattr(self, f'{backing_ability}_mod')
    # Expertise doubles the proficiency bonus; proficiency adds it once
    if key in self.expertise:
        return base + 2 * self.proficiency_bonus
    if key in self.proficiencies:
        return base + self.proficiency_bonus
    return base
def lookup_modifier(self, check: str) -> int:
    """Get the modifier for a certain roll.

    Tries, in order: a saving throw ("<ability> save"), a plain ability
    check, and finally a skill check.

    Args:
        check (str): Description of which check to make

    Returns:
        (int) Modifier for the d20 roll
    """
    # Make it all lowercase
    check = check.lower()
    words = check.split(" ")
    # Save: e.g. "dex save" -> saving throw on the first word
    if 'save' in words:
        return self.save_modifier(words[0])
    # Ability check
    # NOTE(review): this relies on ability_modifier raising AssertionError
    # for unknown ability names (presumably from Ability.match); asserts
    # are stripped under `python -O` -- confirm.
    try:
        return self.ability_modifier(check)
    except AssertionError:
        pass  # and try something else
    # Skill check is the fallback; raises ValueError if unrecognized
    return self.skill_modifier(check)
def get_skills_by_ability(self, ability: str) -> Dict[str, str]:
    """List the skills for this character that use a certain base ability.

    Args:
        ability: Name of the ability. Full name or first three letters;
            not case-sensitive.

    Returns:
        Dictionary mapping each matching skill to the character's level
        of training: "expert", "proficient", or "untrained"
    """
    # Match the ability
    matched_ability = Ability.match(ability)

    # Gather the 5e skills and any custom skills backed by this ability
    matched_skills = [skill for skill, attr in _5e_skills.items() if attr == matched_ability]
    matched_skills.extend(
        skill for skill, attr in self.custom_skills.items() if attr == matched_ability
    )

    # Label each skill with its training level. Consistency fix: expertise
    # is checked before proficiency, so a skill listed in both reports
    # "expert" -- matching how skill_modifier computes the bonus.
    output = {}
    for skill in matched_skills:
        if skill in self.expertise:
            output[skill] = "expert"
        elif skill in self.proficiencies:
            output[skill] = "proficient"
        else:
            output[skill] = "untrained"
    return output
def list_available_characters(team_id: str, user_id: str) -> List[str]:
    """List the names of character sheets that are available to a user.

    Args:
        team_id (str): ID of the Slack workspace
        user_id (str): ID of the user in question

    Returns:
        ([str]): Names of the characters whose sheet lists this user as player
    """
    names = []
    for sheet_path in _config.list_character_sheets(team_id):
        # Only offer sheets that belong to the requesting player
        if Character.from_yaml(sheet_path).player == user_id:
            # File name without the trailing ".yml" extension
            names.append(os.path.basename(sheet_path)[:-4])
    return names
def load_character(team_id: str, name: str) -> Tuple[Character, str]:
    """Load a character sheet.

    Args:
        team_id (str): ID of the Slack workspace
        name (str): Name of the character

    Returns:
        - (Character) Desired character sheet
        - (str): Absolute path to the character sheet, in case you must save it later
    """
    # NOTE(review): this reloads the config on every call rather than using
    # the module-level _config used elsewhere -- presumably intentional; confirm.
    sheet_path = get_config().get_character_sheet_path(team_id, name)
    return Character.from_yaml(sheet_path), os.path.abspath(sheet_path)
| 34.093822 | 118 | 0.627626 |
a7101a610a52017f13a5fe2d6d32d405867f9aef | 1,558 | py | Python | setup.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | 1 | 2019-11-13T00:44:09.000Z | 2019-11-13T00:44:09.000Z | setup.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | null | null | null | setup.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | 1 | 2019-11-13T00:47:16.000Z | 2019-11-13T00:47:16.000Z | #
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
from distutils.core import setup
import os
import sys
# Console entry points installed with the package
scripts = [
    'bin/rubik',
]
# Temporarily put this source tree at the front of sys.path so the package
# version can be read from rubik.conf without installing first.
try:
    dirname = os.path.dirname(os.path.abspath(sys.argv[0]))
    py_dirname = dirname
    sys.path.insert(0, py_dirname)
    from rubik import conf
finally:
    # NOTE(review): this removes sys.path[0] even if an exception occurred
    # before the insert above ran -- confirm this is the intended behavior.
    del sys.path[0]
setup(
    name = "python-rubik",
    version = conf.VERSION,
    requires = [],
    description = "Tool to read/write/visualize N-dimensional cubes",
    author = "Simone Campagna",
    author_email = "simone.campagna@tiscali.it",
    url="https://github.com/simone-campagna/rubik",
    download_url = 'https://github.com/simone-campagna/rubik/archive/{}.tar.gz'.format(conf.VERSION),
    packages = ["rubik",
                "rubik.application",
                "rubik.application.help_functions",
                "rubik.cubes",
                "rubik.visualizer",
                "rubik.visualizer.impl"
               ],
    scripts = scripts,
    package_data = {},
)
| 27.333333 | 101 | 0.668164 |
a710a43bb737f726810f9f83e8727afbf0fbd72e | 5,130 | py | Python | geco/mips/tests/test_set_cover.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 8 | 2020-12-16T09:59:05.000Z | 2022-03-18T09:48:43.000Z | geco/mips/tests/test_set_cover.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 101 | 2020-11-09T10:20:03.000Z | 2022-03-24T13:50:06.000Z | geco/mips/tests/test_set_cover.py | FreestyleBuild/GeCO | 6db1a549b3145b3bc5d3025a9bccc03be6575564 | [
"MIT"
] | 3 | 2021-04-06T13:26:03.000Z | 2022-03-22T13:22:16.000Z | import collections
import itertools
import pytest
from geco.mips.set_cover.yang import *
from geco.mips.set_cover.sun import *
from geco.mips.set_cover.orlib import *
from geco.mips.set_cover.gasse import *
"""
Generic Tests
"""
"""
Yang Tests
"""
"""
Sun Tests
"""
"""
OR-Library tests
"""
"""
Gasse tests
"""
| 28.5 | 87 | 0.670175 |
a71112e7354fe0bb8dca61271d9bc6a1f7ca9381 | 8,430 | py | Python | lib/overnet/gen_bazel.py | PowerOlive/garnet | 16b5b38b765195699f41ccb6684cc58dd3512793 | [
"BSD-3-Clause"
] | null | null | null | lib/overnet/gen_bazel.py | PowerOlive/garnet | 16b5b38b765195699f41ccb6684cc58dd3512793 | [
"BSD-3-Clause"
] | null | null | null | lib/overnet/gen_bazel.py | PowerOlive/garnet | 16b5b38b765195699f41ccb6684cc58dd3512793 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import sys
# This program generates BUILD.bazel, WORKSPACE, .bazelrc from BUILD.gn
####################################################################################################
# TOKENIZER
Tok = collections.namedtuple('Tok', ['tok', 'value'])
sym_name = {
',': 'comma',
'(': 'left_paren',
')': 'right_paren',
'{': 'left_mustache',
'}': 'right_mustache',
'[': 'left_square',
']': 'right_square',
'=': 'equals',
}
####################################################################################################
# PARSER
Bundle = collections.namedtuple('Bundle', ['rule', 'name', 'values'])
####################################################################################################
# CODEGEN
FUZZERS = ['bbr', 'internal_list', 'linearizer',
'packet_protocol', 'receive_mode', 'routing_header']
assert FUZZERS == sorted(FUZZERS)
# Translate each GN rule bundle from BUILD.gn into the equivalent Bazel rule,
# then append one cc_binary per fuzzer target. (Python 2 print syntax.)
with open('BUILD.bazel', 'w') as o:
    with open('BUILD.gn') as f:
        for bundle in parse(tokenize(f.read())):
            if bundle.rule == 'source_set':
                # GN source_set -> Bazel cc_library
                print >>o, 'cc_library('
                print >>o, ' name="%s",' % bundle.name
                print >>o, ' srcs=[%s],' % ','.join(
                    '"%s"' % s for s in bundle.values['sources'])
                if 'deps' in bundle.values:
                    # mapdep() returns None for deps with no Bazel equivalent
                    print >>o, ' deps=[%s],' % ','.join(
                        '"%s"' % mapdep(s) for s in bundle.values['deps'] if mapdep(s) is not None)
                print >>o, ')'
            if bundle.rule == 'executable':
                # Test-only executables become sharded cc_test targets
                if bundle.values.get('testonly', False):
                    print >>o, 'cc_test(shard_count=50,'
                else:
                    print >>o, 'cc_binary('
                print >>o, ' name="%s",' % bundle.name
                print >>o, ' srcs=[%s],' % ','.join(
                    '"%s"' % s for s in bundle.values['sources'])
                print >>o, ' deps=[%s],' % ','.join(
                    '"%s"' % mapdep(s) for s in bundle.values['deps'] if mapdep(s) is not None)
                print >>o, ')'
    # Fuzzer targets are not in BUILD.gn; emit one cc_binary each
    for fuzzer in FUZZERS:
        print >>o, 'cc_binary('
        print >>o, ' name="%s_fuzzer",' % fuzzer
        srcs = ['%s_fuzzer.cc' % fuzzer]
        # Include the optional helpers header only when it exists on disk
        helpers_h = '%s_fuzzer_helpers.h' % fuzzer
        if os.path.exists(helpers_h):
            srcs.append(helpers_h)
        print >>o, ' srcs=[%s],' % ', '.join('"%s"' % s for s in srcs)
        print >>o, ' deps=[":overnet", ":test_util"],'
        print >>o, ')'
WORKSPACE = """
# This file is not checked in, but generated by gen_bazel.py
# Make changes there
git_repository(
name = 'com_google_googletest',
remote = 'https://github.com/google/googletest.git',
commit = 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d',
)
"""
BAZELRC = """
# This file is not checked in, but generated by gen_bazel.py
# Make changes there
build --client_env=CC=clang
build --copt -std=c++14
build:asan --strip=never
build:asan --copt -fsanitize=address
build:asan --copt -O0
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address
build:asan --action_env=ASAN_OPTIONS=detect_leaks=1:color=always
build:asan --action_env=LSAN_OPTIONS=report_objects=1
build:asan-fuzzer --strip=never
build:asan-fuzzer --copt -fsanitize=fuzzer,address
build:asan-fuzzer --copt -fsanitize-coverage=trace-cmp
build:asan-fuzzer --copt -O0
build:asan-fuzzer --copt -fno-omit-frame-pointer
build:asan-fuzzer --linkopt -fsanitize=fuzzer,address
build:asan-fuzzer --action_env=ASAN_OPTIONS=detect_leaks=1:color=always
build:asan-fuzzer --action_env=LSAN_OPTIONS=report_objects=1
build:msan --strip=never
build:msan --copt -fsanitize=memory
build:msan --copt -O0
build:msan --copt -fsanitize-memory-track-origins
build:msan --copt -fsanitize-memory-use-after-dtor
build:msan --copt -fno-omit-frame-pointer
build:msan --copt -fPIC
build:msan --linkopt -fsanitize=memory
build:msan --linkopt -fPIC
build:msan --action_env=MSAN_OPTIONS=poison_in_dtor=1
build:tsan --strip=never
build:tsan --copt -fsanitize=thread
build:tsan --copt -fno-omit-frame-pointer
build:tsan --copt -DNDEBUG
build:tsan --linkopt -fsanitize=thread
build:tsan --action_env=TSAN_OPTIONS=halt_on_error=1
build:ubsan --strip=never
build:ubsan --copt -fsanitize=undefined
build:ubsan --copt -fno-omit-frame-pointer
build:ubsan --copt -DNDEBUG
build:ubsan --copt -fno-sanitize=function,vptr
build:ubsan --linkopt -fsanitize=undefined
build:ubsan --action_env=UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1
build:ubsan-fuzzer --strip=never
build:ubsan-fuzzer --copt -fsanitize=fuzzer,undefined
build:ubsan-fuzzer --copt -fno-omit-frame-pointer
build:ubsan-fuzzer --copt -DNDEBUG
build:ubsan-fuzzer --copt -fno-sanitize=function,vptr
build:ubsan-fuzzer --linkopt -fsanitize=fuzzer,undefined
build:ubsan-fuzzer --action_env=UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1
"""
with open('WORKSPACE', 'w') as o:
o.write(WORKSPACE)
with open('.bazelrc', 'w') as o:
o.write(BAZELRC)
| 27.281553 | 100 | 0.57758 |
a711b022a699f3a1657ba1bf4a22b34ce38cfe57 | 2,878 | py | Python | hcplot/scales/colors/hue.py | bernhard-42/hcplot | 1c791e2b19b173b9b98a3d8914095e3c372c9de4 | [
"Apache-2.0"
] | null | null | null | hcplot/scales/colors/hue.py | bernhard-42/hcplot | 1c791e2b19b173b9b98a3d8914095e3c372c9de4 | [
"Apache-2.0"
] | null | null | null | hcplot/scales/colors/hue.py | bernhard-42/hcplot | 1c791e2b19b173b9b98a3d8914095e3c372c9de4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.color import hcl2rgb, rgb2str
import numpy as np
#
# Quick Accessor
#
def getBrewer(typ, palette, size):
    """Convenience wrapper: look up a HueColors palette builder by its
    type name and invoke it with the given palette and size."""
    builder = getattr(HueColors, typ)
    return builder(palette, size)
| 30.294737 | 99 | 0.592078 |
a71203325ed630e617cb8551726c8b7f07f5f6f8 | 423 | py | Python | accounts/migrations/0013_alter_caller_list_file.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0013_alter_caller_list_file.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0013_alter_caller_list_file.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.4 on 2021-07-12 14:38
from django.db import migrations, models
| 22.263158 | 72 | 0.614657 |
a712ce0eafb15d53426b4b979da8580fdd2c7a4c | 7,978 | py | Python | vk_bots/api.py | termisaal/VkBotsApi | 0957ea46952c260090741aeddf71d50dd950f74c | [
"MIT"
] | null | null | null | vk_bots/api.py | termisaal/VkBotsApi | 0957ea46952c260090741aeddf71d50dd950f74c | [
"MIT"
] | null | null | null | vk_bots/api.py | termisaal/VkBotsApi | 0957ea46952c260090741aeddf71d50dd950f74c | [
"MIT"
] | null | null | null | """
VK Bots API Wrapper
Copyright (c) 2020-2021 Misaal
"""
import aiohttp
import json
import typing
from .errors import VKAPIError
from .keyboard import Keyboard
from .utils import to_namedtuple, get_random_id
| 27.701389 | 93 | 0.534971 |
a71396c8eccbd499f64ee47c8235e9246d3bc275 | 32,867 | py | Python | saber/xbrain/xbrain.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 12 | 2018-05-14T17:43:18.000Z | 2021-11-16T04:03:33.000Z | saber/xbrain/xbrain.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 34 | 2019-05-06T19:13:36.000Z | 2021-05-06T19:12:35.000Z | saber/xbrain/xbrain.py | elenimath/saber | 71acab9798cf3aee1c4d64b09453e5234f8fdf1e | [
"Apache-2.0"
] | 3 | 2019-10-08T17:42:17.000Z | 2021-07-28T05:52:02.000Z | # Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def classify_pixel(input_data, classifier, threads=8, ram=4000):
    """
    Runs a pre-trained ilastik classifier on a volume of data

    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Arguments:
        input_data: data to be classified - 3D numpy array
        classifier: path to an ilastik trained/classified project file
        threads: number of threads to use for classifying input data
        ram: RAM to use in MB

    Returns:
        per-voxel class probability predictions (squeezed numpy array)
    """
    # Imports are local so importing this module does not require ilastik
    import numpy as np
    import six           # NOTE(review): six and pdb appear unused here
    import pdb
    from collections import OrderedDict
    import vigra
    import os
    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier
    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)
    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    # Sanity checks: the project must contain images and a trained classifier
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()
    print("input_data.shape", input_data.shape)
    # Tagging the data ensures that ilastik interprets the axes correctly.
    input_data = vigra.taggedView(input_data, 'xyz')
    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value
    print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    role_data_dict = OrderedDict([("Raw Data",
                                   [DatasetInfo(preloaded_array=input_data)])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    # be exported to disk according to project's DataExport settings.
    # In that case, run_export() returns None.
    predictions = shell.workflow.batchProcessingApplet.\
        run_export(role_data_dict, export_to_array=True)
    predictions = np.squeeze(predictions)
    print("predictions.dtype, predictions.shape", predictions.dtype, predictions.shape)
    print("DONE.")
    return predictions
# Unsupervised GMM classification
# Unsupervised GMM classification (3D for xbrain data)
# cell_class=1: cells are darker than the background; cell_class=0: cells are lighter than the background
def segment_vessels(vessel_probability, probability_threshold, dilation_size, minimum_size,
                    small_object_size=100):
    """
    This function produces a binary image with segmented vessels from a probability map (from
    ilastik or another classifier).

    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    vessel_probability : ndarray
        Nr x Nc x Nz matrix which contains the probability of each voxel being a vessel.
    probability_threshold : float
        threshold between (0,1) to apply to probability map (only consider voxels for which
        vessel_probability(r,c,z) >= probability_threshold).
    dilation_size : int
        Sphere Structural Element diameter size.
    minimum_size : int
        components smaller than this are removed from image (after dilation).
    small_object_size : int, optional
        components smaller than this are removed before dilation (default 100,
        the value previously hard-coded as `smallsize`).

    Returns
    -------
    ndarray
        Binary Image
    """
    from scipy import ndimage as ndi
    from skimage import morphology

    # Threshold the probability map to a boolean mask
    unfiltered_im = (vessel_probability >= probability_threshold)
    # Drop speckle noise first so it is not inflated into blobs by dilation
    im_removed_small_objects = morphology.remove_small_objects(
        unfiltered_im, min_size=small_object_size, in_place=True)
    # Dilate with a spherical structuring element to close small gaps
    dilated_im = ndi.binary_dilation(im_removed_small_objects,
                                     morphology.ball((dilation_size - 1) / 2))
    # Final cleanup: remove anything still smaller than minimum_size
    image_out = morphology.remove_small_objects(dilated_im, min_size=minimum_size,
                                                in_place=True)
    return image_out
def detect_cells2D(cell_probability, probability_threshold, stopping_criterion,
                   initial_template_size, dilation_size, max_no_cells):
    """
    This is the top level function to infer the position (and eventually size) of all cells in a 2D
    volume of image data. We assume that we already have computed a "probability map" which encodes
    the probability that each voxel corresponds to a cell body.

    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    cell_probability : ndarray
        Nr x Nc matrix which contains the probability of each voxel being a cell body.
    probability_threshold : float
        threshold between (0,1) to apply to probability map (only consider voxels for which
        cell_probability(r,c) > probability_threshold)
    stopping_criterion : float
        value between (0,1): minimum normalized correlation between template and probability
        map (Example = 0.47)
    initial_template_size : int
        initial size of circular template (to use in sweep)
    dilation_size : int
        size to increase mask around each detected cell (zero out circle of radius
        initial_template_size+dilation_size around each centroid)
    max_no_cells : int
        maximum number of cells (alternative stopping criterion)

    Returns
    -------
    ndarray
        centroids = D x 3 matrix, where D = number of detected cells.
        The (x,y) coordinate of each cell are in columns 1-2.
        The third column contains the correlation (ptest) between the template
        and probability map and thus represents our "confidence" in the estimate.
        The algorithm terminates when ptest<=stopping_criterion.
    ndarray
        new_map = Nr x Nc matrix containing labeled detected cells (1,...,D)
    """
    from scipy import signal
    import numpy as np

    # threshold probability map.
    newtest = (cell_probability * (cell_probability > probability_threshold)).astype('float32')

    # initial_template_size is an int now but could be a vector later on - convert it to an array
    initial_template_size = np.atleast_1d(initial_template_size)

    # create dictionary of circular templates
    # (renamed from `dict`/`id`, which shadowed Python builtins)
    box_radius = np.ceil(np.max(initial_template_size) / 2) + 1
    templates = create_synth_dict2D(initial_template_size, box_radius)
    dilate_templates = create_synth_dict2D(initial_template_size + dilation_size, box_radius)
    box_length = int(round(np.shape(templates)[0] ** (1 / 2)))
    new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
    newid = 1
    centroids = np.empty((0, 3))

    # run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
    for ktot in range(max_no_cells):
        val = np.zeros((np.shape(templates)[1], 1), dtype='float32')
        peak_locs = np.zeros((np.shape(templates)[1], 1), dtype='uint32')
        # convolve the probability map with each template in the dictionary
        for j in range(np.shape(templates)[1]):
            convout = signal.fftconvolve(newtest, np.reshape(templates[:, j],
                                         (box_length, box_length)), mode='same')
            # get the max value of the flattened convout array and its index
            val[j], peak_locs[j] = np.real(np.amax(convout)), np.argmax(convout)
        # find position in image with max correlation
        which_atom = np.argmax(val)
        which_loc = peak_locs[which_atom]
        # Place the matched template into the full-size image, centered at which_loc
        x2 = compute2dvec(templates[:, which_atom], which_loc, box_length, np.shape(newtest))
        xid = np.nonzero(x2)
        # Same for the dilated template; zero out that region of the probability map
        x3 = compute2dvec(dilate_templates[:, which_atom], which_loc, box_length, np.shape(newtest))
        newtest = newtest * (x3 == 0)
        # NOTE(review): with multiple template sizes this division broadcasts to a
        # matrix and the `if` below would raise; only valid for a single size -- confirm.
        ptest = val / np.sum(templates, axis=0)
        if ptest < stopping_criterion:
            print("Cell Detection is done")
            return (centroids, new_map)
        # Label detected cell
        new_map[xid] = newid
        newid = newid + 1
        # Convert flat index to indices
        rr, cc = np.unravel_index(which_loc, np.shape(newtest))
        new_centroid = cc, rr  # Check - why cc is first? Flip indices
        # insert a row into centroids
        centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
        # for later: convert to logging and print with much less frequency
        if (ktot % 10 == 0):
            print('Iteration remaining = ', (max_no_cells - ktot - 1), 'Correlation = ', ptest)
    print("Cell Detection is done")
    return (centroids, new_map)
def detect_cells(cell_probability, probability_threshold, stopping_criterion,
initial_template_size, dilation_size, max_no_cells):
"""
This is the top level function to infer the position (and eventually size) of all cells in a 3D
volume of image data. We assume that we already have computed a "probability map" which encodes
the probability that each voxel corresponds to a cell body.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
cell_probability : ndarray
Nr x Nc x Nz matrix which contains the probability of each voxel being a cell body.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
cell_probability(r,c,z) > probability_threshold)
stopping_criterion : float
stopping criterion is a value between (0,1) (minimum normalized correlation between
template and probability map) (Example = 0.47)
initial_template_size : int
initial size of spherical template (to use in sweep)
dilation_size : int
size to increase mask around each detected cell (zero out sphere of radius with
initial_template_size+dilation_size around each centroid)
max_no_cells : int
maximum number of cells (alternative stopping criterion)
Returns
-------
ndarray
centroids = D x 4 matrix, where D = number of detected cells.
The (x,y,z) coordinate of each cell are in columns 1-3.
The fourth column contains the correlation (ptest) between the template
and probability map and thus represents our "confidence" in the estimate.
The algorithm terminates when ptest<=stopping_criterion.
ndarray
new_map = Nr x Nc x Nz matrix containing labeled detected cells (1,...,D)
"""
# following imports to be updated when directory structure are finalized
#import create_synth_dict
#from compute3dvec import compute3dvec
from scipy import signal
import numpy as np
import pdb
import logging
if len(cell_probability.shape) == 4:
print('Assuming Z, Chan, Y, X input')
cell_probability = np.transpose(cell_probability[:,0,:,:], (2,1,0))
# threshold probability map.
newtest = (cell_probability * (cell_probability > probability_threshold)).astype('float32')
#initial_template_size is an int now but could a vector later on - convert it to an array
initial_template_size = np.atleast_1d(initial_template_size)
# create dictionary of spherical templates
box_radius = np.ceil(np.max(initial_template_size)/2) + 1
dict = create_synth_dict(initial_template_size, box_radius)
dilate_dict = create_synth_dict(initial_template_size + dilation_size, box_radius)
box_length = int(round(np.shape(dict)[0] ** (1/3)))
new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
newid = 1
centroids = np.empty((0, 4))
# run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
for ktot in range(max_no_cells):
val = np.zeros((np.shape(dict)[1], 1), dtype='float32')
id = np.zeros((np.shape(dict)[1], 1), dtype='uint32')
# loop to convolve the probability cube with each template in dict
for j in range(np.shape(dict)[1]):
convout = signal.fftconvolve(newtest, np.reshape(dict[:,j], (box_length, box_length,
box_length)), mode='same')
# get the max value of the flattened convout array and its index
val[j],id[j] = np.real(np.amax(convout)), np.argmax(convout)
# find position in image with max correlation
which_atom = np.argmax(val)
which_loc = id[which_atom]
# Save dict into a cube array with its center given by which_loc and place it into a 3-D array.
x2 = compute3dvec(dict[:, which_atom], which_loc, box_length, np.shape(newtest))
xid = np.nonzero(x2)
# Save dilate_dict into a cube array with its center given by which_loc and place it into a 3-D array.
x3 = compute3dvec(dilate_dict[:, which_atom], which_loc, box_length, np.shape(newtest))
newtest = newtest * (x3 == 0)
ptest = val/np.sum(dict, axis=0)
if ptest < stopping_criterion:
print("Cell Detection is done")
return(centroids, new_map)
# Label detected cell
new_map[xid] = newid
newid = newid + 1
#Convert flat index to indices
rr, cc, zz = np.unravel_index(which_loc, np.shape(newtest))
new_centroid = rr, cc, zz #Check - why cc is first?
# insert a row into centroids
centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
# for later: convert to logging and print with much less frequency
if(ktot % 10 == 0):
print('Iteration remaining = ', (max_no_cells - ktot - 1), 'Correlation = ', ptest )
print("Cell Detection is done, centroids: {} map: {}".format(centroids.shape, new_map.shape))
return(centroids, new_map)
def create_synth_dict(radii, box_radius):
    """Create a dictionary of normalized spherical templates.

    Each column of the returned array is a flattened, L2-normalized binary
    ball of one of the requested sizes, built by dilating a single seed voxel.
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    radii : int or sequence of int
        Template sizes (could be a 1xN vector but currently is an integer).
    box_radius : float
        Half-width of the cubic bounding box for every template.

    Returns
    -------
    ndarray
        Dictionary of template vectors of shape (box_length ** 3, len(radii)),
        where box_length = box_radius * 2 + 1; one flattened template per column.
    """
    import numpy as np
    from numpy import linalg as LA
    from scipy import ndimage as ndi
    from skimage.morphology import ball
    box_length = int(box_radius * 2 + 1)  # edge length of the template cube
    # 'templates' replaces the original local name 'dict', which shadowed the builtin.
    templates = np.zeros((box_length ** 3, np.size(radii)), dtype='float32')
    # NOTE(review): cvox is one voxel past the geometric center ((box_length - 1) // 2);
    # this looks like a 1-based-indexing carry-over -- confirm before changing, since
    # downstream detection positions depend on it.
    cvox = int((box_length - 1) / 2 + 1)
    for i in range(len(radii)):
        template = np.zeros((box_length, box_length, box_length))
        template[cvox, cvox, cvox] = 1
        # Dilate the seed voxel into a ball, flatten, then L2-normalize the column.
        templates[:, i] = np.reshape(ndi.binary_dilation(template, ball((radii[i] - 1) / 2)),
                                     (box_length ** 3))
        templates[:, i] = templates[:, i] / (LA.norm(templates[:, i]))
    return(templates)
def create_synth_dict2D(radii, box_radius):
    """Create a dictionary of normalized 2-D disk templates.

    Each column is the central z-slice of a 3-D ball of the matching size,
    flattened and L2-normalized (columns with zero norm are left as zeros).
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    radii : int or sequence of int
        Template sizes (could be a 1xN vector but currently is an integer).
    box_radius : float
        Half-width of the square bounding box for every template.

    Returns
    -------
    ndarray
        Dictionary of template vectors of shape (box_length ** 2, len(radii)),
        where box_length = box_radius * 2 + 1; one flattened template per column.
    """
    import numpy as np
    from numpy import linalg as LA
    from scipy import ndimage as ndi
    from skimage.morphology import ball
    box_length = int(box_radius * 2 + 1)  # edge length of the square template
    # 'templates' replaces the original local name 'dict', which shadowed the builtin.
    templates = np.zeros((box_length ** 2, np.size(radii)), dtype='float32')
    # NOTE(review): cvox is one voxel past the geometric center ((box_length - 1) // 2);
    # this looks like a 1-based-indexing carry-over -- confirm before changing.
    cvox = int((box_length - 1) / 2 + 1)
    for i in range(len(radii)):
        template = np.zeros((box_length, box_length, box_length))
        template[cvox, cvox, cvox] = 1
        # Dilate the seed voxel into a ball, keep only the slice through its center.
        tmp = ndi.binary_dilation(template, ball((radii[i] - 1) / 2))
        templates[:, i] = np.reshape(tmp[:, :, cvox], (box_length ** 2))
        if(LA.norm(templates[:, i]) > 0):
            templates[:, i] = templates[:, i] / (LA.norm(templates[:, i]))
    return(templates)
def placeatom(vector, box_length, which_loc, stacksz):
    """Place `vector` as a cube of edge `box_length` centered at `which_loc`
    inside a zero-padded copy of a volume of shape `stacksz`.

    The returned volume is padded by `box_length` on both sides of every
    dimension so the cube always fits, even near a boundary; callers
    (compute3dvec) trim the padding afterwards.
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    vector : ndarray
        Flattened cube of box_length ** 3 values.
    box_length : int
        Edge length of the cube.
    which_loc : int
        Flat index (into an array of shape `stacksz`) of the cube center.
    stacksz : tuple
        Shape of the (3-D) target volume before padding.

    Returns
    -------
    ndarray
        Padded volume of shape (stacksz[i] + 2 * box_length) per dimension.
    """
    import numpy as np
    output_array = np.zeros((stacksz), dtype='float32')
    # Convert the flat index into (r, c, z) coordinates and mark the center voxel.
    r, c, z = np.unravel_index(which_loc, (stacksz))
    output_array[r, c, z] = 1
    # Pad every dimension by box_length at both ends, filled with zeros.
    # np.pad replaces np.lib.pad (same function; the np.lib alias is legacy).
    output_array = np.pad(output_array, ((box_length, box_length), (box_length, box_length),
                          (box_length, box_length)), 'constant', constant_values=(0, 0))
    # Recover the (now padded) coordinates of the single marked voxel.
    r, c, z = np.nonzero(output_array)
    half_length = int(round(box_length / 2))
    # np.int was removed in NumPy 1.20+ and int() on a size-1 array is deprecated,
    # so convert via explicit indexing of the single match.
    c = int(c[0])
    r = int(r[0])
    z = int(z[0])
    # Write the cube (reshaped from the flat vector) centered on the marked voxel.
    output_array[(r - half_length + 1):(r + box_length - half_length + 1),
                 (c - half_length + 1):(c + box_length - half_length + 1),
                 (z - half_length + 1):(z + box_length - half_length + 1)] = \
        np.reshape(vector, (box_length, box_length, box_length))
    return(output_array)
def placeatom2D(vector, box_length, which_loc, stacksz):
    """Place `vector` as a box_length x box_length patch centered at `which_loc`
    inside a zero-padded copy of a 2-D array of shape `stacksz`.

    The returned array is padded by `box_length` on both sides of every
    dimension so the patch always fits, even near a boundary; callers
    (compute2dvec) trim the padding afterwards.
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    vector : ndarray
        Flattened patch of box_length ** 2 values.
    box_length : int
        Edge length of the patch.
    which_loc : int
        Flat index (into an array of shape `stacksz`) of the patch center.
    stacksz : tuple
        Shape of the (2-D) target array before padding.

    Returns
    -------
    ndarray
        Padded array of shape (stacksz[i] + 2 * box_length) per dimension.
    """
    import numpy as np
    output_array = np.zeros((stacksz), dtype='float32')
    # Convert the flat index into (r, c) coordinates and mark the center cell.
    r, c = np.unravel_index(which_loc, (stacksz))
    output_array[r, c] = 1
    # Pad every dimension by box_length at both ends, filled with zeros.
    # np.pad replaces np.lib.pad (same function; the np.lib alias is legacy).
    output_array = np.pad(output_array, ((box_length, box_length), (box_length, box_length)),
                          'constant', constant_values=(0, 0))
    # Recover the (now padded) coordinates of the single marked cell.
    r, c = np.nonzero(output_array)
    half_length = int(round(box_length / 2))
    # np.int was removed in NumPy 1.20+ and int() on a size-1 array is deprecated,
    # so convert via explicit indexing of the single match.
    c = int(c[0])
    r = int(r[0])
    # Write the patch (reshaped from the flat vector) centered on the marked cell.
    output_array[(r - half_length + 1):(r + box_length - half_length + 1),
                 (c - half_length + 1):(c + box_length - half_length + 1)] = \
        np.reshape(vector, (box_length, box_length))
    return(output_array)
def compute3dvec(vector, which_loc, box_length, stacksz):
    """Place `vector` as a cube centered at `which_loc` in a volume of shape `stacksz`.

    Thin wrapper around placeatom() that trims the box_length padding
    placeatom() adds, so the result has exactly the shape given by `stacksz`.
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    vector : ndarray
        Flattened cube of box_length ** 3 values.
    which_loc : int
        Flat index of the cube center within the output volume.
    box_length : int
        Edge length of the cube.
    stacksz : tuple
        Shape of the (3-D) output volume.

    Returns
    -------
    ndarray
        Volume of shape `stacksz` containing the placed cube.
    """
    import numpy as np
    output_array = placeatom(vector, box_length, which_loc, stacksz)
    # placeatom() padded each dimension by box_length on both sides; trim both
    # sides in a single slicing step instead of the original two passes.
    x, y, z = np.shape(output_array)
    return output_array[box_length:x - box_length,
                        box_length:y - box_length,
                        box_length:z - box_length]
def compute2dvec(vector, which_loc, box_length, stacksz):
    """Place `vector` as a square patch centered at `which_loc` in a 2-D array
    of shape `stacksz`.

    Thin wrapper around placeatom2D() that trims the box_length padding
    placeatom2D() adds, so the result has exactly the shape given by `stacksz`.
    Copyright (c) 2016, UChicago Argonne, LLC.

    Parameters
    ----------
    vector : ndarray
        Flattened patch of box_length ** 2 values.
    which_loc : int
        Flat index of the patch center within the output array.
    box_length : int
        Edge length of the patch.
    stacksz : tuple
        Shape of the (2-D) output array.

    Returns
    -------
    ndarray
        Array of shape `stacksz` containing the placed patch.
    """
    import numpy as np
    output_array = placeatom2D(vector, box_length, which_loc, stacksz)
    # placeatom2D() padded each dimension by box_length on both sides; trim both
    # sides in a single slicing step instead of the original two passes.
    x, y = np.shape(output_array)
    return output_array[box_length:x - box_length, box_length:y - box_length]
# Call this function for the centroid-level F1 score on 2-D (nii) data.
# Dense, 3-D F1 score of cell detection.
| 37.562286 | 220 | 0.66334 |
a71437b5469d3a544e7b8017e8d77b89874193c2 | 2,088 | py | Python | migrations/versions/b846613b404e_.py | python-02/flask-spa-CoopApp | 8ecd9e22847401c6ee18b76a80c68c8ba5d77401 | [
"MIT"
] | 6 | 2021-04-16T06:37:04.000Z | 2021-11-11T23:37:04.000Z | migrations/versions/b846613b404e_.py | python-02/flask-spa-CoopApp | 8ecd9e22847401c6ee18b76a80c68c8ba5d77401 | [
"MIT"
] | null | null | null | migrations/versions/b846613b404e_.py | python-02/flask-spa-CoopApp | 8ecd9e22847401c6ee18b76a80c68c8ba5d77401 | [
"MIT"
] | 2 | 2021-06-01T15:35:17.000Z | 2022-03-05T03:50:57.000Z | """empty message
Revision ID: b846613b404e
Revises: fc25bf71d841
Create Date: 2020-01-06 21:43:28.958558
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'b846613b404e'  # unique id of this migration script
down_revision = 'fc25bf71d841'  # parent revision applied before this one
branch_labels = None  # no named version branches for this revision
depends_on = None  # no dependencies on other migration branches
| 36.631579 | 69 | 0.684866 |
a7143837d4f1b09881e05cb620fce36372532de7 | 2,010 | py | Python | alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EnergyExtRequest import EnergyExtRequest
| 26.8 | 69 | 0.584577 |
a715a55b0649d434e3e3db7475617b277a5112ae | 1,657 | py | Python | project_receipt/receipt/urls.py | Guilouf/django-receipt | fb42de12311cd1a20cc28c74a732d818f28ef551 | [
"Apache-2.0"
] | null | null | null | project_receipt/receipt/urls.py | Guilouf/django-receipt | fb42de12311cd1a20cc28c74a732d818f28ef551 | [
"Apache-2.0"
] | 8 | 2021-02-01T12:47:02.000Z | 2021-12-13T09:34:38.000Z | project_receipt/receipt/urls.py | Guilouf/django-receipt | fb42de12311cd1a20cc28c74a732d818f28ef551 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from receipt import views
# URL routes for the receipt app. Order matters for Django's resolver, so the
# entries are only annotated, not reordered.
urlpatterns = [
    # Site root doubles as the receipt list.
    path('', views.ReceiptList.as_view(), name='home'),
    # Receipts: list / create / edit.
    path('receipt/', views.ReceiptList.as_view(), name='receipt_list'),
    path('receipt/create', views.ReceiptCreate.as_view(), name='receipt_create'),
    path('receipt/<int:pk>/edit', views.ReceiptUpdate.as_view(), name='receipt_update'),
    # Establishments: list / create / edit / detail, plus nested receipt creation.
    path('establishment/', views.EstablishmentList.as_view(), name='establishment_list'),
    path('establishment/create', views.EstablishmentCreate.as_view(), name='establishment_create'),
    path('establishment/<int:pk>/edit', views.EstablishmentUpdate.as_view(), name='establishment_update'),
    path('establishment/<int:pk>', views.EstablishmentDetail.as_view(), name='establishment_detail'),
    path('establishment/<int:pk>/add_receipt', views.ReceiptFromEstablishmentCreate.as_view(),
         name='establishment_add_receipt'),
    # Companies: list / create / edit / detail, plus nested establishment creation.
    path('company/', views.CompanyList.as_view(), name='company_list'),
    path('company/create', views.CompanyCreate.as_view(), name='company_create'),
    path('company/<int:pk>/edit', views.CompanyUpdate.as_view(), name='company_update'),
    path('company/<int:pk>', views.CompanyDetail.as_view(), name='company_detail'),
    path('company/<int:pk>/add_establishment', views.EstablishmentFromCompanyCreate.as_view(),
         name='company_add_establishment'),
    # Tags: list / create / edit / detail.
    path('tag/', views.TagList.as_view(), name='tag_list'),
    path('tag/create', views.TagCreate.as_view(), name='tag_create'),
    path('tag/<int:pk>/edit', views.TagUpdate.as_view(), name='tag_update'),
    path('tag/<int:pk>', views.TagDetail.as_view(), name='tag_detail'),
]
| 61.37037 | 106 | 0.719976 |
a71e3a4361a99f178927d847326e3096eeaee755 | 4,216 | py | Python | utils/common/_common.py | Pzqqt/Django_Transportation_Management_System | f4f0905d8e007920ae190252eeaefbc6ee67ed85 | [
"MIT"
] | null | null | null | utils/common/_common.py | Pzqqt/Django_Transportation_Management_System | f4f0905d8e007920ae190252eeaefbc6ee67ed85 | [
"MIT"
] | null | null | null | utils/common/_common.py | Pzqqt/Django_Transportation_Management_System | f4f0905d8e007920ae190252eeaefbc6ee67ed85 | [
"MIT"
] | null | null | null | from functools import partial
from itertools import chain
from collections import UserList
import logging
import traceback
from django import forms
from django.db.models import Model
from django.core.validators import validate_comma_separated_integer_list
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.fields.related import ForeignKey
from django.http import JsonResponse
from django.utils import timezone
# JsonResponse pre-bound to UnescapedDjangoJSONEncoder; presumably the encoder
# emits non-ASCII characters unescaped -- TODO confirm against the encoder
# class defined elsewhere in this module.
UnescapedJsonResponse = partial(JsonResponse, encoder=UnescapedDjangoJSONEncoder)
def multi_lines_log(logger: logging.Logger, string: str, level=logging.INFO):
    """Emit each line of ``string`` as its own record on ``logger`` at ``level``."""
    for text in string.splitlines():
        logger.log(level, text)
def traceback_log(logger: logging.Logger, level=logging.ERROR):
    """Log the active exception's formatted traceback, one record per line.

    Intended to be called from within an ``except`` block.
    """
    multi_lines_log(logger, traceback.format_exc(), level)
def traceback_and_detail_log(request, logger: logging.Logger, level=logging.ERROR):
    """Log the current exception together with details of the triggering request.

    Call from an ``except`` block: writes a framed section with the time, URL,
    method, cookies, session, and (for POST requests) the submitted form data,
    followed by the full traceback.

    NOTE(review): session contents and POST data may include credentials or
    other sensitive values -- confirm this level of logging is acceptable.
    """
    logger.log(level, "=" * 100)
    logger.log(level, "Exception:")
    # Naive local time, formatted for readability.
    logger.log(level, "Time: %s" % timezone.make_naive(timezone.now()).strftime("%Y-%m-%d %H:%M:%S"))
    logger.log(level, "Url: %s" % request.path)
    logger.log(level, "Method: %s" % request.method)
    logger.log(level, "Cookies: %s" % request.COOKIES)
    logger.log(level, "Session: %s" % dict(request.session.items()))
    if request.method == "POST":
        logger.log(level, "Post data: %s" % request.POST.dict())
    # Blank separator line before the traceback itself.
    logger.log(level, "")
    traceback_log(logger=logger, level=level)
    logger.log(level, "=" * 100)
def validate_comma_separated_integer_list_and_split(string: str, auto_strip=True) -> list:
    """Validate a comma-separated integer list and split it into ints.

    Raises Django's ``ValidationError`` when ``string`` is not a valid
    comma-separated integer list (e.g. contains spaces or semicolons).

    :param string: text such as ``"1,2,3"``
    :param auto_strip: when True (default), strip surrounding whitespace from
        ``string`` before validating
    :return: list of ints parsed from the string
    """
    value = string.strip() if auto_strip else string
    validate_comma_separated_integer_list(value)
    return [int(part) for part in value.split(',')]
def model_to_dict_(instance: Model) -> dict:
    """Serialize a model instance into a dict, keeping every field.

    Variant of ``django.forms.models.model_to_dict``: it iterates all concrete,
    private, and many-to-many fields (so non-editable fields are included),
    and for ForeignKey fields it stores the related instance itself rather
    than the raw pk id the stock helper yields via the field's ``attname``.
    No ``fields``/``exclude`` filtering is supported.
    """
    meta = instance._meta
    result = {}
    for field in chain(meta.concrete_fields, meta.private_fields, meta.many_to_many):
        if isinstance(field, ForeignKey):
            # Store the related object (None when unset), not the "_id" value.
            result[field.name] = getattr(instance, field.name, None)
        else:
            result[field.name] = field.value_from_object(instance)
    return result
def del_session_item(request, *items):
    """Remove the given keys from ``request.session``; missing keys are ignored."""
    session = request.session
    for key in items:
        session.pop(key, None)
| 35.728814 | 101 | 0.708491 |
a71f0fb6127bf9b694c0e036c4b163b042f9e29b | 127 | py | Python | landingpage/urls.py | aurphillus/Django-Library-Completed | f46e45f85c888e7694323e22f6e966c291a4a0be | [
"MIT"
] | null | null | null | landingpage/urls.py | aurphillus/Django-Library-Completed | f46e45f85c888e7694323e22f6e966c291a4a0be | [
"MIT"
] | null | null | null | landingpage/urls.py | aurphillus/Django-Library-Completed | f46e45f85c888e7694323e22f6e966c291a4a0be | [
"MIT"
] | null | null | null | from django.urls import path
from landingpage.views import *
urlpatterns = [
path('',landingpage,name="landingpage"),
]
| 14.111111 | 44 | 0.716535 |
a71fe8e9c812b790a9f8e10c54db7ff385e01808 | 31,509 | py | Python | cloud-v2.0/verify/verify.py | 13242084001/api | 71f57b485d685caae94a84b625d64be832cf8910 | [
"Apache-2.0"
] | null | null | null | cloud-v2.0/verify/verify.py | 13242084001/api | 71f57b485d685caae94a84b625d64be832cf8910 | [
"Apache-2.0"
] | 1 | 2021-03-25T23:58:32.000Z | 2021-03-25T23:58:32.000Z | cloud-v2.0/verify/verify.py | 13242084001/api | 71f57b485d685caae94a84b625d64be832cf8910 | [
"Apache-2.0"
] | null | null | null | from common import sshClient
import time
import eventlet
from .gol import *
import requests
from common.uploadMirror import login
from common.sqlquery import Query
#import pytest
import json
#
#json
#
#
#
#
#
#running
#clusterid
#l2vmn check
| 31.8917 | 180 | 0.564061 |