Dataset schema (one row per field; ranges are the min-max values reported by the source viewer):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 - 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-251 |
| max_stars_repo_name | string | length 4-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3-251 |
| max_issues_repo_name | string | length 4-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1-116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3-251 |
| max_forks_repo_name | string | length 4-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1-1.05M |
| avg_line_length | float64 | 1-1.02M |
| max_line_length | int64 | 3-1.04M |
| alphanum_fraction | float64 | 0-1 |

Each record below is rendered as a one-line header
    hexsha | size | ext | lang | path | repo @ head_hexsha | licenses | stars: count (min -> max datetime) | issues: ... | forks: ...
followed by the file content and a trailing stats line (avg_line_length | max_line_length | alphanum_fraction). Where the stars/issues/forks entries share the same path, repo and head, they are collapsed into one; differing values are spelled out per category.
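A minimal loading sketch (assumes the dump is exported as JSON Lines; the file name is hypothetical):

import pandas as pd

df = pd.read_json("stack_python_sample.jsonl", lines=True)
print(df[["hexsha", "size", "max_stars_repo_name", "alphanum_fraction"]].head())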
c2065e5fc7e61fdabd4ab6fd12c1ead2ad9d477a | 78,713 | py | Python | htdeblur/acquisition/motion.py | zfphil/htdeblur @ ac557284f9913292721a6b9f943ff9b921043978 | ["BSD-3-Clause"] | stars: 2 (2020-01-16T18:30:55.000Z -> 2020-02-06T08:33:51.000Z) | issues: null | forks: null
# Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwallerlab imports
from llops import display
from llops import Roi
avg_line_length 54.284828 | max_line_length 757 | alphanum_fraction 0.612656

c2094cbd00b0292a602f2ea788a9486c162b5e7e | 2,053 | py | Python | leetcode/weekly150/last_substring.py | jan25/code_sorted @ f405fd0898f72eb3d5428f9e10aefb4a009d5089 | ["Unlicense"] | stars: 2 (2018-01-18T11:01:36.000Z -> 2021-12-20T18:14:48.000Z) | issues: null | forks: null
'''
https://leetcode.com/contest/weekly-contest-150/problems/last-substring-in-lexicographical-order/
SA algorithm mostly copied from https://cp-algorithms.com/string/suffix-array.html
Status: TLE; probably due to Python 3 list overhead
'''
avg_line_length 31.584615 | max_line_length 97 | alphanum_fraction 0.431076

c20c7d9e299f07af3208c0a8aedd483571769bbb | 18,555 | py | Python | schemagen/schemagen.py | GoZaddy/SchemaGen @ c8374382f6b52ad3cec398c77fd5bc90fe891818 | ["MIT"] | stars: 3 (2021-03-26T22:51:41.000Z -> 2021-03-27T15:17:24.000Z) | issues: null | forks: null
from antlr4 import *
from .antlr import GraphQLLexer, GraphQLListener, GraphQLParser
from .codegen import CodegenTool, Class, String, ClassInstance, IfElse, If, Method, Expr, Variable
import re
from math import floor
from datetime import datetime
from .utils import strip_string_quotes, camel_case_to_snake_case, process_input_value_definition
from .errors import ParsingError
GraphQLParser = GraphQLParser.GraphQLParser
graphene = 'graphene'
built_in_scalars = [
'Int',
'Float',
'String',
'Boolean',
'ID',
'Date',
    'Datetime',
    'Time',  # missing comma here in the original silently concatenated 'Time' and 'Decimal'
    'Decimal',
'JSONString',
'Base64',
]
avg_line_length 46.156716 | max_line_length 136 | alphanum_fraction 0.492643

c20cac9dd66122173bfd30ba53957fea5bb5307b | 2,231 | py | Python | app/api/views.py | rickywang432/flask @ c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | ["MIT"] | stars: null | issues: 1 (2021-06-02T02:01:38.000Z -> 2021-06-02T02:01:38.000Z) | forks: null
from flask import Flask, request, jsonify,Blueprint
from flask_marshmallow import Marshmallow
from app.models import User, Group, Role
from app import ma
api = Blueprint('api', __name__)

# NOTE: the original snippet instantiates these schemas without defining them;
# minimal flask-marshmallow sketches follow (field lists are assumptions, not from the source).
class UserSchema(ma.Schema):
    class Meta:
        fields = ("id", "username", "email")

class GroupSchema(ma.Schema):
    class Meta:
        fields = ("id", "name")

class RoleSchema(ma.Schema):
    class Meta:
        fields = ("id", "name")
user_schema = UserSchema()
users_schema = UserSchema(many=True)
group_schema = GroupSchema()
groups_schema = GroupSchema(many=True)
role_schema = RoleSchema()
roles_schema = RoleSchema(many=True)
# endpoint to get user detail by id
# endpoint to get group detail by id
# endpoint to get role detail by id
avg_line_length 26.247059 | max_line_length 80 | alphanum_fraction 0.685791

c20d8ed82808f42c1ce9f7452c5668af8015a2b5 | 2,335 | py | Python | setup.py | maljovec/samply @ 9364c2f671c02cb7bab484c0e856a0a0ca6ecc40 | ["BSD-3-Clause"] | stars: null | issues: 2 (2019-02-21T00:28:36.000Z -> 2019-11-09T04:35:39.000Z) | forks: null (forks repo listed as maljovec/samplers, same head)
"""
Setup script for samply
"""
from setuptools import setup
import re
extra_args = {}
def get_property(prop, project):
"""
Helper function for retrieving properties from a project's
__init__.py file
@In, prop, string representing the property to be retrieved
@In, project, string representing the project from which we will
retrieve the property
@Out, string, the value of the found property
"""
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(project + "/__init__.py").read(),
)
return result.group(1)
VERSION = get_property("__version__", "samply")
def long_description():
""" Reads the README.rst file and extracts the portion tagged between
specific LONG_DESCRIPTION comment lines.
"""
description = ""
recording = False
with open("README.rst") as f:
for line in f:
if "END_LONG_DESCRIPTION" in line:
return description
elif "LONG_DESCRIPTION" in line:
recording = True
continue
if recording:
description += line
# Consult here: https://packaging.python.org/tutorials/distributing-packages/
setup(
name="samply",
packages=["samply"],
version=VERSION,
description="A library for computing samplings in arbitrary dimensions",
long_description=long_description(),
author="Dan Maljovec",
author_email="maljovec002@gmail.com",
license="BSD",
test_suite="samply.tests",
url="https://github.com/maljovec/samply",
download_url="https://github.com/maljovec/samply/archive/"
+ VERSION
+ ".tar.gz",
keywords=[""],
# Consult here: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Mathematics",
],
setup_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
install_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
python_requires=">=2.7, <4",
)
avg_line_length 29.935897 | max_line_length 77 | alphanum_fraction 0.615418

c20db92c5e61a54ef4ff2401b5df9360bca3d9b1 | 4,353 | py | Python | 数据结构实践课/实验3/文本格式化.py | TD21forever/hdu-term-project-helper @ f42f553efd1d7b59162d3fc793ac14ae30850efd | ["Apache-2.0"] | stars: 17 (2021-01-09T06:49:09.000Z -> 2022-02-23T01:36:20.000Z) | issues: null | forks: 1 (2021-06-22T12:56:16.000Z -> 2021-06-22T12:56:16.000Z)
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2018-11-14 15:41:57
# @Last Modified by: TD21forever
# @Last Modified time: 2018-11-15 16:50:48
file = open('input.txt','r')  # source text to be formatted
# (menu() and operate() are defined earlier in the original file)
if __name__ == '__main__':
while True:
print("")
print("\n1.\n2.\n3.\n4.\n5.\n6.\n")
ans = "no"
ans = input("5,55,2,3,3,1?,yesno:")
if ans == 'yes':
operate()
else:
print("\n")
a = int(input("1."))
b = int(input("2."))
if b>80:
b = int(input("80"))
c = int(input("3."))
d = int(input("4."))
e = int(input("5."))
ff = int(input("6."))
operate(a,b,c,d,e,ff)
file.close()
avg_line_length 34.824 | max_line_length 107 | alphanum_fraction 0.464278

c210287e380e114135144808518dac8414c8a7fc | 1,405 | py | Python | authors/apps/authentication/tests/test_models.py | C3real-kill3r/binary-jungle-backend @ 5333138fbce901e75accf5487b10990979afa571 | ["MIT"] | stars: null | issues: 8 (2020-02-12T03:04:07.000Z -> 2022-03-12T00:07:31.000Z) | forks: null
from django.test import TestCase
from authors.apps.authentication.models import (
User
)
avg_line_length 30.543478 | max_line_length 112 | alphanum_fraction 0.64484

c213671e056f4ccf87fd3dab05b33b16957f6f48 | 5,908 | py | Python | simulations/MonteCarlo_function.py | ["MIT"] | stars repo: chengning-zhang/Statistical-methods-for-combining-multiple-tests-a-Wrapper- @ 42b5aabf6a0619fa7fe18a034926236d133a35d8, stars: 1 (2020-11-24T02:58:58.000Z -> 2020-11-24T02:58:58.000Z) | issues/forks repo: chengning-zhang/Combining-multiple-tests-Wrapper- (same head), issues: null, forks: null
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Created on Nov 23, 2020
@author: Chengning Zhang
"""
## simulation for Scenario A: generate X0 and X1.
def MonteCarlo_1(T, n0, n1, u0, u1, sigma0, sigma1, log_bool = False):
"""simulation for first scenario: multivarite normal with equal variance
T: number of simulation
n0: sample size of class 0
n1: sample size of class 1
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n0 + n1
#i = 10
np.random.seed(seed= 100*i+ 4*i)
X0 = multivariate_normal(u0, sigma0, size = n0)
X1 = multivariate_normal(u1, sigma1, size = n1)
if log_bool:
X0 = np.exp(X0)
X1 = np.exp(X1)
#
X = np.concatenate([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y) ## X,y is one simulation
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
        AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
## Simulation scenario B: generate X first, then generate Bernoulli Y via logit(P(Y=1|X)) = 10*(sin(pi*x1) + ...)
def MonteCarlo_2(T, n, u, sigma):
    """simulation for second scenario: generate X first from normal, then generate y via logit(Y|X) = 10*(sin(pi*x1) + ... + sin(pi*xd))
T: number of simulation
n: sample size
u: mean for X
sigma: variance for X
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n
#i = 10
print(i)
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n)
X_trans = [ 10*sum(list(map(lambda x: np.sin(np.pi*x) , ele))) for ele in X]
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
        AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
## Simulation scenario C: generate X first, then generate Bernoulli Y via logit(P(Y=1|X)) = x1 - x2 - x3 + (x1-x2)^2 - x4^4
def MonteCarlo_3(T, n, u, sigma):
    """simulation for last scenario: generate X first from normal, then generate y via logit(Y|X) = x1 - x2 - x3 + (x1-x2)^2 - x4^4
T: number of simulation
n: sample size
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n); #X = np.exp(X)
X_trans = [ele[0] - ele[1] - ele[2]+ (ele[0] - ele[1])**2 - ele[3]**4 for ele in X] ## x1 - x2 - x3 + (x1-x2)^2 - x4^4
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
        AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # one entry per fold
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
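# Usage sketch (parameter values are illustrative, not from the original repo;
# helper() and AllMethod must be importable for this to run):
if __name__ == '__main__':
    u0, u1, sigma = np.zeros(3), 0.5 * np.ones(3), np.eye(3)
    auc = MonteCarlo_1(T=10, n0=50, n1=50, u0=u0, u1=u1, sigma0=sigma, sigma1=sigma)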
avg_line_length 46.15625 | max_line_length 128 | alphanum_fraction 0.609682

c213c3cc512fab07ba3d806bd3d3286525745450 | 389 | py | Python | crawler/robo_proxy.py | xliangwu/com.caveup.machine_learn @ 793131c4767f45d468a813752c07d02f623a7b99 | ["Apache-2.0"] | stars: 1 (2018-09-19T06:27:14.000Z -> 2018-09-19T06:27:14.000Z) | issues: null | forks: null
import requests
if __name__ == '__main__':
    pages_crawler()  # pages_crawler() is defined elsewhere in the original module
avg_line_length 24.3125 | max_line_length 135 | alphanum_fraction 0.676093

c2145a28b8098d26c67f49818369dff92c2ac06b | 11,662 | py | Python | apiosintDS/apiosintDS.py | davidonzo/apiosintDS @ b5bb1c42e1a3d984a69e8794a4c5da6969dcd917 | ["MIT"] | stars: 13 (2019-10-15T06:54:49.000Z -> 2022-03-28T23:23:29.000Z) | issues: 1 (2019-11-12T15:00:53.000Z -> 2019-11-14T09:37:46.000Z) | forks: 4 (2019-12-05T05:34:07.000Z -> 2022-03-24T09:59:26.000Z)
import sys
import logging
import pytz
logging.basicConfig(format='%(levelname)s: %(message)s')
if (sys.version_info < (3, 0)):#NO MORE PYTHON 2!!! https://pythonclock.org/
logging.error(" ########################### ERROR ###########################")
logging.error(" =============================================================")
logging.error(" Invalid python version detected: "+str(sys.version_info[0])+"."+str(sys.version_info[1]))
logging.error(" =============================================================")
logging.error(" It seems your are still using python 2 even if you should")
logging.error(" now it will be retire next 2020.")
logging.error(" For more info please read https://pythonclock.org/")
logging.error(" =============================================================")
logging.error(" Try again typing: python3 /path/to/"+sys.argv[0])
logging.error(" =============================================================")
logging.error(" ########################### ERROR ###########################")
exit(0)
import tempfile
import argparse
import os
import requests
import re
import json
italyTZ = pytz.timezone("Europe/Rome")
from apiosintDS.modules import listutils, dosearch
try:
from urllib.parse import urlparse
except ImportError as ierror:
logging.error(ierror)
logging.error("To run this script you need to install the \"urllib\" module")
logging.error("Try typing: \"pip3 install urllib3\"")
exit(0)
try:
import validators
except ImportError as e:
logging.error(e)
logging.error("To run this script you need to install the \"validators\" module")
logging.error("Try typing: \"pip3 install validators\"")
exit(0)
import platform
if platform.system() not in ['Linux']:
logging.warning("Script not testes on "+platform.system()+" systems. Use at your own risks.")
scriptinfo = {"scriptname": "DigitalSide-API",
"majorversion": "1",
"minorversion": "8.3",
"license": "MIT",
"licenseurl": "https://raw.githubusercontent.com/davidonzo/Threat-Intel/master/LICENSE",
"author": "Davide Baglieri",
"mail": "info[at]digitalside.it",
"pgp": "30B31BDA",
"fingerprint": "0B4C F801 E8FF E9A3 A602 D2C7 9C36 93B2 30B3 1BDA",
"git": "https://github.com/davidonzo/Threat-Intel/blob/master/tools/DigitalSide-API/v1",
"DSProjectHP": "https://osint.digitalside.it",
"DSGitHubHP": "https://github.com/davidonzo/Threat-Intel"}
if __name__ == '__main__':
    main()  # main() is defined further down in the original module
avg_line_length 51.149123 | max_line_length 428 | alphanum_fraction 0.607271

c2160b83bdfd16bb5fd59f1cfbfcbb7c7d36395f | 3,327 | py | Python | 5-3_stock inventory.py | hkrsmk/python @ 1ee1b0adc911b62af3911428f441c6c59e1b345f | ["Unlicense"] | stars: null | issues: null | forks: null
#Stock inventory control system.
#======================================= 1 ===========================
#======================================= 2 ===========================
#======================================= 3 ============================
# (the functions marked 1-3 above - menu(), newStock(), addVolume(), sell() -
# are elided in this extract)
#main prog below
choice = 0
myStock = {}
#empty dictionary for myStock
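# myStock.txt format (assumed from the parsing below): one "name,quantity"
# record per line, e.g.
#   apple,3
#   banana,12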
try:
infile = open("myStock.txt","r")
read1LineStock = infile.readline()
#read first line
    while read1LineStock != "":
    #while the file has not ended (readline() returns "" at EOF, not " ")
myStock[read1LineStock.split(",")[0]] = int(read1LineStock.split(",")[1])
read1LineStock = infile.readline()
print(myStock)
#place item 0 in the split up sentence as the name for the item for myStock,
#and whatever number you can find in item 1 of the split up sentence (ignore '\n')
#as the 'quantity' for myStock.
#eg myStock['apple'] = '1'
#then, read the next line.
infile.close()
except:
print("Welcome to the stock management system!")
while choice != 9:
choice = menu()
#rmb to return choice to the global choice.
#the choice inside menu() is a LOCAL choice.
if choice ==1:
newStock()
elif choice ==2:
addVolume()
elif choice ==3:
sell()
#======================================= 8 ===========================
elif choice ==8:
print(myStock)
#======================================= 9 ===========================
print("Have a noice day")
avg_line_length 30.522936 | max_line_length 87 | alphanum_fraction 0.479411

c21a45651bf528b945d22bdb962a7e3a45ad0e4d | 91 | py | Python | backend/gunicorn_config_worker.py | matan-h/futurecoder @ 5117cbab7ed2bc41fe9d4763038d9c4a0aba064e | ["MIT"] | stars: null | issues: 1 (2022-02-28T01:35:27.000Z -> 2022-02-28T01:35:27.000Z) | forks: null
bind = "0.0.0.0:5000"
threads = 10
worker_class = "gthread"
accesslog = '-'
errorlog = '-'
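# Launch sketch (the app module path is hypothetical):
#   gunicorn -c gunicorn_config_worker.py backend.app:app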
avg_line_length 15.166667 | max_line_length 24 | alphanum_fraction 0.626374

c21a8492971d5deb4f24b54f0d01b958dad6c817 | 1,780 | py | Python | 2017/day23.py | andypymont/adventofcode @ 912aa48fc5b31ec9202fb9654380991fc62afcd1 | ["MIT"] | stars: null | issues: null | forks: null
"""
2017 Day 23
https://adventofcode.com/2017/day/23
"""
from typing import Dict
import aocd  # type: ignore
# (the Program class and run_program() are defined further down in the original file)
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2017, day=23)
program = Program(data)
program.run()
print(f"Part 1: {program.mul_count}")
print(f"Part 2: {run_program()}")
if __name__ == "__main__":
main()
avg_line_length 25.797101 | max_line_length 75 | alphanum_fraction 0.561798

c21ace7559f52cf54fe988e11522102469f04048 | 1,641 | py | Python | src/simulator/wsn/test.py | liuliuliu0605/Federated-Learning-PyTorch @ 04169455917ae50a8fea2dabd756a0ca1774e5d5 | ["MIT"] | stars: null | issues: null | forks: null
import sys
from sklearn.datasets import make_blobs
from src.simulator.wsn.network import Network
from src.simulator.wsn.utils import *
from src.simulator.wsn.fcm import *
from src.simulator.wsn.direct_communication import *
from src.utils import complete, star
seed = 1
np.random.seed(seed)
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
traces = {}
topo = complete(cf.NB_CLUSTERS)
# topo = independent(cf.NB_CLUSTERS)
# topo = star(cf.NB_CLUSTERS)
# topo = ring(cf.NB_CLUSTERS)
centers = [[50, 225], [25, 110], [125, 20], [220, 80], [200, 225]]
X, y = make_blobs(n_samples=100, centers=centers, n_features=2,
random_state=seed, cluster_std=15)
traces = {}
network = Network(init_nodes=X, topo=topo)
# network = Network(topo=topo)
for routing_topology in ['FCM']:#, 'DC']:
network.reset()
routing_protocol_class = eval(routing_topology)
network.init_routing_protocol(routing_protocol_class())
# traces[routing_topology] = network.simulate()
for i in range(1000):
print("--------Round %d--------"% i)
network.activate_mix()
traces[routing_topology] = network.simulate_one_round()
network.deactivate_mix()
if len(network.get_alive_nodes()) == 0 :
break
# plot_clusters(network)
# plot_time_of_death(network)
# print(network.energy_dis)
# print(network.energy_dis['inter-comm']/ network.energy_dis['intra-comm'])
print("All death round: ", i)
print("First death round: ", network.first_depletion)
print("Energy:", network.energy_dis)
plot_traces(traces)
avg_line_length 32.176471 | max_line_length 80 | alphanum_fraction 0.672151

c21c3b472b61858775a3801d8a7ee0aff0f5536a | 4,149 | py | Python | src/dewloosh/geom/cell.py | dewloosh/dewloosh-geom @ 5c97fbab4b68f4748bf4309184b9e0e877f94cd6 | ["MIT"] | stars: 2 (2021-12-11T17:25:51.000Z -> 2022-01-06T15:36:27.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*-
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from numpy import ndarray
from dewloosh.math.array import atleast1d
from dewloosh.math.utils import to_range
from .celldata import CellData
from .utils import jacobian_matrix_bulk, points_of_cells, pcoords_to_coords_1d
avg_line_length 33.192 | max_line_length 89 | alphanum_fraction 0.577489

c2221b8872b6350f052296a7af3215fb075a5795 | 30 | py | Python | src/python/src/rmq/items/__init__.py | halimov-oa/scrapy-boilerplate @ fe3c552fed26bedb0618c245ab923aa34a89ac9d | ["MIT"] | stars: 34 (2019-12-13T10:31:39.000Z -> 2022-03-09T15:59:07.000Z) | issues: 49 (2020-02-25T19:41:09.000Z -> 2022-02-27T12:05:25.000Z) | forks: 23 (2019-12-23T15:19:42.000Z -> 2022-03-09T16:00:15.000Z)
from .rmq_item import RMQItem
avg_line_length 15 | max_line_length 29 | alphanum_fraction 0.833333

c22246e42a11a496e2843439e4ad4abd332a1d57 | 968 | py | Python | softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py | anyboby/mbpo @ 98b75cb4cb13a2640fce1fbe1ddef466b864342e | ["MIT"] | stars: 5 (2020-02-12T17:09:09.000Z -> 2021-09-29T16:06:40.000Z) | issues: 10 (2020-08-31T02:50:02.000Z -> 2022-02-09T23:36:43.000Z) | forks: 2 (2022-03-15T01:45:26.000Z -> 2022-03-15T06:46:47.000Z)
import os
import numpy as np
from gym import utils
from mujoco_safety_gym.envs import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'slide.xml')
avg_line_length 37.230769 | max_line_length 105 | alphanum_fraction 0.66219

c222e22c9b1710ce4667ef563dce67f96dc33915 | 163 | py | Python | packages/raspi_ip/setup.py | atoy322/PiDrive @ 8758f4b5dae4a0187ce0a769c4146628c88015de | ["MIT"] | stars: null | issues: 2 (2021-09-21T06:32:58.000Z -> 2021-09-22T23:15:18.000Z) | forks: null
from setuptools import setup, find_packages
setup(
    name="raspi_ip",
    version="1.0.0",
    author="atoy322",
    description="",
    long_description="",
    # find_packages was imported but never used in the original; wiring it in
    # so the package contents are actually declared
    packages=find_packages(),
)
avg_line_length 14.818182 | max_line_length 43 | alphanum_fraction 0.644172

c223b2854d4053fb4e412891092b11e58745c844 | 339 | py | Python | kiteconnect/__version__.py | AnjayGoel/pykiteconnect @ e33833a86d3e6483f2cff1be8bed74f40d5771c5 | ["MIT"] | stars: 1 (2022-02-05T08:43:37.000Z -> 2022-02-05T08:43:37.000Z) | issues: null | forks: null
__title__ = "open_kite_connect"
__description__ = "Fork of the official Kite Connect python client, allowing free access to the api."
__url__ = "https://kite.trade"
__download_url__ = "https://github.com/AnjayGoel/pykiteconnect"
__version__ = "4.0.0"
__author__ = "Anjay Goel"
__author_email__ = "anjay.goel@gmail.com"
__license__ = "MIT"
avg_line_length 37.666667 | max_line_length 101 | alphanum_fraction 0.766962

c223dd7e30b36ebfa0f41bf3e5a06ae1a6e0b5cd | 1,679 | py | Python | CrsData/pipelines.py | DivineEnder/CrsData @ cd0cf14e79b4a3bbf7347b8612a5b67e2a185208 | ["MIT"] | stars: null | issues: null | forks: null
# @Author: DivineEnder
# @Date: 2018-03-08 22:24:45
# @Email: danuta@u.rochester.edu
# @Last modified by: DivineEnder
# @Last modified time: 2018-03-11 01:25:41
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from dotenv import load_dotenv, find_dotenv
from Utils import connection_utils as glc
from psycopg2.extensions import AsIs
import os
avg_line_length 34.265306 | max_line_length 214 | alphanum_fraction 0.731983

c224e7c1cff16812960fb4cd9afab8ab99e06afc | 2,227 | py | Python | index_to_csv.py | grenzi/photoindexer @ d10b3b6f347168706dc9c2673a29102fd73f31e1 | ["Apache-2.0"] | stars: null | issues: null | forks: null
import os
import json
from enum import Enum
from datetime import datetime,date
import logging
import pathlib
from tqdm import tqdm
from datastructures import Volume, IndexedFile,load_index_if_exists, save_index
from os import listdir
from os.path import isfile, join
import itertools
import csv
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
###############################################################################
index_dir = os.path.join(os.getcwd(), 'index')
logger.info('finding index files')
indexfiles = list([f for f in listdir(index_dir) if isfile(join(index_dir, f)) and f[-4:]=='json'])
columns = ['VolumeName', 'VolumeSerialNumber', 'Directory', 'Name', 'InodeNumber', 'Modified On', 'Created On', 'SHA256']
exif_columns=set()
logger.info('parsing index files')
#Pass 1 = collect keys
for index_file in indexfiles:
index = load_index_if_exists(os.path.join(index_dir, index_file))
for vol in index:
for ixf in vol.files:
if ixf.EXIF is not None:
for i in ixf.EXIF.keys():
exif_columns.add(i)
logger.info('writing csv')
#Pass 2 = write header, then rows, inside one open file handle
with open(os.path.join(os.getcwd(), 'index.csv'), mode='w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(columns+list(exif_columns))
    #and now rows (kept inside the with-block so the writer's file is still open)
    for index_file in indexfiles:
        index = load_index_if_exists(os.path.join(index_dir, index_file))
        for vol in index:
            for ixf in vol.files:
                row = [
                    vol.VolumeName,
                    vol.VolumeSerialNumber,
                    ixf.Directory,
                    ixf.Name,
                    ixf.st_ino,
                    ixf.st_mtime.strftime("%c"),
                    ixf.st_ctime.strftime("%c"),
                    ixf.SHA256
                ]
                for ec in exif_columns:
                    # EXIF may be None (pass 1 guards for this); mirror that guard here
                    row.append(ixf.EXIF.get(ec, None) if ixf.EXIF is not None else None)
                writer.writerow(row)
avg_line_length 35.349206 | max_line_length 122 | alphanum_fraction 0.58599

c2253045dcaa56a5991a62320574be6662b1c519 | 1,056 | py | Python | tests/test_wrapper.py | ["MIT"] | stars repo: waysup/Jike-Metro @ b8ead80dddd5d695784c5587edfd8df87c55a4e6, stars: 193 (2018-04-04T02:27:51.000Z -> 2022-03-14T03:26:44.000Z) | issues/forks repo: BeiFenKu/Jike-Metro @ e97fd0a751dca28a39d0e9fb94fbd696d5ee07b3, issues: 16 (2018-04-04T05:58:15.000Z -> 2021-01-08T02:56:57.000Z), forks: 24 (2018-04-06T09:34:58.000Z -> 2021-03-02T02:10:07.000Z)
import unittest
from collections import namedtuple
from jike.objects.wrapper import *
if __name__ == '__main__':
unittest.main()
avg_line_length 34.064516 | max_line_length 83 | alphanum_fraction 0.61553

c225d7cd38555d8a71f34fd96c413aa41e8e84be | 10,125 | py | Python | storm_control/hal4000/illumination/illuminationChannelUI.py | shiwei23/STORM6 @ 669067503ebd164b575ce529fcc4a9a3f576b3d7 | ["MIT"] | stars: 47 (2015-02-11T16:05:54.000Z -> 2022-03-26T14:13:12.000Z) | issues: 110 (2015-01-30T03:53:41.000Z -> 2021-11-03T15:58:44.000Z) | forks: 61 (2015-01-09T18:31:27.000Z -> 2021-12-21T13:07:51.000Z)
#!/usr/bin/env python
"""
The various ChannelUI classes.
Hazen 04/17
"""
import os
from PyQt5 import QtCore, QtWidgets
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
avg_line_length 32.041139 | max_line_length 100 | alphanum_fraction 0.645136

c229b5ef4f038beb67f6eb13b6306e08e27df0c9 | 4,051 | py | Python | 3-compressor/compress.py | JamesG3/Search-Engine @ 790f32c7833eb82d9b01b84af634e650ef7a9e75 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
import sys
import struct
from io import FileIO, BufferedWriter
import S9Compressor as S9
BLOCKSIZE = (64*1024) // 4  # number of ints per 64 KiB block (integer division for py3)
LexiPos = 0  # current write position in the new lexicon
lexiconBuffer = []
IIBuffer = []
WriteThreshold = 0
# The function bodies were elided in this extract; the orphaned tail below
# (a bare `return` is a syntax error at module level) is kept as comments.
#     del IIBuffer[:]
#     newII.close()
#     return
# main()
avg_line_length 26.827815 | max_line_length 96 | alphanum_fraction 0.674155

c22ad6cee4570624757675e6c7ad19a18a8249f2 | 5,028 | py | Python | DataProcess/ultimate_subimage.py | EmmaAlexander/possum-tools @ 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | ["MIT"] | stars: 5 (2021-11-18T13:27:30.000Z -> 2021-12-05T00:15:33.000Z) | issues: null | forks: null
#CASA script to create cutouts of fits cubes
#(imsubimage and exportfits are CASA tasks available in the casapy environment;
# os is needed for the os.system calls at the end)
import os
directoryA = '/Volumes/TARDIS/Work/askap/'
directoryB = '/Volumes/NARNIA/pilot_cutouts/'
import numpy as np
sources=np.loadtxt('/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt',dtype='str')
for i in range(0,sources.shape[0]):
objectname=sources[i,0]
POSSUMSB=sources[i,3]
EMUSB=sources[i,4]
ra=sources[i,1]
dec=sources[i,2]
sourcecentre=ra+','+dec
fov=sources[i,6]#arcsec
print(objectname)
region='centerbox[['+sourcecentre+'], ['+fov+'arcsec, '+fov+'arcsec]]'
possum_outfile=directoryB+objectname+'/'+objectname+'_POSSUM.fits'
emu_outfile=directoryB+objectname+'/'+objectname+'_EMU.fits'
#POSSUM
if POSSUMSB == '5038':
#this is the Early Science data
possum_cont_filename = '/Volumes/NARNIA/PawseySync/DRAGN_1_0p8_A/DRAGN_1_0p8_A/image.i.SB5038.cont.restored.fits'
else:
possum_cont_filename = directoryA +'fullfields/image.i.SB'+POSSUMSB+'.cont.taylor.0.restored.fits'
if POSSUMSB == '10035':
print('Skipping POSSUM: bad SB10035')
else:
imsubimage(imagename=possum_cont_filename,outfile='possum_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='possum_cont_temp',fitsimage=possum_outfile,overwrite=True)
#cubes
i_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.i.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
q_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.q.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
u_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.u.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
imsubimage(imagename=i_filename,outfile='i_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_filename,outfile='q_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_filename,outfile='u_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_im_temp',fitsimage=objectname+'_POSSUM_i.fits',overwrite=True)
exportfits(imagename='q_im_temp',fitsimage=objectname+'_POSSUM_q.fits',overwrite=True)
exportfits(imagename='u_im_temp',fitsimage=objectname+'_POSSUM_u.fits',overwrite=True)
#EMU
if EMUSB != 'NaN':
if EMUSB=='10083':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10083.contcube.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10083.contcube.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10083.contcube.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10083.cont.taylor.0.restored.conv.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
elif EMUSB=='10635':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10635.contcube.v2.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10635.contcube.v2.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10635.contcube.v2.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10635.cont.taylor.0.restored.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
else:
#no cubes
emu_filename= directoryA +'fullfields/image.i.SB'+EMUSB+'.cont.taylor.0.restored.fits'
imsubimage(imagename=emu_filename,outfile='emu_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='emu_cont_temp',fitsimage=emu_outfile,overwrite=True)
os.system("rm -r emu_cont_temp")
#tidy up
os.system("rm -r *_temp")
os.system("mv *{}* {}/".format(objectname,objectname))
avg_line_length 57.136364 | max_line_length 134 | alphanum_fraction 0.793755

c22b8b9f23f5fc7b3cfeba52a978e7ba6441ed61 | 92 | py | Python | cv_comparison_slider_window/__init__.py | Kazuhito00/cv-comparison-slider-window @ 215cd91c1832b419af9fb99b484ce8c2a9e79a37 | ["MIT"] | stars: 2 (2020-11-14T09:09:02.000Z -> 2020-11-14T10:54:57.000Z) | issues: null | forks: null
from cv_comparison_slider_window.cv_comparison_slider_window import CvComparisonSliderWindow
avg_line_length 92 | max_line_length 92 | alphanum_fraction 0.956522

c22bb60421f79ce3a2d29c04e35af61e67fb09d3 | 1,207 | py | Python | telegram_ecommerce/filters/decorators.py | Anonylions/telegram_ecommerce @ f5382886bbebf607c735e2f451774c56df8d6011 | ["MIT"] | stars: 10 (2020-11-20T20:55:52.000Z -> 2022-02-10T20:25:45.000Z) | issues: 1 (2022-02-16T10:28:18.000Z -> 2022-02-16T10:35:31.000Z) | forks: 8 (2021-05-01T01:13:09.000Z -> 2022-03-13T14:00:01.000Z)
from ..language import get_text
from ..database.query import (
user_exist,
is_admin)
END = -1
avg_line_length 26.822222 | max_line_length 67 | alphanum_fraction 0.71831

c22fbe148dfbc37e36952003c17c1b1180d11337 | 63 | py | Python | albow/demo/openGL/__init__.py | hasii2011/albow-python-3 @ 04b9d42705b370b62f0e49d10274eebf3ac54bc1 | ["MIT"] | stars: 6 (2019-04-30T23:50:39.000Z -> 2019-11-04T06:15:02.000Z) | issues: 73 (2019-05-12T18:43:14.000Z -> 2021-04-13T19:19:03.000Z) | forks: null
""""
This package contains the OpenGL demonstration classes
"""
avg_line_length 21 | max_line_length 54 | alphanum_fraction 0.761905

c2306615617cec84564c5dcb8ee8a144809be27e | 1,640 | py | Python | openhab2/scripts/readNilan.py | starze/openhab2 @ e4eeeecd829cdf286372067bd61561e63fed6e1a | ["MIT"] | stars: 10 (2017-04-04T08:28:54.000Z -> 2021-02-24T04:36:07.000Z) | issues: 2 (2017-04-18T13:33:12.000Z -> 2018-06-05T21:27:18.000Z) | forks: 7 (2017-04-17T18:02:19.000Z -> 2020-09-25T21:28:08.000Z)
#!/usr/bin/env python3
# -*- coding: ISO-8859-1 -*-
# https://github.com/starze/openhab2
# https://github.com/roggmaeh/nilan-openhab
import minimalmodbus
import serial
import os, sys
import csv
import httplib2
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 30, mode='rtu') # port name, slave address (in decimal)
# (the serial port is set via the Instrument constructor above)
instrument.serial.baudrate = 19200 # Baud
instrument.serial.bytesize = 8
instrument.serial.parity = serial.PARITY_EVEN
instrument.serial.stopbits = 1
instrument.serial.timeout = 2 # seconds
#instrument.debug = True
h = httplib2.Http()
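# Assumed nilan_modbus.csv layout (inferred from the column accesses below; the
# sample row is illustrative, not from the original repo):
#   Name,Address,Register Type,Scale,Unit
#   T7_Inlet,207,Input,100,C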
with open('nilan_modbus.csv') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
if row['Register Type'] == "Input":
fc = 4
elif row['Register Type'] == "Holding":
fc = 3
if row['Unit'] == "text" or row['Unit'] == "ascii":
strRet = instrument.read_string(int(row['Address']), numberOfRegisters=1, functioncode=fc)
lst = list(strRet)
strRet = lst[1] + lst[0]
elif row['Scale'] == "100":
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=2, functioncode=fc)
else:
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=0, functioncode=fc)
if row['Unit'] == "%" or row['Unit'] == "C":
print("%s: %s %s" % (row['Name'], strRet, row['Unit']))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
else:
print("%s: %s" % (row['Name'], strRet))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
avg_line_length 34.166667 | max_line_length 109 | alphanum_fraction 0.675

c230b7732d9a3dd108e45e13abd94ad053baac7e | 2,316 | py | Python | face_signin/prepare_training.py | sribs/FaceRecognition @ 68284173195d55f32a353fe3d78a53c25fbf1363 | ["Apache-2.0"] | stars: null | issues: null | forks: null
import cv2
import numpy as np
import os
avg_line_length 31.297297 | max_line_length 85 | alphanum_fraction 0.593264

c230e009f1c0351446c02fccceb30b7ead29e784 | 138 | py | Python | randomselection.py | Ristinoa/cs257 @ e7c31f995d08661114d868a55448c628b4cc9327 | ["MIT"] | stars: null | issues: null | forks: null
"randompicker.py"
import random
"A very short practice program designed
to spit out a random, user-determined
sample of input names"
avg_line_length 13.8 | max_line_length 39 | alphanum_fraction 0.775362

c2318081600b41f253e54a78d1001f4ddb857e30 | 15,873 | py | Python | fisspy/analysis/tdmap.py | ["BSD-2-Clause"] | stars repo: SNU-sunday/FISS-PYTHON @ f79420debef476a904356d42542cb6472990bb2f, stars: 3 (2017-02-18T06:42:08.000Z -> 2021-01-05T04:15:08.000Z) | issues repo: SNU-sunday/fisspy (same head), issues: 1 (2019-06-30T10:35:27.000Z -> 2019-06-30T10:35:27.000Z) | forks repo: SNU-sunday/FISS-PYTHON, forks: 1 (2017-02-23T05:24:13.000Z -> 2017-02-23T05:24:13.000Z)
from __future__ import absolute_import, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from fisspy.analysis.filter import FourierFilter
from interpolation.splines import LinearSpline
from matplotlib.animation import FuncAnimation
import astropy.units as u
from astropy.time import Time
__author__= "Juhyung Kang"
__email__ = "jhkang@astro.snu.ac.kr"
avg_line_length 41.015504 | max_line_length 86 | alphanum_fraction 0.449001

c231926cf5107bb89588302bb3bc85d7ec967624 | 2,806 | py | Python | Graphy/setup.py | andrepbento/OpenTracingProcessor @ 9e4b01cb59cecbfa04af8d5d93e3b7deb76d9ee6 | ["MIT"] | stars: 4 (2021-03-06T13:50:58.000Z -> 2022-03-28T15:17:07.000Z) | issues: null | forks: null
"""
Author: Andr Bento
Date last modified: 26-02-2019
"""
import subprocess
import sys
from os.path import dirname, abspath, join
from setuptools import find_packages, Command, setup
from setuptools.command.test import test as TestCommand
this_dir = abspath(dirname(__file__))
NAME = 'graphy'
VERSION = '0.0.1'
# Readme
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
readme = file.read()
# License
with open(join(this_dir, 'LICENSE'), encoding='utf-8') as file:
license_file = file.read()
# Requirements
with open(join(this_dir, 'requirements.txt')) as file:
requirements = file.read().splitlines()
setup(
name=NAME,
version=VERSION,
description='A micro-services system monitor command line program in Python.',
long_description=readme,
# long_description_content_type='text/markdown',
url='https://github.com/andrepbento/MScThesis/tree/master/Graphy',
    author='André Bento',
author_email='apbento@student.dei.uc.pt',
license=license_file,
classifiers=[
# How mature is this project? Common values are
# 1 - Project setup
# 2 - Prototype
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Prototype',
'Intended Audience :: Developers',
'Topic :: Observing and Controlling Performance in Micro-services',
'License :: MIT License',
'Programming Language :: Python :: 3.6',
],
keywords='cli',
packages=find_packages(exclude=('tests*', 'docs')),
install_requires=requirements,
tests_require=['pytest'],
extras_require={
'test': ['coverage', 'pytest', 'pytest-cov'],
},
    cmdclass={
        # Install, Run and Test are custom setuptools Command subclasses
        # defined elsewhere in the original file
        'install': Install,
        'run': Run,
        'test': Test
    },
)
avg_line_length 24.189655 | max_line_length 82 | alphanum_fraction 0.62794

c232029579d8b288e2ac9ed43b03f0690df1e9c2 | 1,317 | py | Python | polaris/polaris/sep24/tzinfo.py | yuriescl/django-polaris @ 8806d0e4e8baaddbffbceb3609786d2436b8abe1 | ["Apache-2.0"] | stars: 81 (2019-11-16T21:47:22.000Z -> 2022-02-17T07:35:02.000Z) | issues: 491 (2019-11-10T23:44:30.000Z -> 2022-03-20T00:25:02.000Z) | forks: 89 (2019-11-18T21:31:01.000Z -> 2022-03-28T13:47:41.000Z)
import pytz
from datetime import datetime, timedelta, timezone
from rest_framework.decorators import api_view, parser_classes, renderer_classes
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.request import Request
from rest_framework.response import Response
from django.contrib.sessions.backends.db import SessionStore
from polaris.utils import render_error_response, getLogger
logger = getLogger(__name__)
avg_line_length 34.657895 | max_line_length 87 | alphanum_fraction 0.741838

c2321c74ae596a68d5084730c6df5fe1a40a8090 | 1,615 | py | Python | utils/fundoptutils.py | joshualee155/FundOptimizer @ da842de6c99f89c767d03c9ef1b392237b726a3f | ["MIT"] | stars: 2 (2021-01-03T00:46:51.000Z -> 2021-09-01T02:48:51.000Z) | issues: null | forks: 1 (2021-08-28T11:04:00.000Z -> 2021-08-28T11:04:00.000Z)
import pandas as pd
import datetime as dt
def str2date( sDate ):
"""
Convert a string date to datetime.date
"""
try:
dateTime = dt.datetime.strptime( sDate, "%Y%m%d" )
except ValueError:
dateTime = dt.datetime.strptime( sDate, "%Y-%m-%d" )
return dateTime.date()
def getHolidays( startDate, endDate ):
"""
Return China exchange holidays ( non-trading days ) from `startDate` to `endDate`
"""
with open( 'refData/holidays.txt', 'r' ) as f:
holidays = f.read().strip().split('\n')
holidays = [ date for date in map( str2date, holidays ) if date >= startDate and date <= endDate ]
return holidays
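# Usage sketch (assumes refData/holidays.txt holds one date per line, in either
# YYYYMMDD or YYYY-MM-DD form):
if __name__ == '__main__':
    start, end = str2date('20210101'), str2date('2021-12-31')
    print(getHolidays(start, end))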
avg_line_length 30.471698 | max_line_length 102 | alphanum_fraction 0.596285

c2329a7b9e06911a1ed82d81214f1385b352823d | 398 | py | Python | Http-api-auth0-jwt/src/list-all-unicorns.py | JimmyDqv/blogs-and-sessions-code @ 737a2e88f3fd84bd8426be609f7474374d4ac4d6 | ["MIT"] | stars: 2 (2021-07-08T10:31:11.000Z -> 2022-01-07T23:04:31.000Z) | issues: null | forks: null
import json
avg_line_length 15.92 | max_line_length 36 | alphanum_fraction 0.371859

c232b3257fa969c4deba44d282906664d6091820 | 253 | py | Python | examples/http_server.py | srossross/uvio @ f4d55ad5ea5900a2a8b9c1249484ed621dc30055 | ["MIT"] | stars: 3 (2016-03-23T08:12:03.000Z -> 2018-10-06T02:46:54.000Z) | issues: null | forks: null
import uvio
avg_line_length 14.882353 | max_line_length 71 | alphanum_fraction 0.632411

c2334b533c25c85bcfe5823b2fbd3fe0b9cec5f6 | 7,933 | py | Python | ui/django_site_v2/data_cube_ui/utils/dc_mosaic.py | ["Apache-2.0"] | stars repo: ceos-seo/Data_Cube_v2 @ 81c3be66153ea123b5d21cf9ec7f59ccb7a2050a, stars: 27 (2016-08-16T18:22:47.000Z -> 2018-08-25T17:18:15.000Z) | issues/forks repo: data-cube/CEOS-cube @ 31baeba08d8e8470c4663c18aaf9056431d9c49f, issues: null, forks: 27 (2016-08-26T18:14:40.000Z -> 2021-12-24T08:41:29.000Z)
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gdal, osr
import gc
import numpy as np
import xarray as xr
from datetime import datetime
import collections
from collections import OrderedDict
import datacube
import datacube
from . import dc_utilities as utilities
# Author: KMF
# Creation date: 2016-06-14
# Modified by: AHDS
# Last modified date:
def create_mosaic_iterative(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Creates a most recent - oldest mosaic of the input dataset. If no clean mask is given,
the 'cf_mask' variable must be included in the input dataset, as it will be used
to create a clean mask
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
coordinates: time, latitude, longitude
variables: variables to be mosaicked
If user does not provide a clean_mask, dataset_in must also include the cf_mask
variable
Optional Inputs:
clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
if user does not provide a clean mask, one will be created using cfmask
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - mosaicked data with
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
#masks data with clean_mask. all values that are clean_mask==False are set to nodata.
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for index in reversed(range(len(clean_mask))):
dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time')
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_in.data_vars):
                # Fill pixels that are still no_data with values from the older slice.
                dataset_out[key].values[dataset_out[key].values == no_data] = dataset_slice[key].values[dataset_out[key].values == no_data]
return dataset_out
def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the median pixel value for a given dataset.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
    # float64 is required so masked values can be set to np.nan
dataset_in = dataset_in.astype("float64")
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
dataset_out = dataset_in.isel(time=0).drop('time').copy(deep=True)
dataset_out.attrs = OrderedDict()
# Loop over every key.
for key in list(dataset_in.data_vars):
dataset_in[key].values[dataset_in[key].values==no_data] = np.nan
dataset_out[key].values = np.nanmedian(dataset_in[key].values, axis=0)
        dataset_out[key].values[np.isnan(dataset_out[key].values)] = no_data  # '== np.nan' never matches; use np.isnan
return dataset_out.astype('int16')
def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the pixel value for the max ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = -1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
return dataset_out
def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None):
"""
Description:
Method for calculating the pixel value for the min ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = 1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values]
return dataset_out
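if __name__ == '__main__':
    # Usage sketch (an illustration, not a real Data Cube query): mosaic a
    # tiny synthetic cube with an all-True clean mask.
    times = np.array(['2016-01-01', '2016-02-01'], dtype='datetime64[ns]')
    cube = xr.Dataset(
        {'red': (('time', 'latitude', 'longitude'),
                 np.random.randint(0, 255, (2, 3, 3)))},
        coords={'time': times, 'latitude': np.arange(3), 'longitude': np.arange(3)})
    mask = np.ones((2, 3, 3), dtype=bool)
    print(create_mosaic_iterative(cube, clean_mask=mask))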
| 40.682051
| 173
| 0.704399
|
c234a2bf9d847b0178d0e12fe82918d472e89c91
| 2,014
|
py
|
Python
|
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
import cleaner as dataStream
import plotly.graph_objects as go
import plotly.io as pio
#DONUT PLOT - CONDITIONS -----------------------------------------
labels = ['Diabetes','Hypertension','Coronary Heart(D)','Chronic Kidney(D)','No Conditions','Obstructive Pulmonary(D)']
values = dataStream.PIEList
fig_cond = go.Figure(data=[go.Pie(labels=labels, values=values, hole=.3)])
#fig_cond.show()
pio.write_html(fig_cond, file="templates/cond.html")
#GROUP BAR PLOT - SYMPTOMS ---------------------------------------
symplabel=['Symptoms']
fig_symp = go.Figure(data=[
go.Bar(name='Fever', x=symplabel, y=dataStream.Fever),
go.Bar(name='Cough', x=symplabel, y=dataStream.Cough),
go.Bar(name='Breathlessness', x=symplabel, y=dataStream.Breathlessness),
go.Bar(name='Severe Acute Respiratory Syndrome', x=symplabel, y=dataStream.SARI),
go.Bar(name='Influenza-like Illness', x=symplabel, y=dataStream.ILI),
go.Bar(name='Asymptomatic', x=symplabel, y=dataStream.NONE_sym)
])
fig_symp.update_layout(barmode='group')
#fig_symp.show()
pio.write_html(fig_symp, file="templates/symp.html")
#STACK BAR PLOT - AGE DATA ------------------------------------------
fig_age = go.Figure()
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.maleAgeList,
name='Male Deaths',
orientation='h',
marker=dict(
color='rgba(61, 112, 242, 0.6)',
line=dict(color='rgba(61, 112, 242, 1.0)', width=2)
)
))
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.femaleAgeList,
name='Female Deaths',
orientation='h',
marker=dict(
color='rgba(242, 61, 221, 0.6)',
line=dict(color='rgba(242, 61, 221, 1.0)', width=2)
)
))
fig_age.update_layout(barmode='stack')
#fig_age.show()
pio.write_html(fig_age, file="templates/age.html")
| 38
| 124
| 0.627607
|
c235b37d33733193984303077c70e9f3d941faa4
| 1,847
|
py
|
Python
|
pyglare/scene/objects.py
|
keyvank/pyglare
|
9e26ae444ff4481f0f50d7344d2a5a881d04fe64
|
[
"MIT"
] | 6
|
2017-01-13T22:32:55.000Z
|
2022-03-27T22:19:49.000Z
|
pyglare/scene/objects.py
|
keyvank/pyglare
|
9e26ae444ff4481f0f50d7344d2a5a881d04fe64
|
[
"MIT"
] | 1
|
2016-09-13T17:59:41.000Z
|
2016-09-13T18:05:20.000Z
|
pyglare/scene/objects.py
|
keyvank/pyglare
|
9e26ae444ff4481f0f50d7344d2a5a881d04fe64
|
[
"MIT"
] | null | null | null |
from ..math import geometry as geo
from ..image.color import Color
import math
| 24.959459
| 102
| 0.7634
|
c235c83aedce86f0591eb9d244db1ef5424b59b5
| 1,401
|
py
|
Python
|
Led.py
|
Zico56/raspberry-gt500
|
85e29ec8bb604fab9c0eb37b63e85b8058baf2b2
|
[
"Xnet",
"X11"
] | null | null | null |
Led.py
|
Zico56/raspberry-gt500
|
85e29ec8bb604fab9c0eb37b63e85b8058baf2b2
|
[
"Xnet",
"X11"
] | null | null | null |
Led.py
|
Zico56/raspberry-gt500
|
85e29ec8bb604fab9c0eb37b63e85b8058baf2b2
|
[
"Xnet",
"X11"
] | null | null | null |
import time
from tkinter import *
from PIL import Image, ImageTk
from Configuration import config
| 31.840909
| 67
| 0.615989
|
c236c320912188a01c92278b510292a2d1855a42
| 249
|
py
|
Python
|
tests/accounts/model/test_social_security_number.py
|
Hyaxia/Bank-DDD-CQRS-ES
|
116e3eb3e93d549c1da53e6d506ab47667d77445
|
[
"MIT"
] | 8
|
2020-10-27T09:46:20.000Z
|
2022-01-27T12:16:48.000Z
|
tests/accounts/model/test_social_security_number.py
|
Hyaxia/Bank-DDD-CQRS-ES
|
116e3eb3e93d549c1da53e6d506ab47667d77445
|
[
"MIT"
] | null | null | null |
tests/accounts/model/test_social_security_number.py
|
Hyaxia/Bank-DDD-CQRS-ES
|
116e3eb3e93d549c1da53e6d506ab47667d77445
|
[
"MIT"
] | 2
|
2021-05-29T08:11:48.000Z
|
2021-07-26T04:44:53.000Z
|
import pytest
from bank_ddd_es_cqrs.accounts import SocialSecurityNumber
| 27.666667
| 90
| 0.84739
|
c236d3b1e5bb73ed1d08dc25325aad2b8f8b0b9e
| 358
|
py
|
Python
|
setup.py
|
jjakimoto/rl_traders.py
|
d5411c96d49ba6a54751d12cdd11974e5cc1a8aa
|
[
"MIT"
] | 2
|
2018-10-07T14:16:32.000Z
|
2019-01-28T00:14:29.000Z
|
setup.py
|
jjakimoto/rl_traders.py
|
d5411c96d49ba6a54751d12cdd11974e5cc1a8aa
|
[
"MIT"
] | null | null | null |
setup.py
|
jjakimoto/rl_traders.py
|
d5411c96d49ba6a54751d12cdd11974e5cc1a8aa
|
[
"MIT"
] | 1
|
2019-11-05T00:51:20.000Z
|
2019-11-05T00:51:20.000Z
|
from setuptools import setup
from setuptools import find_packages
setup(name='rl_traders',
version='0.1.0',
description='Reinforcement Learning for Trading',
url='https://github.com/jjakimoto/rl_traders.git',
author='jjakimoto',
author_email='f.j.akimoto@gmail.com',
license='MIT',
packages=find_packages()
)
| 27.538462
| 56
| 0.678771
|
c239846032333fb5d26b1c1eb5b5c8a5cf233d15
| 2,219
|
py
|
Python
|
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import importlib
import time
import uvloop
from aiohttp import ClientSession
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
from pyrogram import Client
from pyrogram import Client as Bot
from Music import config
from Music.config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID, OWNER_ID
initialize()
MONGODB_CLI = MongoClient(MONGO_DB_URI)
db = MONGODB_CLI.wbb
SUDOERS = SUDO_USERS
OWNER = OWNER_ID
loop = asyncio.get_event_loop()
loop.run_until_complete(load_sudoers())
Music_START_TIME = time.time()
BOT_ID = 0
BOT_NAME = ""
BOT_USERNAME = ""
ASSID = 0
ASSNAME = ""
ASSUSERNAME = ""
ASSMENTION = ""
app = Client(
'MusicBot',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
)
aiohttpsession = ClientSession()
client = Client(config.SESSION_NAME, config.API_ID, config.API_HASH)
app.start()
client.start()
all_info(app, client)
| 28.448718
| 102
| 0.708878
|
c23a870064fefb4e740984ad848e886ea4aa0cd9
| 9,372
|
py
|
Python
|
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | 3
|
2020-12-10T13:43:08.000Z
|
2021-01-17T04:36:34.000Z
|
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | null | null | null |
import random
from random import shuffle
import numpy as np
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import yaml
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from net_all import *
from trainer_all import *
season = None
use_mask = True
use_flip = False
use_time = True
model_name = 'neta'
train_winter = ['-01-', '-02-', '-03-']
train_summer = ['-05-', '-04-', '-06-']
test_winter = ['-11-', '-12-']
test_summer = ['-07-', '-08-', '-09-', '-10-']
SEED = 0
num_train_file = 285
num_frame_per_day = 288
num_frame_before = 12
num_frame_sequence = 24
target_frames = [0, 1, 2, 5, 8, 11]
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height = 495
width = 436
num_channel = 9
num_channel_discretized = 8 # 4 * 2
visual_input_channels = 115 # 12 * 8
visual_output_channels = 6 * 8 # 6 * 8
vector_input_channels = 1 # start time point
import json
#
n = 1
s = 255
e = 85
w = 170
tv = 16
##############################Set the path##############################################
data_root = './data'
model_root = './jianjzhmodelstest'
log_root = './output'
##############################Set the path##############################################
#
target_city = 'ISTANBUL' # ['BERLIN', 'MOSCOW', 'ISTANBUL']
# test_start_index_list = np.array([ 18, 57, 114, 174, 222], np.int32) # 'BERLIN'
# test_start_index_list = np.array([ 45, 102, 162, 210, 246], np.int32) # 'Moscow' # 'Istanbul'
input_static_data_path = data_root + '/' + target_city + '/' + target_city + '_static_2019.h5'
input_mask_data_path = data_root + '/maskdata/'
input_train_data_folder_path = data_root + '/' + target_city + '/training'
input_val_data_folder_path = data_root + '/' + target_city + '/validation'
input_test_data_folder_path = data_root + '/' + target_city + '/testing'
save_model_path = model_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
summary_path = log_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
#
batch_size_test = 5
learning_rate = 3e-4
load_model_path = model_root + '/' + 'ISTANBULneta'
# load_model_path = ''
is_training = False
# premodel = os.path.join(model_root, 'BERLINneta', 'model-58000.cptk')
global_step = 60000
if __name__ == '__main__':
random.seed(SEED)
np.random.seed(SEED)
tf.set_random_seed(SEED)
trainer = Trainer(height, width, visual_input_channels, visual_output_channels, vector_input_channels,
learning_rate,
save_model_path, load_model_path, summary_path, is_training, use_mask, model_name)
tf.reset_default_graph()
test_data_filepath_list = get_data_filepath_list(input_test_data_folder_path)
if season == 'winter':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_winter]):
tmp.append(i)
data_filepath_list = tmp
elif season == 'summer':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_summer]):
tmp.append(i)
data_filepath_list = tmp
print('test_data_filepath_list\t', len(test_data_filepath_list), )
test_output_filepath_list = list()
for test_data_filepath in test_data_filepath_list:
filename = test_data_filepath.split('/')[-1]
test_output_filepath_list.append('output/' + target_city + '/' + target_city + '_test' + '/' + filename)
static_data = get_static_data(input_static_data_path)
mask_data = get_mask_data(input_mask_data_path, target_city)
try:
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('output/' + target_city):
os.makedirs('output/' + target_city)
if not os.path.exists('output/' + target_city + '/' + target_city + '_test'):
os.makedirs('output/' + target_city + '/' + target_city + '_test')
except Exception:
print('output path not made')
exit(-1)
with open('test_data.json') as f:
test_json = json.load(f)
for i in range(len(test_data_filepath_list)):
file_path = test_data_filepath_list[i]
out_file_path = test_output_filepath_list[i]
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = fr[a_group_key]
# assert data.shape[0] == num_frame_per_day
data = np.array(data, np.uint8)
test_data_batch_list = []
test_data_time_list = []
test_data_mask_list = []
batch_size_test = data.shape[0]
for j in range(batch_size_test):
test_data_time_list.append(float(j) / float(num_frame_per_day))
data_sliced = data[:, :, :, :, :num_channel]
if use_time:
for time_dict in test_json:
time_data = list(time_dict.keys())[0]
if time_data in file_path:
time_data = time_dict[time_data]
break
time_id = np.ones_like(data_sliced)[:, :, :, :, :1]
for m in range(len(time_data)):
for n in range(num_frame_before):
time_id[m, n] = time_id[m, n] * (time_data[m] + n) / 288.0 * 255.0
data_sliced = np.concatenate([data_sliced, time_id], axis=-1)
data_mask = (np.max(data_sliced, axis=4) == 0)
test_data_mask_list = data_mask[:, :, :, :]
test_data_batch_list.append(data_sliced)
test_data_time_list = np.asarray(test_data_time_list, np.float32)
input_time = np.reshape(test_data_time_list, (batch_size_test, 1))
test_data_mask = test_data_mask_list
input_data = np.concatenate(test_data_batch_list, axis=0).astype(np.float32)
input_data[:, :, :, :, :] = input_data[:, :, :, :, :] / 255.0
input_data = np.moveaxis(input_data, 1, -1).reshape((batch_size_test, height, width, -1))
static_data_tmp = np.tile(static_data, [batch_size_test, 1, 1, 1])
input_data = np.concatenate([input_data, static_data_tmp], axis=-1)
# input_data_mask = np.zeros((batch_size_test, num_frame_before, height, width, num_channel_discretized), np.bool)
# input_data_mask[test_data_mask[:, :num_frame_before, :, :], :] = True
# input_data_mask = np.moveaxis(input_data_mask, 1, -1).reshape((batch_size_test, height, width, -1))
# input_data[input_data_mask] = -1.0
true_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
if use_mask:
orig_label_mask = np.tile(mask_data, [1, 1, 1, len(target_frames)])
else:
orig_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
prediction_list = []
# print(input_data.shape)
# assert 0
import scipy.misc as misc
# trainer.load_model(premodel)
# print('load model')
for b in range(batch_size_test):
run_out_one = trainer.infer(input_data[b, :, :, :][np.newaxis, :, :, :],
input_time[b, :][np.newaxis, :],
true_label_mask[b, :, :, :][np.newaxis, :, :, :], global_step)
prediction_one = run_out_one['predict']
prediction_list.append(prediction_one)
# print(input_data[b,:,:,:].shape)
# for t in range(3):
# misc.imsave('output_'+str(b)+'_'+str(t)+'.png', np.reshape(prediction_one, [495, 436, 3, 8])[:, :, t, 0])
# assert 0
prediction = np.concatenate(prediction_list, axis=0)
prediction = np.moveaxis(np.reshape(prediction, (
batch_size_test, height, width, num_channel_discretized, len(target_frames),)), -1, 1)
prediction = prediction.astype(np.float32) * 255.0
prediction = np.rint(prediction)
prediction = np.clip(prediction, 0.0, 255.0).astype(np.uint8)
assert prediction.shape == (batch_size_test, len(target_frames), height, width, num_channel_discretized)
write_data(prediction, out_file_path)
| 37.94332
| 122
| 0.636364
|
c23bc080151d66518c85923b1ce1c8be7c0ff949
| 3,037
|
py
|
Python
|
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
from troposphere import Template, Ref, Parameter, GetAtt
from troposphere.ec2 import SecurityGroup
from troposphere.rds import DBSubnetGroup, DBInstance
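# A minimal sketch of `create_rds_template`, consistent with the imports above;
# every resource name and property value is an illustrative assumption, not the
# original implementation.
def create_rds_template():
    template = Template()
    password = template.add_parameter(Parameter(
        'DbMasterPassword',
        Type='String',
        NoEcho=True,
    ))
    security_group = template.add_resource(SecurityGroup(
        'DbSecurityGroup',
        GroupDescription='Security group for the RDS instance',
    ))
    subnet_group = template.add_resource(DBSubnetGroup(
        'DbSubnetGroup',
        DBSubnetGroupDescription='Subnets for the RDS instance',
        SubnetIds=['subnet-00000000', 'subnet-11111111'],  # placeholder ids
    ))
    template.add_resource(DBInstance(
        'DbInstance',
        Engine='mysql',
        DBInstanceClass='db.t3.micro',
        AllocatedStorage='20',
        MasterUsername='admin',
        MasterUserPassword=Ref(password),
        DBSubnetGroupName=Ref(subnet_group),
        VPCSecurityGroups=[GetAtt(security_group, 'GroupId')],
    ))
    print(template.to_json())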
if __name__ == '__main__':
create_rds_template()
| 25.957265
| 66
| 0.55186
|
c23f39dcaa9bc21fb37ef18d6de38e47058d0da3
| 514
|
py
|
Python
|
hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | 1
|
2020-10-16T16:29:01.000Z
|
2020-10-16T16:29:01.000Z
|
hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | 365
|
2020-02-03T12:46:53.000Z
|
2022-02-27T17:20:46.000Z
|
hlwtadmin/migrations/0044_gigfinderurl_ignore_periods.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.7 on 2021-01-26 09:57
import django.contrib.postgres.fields
from django.db import migrations, models
| 25.7
| 126
| 0.659533
|
c23fbfd17a95b6fdf7b229efd815b011116431cc
| 1,855
|
py
|
Python
|
conanfile.py
|
ltjax/nativefiledialog-extended
|
2dc958b98f41e081189e84b56f9f250e1b30f92e
|
[
"Zlib"
] | null | null | null |
conanfile.py
|
ltjax/nativefiledialog-extended
|
2dc958b98f41e081189e84b56f9f250e1b30f92e
|
[
"Zlib"
] | null | null | null |
conanfile.py
|
ltjax/nativefiledialog-extended
|
2dc958b98f41e081189e84b56f9f250e1b30f92e
|
[
"Zlib"
] | null | null | null |
from conans import ConanFile, CMake, tools
| 40.326087
| 116
| 0.654447
|
c24130645b33d6b4c145bae50da7d266149801e6
| 960
|
py
|
Python
|
hackerearth/Algorithms/New World/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/New World/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/New World/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
import bisect
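# `check` is called by the binary search below; this is a hedged feasibility
# test for the usual reading of the problem (an assumption, not necessarily
# the author's code): can every gap between consecutive stones be reduced to
# at most `dist` by inserting no more than k extra stones?
def check(stones, dist, n, k):
    needed = 0
    for i in range(1, n):
        gap = stones[i] - stones[i - 1]
        needed += (gap - 1) // dist  # extra stones needed to split this gap
        if needed > k:
            return False
    return True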
t = int(input())
for _ in range(t):
n, k = map(int, input().strip().split())
stones = list(map(int, input().strip().split()))
low = 1
high = stones[-1] - stones[0] # Location of all stones are given in ascending order.
while low <= high:
mid = (low + high) // 2
if check(stones, mid, n, k):
high = mid - 1
else:
low = mid + 1
print(low)
| 24
| 94
| 0.527083
|
c2416fc1e551162c64c074b04f41a960aa792575
| 712
|
py
|
Python
|
core/departments/urls.py
|
IvanRch/bsuphys
|
105715cde8fc8e9a42019ed4b650fe00b94fa132
|
[
"Apache-2.0"
] | 1
|
2022-01-04T07:04:46.000Z
|
2022-01-04T07:04:46.000Z
|
core/departments/urls.py
|
IvanRch/bsuphys
|
105715cde8fc8e9a42019ed4b650fe00b94fa132
|
[
"Apache-2.0"
] | 1
|
2020-02-17T19:06:03.000Z
|
2020-02-17T19:06:03.000Z
|
core/departments/urls.py
|
IvanRch/bsuphys
|
105715cde8fc8e9a42019ed4b650fe00b94fa132
|
[
"Apache-2.0"
] | 1
|
2021-07-08T13:21:04.000Z
|
2021-07-08T13:21:04.000Z
|
from django.urls import path, re_path
from . import views
app_name = "departments"
urlpatterns = [path("", views.departmentList, name="energy department"),
path(
"<slug:department>/",
views.department_detail,
name="department_detail",
),
path(
"<slug:department>/staff/",
views.department_detail_staff,
name="department_detail",
),
path(
"<slug:department>/thesis/",
views.department_detail_thesis,
name="department_detail",
),
path(
"<slug:department>/directions/",
views.department_detail_directions,
name="department_detail",
),
]
| 25.428571
| 72
| 0.573034
|
c2456834188f5aaff78f04e88343303b398c8b26
| 432
|
py
|
Python
|
18th May Assignments/case study 1/question_5.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | 15
|
2019-05-05T04:48:42.000Z
|
2022-02-15T12:08:33.000Z
|
18th May Assignments/case study 1/question_5.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | null | null | null |
18th May Assignments/case study 1/question_5.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | 53
|
2019-11-10T05:09:25.000Z
|
2022-03-28T01:26:32.000Z
|
# 5. How do you Count The Number Of Times Each Value Appears In An Array Of Integers?
# [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]
# Answer should be array([4, 2, 1, 1, 3, 2, 0, 0, 0, 1]) which means 0 comes 4 times, 1 comes 2 times, 2 comes 1 time, 3 comes 1 time and so on.
array = [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]
count_array_elements = [array.count(a) for a in set(array)]
print(count_array_elements)
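# The set-based count above only reports values that actually occur (typically
# [4, 2, 1, 1, 3, 2, 1] here). A sketch that reproduces the stated answer,
# zero counts included (assumes numpy is available):
import numpy as np
print(np.bincount(np.array(array)))  # -> [4 2 1 1 3 2 0 0 0 1]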
| 43.2
| 144
| 0.601852
|
c246213af42c94a04ec5e4409ced7cd800cf77ef
| 4,957
|
py
|
Python
|
pvp-tic-tac-toe.py
|
lsfidelis/pvp-tic-tac-toe
|
6ed73e5a053cc22a092c7b56d774503f57a859ab
|
[
"MIT"
] | null | null | null |
pvp-tic-tac-toe.py
|
lsfidelis/pvp-tic-tac-toe
|
6ed73e5a053cc22a092c7b56d774503f57a859ab
|
[
"MIT"
] | null | null | null |
pvp-tic-tac-toe.py
|
lsfidelis/pvp-tic-tac-toe
|
6ed73e5a053cc22a092c7b56d774503f57a859ab
|
[
"MIT"
] | null | null | null |
from time import sleep
print("Welcome to Tic Tac Toe! \nWe'll be playing in a sec, but, first..")
general_board = {'7': ' ', '8': ' ', '9': ' ',
'4': ' ', '5': ' ', '6': ' ',
'1': ' ', '2': ' ', '3': ' '}
# prints board structure
# Choose which player goes first
# Clear the board and reset the game
if __name__ == "__main__":
game()
restart()
| 35.92029
| 89
| 0.435747
|
c24663b502469b48e008fb30a563fba0b901fd18
| 7,119
|
py
|
Python
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
spethso/Verleihsystem-TTF
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 1
|
2019-03-17T08:11:14.000Z
|
2019-03-17T08:11:14.000Z
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
spethso/Verleihsystem-TTF
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 60
|
2018-06-12T14:46:50.000Z
|
2020-11-16T00:50:37.000Z
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
FIUS/ttf-backend
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 1
|
2019-12-02T19:25:59.000Z
|
2019-12-02T19:25:59.000Z
|
"""
Auth Providers which provides LDAP login
"""
from typing import List, Dict
from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE
from ldap3.core.exceptions import LDAPSocketOpenError, LDAPBindError
from ..login import LoginProvider
from .. import APP, AUTH_LOGGER
| 47.46
| 118
| 0.61975
|
c247338889dd4aef3193b428e74aac5424652e3f
| 4,117
|
py
|
Python
|
md2html.py
|
osfans/yancheng
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | 4
|
2017-01-26T03:25:24.000Z
|
2019-04-15T14:11:46.000Z
|
md2html.py
|
osfans/yancheng
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | 1
|
2016-12-02T04:26:31.000Z
|
2016-12-05T05:02:39.000Z
|
md2html.py
|
osfans/xu
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import re, os, glob
template = """
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<style>
body {
font-family: PMingLiu, HanaMinA, HanaMinB, Helvetica, arial, sans-serif;
writing-mode: vertical-rl;
-webkit-writing-mode: vertical-rl; }
.sm {
margin: 20px 0 10px;
padding: 0;
font-weight: bold;
font-size: 30px;
border-left: 1px solid #cccccc;
margin: 0 5px;
cursor: text;
position: static;
clear: both;
text-align: right;
}
.sd, .sd2, .zy, .zi, .zi1, .yi {
font-size: 10px;
text-align: center;
cursor: text;
float: left;
margin-left: 10px;
margin-right: 10px;
line-height: 10px;
letter-spacing: 0.35em;
}
.sd, .sd2 {
margin-right: 25px;
clear: both;
}
.sd2 {
margin-right: 20px;
}
.zi, .zi1 {
padding-top: 20px;
padding-bottom: 10px;
font-size: 20px;
line-height: 20px;
}
.zi1 {
padding-top: 10px;
}
.yi {
min-height: 40px;
text-align: left;
line-height: 12px;
margin-right: 8px;
}
.clear {
clear: both;
}
</style>
<title></title>
</head>
<body>
%s
</body>
</html>
"""
lines = list()
copy_readme()
for filename in glob.glob("wiki/??.md"):
md2html(filename)
| 24.360947
| 120
| 0.459072
|
c2478e02ca506e0323d992197336faa1570d5c97
| 2,460
|
py
|
Python
|
plugin_hide_run_panel/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
plugin_hide_run_panel/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
plugin_hide_run_panel/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QMainWindow, QFrame
import mobase
| 31.948052
| 85
| 0.660976
|
c24ab458d07762596a9a0b958ea5ceac2489021a
| 164
|
py
|
Python
|
nnet/learning_rate_func/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/learning_rate_func/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/learning_rate_func/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
from ._inv_prop_lr import InvPropLR
from ._constant_lr import ConstantLR
from ._step_size_lr import StepSizeLR
from ._dynamic_step_size_lr import DynamicStepSizeLR
| 32.8
| 52
| 0.878049
|
c24d4c5a8f9125c9ef834c785c10d1d380869f30
| 8,645
|
py
|
Python
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | 2
|
2020-12-08T20:51:38.000Z
|
2021-01-03T20:42:10.000Z
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | 1
|
2020-12-08T21:09:50.000Z
|
2020-12-08T21:30:35.000Z
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
utils/strava.py
=================
Utility class to Strava API
"""
import json
import time
from configparser import ConfigParser, NoOptionError
from datetime import datetime
from pathlib import Path
from typing import Tuple
from loguru import logger
from stravalib import Client, exc
from utils.parameters import SECRET
from utils.constants import CONFIG_PATH, CODE_ID_FILE_NAME, TOKEN_FILE_NAME
from utils.files_handler import check_folder
from utils.parameters import STRAVA, CLIENT_ID
def get_client_id(app_config: ConfigParser) -> int:
""" Obtains the client ID from the configuration file.
Args:
app_config (ConfigParser): app configuration.
Returns:
int: client id from the configuration file.
Raises:
NoOptionError: If the `client_id` key is not
present in the configuration.
ValueError: If the client id is not an integer.
"""
try:
client_id = app_config.getint(STRAVA, CLIENT_ID)
except NoOptionError:
raise ValueError('The client id has not been set in the configuration.')
except ValueError:
logger.exception('Invalid client id format.')
raise
return client_id
def get_secret(app_config: ConfigParser) -> str:
""" Obtains the secret from the configuration file.
Args:
app_config (ConfigParser): app configuration.
Returns:
str: secret from the configuration file.
Raises:
NoOptionError: If the `secret` key is not
present in the configuration.
"""
try:
secret = app_config.get(STRAVA, SECRET)
except NoOptionError:
        raise ValueError('The secret has not been set in the configuration.')
return secret
def get_strava_token_from_code_id(config: ConfigParser) -> str:
""" Method that interchange the temporary authentication code obtained
when `src/request_auth.py` is executed. The method reads the file
`config/code_id.txt` that contains the temporal authentication and generates
the POST request to obtain the final access token which is saved in
`config/token.json`.
This method requires the Strava application `client_id` and `secret` that
has to be set in the configuration file (`config/config.ini`).
Args:
config (ConfigParser): app configuration.
Returns:
str: Strava access token.
Raises:
ValueError: If no token is found in the configuration.
"""
code_id_path = Path(CONFIG_PATH, CODE_ID_FILE_NAME)
if not code_id_path.is_file():
        raise ValueError('The file with the temporal authentication code (`config/code_id.txt`) '
                         'was NOT found. Execute `request_auth.py` to obtain the temporal access.')
with open(code_id_path, 'r') as file:
        logger.debug('The file with the temporal authentication code (`config/code_id.txt`) '
                     'was found.')
code_id = file.read()
if not code_id:
raise ValueError('No valid temporal code access found. Rerun `request_auth.py` '
'to obtain the temporal access.')
client = Client()
token = client.exchange_code_for_token(client_id=get_client_id(config),
client_secret=get_secret(config),
code=code_id)
logger.debug('Obtained access until {}:\n'
                 '- token: {}.\n'
'- refresh token: {}.',
datetime.utcfromtimestamp(int(token['expires_at'])).strftime('%d-%m-%Y %H:%M:%S'),
token['access_token'], token['refresh_token'])
# Save JSON with the response
save_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
with open(save_path, 'w') as file:
logger.info('Writing token information to `{}`.', save_path)
json.dump(token, file, indent=4)
return token['access_token']
def get_strava_client(config: ConfigParser) -> Client:
""" Checks the authentication token and generates the Strava client.
Args:
config (ConfigParser): app configuration.
Returns:
if exist, strava client configured with the authentication token.
"""
token_file_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
if token_file_path.is_file():
logger.debug('The token info file (`config/token.json`) was found.')
with open(token_file_path, 'r') as file:
token_data = json.load(file)
token = token_data.get('access_token')
# If the file exists but no access token found, check against the temporary auth
if not token:
logger.warning('The token info file (`config/token.json`) was found'
' but the access token could not be read.')
token = get_strava_token_from_code_id(config)
else:
logger.info('The token info file (`config/token.json`) was NOT found. '
'Retrieving from the temporal authentication code.')
token = get_strava_token_from_code_id(config)
client = Client(access_token=token)
return client
def upload_activity(client: Client, activity_type: str, file_path: Path) -> bool:
""" Helper method to upload the activity to Strava. This method will handle
the different possibilities when uploading an activity.
Args:
client (Client): configured Strava client.
activity_type (str): Strava activity string.
file_path (Path): Path to the `*.tcx` activity file.
Returns:
bool: True if the activity have been uploaded successfully. False otherwise.
Raises:
        RateLimitExceeded: When the API limits have been reached, typically
            after more than 1,000 requests have been made during the day.
ConnectionError: When it has been impossible to connect the Strava servers.
Exception: Unknown exceptions that will be logged in detail.
"""
try:
        # Use a context manager so the file handle is always closed.
        with open(file_path, 'r') as activity_file:
            client.upload_activity(
                activity_file=activity_file,
                data_type='tcx',
                activity_type=activity_type,
                private=False
            )
except exc.ActivityUploadFailed:
logger.exception('Error uploading the activity `{}`.', file_path.stem)
return False
except exc.RateLimitExceeded:
logger.exception('Exceeded the API rate limit.')
raise
except ConnectionError:
logger.exception('No internet connection.')
raise
except Exception:
logger.exception('Unknown exception')
raise
# If no error return true
    logger.debug('Activity `{}` uploaded successfully.', file_path.stem)
return True
def handle_rate_limit(start_time: float, requests: int) -> Tuple[float, int]:
""" Method to handle the 15 minutes API limit. This method will check the
elapsed time since the first request and the number of them. Three cases
are possible:
- Less than 15 minutes elapsed from the first request and less than 100
requests -> continue.
- More than 15 minutes elapsed from the first request and less than 100
requests -> reset timer and request number to count from 0 again.
- Less than 15 minutes elapsed from the first request but more than 100
requests -> sleep until the 15 minutes block is over and reset timer
and request number to count from 0 again.
Args:
start_time (float): timestamp of the first request of the block.
requests (int): number of request done in the block.
Returns:
float, int: updated start time and number of requests following the
possible cases.
"""
requests += 1
elapsed_time = time.time() - start_time
if elapsed_time <= 60 * 15:
if requests >= 100:
remaining_time_stopped = 60 * 15 - elapsed_time
mins, secs = divmod(remaining_time_stopped, 60)
            logger.warning('The number of allowed requests per 15 minutes has '
                           'been reached. Sleeping for {:0.0f} minutes, {:0.1f} seconds.',
                           mins, secs)
time.sleep(remaining_time_stopped)
            # Reset counters; include the current request in the new window.
logger.info('Waiting time elapsed. Continuing with the process.')
requests = 1
start_time = time.time()
else:
        logger.debug('More than 15 minutes have elapsed. Resetting requests and timer.')
        # Reset counters; include the current request in the new window.
requests = 1
start_time = time.time()
return start_time, requests
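if __name__ == '__main__':
    # Minimal self-check of the rate-limit helper (illustrative values): fake
    # a window that started over 15 minutes ago so the reset branch runs
    # instead of sleeping.
    demo_start = time.time() - 60 * 15 - 1
    demo_start, demo_requests = handle_rate_limit(demo_start, 99)
    print(demo_requests)  # 1: counters reset because the window elapsed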
| 36.020833
| 99
| 0.65587
|
df9a160281e97721997326dd0b0903a52cd73273
| 5,293
|
py
|
Python
|
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | 1
|
2020-12-08T01:24:34.000Z
|
2020-12-08T01:24:34.000Z
|
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | null | null | null |
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | null | null | null |
"""
Author: brooklyn
train with synthText
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from net.craft import CRAFT
import sys
from utils.cal_loss import cal_synthText_loss
from dataset.synthDataset import SynthDataset
import argparse
from eval import eval_net
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='CRAFT Train Fine-Tuning')
parser.add_argument('--gt_path', default='/media/brooklyn/EEEEE142EEE10425/SynthText/gt.mat', type=str, help='SynthText gt.mat')
parser.add_argument('--synth_dir', default='/media/brooklyn/EEEEE142EEE10425/SynthText', type=str, help='SynthText image dir')
parser.add_argument('--label_size', default=96, type=int, help='target label size')
parser.add_argument('--batch_size', default=16, type=int, help='training data batch size')
parser.add_argument('--test_batch_size', default=16, type=int, help='test data batch size')
parser.add_argument('--test_interval', default=40, type=int, help='test interval')
parser.add_argument('--max_iter', default=50000, type=int, help='max iteration')
parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
parser.add_argument('--epochs', default=500, type=int, help='training epochs')
parser.add_argument('--test_iter', default=10, type=int, help='test iteration')
args = parser.parse_args()
image_transform = transforms.Compose([
transforms.Resize((args.label_size * 2, args.label_size * 2)),
transforms.ToTensor()
])
label_transform = transforms.Compose([
transforms.Resize((args.label_size,args.label_size)),
transforms.ToTensor()
])
if __name__ == "__main__":
batch_size = args.batch_size
test_batch_size = args.test_batch_size
epochs = args.epochs #
lr = args.lr #
test_interval = args.test_interval #
max_iter = args.max_iter
net = CRAFT(pretrained=True) # craft
net = net.to(device)
model_save_prefix = 'checkpoints/craft_netparam_'
try:
train(net=net,
batch_size=batch_size,
test_batch_size=test_batch_size,
lr=lr,
test_interval=test_interval,
max_iter=max_iter,
epochs=epochs,
model_save_path=model_save_prefix)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED1.pth')
print('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 37.807143
| 128
| 0.642736
|
df9b4ebedd02514962424a1cc0a1b5aae502b670
| 1,896
|
py
|
Python
|
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | null | null | null |
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | 9
|
2019-12-04T23:15:59.000Z
|
2022-02-10T09:08:38.000Z
|
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | null | null | null |
from django.db import models
# Keeps track of FriendCircle memberships
MATCH_STATUS = (
('O', 'Not swiped',),
('V', 'Swiped Right',),
('X', 'Swiped Left',),
)
# Keeps track of matches. If both parties swiped right, the user can be added to FriendCircleMembership
| 35.773585
| 103
| 0.68038
|
df9d6d03fbed45db8f46a22336474ebb4831783c
| 474
|
py
|
Python
|
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
"""Unit tests for the Jira issues collector."""
from .base import JiraTestCase
| 29.625
| 78
| 0.679325
|
df9e429f72ebf0471ad51a2d2296ecb2934b944d
| 1,485
|
py
|
Python
|
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import cf_xarray as cfxr
| 34.534884
| 80
| 0.60404
|
dfa1fd750ebe41f9f5e5dbc785b717257dc70d9d
| 1,908
|
py
|
Python
|
slack_bolt/adapter/socket_mode/base_handler.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 504
|
2020-08-07T05:02:57.000Z
|
2022-03-31T14:32:46.000Z
|
slack_bolt/adapter/socket_mode/base_handler.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 560
|
2020-08-07T01:16:06.000Z
|
2022-03-30T00:40:56.000Z
|
slack_bolt/adapter/socket_mode/base_handler.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 150
|
2020-08-07T09:41:14.000Z
|
2022-03-30T04:54:51.000Z
|
"""The base class of Socket Mode client implementation.
If you want to build asyncio-based ones, use `AsyncBaseSocketModeHandler` instead.
"""
import logging
import signal
import sys
from threading import Event
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_bolt import App
from slack_bolt.util.utils import get_boot_message
| 32.896552
| 101
| 0.678197
|
dfa2ba545c720071817fb0691cb4e7c5aad3c2a5
| 8,344
|
py
|
Python
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 2
|
2016-04-04T15:20:50.000Z
|
2020-08-01T19:28:55.000Z
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 1
|
2020-10-02T05:44:45.000Z
|
2020-10-02T05:44:45.000Z
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 11
|
2016-03-26T18:37:06.000Z
|
2020-10-01T19:44:55.000Z
|
# coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs
def to_sparse(D, format="csc"):
"""
Transform dense matrix to sparse matrix of return_type
bsr_matrix(arg1[, shape, dtype, copy, blocksize]) Block Sparse Row matrix
coo_matrix(arg1[, shape, dtype, copy]) A sparse matrix in COOrdinate format.
csc_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Column matrix
csr_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Row matrix
dia_matrix(arg1[, shape, dtype, copy]) Sparse matrix with DIAgonal storage
dok_matrix(arg1[, shape, dtype, copy]) Dictionary Of Keys based sparse matrix.
lil_matrix(arg1[, shape, dtype, copy]) Row-based linked list sparse matrix
:param D: Dense matrix
:param format: how to save the sparse matrix
:return: sparse version
"""
if format == "bsr":
return sprs.bsr_matrix(D)
elif format == "coo":
return sprs.coo_matrix(D)
elif format == "csc":
return sprs.csc_matrix(D)
elif format == "csr":
return sprs.csr_matrix(D)
elif format == "dia":
return sprs.dia_matrix(D)
elif format == "dok":
return sprs.dok_matrix(D)
elif format == "lil":
return sprs.lil_matrix(D)
else:
        return D  # unknown format string: fall back to the dense matrix unchanged
def next_neighbors_periodic(p, ps, k, T=None):
"""
This function gives for a value p the k points next to it which are found in
in the vector ps and the points which are found periodically.
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
:return: ndarray, with the k next neighbors and an array containing the
"""
if T is None:
T = ps[-1]-2*ps[0]+ps[1]
p_bar = p - np.floor(p/T)*T
ps = ps - ps[0]
distance_to_p = []
for tk in ps:
d1 = tk+T-p_bar
d2 = tk-p_bar
d3 = tk-T-p_bar
min_d = min([np.abs(d1), np.abs(d2), np.abs(d3)])
if np.abs(d1) == min_d:
distance_to_p.append(d1)
elif np.abs(d2) == min_d:
distance_to_p.append(d2)
else:
distance_to_p.append(d3)
distance_to_p = np.asarray(distance_to_p)
value_index = []
for d,i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d, i))
# sort by distance
    value_index_sorted_by_abs = sorted(value_index, key=lambda s: abs(s[0]))
    if k % 2 == 1:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k+1], key=lambda s: s[0])[:k]
    else:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k], key=lambda s: s[0])
    # Return lists (not lazy map objects) so callers can use them for fancy indexing.
    return [s[1] for s in value_index_sorted_by_sign], [s[0] + p for s in value_index_sorted_by_sign]
def next_neighbors(p, ps, k):
"""
This function gives for a value p the k points next to it which are found in
in the vector ps
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
:return: ndarray, with the k next neighbors
"""
distance_to_p = np.abs(ps-p)
# zip it
value_index = []
for d,i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d,i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
# take first k indices with least distance and sort them
return sorted(map(lambda s: s[1], value_index_sorted[0:k]))
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the restriction matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a restriction matrix
"""
M = np.zeros((coarse_grid.size, fine_grid.size))
n_g = coarse_grid.size
for i, p in zip(range(n_g), coarse_grid):
if periodic:
nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])  # list comprehension; np.asarray(map(...)) fails on Python 3
else:
nn = next_neighbors(p, fine_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(fine_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
return to_sparse(M, return_type)
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the interpolation matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a interpolation matrix
"""
M = np.zeros((fine_grid.size, coarse_grid.size))
n_f = fine_grid.size
for i, p in zip(range(n_f), fine_grid):
if periodic:
nn,cont_arr = next_neighbors_periodic(p, coarse_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
else:
nn = next_neighbors(p, coarse_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(coarse_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
return to_sparse(M, return_type)
def kron_on_list(matrix_list):
"""
:param matrix_list: a list of sparse matrices
:return: a matrix
"""
if len(matrix_list) == 2:
return sprs.kron(matrix_list[0], matrix_list[1])
elif len(matrix_list) == 1:
return matrix_list[0]
else:
return sprs.kron(matrix_list[0], kron_on_list(matrix_list[1:]))
def interpolate_to_t_end(nodes_on_unit, values):
"""
Assume a GaussLegendre nodes, we are interested in the value at the end of
the interval, but we now only the values in the interior of the interval.
We compute the value by legendre interpolation.
:param nodes_on_unit: nodes transformed to the unit interval
:param values: values on those nodes
:return: interpolation to the end of the interval
"""
n = nodes_on_unit.shape[0]
circulating_one = np.asarray([1.0]+[0.0]*(n-1))
lag_pol = []
result = np.zeros(values[0].shape)
for i in range(n):
lag_pol.append(intpl.lagrange(nodes_on_unit, np.roll(circulating_one, i)))
result += values[i]*lag_pol[-1](1.0)
return result
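if __name__ == "__main__":
    # Usage sketch: transfer operators between two nested 1d grids, plus an
    # endpoint interpolation check (f(t) = t^2 is reproduced exactly).
    fine_grid = np.linspace(0.0, 1.0, 9)
    coarse_grid = fine_grid[::2]
    R = restriction_matrix_1d(fine_grid, coarse_grid, k=2)
    P = interpolation_matrix_1d(fine_grid, coarse_grid, k=2)
    print(R.shape, P.shape)  # (5, 9) (9, 5)
    nodes = np.array([0.1127, 0.5, 0.8873])  # 3 Gauss-Legendre nodes on [0, 1]
    values = [np.array([t ** 2]) for t in nodes]
    print(interpolate_to_t_end(nodes, values))  # ~[1.0]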
| 36.920354
| 111
| 0.628715
|
dfa3a2fa2289a9c892b09c29ede2ebe39a3dd0c8
| 7,266
|
py
|
Python
|
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
"""rbtree_graphviz.py - create a graphviz representation of a LLRBT.
The purpose of this module is to visually show how the shape of a LLRBT
changes when keys are inserted in it. For every insert, sub graph (tree)
is added to the main graph.
`initialization_list` holds the values that are inserted in the tree.
This list can be changed for a list of anything that can be compared
with > == <. For example, with `initialization_list = range(50)` keys
from 0 to 49 will be inserted in the tree.
Consider that for every key, a graph is going to be generated.
"""
from graphviz import Digraph
from trees.rbtree import LLRBT, is_red
NODE_SHAPE = "circle"
NONE_NODE_SHAPE = "point"
TITLE_SHAPE = "box"
RED_COLOR = "#b8000f"
DEFAULT_GRAPH_NODE_ATTR = {
"shape": NODE_SHAPE,
"color": "black",
"style": "filled",
"fillcolor": "#cfd3d6",
}
RED_NODE_ATTR = {
"fontcolor": "white",
"fillcolor": RED_COLOR
}
DEFAULT_GRAPH_EDGE_ATTR = {
"color": "black",
"arrowhead": "vee",
"style": "solid",
}
def add_node(graph, node):
"""Add `node` to `graph`. `node` is a tuple with the
following shape:
(node_id, {<node attributes>}, {<graph's node attributes>})
^ ^ ^
string see graphviz documentation"""
node_id, node_attr, graph_node_attr = node
graph.node(node_id, **node_attr, **graph_node_attr)
return graph
def add_edge(graph, edge):
"""Add edge from `edge[0]` to `edge[1]` to `graph`. `edge` is
a tuple with the following shape:
(source_node_id, destiny_node_id, {<graph's edge attributes>})
^ ^ ^
string string see graphviz documentation"""
source_node_id, destiny_node_id, graph_edge_attr = edge
graph.edge(source_node_id, destiny_node_id, **graph_edge_attr)
return graph
if __name__ == "__main__":
initialization_list = ["Z", "W", "F", "D", "S", "E", "A", "R", "C", "H", "X", "M", "P", "L"]
# initialization_list = ["A", "B", "C", "D"]
tree = LLRBT()
# graph = generate_graph(tree, initialization_list)
graph = generate_graph_per_insert(tree, initialization_list)
print(graph.source)
graph.render("trees/rbtree.gv", view=True)
| 38.648936
| 100
| 0.597991
|
dfa5cb1adcaf33702f7f2c3cd145a0c36382a865
| 97
|
py
|
Python
|
students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py
|
Vivasus/ITMO_ICT_WebDevelopment_2020-2021
|
833d7ac1d40d9f7456a3c6b104a0e53c652d0035
|
[
"MIT"
] | null | null | null |
students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py
|
Vivasus/ITMO_ICT_WebDevelopment_2020-2021
|
833d7ac1d40d9f7456a3c6b104a0e53c652d0035
|
[
"MIT"
] | null | null | null |
students/K33421/Samoshchenkov_Alexei/lr_2/hw_system/apps.py
|
Vivasus/ITMO_ICT_WebDevelopment_2020-2021
|
833d7ac1d40d9f7456a3c6b104a0e53c652d0035
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
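# A conventional Django AppConfig for this app (the class name is an assumption):
class HwSystemConfig(AppConfig):
    name = 'hw_system'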
| 16.166667
| 34
| 0.721649
|
dfa771b70b06ebdb06698f8a6ef450643663f3e9
| 5,721
|
py
|
Python
|
azure_sftp_plugin/hooks/adls_gen2_hook.py
|
christo-olivier/airflow_azure_sftp_plugin
|
1d483be6419744909af2fa940cd50880ed8d5890
|
[
"Apache-2.0"
] | null | null | null |
azure_sftp_plugin/hooks/adls_gen2_hook.py
|
christo-olivier/airflow_azure_sftp_plugin
|
1d483be6419744909af2fa940cd50880ed8d5890
|
[
"Apache-2.0"
] | null | null | null |
azure_sftp_plugin/hooks/adls_gen2_hook.py
|
christo-olivier/airflow_azure_sftp_plugin
|
1d483be6419744909af2fa940cd50880ed8d5890
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from typing import Generator
from airflow.hooks.base_hook import BaseHook
from azure.storage.filedatalake import FileSystemClient
from azure.storage.filedatalake._generated.models._models_py3 import (
StorageErrorException,
)
| 39.729167
| 86
| 0.632582
|
dfa9f05edc79136d5654d284a464ccb459169f40
| 536
|
py
|
Python
|
phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
phr/ciudadano/migrations/0044_ciudadano_codigo_asegurado.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 17:10
from __future__ import unicode_literals
from django.db import migrations, models
| 26.8
| 109
| 0.654851
|
dfad270ef93b37ed6df9bcf779f6cf41ac7ec78e
| 2,499
|
py
|
Python
|
graphtiny/service.py
|
Canicio/pyqtgraph-tiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | 1
|
2018-03-17T12:36:56.000Z
|
2018-03-17T12:36:56.000Z
|
graphtiny/service.py
|
Canicio/pyqtgraph-tiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | 1
|
2017-08-08T18:31:31.000Z
|
2017-08-08T18:31:31.000Z
|
graphtiny/service.py
|
Canicio/graphtiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | null | null | null |
from time import sleep
import pyqtgraph as pg
import threading
from graphtiny.api import IChart, IDataStreamWindow
from graphtiny.domain import DataStreamWindow, Chart
| 34.232877
| 95
| 0.612645
|
dfad2ce40cf4b3e7c6bdab613bdf207aa9161bc1
| 2,576
|
py
|
Python
|
backend/app/app/api/deps.py
|
totalhack/zillion-web
|
e567c04d3564aec8105d54533d318b79d943c9c6
|
[
"MIT"
] | 3
|
2020-10-01T11:28:02.000Z
|
2020-10-31T15:35:51.000Z
|
backend/app/app/api/deps.py
|
totalhack/zillion-web
|
e567c04d3564aec8105d54533d318b79d943c9c6
|
[
"MIT"
] | 1
|
2022-02-09T04:19:20.000Z
|
2022-02-09T13:56:40.000Z
|
backend/app/app/api/deps.py
|
totalhack/zillion-web
|
e567c04d3564aec8105d54533d318b79d943c9c6
|
[
"MIT"
] | null | null | null |
from typing import Generator, Dict, Any
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from pydantic import ValidationError
from sqlalchemy.orm import Session
from tlbx import json, pp
from zillion.configs import load_warehouse_config, zillion_config
from zillion.model import Warehouses
from zillion.warehouse import Warehouse
from app import app
from app import crud, models, schemas
from app.core import security
from app.core.config import settings
from app.db.session import SessionLocal
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f"{settings.API_V1_STR}/login/access-token"
)
warehouses = {}
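def get_db() -> Generator:
    """Yield a per-request DB session and close it afterwards: the canonical
    FastAPI dependency implied by the `Generator`/`SessionLocal` imports and
    the `Depends(get_db)` references below (an assumption, not verified)."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()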
def get_warehouses() -> Dict[str, Any]:
"""NOTE: this assumes Zillion Web DB is same as Zillion DB"""
global warehouses
if warehouses:
# TODO: cache control?
return warehouses
print("Building warehouses...")
db = SessionLocal()
try:
result = db.query(Warehouses).all()
for row in result:
warehouses[row.id] = Warehouse.load(row.id)
pp(warehouses)
return warehouses
finally:
db.close()
def get_current_user(
db: Session = Depends(get_db), token: str = Depends(reusable_oauth2)
) -> models.User:
try:
payload = jwt.decode(
token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
)
token_data = schemas.TokenPayload(**payload)
except (jwt.JWTError, ValidationError):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Could not validate credentials",
)
user = crud.user.get(db, id=token_data.sub)
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
def get_current_active_user(
current_user: models.User = Depends(get_current_user),
) -> models.User:
if not crud.user.is_active(current_user):
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
def get_current_active_superuser(
current_user: models.User = Depends(get_current_user),
) -> models.User:
if not crud.user.is_superuser(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return current_user
| 27.115789
| 77
| 0.69604
|
dfb0e4025d32f8743112eeea3ef16b5393035552
| 212
|
py
|
Python
|
BufferStockModel/run.py
|
bbardoczy/ConsumptionSavingNotebooks
|
91811f784ec61fe2f11f8c9e0e172d085574f57c
|
[
"MIT"
] | 1
|
2022-03-09T14:43:29.000Z
|
2022-03-09T14:43:29.000Z
|
BufferStockModel/run.py
|
bbardoczy/ConsumptionSavingNotebooks
|
91811f784ec61fe2f11f8c9e0e172d085574f57c
|
[
"MIT"
] | null | null | null |
BufferStockModel/run.py
|
bbardoczy/ConsumptionSavingNotebooks
|
91811f784ec61fe2f11f8c9e0e172d085574f57c
|
[
"MIT"
] | null | null | null |
from BufferStockModel import BufferStockModelClass
updpar = dict()
updpar["Np"] = 1500
updpar["Nm"] = 1500
updpar["Na"] = 1500
model = BufferStockModelClass(name="baseline",solmethod="egm",**updpar)
model.test()
| 26.5
| 71
| 0.745283
|
dfb2125e655f351b14d7a2e313cfea92c5b3d51d
| 4,629
|
py
|
Python
|
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | 30
|
2018-12-05T22:02:26.000Z
|
2022-03-13T17:09:51.000Z
|
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | null | null | null |
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | 13
|
2018-12-28T14:31:48.000Z
|
2022-02-25T11:24:36.000Z
|
#! /usr/bin/env python3
#
## Copyright (C) 2015-2018 Rolf Neugebauer. All rights reserved.
## Copyright (C) 2015 Netronome Systems, Inc. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""A simple script to generate data for PCIe and ethernet bandwidth estimates"""
import sys
from optparse import OptionParser
from model import pcie, eth, mem_bw
# pylint: disable=too-many-locals
OUT_FILE = "pcie_bw.dat"
def main():
"""Main"""
usage = """usage: %prog [options]"""
parser = OptionParser(usage)
parser.add_option('--mps', dest='MPS', type="int", action='store',
default=256,
help='Set the maximum payload size of the link')
parser.add_option('--mrrs', dest='MRRS', type="int", action='store',
default=512,
help='Set the maximum read request size of the link')
parser.add_option('--rcb', dest='RCB', type="int", action='store',
default=64,
help='Set the read completion boundary of the link')
parser.add_option('--lanes', dest='lanes', type="string", action='store',
default='x8',
help='Set num lanes (x2, x4, x8, x16, or x32)')
parser.add_option('--gen', dest='gen', type="string", action='store',
default='gen3',
help='Set PCIe version (gen1, gen2, gen3, gen4, or gen5)')
parser.add_option('--addr', dest='addr', type="int", action='store',
default=64,
help='Set the number of address bits (32 or 64)')
parser.add_option('--ecrc', dest='ecrc', type="int", action='store',
default=0,
help='Use ECRC (0 or 1)')
parser.add_option('-o', '--outfile', dest='FILE',
default=OUT_FILE, action='store',
help='File where to write the data to')
(options, _) = parser.parse_args()
pciecfg = pcie.Cfg(version=options.gen,
lanes=options.lanes,
addr=options.addr,
ecrc=options.ecrc,
mps=options.MPS,
mrrs=options.MRRS,
rcb=options.RCB)
print("PCIe Config:")
pciecfg.pp()
ethcfg = eth.Cfg('40GigE')
tlp_bw = pciecfg.TLP_bw
bw_spec = pcie.BW_Spec(tlp_bw, tlp_bw, pcie.BW_Spec.BW_RAW)
dat = open(options.FILE, "w")
dat.write("\"Payload(Bytes)\" "
"\"PCIe Write BW\" "
"\"PCIe Write Trans/s\" "
"\"PCIe Read BW\" "
"\"PCIe Read Trans/s\" "
"\"PCIe Read/Write BW\" "
"\"PCIe Read/Write Trans/s\" "
"\"40G Ethernet BW\" "
"\"40G Ethernet PPS\" "
"\"40G Ethernet Frame time (ns)\" "
"\n")
for size in range(1, 1500 + 1):
wr_bw = mem_bw.write(pciecfg, bw_spec, size)
rd_bw = mem_bw.read(pciecfg, bw_spec, size)
rdwr_bw = mem_bw.read_write(pciecfg, bw_spec, size)
wr_trans = (wr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
rd_trans = (rd_bw.rx_eff * 1000 * 1000 * 1000 / 8) / size
rdwr_trans = (rdwr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
if size >= 64:
eth_bw = ethcfg.bps_ex(size) / (1000 * 1000 * 1000.0)
eth_pps = ethcfg.pps_ex(size)
eth_lat = 1.0 * 1000 * 1000 * 1000 / eth_pps
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f %.2f %d %.2f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans,
eth_bw, eth_pps, eth_lat))
else:
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans))
dat.close()
if __name__ == '__main__':
sys.exit(main())
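# Example invocation (derived from the options defined above):
#   ./pcie_bw.py --gen gen4 --lanes x16 --mps 256 --mrrs 512 -o pcie_bw.dat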
| 38.575
| 80
| 0.534241
|
dfb68a5201db3b2abf55a2e729e1d1531d27950c
| 77
|
py
|
Python
|
src/buildercore/external.py
|
elifesciences/builder
|
161829686f777f7ac7f97bd970395886ba5089c1
|
[
"MIT"
] | 11
|
2017-03-01T18:00:30.000Z
|
2021-12-10T05:11:02.000Z
|
src/buildercore/external.py
|
elifesciences/builder
|
161829686f777f7ac7f97bd970395886ba5089c1
|
[
"MIT"
] | 397
|
2016-07-08T14:39:46.000Z
|
2022-03-30T12:45:09.000Z
|
src/buildercore/external.py
|
elifesciences/builder
|
161829686f777f7ac7f97bd970395886ba5089c1
|
[
"MIT"
] | 14
|
2016-07-13T08:33:28.000Z
|
2020-04-22T21:42:21.000Z
|
import subprocess
| 15.4
| 39
| 0.779221
|
dfb696c1a314cee61ccd51a38771b72300f8407a
| 648
|
py
|
Python
|
Round 2/data_packing.py
|
kamyu104/GoogleCodeJam-2014
|
ff29a677f502168eb0b92d6928ad6983d2622017
|
[
"MIT"
] | 10
|
2016-04-10T22:50:54.000Z
|
2021-04-17T18:17:02.000Z
|
Round 2/data_packing.py
|
kamyu104/GoogleCodeJam-2014
|
ff29a677f502168eb0b92d6928ad6983d2622017
|
[
"MIT"
] | null | null | null |
Round 2/data_packing.py
|
kamyu104/GoogleCodeJam-2014
|
ff29a677f502168eb0b92d6928ad6983d2622017
|
[
"MIT"
] | 10
|
2016-07-19T08:43:38.000Z
|
2021-07-22T22:38:44.000Z
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2014 Round 2 - Problem A. Data Packing
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000432fed/0000000000432b8d
#
# Time: O(NlogN)
# Space: O(1)
#
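# `data_packing()` is called below but its definition was lost in this
# excerpt; a hypothetical sketch of the standard two-pointer greedy that
# matches the O(NlogN) time / O(1) extra space noted above:
def data_packing():
    N, X = map(int, raw_input().split())
    sizes = sorted(map(int, raw_input().split()))
    left, right, discs = 0, N - 1, 0
    while left <= right:
        if left < right and sizes[left] + sizes[right] <= X:
            left += 1  # pair the smallest remaining file with the largest
        right -= 1
        discs += 1
    return discs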
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, data_packing())
| 24.923077
| 91
| 0.598765
|
dfb71ae9c49c8ec75050dd6031ca98dd54f66f9f
| 18,950
|
py
|
Python
|
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | 4
|
2020-10-31T21:20:12.000Z
|
2022-01-05T16:13:07.000Z
|
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | null | null | null |
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | null | null | null |
"""
Description: Class for training CNNs using a nested cross-validation method. Train on the inner_fold to obtain
optimized hyperparameters. Train outer_fold to obtain classification performance.
"""
from braindecode.datautil.iterators import BalancedBatchSizeIterator
from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or
from braindecode.torch_ext.util import set_random_seeds, np_to_var, var_to_np
from braindecode.datautil.signal_target import SignalAndTarget
from braindecode.torch_ext.functions import square, safe_log
import torch as th
from sklearn.model_selection import train_test_split
from BiModNeuroCNN.training.training_utils import current_acc, current_loss
from BiModNeuroCNN.data_loader.data_utils import smote_augmentation, multi_SignalAndTarget
from BiModNeuroCNN.results.results import Results as res
from torch.nn.functional import nll_loss, cross_entropy
from BiModNeuroCNN.training.bimodal_training import Experiment
import numpy as np
import itertools as it
import torch
from torch import optim
import logging
from ast import literal_eval
from BiModNeuroCNN.results.metrics import cross_entropy
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
log = logging.getLogger(__name__)
torch.backends.cudnn.deterministic = True
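# The class implementing the scheme described in the docstring above is not
# part of this excerpt. For orientation, nested cross-validation generically
# has this shape (a sketch, not the author's implementation):
#
#   for outer_train, outer_test in outer_folds(data):
#       inner_scores = {params: mean(train_and_score(params, tr, va)
#                                    for tr, va in inner_folds(outer_train))
#                       for params in hyperparameter_grid}
#       best_params = max(inner_scores, key=inner_scores.get)
#       report(train_and_score(best_params, outer_train, outer_test))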
| 57.95107
| 154
| 0.644063
|
dfb822e8f7cafa7cb423cc71ade94b740d42328b
| 7,462
|
py
|
Python
|
simple_task_repeater/str_app.py
|
lavrpetrov/simple-task-repeater
|
cd56ed52143ac31171fc757c6e1f7740bebe1ed4
|
[
"MIT"
] | null | null | null |
simple_task_repeater/str_app.py
|
lavrpetrov/simple-task-repeater
|
cd56ed52143ac31171fc757c6e1f7740bebe1ed4
|
[
"MIT"
] | null | null | null |
simple_task_repeater/str_app.py
|
lavrpetrov/simple-task-repeater
|
cd56ed52143ac31171fc757c6e1f7740bebe1ed4
|
[
"MIT"
] | 1
|
2021-04-20T15:38:44.000Z
|
2021-04-20T15:38:44.000Z
|
import datetime
from collections import Counter
from functools import wraps
from dateparser import parse as parse_date
from calmlib import get_current_date, get_current_datetime, to_date, trim
from .base import Task
from .str_database import STRDatabase
from .telegram_bot import TelegramBot, command, catch_errors
DEFAULT_PERIOD = 4
TASK_PER_DAY_LIMIT = 3
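# NOTE: the class statement wrapping the methods below (presumably a
# TelegramBot subclass; the exact name is not recoverable from this excerpt)
# was lost in extraction, so `self`, `self.db` and `super().run()` refer to
# that elided class.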
def run(self):
with self.db:
super().run()
def actualize_tasks(self):
if self._last_actualize_date < get_current_date():
self._actualize_tasks()
self._last_actualize_date = get_current_date()
def _actualize_tasks(self):
"""
Go over all tasks and update date/reschedule
"""
for user in self.db.user_names:
for task in self.db.get_users_tasks(user):
today = get_current_datetime()
while to_date(task.date) < to_date(today):
if task.reschedule:
                    # if the task is past due and set to reschedule, move it to today

task.date = today
else:
task.date += datetime.timedelta(days=task.period)
self.db.update_task(task)
| 32.163793
| 107
| 0.590592
|
dfb8674c6f7746d9692d1c11fcd1c8fdb24ebb98
| 258
|
py
|
Python
|
Strings/conversion-operation.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 3
|
2022-03-28T09:10:08.000Z
|
2022-03-29T10:47:56.000Z
|
Strings/conversion-operation.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 1
|
2022-03-27T11:52:58.000Z
|
2022-03-27T11:52:58.000Z
|
Strings/conversion-operation.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | null | null | null |
# lower, title, upper operations on strings
x = "spider"
y = "MAN"
v = x.upper()  # all letters become uppercase
w = y.lower()  # all letters become lowercase
z = y.title()  # only the first letter becomes uppercase, the rest lowercase
print(v, w, z)
| 19.846154
| 75
| 0.705426
|
dfb9067db6876e985a83eb3d9d6219b06ce32b30
| 1,197
|
py
|
Python
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | 2
|
2019-08-28T19:30:32.000Z
|
2020-03-28T16:17:01.000Z
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | 5
|
2021-03-18T22:53:57.000Z
|
2022-03-11T23:42:38.000Z
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | null | null | null |
"""Setup script for sfcpy"""
import os.path
from setuptools import setup
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file
with open(os.path.join(HERE, "README.md"), encoding='utf-8') as fid:
README = fid.read()
# This call to setup() does all the work
setup(
name="sfcpy",
version="1.2.3",
description="Space-Filling Curve library for image-processing tasks",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/adadesions/sfcpy",
author="adadesions",
author_email="adadesions@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
packages=["sfcpy"],
include_package_data=True,
tests_require=['pytest'],
install_requires=[
"numpy", "matplotlib", "Pillow"
],
entry_points={"console_scripts": ["sfcpy=sfcpy.__main__:main"]},
)
| 29.925
| 73
| 0.652464
|
dfb94390c72e2b9eb210dfba78b3240cd00784e2
| 7,921
|
py
|
Python
|
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import csv
import os
from collections import namedtuple
import string
from nameparser import HumanName
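# NOTE: the body of this script, including the `csv_to_dict` and
# `make_csv_data` helpers called below, is not part of this excerpt; only
# the imports and the entry point survive.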
if __name__ == '__main__':
issues_dict = csv_to_dict('3rdStageSourceCSVs/Interviews.csv')
make_csv_data(issues_dict)
| 36.004545
| 112
| 0.550562
|
dfb9a74f5e09588db5c20e479a0c85f0735ce76b
| 7,524
|
py
|
Python
|
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Optional, Any
import redis
from pip_services3_commons.config import IConfigurable, ConfigParams
from pip_services3_commons.errors import ConfigException, InvalidStateException
from pip_services3_commons.refer import IReferenceable, IReferences
from pip_services3_commons.run import IOpenable
from pip_services3_components.auth import CredentialResolver
from pip_services3_components.cache import ICache
from pip_services3_components.connect import ConnectionResolver
| 37.064039
| 158
| 0.636895
|
dfbade8328cd7332030b49fd40ed470582f05c91
| 7,392
|
py
|
Python
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 1
|
2018-10-26T13:33:20.000Z
|
2018-10-26T13:33:20.000Z
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 652
|
2018-10-26T12:28:08.000Z
|
2021-08-02T09:13:48.000Z
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
import util
FIELDS = {
'auto_now': fields.Boolean,
'auto_now_add': fields.Boolean,
'autofocus': fields.Boolean,
'choices': fields.String,
'default': fields.String,
'description': fields.String,
'email_filter': fields.Boolean,
'field_property': fields.String,
'forms_property': fields.String,
'kind': fields.String,
'name': fields.String,
'ndb_property': fields.String,
'placeholder': fields.String,
'rank': fields.Integer,
'readonly': fields.Boolean,
'repeated': fields.Boolean,
'required': fields.Boolean,
'sort_filter': fields.Boolean,
'strip_filter': fields.Boolean,
'verbose_name': fields.String,
'wtf_property': fields.String,
}
FIELDS.update(model.Base.FIELDS)
| 38.701571
| 123
| 0.652056
|
dfbc302b59b318fa83066ffc6aa91c4caa2533da
| 1,189
|
py
|
Python
|
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | 1
|
2019-09-15T12:30:44.000Z
|
2019-09-15T12:30:44.000Z
|
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | null | null | null |
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | null | null | null |
import pytest
from wired import ServiceContainer
| 31.289474
| 69
| 0.764508
|
dfbd03cf9bf0d42acbc4621a1653916d133bdb8e
| 958
|
py
|
Python
|
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | null | null | null |
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | null | null | null |
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | 1
|
2021-05-31T04:20:54.000Z
|
2021-05-31T04:20:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 21:24:53 2021
@author: Akshay Prakash
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv(r'\1617table.csv')
table.head()
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = 'skyblue')
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.show()
teamColours = ['#034694','#001C58','#5CBFEB','#D00027',
'#EF0107','#DA020E','#274488','#ED1A3B',
'#000000','#091453','#60223B','#0053A0',
'#E03A3E','#1B458F','#000000','#53162f',
'#FBEE23','#EF6610','#C92520','#BA1F1A']
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = teamColours)
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.xlabel('Points')
plt.ylabel('Teams')
plt.title("Premier league 16/17")
| 30.903226
| 84
| 0.583507
|
dfbf2ca5c949daa624f3881dc6dcb4567701067b
| 1,126
|
py
|
Python
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:44:58.000Z
|
2019-11-23T10:44:58.000Z
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 8
|
2020-07-16T07:14:12.000Z
|
2020-10-14T17:25:33.000Z
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:45:00.000Z
|
2019-11-23T10:45:00.000Z
|
#!/usr/bin/env python
import sys
import lxml.etree
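# `main()` is invoked below but its body was lost in this excerpt; a
# hypothetical sketch that merges the <Document> children of the KML files
# named on the command line into the first file and prints the result:
KML_NS = '{http://www.opengis.net/kml/2.2}'

def main():
    trees = [lxml.etree.parse(path) for path in sys.argv[1:]]
    target = trees[0].find(KML_NS + 'Document')
    for tree in trees[1:]:
        source = tree.find(KML_NS + 'Document')
        if source is None:
            continue
        for child in list(source):
            target.append(child)  # appending moves the node between trees
    sys.stdout.write(lxml.etree.tostring(trees[0], pretty_print=True).decode())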
if __name__ == '__main__':
main()
| 31.277778
| 79
| 0.599467
|
dfbf59c5b26596753447f4f968efc9068d24fa0b
| 3,829
|
py
|
Python
|
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
DESC = "partners-2018-03-21"
INFO = {
"AgentPayDeals": {
"params": [
{
"name": "OwnerUin",
"desc": "uin"
},
{
"name": "AgentPay",
"desc": "10"
},
{
"name": "DealNames",
"desc": ""
}
],
"desc": "/"
},
"DescribeAgentBills": {
"params": [
{
"name": "SettleMonth",
"desc": "2018-02"
},
{
"name": "ClientUin",
"desc": "ID"
},
{
"name": "PayMode",
"desc": "prepay/postpay"
},
{
"name": "OrderId",
"desc": ""
},
{
"name": "ClientRemark",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"AgentTransferMoney": {
"params": [
{
"name": "ClientUin",
"desc": "ID"
},
{
"name": "Amount",
"desc": ""
}
],
"desc": ""
},
"DescribeRebateInfos": {
"params": [
{
"name": "RebateMonth",
"desc": "2018-02"
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"ModifyClientRemark": {
"params": [
{
"name": "ClientRemark",
"desc": ""
},
{
"name": "ClientUin",
"desc": "ID"
}
],
"desc": ""
},
"DescribeAgentClients": {
"params": [
{
"name": "ClientUin",
"desc": "ID"
},
{
"name": "ClientName",
"desc": ""
},
{
"name": "ClientFlag",
"desc": "a/b"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC "
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"DescribeClientBalance": {
"params": [
{
"name": "ClientUin",
"desc": "()ID"
}
],
"desc": ""
},
"DescribeAgentAuditedClients": {
"params": [
{
"name": "ClientUin",
"desc": "ID"
},
{
"name": "ClientName",
"desc": ""
},
{
"name": "ClientFlag",
"desc": "a/b"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC "
},
{
"name": "ClientUins",
"desc": "ID"
},
{
"name": "HasOverdueBill",
"desc": "01"
},
{
"name": "ClientRemark",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
},
{
"name": "ClientType",
"desc": "new()/assign()/old()/"
},
{
"name": "ProjectType",
"desc": "self()/platform()/repeat( )/"
}
],
"desc": ""
},
"AuditApplyClient": {
"params": [
{
"name": "ClientUin",
"desc": "ID"
},
{
"name": "AuditResult",
"desc": "accept/reject"
},
{
"name": "Note",
"desc": "B"
}
],
"desc": ""
}
}
| 19.049751
| 68
| 0.404022
|
dfbfef0fe41686291ae36ae72197b63006cb0f9c
| 83,387
|
py
|
Python
|
src/main/python/lib/default/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
src/main/python/lib/default/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
src/main/python/lib/default/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
""" The default configuration, from which all others should be derived """
import os, plugins, sandbox, console, rundependent, comparetest, batch, performance, subprocess, operator, logging
from copy import copy
from string import Template
from fnmatch import fnmatch
from threading import Thread
# For back-compatibility
from runtest import RunTest, Running, Killed
from scripts import *
| 52.876982
| 316
| 0.651193
|
dfc3450cc6a455bca7329de3130cbc552b8baa62
| 747
|
py
|
Python
|
2019/10 October/dp10302019.py
|
vishrutkmr7/DailyPracticeProblemsDIP
|
d1bfbc75f2024736c22c05385f753a90ddcfa0f5
|
[
"MIT"
] | 5
|
2019-08-06T02:34:41.000Z
|
2022-01-08T03:03:16.000Z
|
2019/10 October/dp10302019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 15
|
2021-06-01T14:04:16.000Z
|
2022-03-08T21:17:22.000Z
|
2019/10 October/dp10302019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 4
|
2019-09-19T20:00:05.000Z
|
2021-08-16T11:31:51.000Z
|
# This problem was recently asked by LinkedIn:
# Given a non-empty array where each element represents a digit of a non-negative integer, add one to the integer.
# The most significant digit is at the front of the array and each element in the array contains only one digit.
# Furthermore, the integer does not have leading zeros, except in the case of the number '0'.
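# The `Solution` class used below was not captured in this excerpt; a minimal
# sketch of the standard carry-propagation approach (an assumption, not
# necessarily the original implementation):
class Solution:
    def plusOne(self, digits):
        for i in reversed(range(len(digits))):
            if digits[i] < 9:
                digits[i] += 1
                return digits
            digits[i] = 0  # a 9 becomes 0 and the carry moves left
        return [1] + digits  # all digits were 9, e.g. [9, 9] -> [1, 0, 0]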
num = [2, 9, 9]
print(Solution().plusOne(num))
# [3, 0, 0]
| 28.730769
| 114
| 0.619813
|
dfc40c993839966190091cb6ae4333cb9d7b2cc3
| 1,122
|
py
|
Python
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | 1
|
2021-02-02T09:47:40.000Z
|
2021-02-02T09:47:40.000Z
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | 1
|
2021-08-04T13:00:00.000Z
|
2021-08-04T13:00:00.000Z
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
import os
| 23.375
| 123
| 0.632799
|
dfc40d4989f8ef494b36888ba91588827d76ffc5
| 2,614
|
py
|
Python
|
tests/client/test_files.py
|
philopon/datapane
|
d7d69865d4def0cbe6eb334acd9edeb829dd67e6
|
[
"Apache-2.0"
] | 481
|
2020-04-25T05:40:21.000Z
|
2022-03-30T22:04:35.000Z
|
tests/client/test_files.py
|
tig/datapane
|
defae6776e73b07191c0a5804a50b284ec3c9a63
|
[
"Apache-2.0"
] | 74
|
2020-04-28T10:47:35.000Z
|
2022-03-14T15:50:55.000Z
|
tests/client/test_files.py
|
admariner/datapane
|
c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f
|
[
"Apache-2.0"
] | 41
|
2020-07-21T16:30:21.000Z
|
2022-02-21T22:50:27.000Z
|
from pathlib import Path
import altair as alt
import folium
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as p_go
import pytest
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from pandas.io.formats.style import Styler
from datapane.client.api.files import save
data = pd.DataFrame({"x": np.random.randn(20), "y": np.random.randn(20)})
# NOTE - test disabled until pip release of altair_pandas - however should work if altair test passes
# NOTE - test disabled updated pip release of pdvega that tracks git upstream - however should work if altair test passes
def test_save_table(tmp_path: Path):
# tests saving a DF directly to a html file
save(data)
# save styled table
save(Styler(data))
| 26.948454
| 121
| 0.694721
|
dfc53823fb3adccd40e9762c665f5bb3deecbf27
| 95
|
py
|
Python
|
instance/config.py
|
antomuli/News_Highlight
|
9feb33c0a32fa78cd93f5ab2c74942a8ca281701
|
[
"Unlicense"
] | 2
|
2020-03-23T23:16:51.000Z
|
2020-04-26T21:15:11.000Z
|
instance/config.py
|
antomuli/News_Highlight
|
9feb33c0a32fa78cd93f5ab2c74942a8ca281701
|
[
"Unlicense"
] | null | null | null |
instance/config.py
|
antomuli/News_Highlight
|
9feb33c0a32fa78cd93f5ab2c74942a8ca281701
|
[
"Unlicense"
] | null | null | null |
NEWS_API_KEY= '138b22df68394ecbaa9c9af0d0377adb'
SECRET_KEY= 'f9bf78b9a18ce6d46a0cd2b0b86df9da'
| 47.5
| 48
| 0.905263
|
dfc5ea1ec35f681b24bc22174c17b45b8de95235
| 1,417
|
py
|
Python
|
twirp/logging.py
|
batchar2/twirpy
|
e5940a2a038926844098def09748953287071747
|
[
"Unlicense"
] | 51
|
2020-05-23T22:31:53.000Z
|
2022-03-08T19:14:04.000Z
|
twirp/logging.py
|
batchar2/twirpy
|
e5940a2a038926844098def09748953287071747
|
[
"Unlicense"
] | 20
|
2020-05-15T10:20:38.000Z
|
2022-02-06T23:21:56.000Z
|
twirp/logging.py
|
batchar2/twirpy
|
e5940a2a038926844098def09748953287071747
|
[
"Unlicense"
] | 10
|
2020-05-29T09:55:49.000Z
|
2021-10-16T00:14:04.000Z
|
import os
import logging
import sys
import structlog
from structlog.stdlib import LoggerFactory, add_log_level
_configured = False
def configure(force = False):
"""
Configures logging & structlog modules
Keyword Arguments:
force: Force to reconfigure logging.
"""
global _configured
if _configured and not force:
return
# Check whether debug flag is set
debug = os.environ.get('DEBUG_MODE', False)
# Set appropriate log level
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# Set logging config
logging.basicConfig(
level = log_level,
format = "%(message)s",
)
# Configure structlog
structlog.configure(
logger_factory = LoggerFactory(),
processors = [
add_log_level,
# Add timestamp
structlog.processors.TimeStamper('iso'),
# Add stack information
structlog.processors.StackInfoRenderer(),
# Set exception field using exec info
structlog.processors.format_exc_info,
# Render event_dict as JSON
structlog.processors.JSONRenderer()
]
)
_configured = True
def get_logger(**kwargs):
"""
Get the structlog logger
"""
# Configure logging modules
configure()
# Return structlog
return structlog.get_logger(**kwargs)
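# Example usage of the helpers above (a sketch, not part of the module):
#
#   from twirp.logging import get_logger
#   log = get_logger()
#   log.info("request_received", path="/twirp/example.Haberdasher/MakeHat")
#
# `configure()` runs automatically on first use; set the DEBUG_MODE
# environment variable to switch the log level to DEBUG.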
| 22.140625
| 57
| 0.628088
|
dfc68640fe94c25498745f6373d4a8f15e6f9a5f
| 878
|
py
|
Python
|
setup.py
|
Arkq/pyexec
|
ec90b0aaff80996155d033bd722ff59c9259460e
|
[
"MIT"
] | null | null | null |
setup.py
|
Arkq/pyexec
|
ec90b0aaff80996155d033bd722ff59c9259460e
|
[
"MIT"
] | null | null | null |
setup.py
|
Arkq/pyexec
|
ec90b0aaff80996155d033bd722ff59c9259460e
|
[
"MIT"
] | null | null | null |
# setup.py
# Copyright (c) 2015-2017 Arkadiusz Bokowy
#
# This file is a part of pyexec.
#
# This project is licensed under the terms of the MIT license.
from setuptools import setup
import pyexec
with open("README.rst") as f:
long_description = f.read()
setup(
name="pyexec",
version=pyexec.__version__,
author="Arkadiusz Bokowy",
author_email="arkadiusz.bokowy@gmail.com",
url="https://github.com/Arkq/pyexec",
description="Signal-triggered process reloader",
long_description=long_description,
license="MIT",
py_modules=["pyexec"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
| 25.085714
| 62
| 0.654897
|
dfc68f46a49d56c4e9d2e4ea5761354ae3746b5b
| 323
|
py
|
Python
|
tests/func_eqconstr.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | 1
|
2022-03-16T16:37:11.000Z
|
2022-03-16T16:37:11.000Z
|
tests/func_eqconstr.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | null | null | null |
tests/func_eqconstr.py
|
andyjost/Sprite
|
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
|
[
"MIT"
] | null | null | null |
'''Functional tests for the equational constraint.'''
import cytest # from ./lib; must be first
| 32.3
| 96
| 0.693498
|
dfc7144f2268699316911b76b5597b6509452a54
| 4,898
|
py
|
Python
|
data-sources/kbr/authority-persons-marc-to-csv.py
|
kbrbe/beltrans-data-integration
|
951ae3941b22a6fe0a8d30079bdf6f4f0a55f092
|
[
"MIT"
] | null | null | null |
data-sources/kbr/authority-persons-marc-to-csv.py
|
kbrbe/beltrans-data-integration
|
951ae3941b22a6fe0a8d30079bdf6f4f0a55f092
|
[
"MIT"
] | 21
|
2022-02-14T10:58:52.000Z
|
2022-03-28T14:04:40.000Z
|
data-sources/kbr/authority-persons-marc-to-csv.py
|
kbrbe/beltrans-data-integration
|
951ae3941b22a6fe0a8d30079bdf6f4f0a55f092
|
[
"MIT"
] | null | null | null |
#
# (c) 2022 Sven Lieber
# KBR Brussels
#
#import xml.etree.ElementTree as ET
import lxml.etree as ET
import os
import json
import itertools
import enchant
import hashlib
import csv
from optparse import OptionParser
import utils
import stdnum
NS_MARCSLIM = 'http://www.loc.gov/MARC21/slim'
ALL_NS = {'marc': NS_MARCSLIM}
# -----------------------------------------------------------------------------
def addAuthorityFieldsToCSV(elem, writer, natWriter, stats):
"""This function extracts authority relevant data from the given XML element 'elem' and writes it to the given CSV file writer."""
#
# extract relevant data from the current record
#
authorityID = utils.getElementValue(elem.find('./marc:controlfield[@tag="001"]', ALL_NS))
namePerson = utils.getElementValue(elem.find('./marc:datafield[@tag="100"]/marc:subfield[@code="a"]', ALL_NS))
nameOrg = utils.getElementValue(elem.find('./marc:datafield[@tag="110"]/marc:subfield[@code="a"]', ALL_NS))
nationalities = utils.getElementValue(elem.findall('./marc:datafield[@tag="370"]/marc:subfield[@code="c"]', ALL_NS))
gender = utils.getElementValue(elem.find('./marc:datafield[@tag="375"]/marc:subfield[@code="a"]', ALL_NS))
birthDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="f"]', ALL_NS))
deathDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="g"]', ALL_NS))
isniRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and (text()="isni" or text()="ISNI")]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
viafRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and text()="viaf"]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
countryCode = utils.getElementValue(elem.find('./marc:datafield[@tag="043"]/marc:subfield[@code="c"]', ALL_NS))
(familyName, givenName) = utils.extractNameComponents(namePerson)
birthDate = ''
deathDate = ''
datePatterns = ['%Y', '(%Y)', '[%Y]', '%Y-%m-%d', '%d/%m/%Y', '%Y%m%d']
if birthDateRaw:
birthDate = utils.parseDate(birthDateRaw, datePatterns)
if deathDateRaw:
deathDate = utils.parseDate(deathDateRaw, datePatterns)
name = f'{namePerson} {nameOrg}'.strip()
if nationalities:
nationalityURIString = utils.createURIString(nationalities, ';', 'http://id.loc.gov/vocabulary/countries/')
for n in nationalityURIString.split(';'):
natWriter.writerow({'authorityID': authorityID, 'nationality': n})
newRecord = {
'authorityID': authorityID,
'name': name,
'family_name': familyName,
'given_name': givenName,
'gender': gender,
'birth_date': birthDate,
'death_date': deathDate,
'isni_id': utils.extractIdentifier(authorityID, f'ISNI {isniRaw}', pattern='ISNI'),
'viaf_id': utils.extractIdentifier(authorityID, f'VIAF {viafRaw}', pattern='VIAF'),
'country_code': countryCode
}
writer.writerow(newRecord)
# -----------------------------------------------------------------------------
def main():
"""This script reads an XML file in MARC slim format and extracts several fields to create a CSV file."""
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-i', '--input-file', action='store', help='The input file containing MARC SLIM XML records')
parser.add_option('-o', '--output-file', action='store', help='The output CSV file containing selected MARC fields')
parser.add_option('-n', '--nationality-csv', action='store', help='The output CSV file containing the IDs of authorities and their nationality')
(options, args) = parser.parse_args()
#
# Check if we got all required arguments
#
if( (not options.input_file) or (not options.output_file) or (not options.nationality_csv) ):
parser.print_help()
exit(1)
#
# Instead of loading everything to main memory, stream over the XML using iterparse
#
with open(options.output_file, 'w') as outFile, \
open(options.nationality_csv, 'w') as natFile:
stats = {}
outputFields = ['authorityID', 'name', 'family_name', 'given_name', 'gender', 'birth_date', 'death_date', 'isni_id', 'viaf_id', 'country_code']
outputWriter = csv.DictWriter(outFile, fieldnames=outputFields, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
outputWriter.writeheader()
nationalityWriter = csv.DictWriter(natFile, fieldnames=['authorityID', 'nationality'], delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
nationalityWriter.writeheader()
for event, elem in ET.iterparse(options.input_file, events=('start', 'end')):
# The parser finished reading one authority record, get information and then discard the record
if event == 'end' and elem.tag == ET.QName(NS_MARCSLIM, 'record'):
addAuthorityFieldsToCSV(elem, outputWriter, nationalityWriter, stats)
main()
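# Example invocation (derived from the options defined above):
#   python authority-persons-marc-to-csv.py -i authorities-marc.xml \
#       -o authorities.csv -n nationalities.csv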
| 43.732143
| 186
| 0.682115
|
dfc7e5a8bbc57e53f20590d631fe2b87c31a1671
| 3,886
|
py
|
Python
|
promoterz/evaluationPool.py
|
emillj/gekkoJaponicus
|
d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7
|
[
"MIT"
] | null | null | null |
promoterz/evaluationPool.py
|
emillj/gekkoJaponicus
|
d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7
|
[
"MIT"
] | null | null | null |
promoterz/evaluationPool.py
|
emillj/gekkoJaponicus
|
d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7
|
[
"MIT"
] | 1
|
2021-11-29T20:18:25.000Z
|
2021-11-29T20:18:25.000Z
|
#!/bin/python
import time
import random
from multiprocessing import Pool, Process, Pipe, TimeoutError
from multiprocessing.pool import ThreadPool
| 33.213675
| 97
| 0.587494
|
dfc9bea7af7becf02c3cd0e4f00d6640fee9f247
| 3,001
|
py
|
Python
|
website/drawquest/apps/following/models.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 19
|
2015-11-10T17:36:20.000Z
|
2021-04-12T07:36:00.000Z
|
website/drawquest/apps/following/models.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T03:45:34.000Z
|
2021-06-09T03:45:34.000Z
|
website/drawquest/apps/following/models.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 6
|
2015-11-11T00:38:38.000Z
|
2020-07-25T20:10:08.000Z
|
from canvas.cache_patterns import CachedCall
from drawquest import knobs
from drawquest.apps.drawquest_auth.models import User
from drawquest.apps.drawquest_auth.details_models import UserDetails
from drawquest.pagination import FakePaginator
def _paginate(redis_obj, offset, request=None):
'''
items should already start at the proper offset.
'''
if offset == 'top':
items = redis_obj.zrevrange(0, knobs.FOLLOWERS_PER_PAGE, withscores=True)
else:
items = redis_obj.zrevrangebyscore('({}'.format(offset), '-inf',
start=0,
num=knobs.FOLLOWERS_PER_PAGE,
withscores=True)
try:
next_offset = items[-1][1]
        next_offset = repr(next_offset)
except IndexError:
next_offset = None
items = [item for item, ts in items]
pagination = FakePaginator(items, offset=offset, next_offset=next_offset)
return items, pagination
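# `_sorted` and `_for_viewer` are used below but their definitions were lost
# in this excerpt; from the call sites, `_sorted` orders the UserDetails list
# for older clients and `_for_viewer` adapts each entry to the requesting
# viewer.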
def followers(user, viewer=None, offset='top', direction='next', request=None):
""" The users who are following `user`. """
if direction != 'next':
        raise ValueError("Followers only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_followers.zrevrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_followers, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
def following(user, viewer=None, offset='top', direction='next', request=None):
""" The users that `user` is following. """
if direction != 'next':
raise ValueError("Following only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_following.zrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_following, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
| 34.102273
| 92
| 0.65978
|
dfcaf8188821bfe0448579c92b86161cf07a8cb5
| 3,674
|
py
|
Python
|
Python 3/PyGame/Matrix_based_3D/entities.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/PyGame/Matrix_based_3D/entities.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/PyGame/Matrix_based_3D/entities.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | 1
|
2020-08-19T17:25:22.000Z
|
2020-08-19T17:25:22.000Z
|
import numpy as np
| 33.099099
| 106
| 0.459717
|
dfcb6f77810c69d9413f60d8f54a4f595fd87395
| 65
|
py
|
Python
|
pytreearray/multiply.py
|
PhilipVinc/netket_dynamics
|
6e8009098c279271cb0f289ba9e85c039bb284e4
|
[
"Apache-2.0"
] | 2
|
2021-10-02T20:29:44.000Z
|
2021-10-02T20:38:28.000Z
|
pytreearray/multiply.py
|
PhilipVinc/netket_dynamics
|
6e8009098c279271cb0f289ba9e85c039bb284e4
|
[
"Apache-2.0"
] | 11
|
2021-10-01T09:15:06.000Z
|
2022-03-21T09:19:23.000Z
|
pytreearray/multiply.py
|
PhilipVinc/netket_dynamics
|
6e8009098c279271cb0f289ba9e85c039bb284e4
|
[
"Apache-2.0"
] | null | null | null |
from ._src.multiply import multiply_outer as outer # noqa: F401
| 32.5
| 64
| 0.784615
|
dfcda9e0f1ad0a543490dfbdc63f6f36b102ec00
| 1,258
|
py
|
Python
|
setup.py
|
utix/django-json-api
|
938f78f664a4ecbabb9e678595926d1a580f9d0c
|
[
"MIT"
] | 7
|
2021-02-26T14:35:17.000Z
|
2021-02-26T21:21:58.000Z
|
setup.py
|
utix/django-json-api
|
938f78f664a4ecbabb9e678595926d1a580f9d0c
|
[
"MIT"
] | 7
|
2021-02-26T14:44:30.000Z
|
2021-06-02T14:27:17.000Z
|
setup.py
|
utix/django-json-api
|
938f78f664a4ecbabb9e678595926d1a580f9d0c
|
[
"MIT"
] | 1
|
2021-02-26T20:10:42.000Z
|
2021-02-26T20:10:42.000Z
|
#!/usr/bin/env python
from os.path import join
from setuptools import find_packages, setup
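# `requirements_from_pip` is used below but missing from this excerpt; a
# minimal sketch of the usual pattern (an assumption, not the project's
# original helper):
def requirements_from_pip(filename):
    with open(filename) as f:
        return [line.strip() for line in f
                if line.strip() and not line.startswith(("#", "-"))]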
# DEPENDENCIES
core_deps = requirements_from_pip("requirements.txt")
dev_deps = requirements_from_pip("requirements_dev.txt")
# DESCRIPTION
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
author="Sharework",
author_email="root@sharework.co",
description="JSON API specification for Django services",
extras_require={"all": dev_deps, "dev": dev_deps},
install_requires=core_deps,
long_description=long_description,
long_description_content_type="text/markdown",
name="django-json-api",
package_data={"django_json_api": ["resources/VERSION"]},
packages=find_packages(),
python_requires=">=3.8",
url="https://github.com/share-work/django-json-api",
version=open(join("django_json_api", "resources", "VERSION")).read().strip(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 29.952381
| 90
| 0.692369
|
dfcf9bc6b50b9274d2e45ff7e0b6d1af9920cab0
| 1,632
|
py
|
Python
|
youtube_dl/extractor/businessinsider.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 16
|
2020-12-01T15:26:58.000Z
|
2022-02-24T23:12:14.000Z
|
youtube_dl/extractor/businessinsider.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 5
|
2021-02-20T10:30:00.000Z
|
2021-06-01T21:12:31.000Z
|
youtube_dl/extractor/businessinsider.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 7
|
2020-12-01T15:27:04.000Z
|
2022-01-09T23:21:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .jwplatform import JWPlatformIE
| 37.953488
| 112
| 0.571078
|
dfcfb445e47c75ccbf0dd0f1527b09b9571a8702
| 578
|
py
|
Python
|
map_house.py
|
renankalfa/python-0-ao-Data_Scientist
|
2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88
|
[
"MIT"
] | 1
|
2022-03-27T23:55:37.000Z
|
2022-03-27T23:55:37.000Z
|
map_house.py
|
renankalfa/python-0-ao-Data_Scientist
|
2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88
|
[
"MIT"
] | null | null | null |
map_house.py
|
renankalfa/python-0-ao-Data_Scientist
|
2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88
|
[
"MIT"
] | null | null | null |
import plotly.express as px
import pandas as pd
data = pd.read_csv('kc_house_data.csv')
data_mapa = data[['id', 'lat', 'long', 'price']]
grafico1 = px.scatter_mapbox(data_mapa, lat='lat', lon='long',
hover_name='id', hover_data=['price'],
color_discrete_sequence=['fuchsia'],
zoom=3, height=300)
grafico1.update_layout(mapbox_style='open-street-map')
grafico1.update_layout(height=600, margin={'r': 0, 't': 0, 'l': 0, 'b': 0})
grafico1.show()
grafico1.write_html('map_house_rocket.html')
| 36.125
| 75
| 0.610727
|