| hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses 11 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-251) | max_stars_repo_name (stringlengths 4-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-251) | max_issues_repo_name (stringlengths 4-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-251) | max_forks_repo_name (stringlengths 4-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 1-1.05M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.04M) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ef1beeeb227406f72c9053a339254f85199fda6b
| 2,062
|
py
|
Python
|
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
app/app.py
|
tigpt/docker-flask-postgres
|
ba0b192afe77e6946c8e49574def3533ea0f1181
|
[
"MIT"
] | null | null | null |
from elasticapm.contrib.flask import ElasticAPM
import os
from flask import Flask, request, render_template
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
APP = Flask(__name__)
APP.config['ELASTIC_APM'] = {
}
apm = ElasticAPM(APP)
APP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
APP.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://%s:%s@%s/%s' % (
# ARGS.dbuser, ARGS.dbpass, ARGS.dbhost, ARGS.dbname
os.environ['DBUSER'], os.environ['DBPASS'], os.environ['DBHOST'], os.environ['DBNAME']
)
# initialize the database connection
DB = SQLAlchemy(APP)
# initialize database migration management
MIGRATE = Migrate(APP, DB)
from models import *
# bad query
# error message
# Error
# Unhandled error
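# The stripped comments above ("bad query", "unhandled error") suggest demo routes that
# exercise Elastic APM error capture. A minimal, hypothetical sketch of such routes is
# given below; the route names and queries are illustrative assumptions, not part of the
# original app.
from sqlalchemy import text

@APP.route('/bad-query')
def bad_query():
    # Querying a table that does not exist raises an error that the APM agent records.
    DB.session.execute(text('SELECT * FROM no_such_table'))
    return 'unreachable'

@APP.route('/unhandled-error')
def unhandled_error():
    # An uncaught exception is reported to APM as an unhandled error.
    raise RuntimeError('demo unhandled error')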
| 25.775
| 90
| 0.70805
|
ef1c14040a2c37814d24485011b2191f84d572dc
| 325
|
py
|
Python
|
pytify/strategy.py
|
EngineeringIsLife/Pytify
|
ae9a351144cb8f5556740d33cdf29073ffd2dc1e
|
[
"MIT"
] | null | null | null |
pytify/strategy.py
|
EngineeringIsLife/Pytify
|
ae9a351144cb8f5556740d33cdf29073ffd2dc1e
|
[
"MIT"
] | null | null | null |
pytify/strategy.py
|
EngineeringIsLife/Pytify
|
ae9a351144cb8f5556740d33cdf29073ffd2dc1e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sys import platform
| 21.666667
| 58
| 0.630769
|
ef1e04b7ef6eaf43f6fa7d6f871605144e4d447e
| 8,836
|
py
|
Python
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 2
|
2015-04-11T12:22:41.000Z
|
2016-08-18T11:12:06.000Z
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 84
|
2015-01-22T14:33:49.000Z
|
2015-04-01T23:15:29.000Z
|
scrapers/meetings/fetch_meetings.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 1
|
2015-04-16T03:10:39.000Z
|
2015-04-16T03:10:39.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime
import logging
import os.path
import requests
import time
import urllib
from bs4 import BeautifulSoup
from utils import mongo
| 46.26178
| 175
| 0.563943
|
ef20178603cd20e2dd144ff595f24f1bbc671045
| 282
|
py
|
Python
|
django_mediamosa/templatetags/mediamosa_extras.py
|
UGentPortaal/django-mediamosa
|
553a725cd02e8dd2489bf25a613c9b98155cf90d
|
[
"BSD-3-Clause"
] | null | null | null |
django_mediamosa/templatetags/mediamosa_extras.py
|
UGentPortaal/django-mediamosa
|
553a725cd02e8dd2489bf25a613c9b98155cf90d
|
[
"BSD-3-Clause"
] | null | null | null |
django_mediamosa/templatetags/mediamosa_extras.py
|
UGentPortaal/django-mediamosa
|
553a725cd02e8dd2489bf25a613c9b98155cf90d
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
register = template.Library()
| 21.692308
| 60
| 0.695035
|
ef21cfd36477df2859e374f71d6a0bbf86ff8519
| 561
|
py
|
Python
|
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | null | null | null |
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | 9
|
2018-09-17T20:50:43.000Z
|
2018-12-07T21:19:56.000Z
|
tests/settings.py
|
managedbyq/mbq.atomiq
|
23edd33e8b958cfd9257ea62a107d8bb793ff3b9
|
[
"Apache-2.0"
] | null | null | null |
import os
import boto3
import dj_database_url
from mbq import env, metrics
SECRET_KEY = 'fake-key'
DEBUG = True
ATOMIQ = {
'env': 'Test',
'service': 'test-service',
}
database_url = os.environ.get('DATABASE_URL', 'mysql://root:@mysql:3306/atomiqdb')
DATABASES = {
'default': dj_database_url.parse(database_url),
}
INSTALLED_APPS = [
'mbq.atomiq',
]
USE_TZ = True
boto3.setup_default_session(
region_name='us-east-1',
)
ENV = env.get_environment("ENV_NAME")
metrics.init('mbq.atomiq', env=ENV, constant_tags={"env": ENV.long_name})
| 16.5
| 82
| 0.695187
|
ef246213ff135ecbc464dc2dd429de5edde34475
| 720
|
py
|
Python
|
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | 2
|
2018-02-26T10:00:29.000Z
|
2018-03-16T11:39:34.000Z
|
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | null | null | null |
backend/util.py
|
ahangchen/Rasp-Person-Sensor
|
77d0e41b1a80cf9012f66c7bd44f062edbc6825d
|
[
"MIT"
] | null | null | null |
import json
import requests
| 27.692308
| 83
| 0.708333
|
ef249d4819e51ded253cba64970d4792e29e13ee
| 4,761
|
py
|
Python
|
hard-gists/2338529/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/2338529/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/2338529/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
"""
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
"""
from tempfile import NamedTemporaryFile as namedtmp
import time
from M2Crypto import X509, EVP, RSA, ASN1
__author__ = 'eskil@yelp.com'
__all__ = ['mk_temporary_cacert', 'mk_temporary_cert']
def mk_ca_issuer():
"""
Our default CA issuer name.
"""
issuer = X509.X509_Name()
issuer.C = "US"
issuer.CN = "ca_testing_server"
issuer.ST = 'CA'
issuer.L = 'San Francisco'
issuer.O = 'ca_yelp'
issuer.OU = 'ca_testing'
return issuer
def mk_cert_valid(cert, days=365):
"""
Make a cert valid from now until 'days' from now.
Args:
cert -- cert to make valid
days -- number of days cert is valid for from now.
"""
t = long(time.time())
now = ASN1.ASN1_UTCTIME()
now.set_time(t)
expire = ASN1.ASN1_UTCTIME()
expire.set_time(t + days * 24 * 60 * 60)
cert.set_not_before(now)
cert.set_not_after(expire)
def mk_request(bits, cn='localhost'):
"""
Create an X509 request with the given number of bits in the key.
Args:
bits -- number of RSA key bits
cn -- common name in the request
Returns a X509 request and the private key (EVP)
"""
pk = EVP.PKey()
x = X509.Request()
rsa = RSA.gen_key(bits, 65537, lambda: None)
pk.assign_rsa(rsa)
x.set_pubkey(pk)
name = x.get_subject()
name.C = "US"
name.CN = cn
name.ST = 'CA'
name.O = 'yelp'
name.OU = 'testing'
x.sign(pk,'sha1')
return x, pk
def mk_cacert():
"""
Make a CA certificate.
Returns the certificate, private key and public key.
"""
req, pk = mk_request(1024)
pkey = req.get_pubkey()
cert = X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
mk_cert_valid(cert)
cert.set_issuer(mk_ca_issuer())
cert.set_subject(cert.get_issuer())
cert.set_pubkey(pkey)
cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))
cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))
cert.sign(pk, 'sha1')
return cert, pk, pkey
def mk_cert():
"""
Make a certificate.
Returns a new cert.
"""
cert = X509.X509()
cert.set_serial_number(2)
cert.set_version(2)
mk_cert_valid(cert)
cert.add_ext(X509.new_extension('nsComment', 'SSL server'))
return cert
def mk_casigned_cert():
"""
Create a CA cert + server cert + server private key.
"""
# unused, left for history.
cacert, pk1, _ = mk_cacert()
cert_req, pk2 = mk_request(1024, cn='testing_server')
cert = mk_cert()
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
cert.sign(pk1, 'sha1')
return cacert, cert, pk2
def mk_temporary_cacert():
"""
Create a temporary CA cert.
Returns a tuple of NamedTemporaryFiles holding the CA cert and private key.
"""
cacert, pk1, pkey = mk_cacert()
cacertf = namedtmp()
cacertf.write(cacert.as_pem())
cacertf.flush()
pk1f = namedtmp()
pk1f.write(pk1.as_pem(None))
pk1f.flush()
return cacertf, pk1f
def mk_temporary_cert(cacert_file, ca_key_file, cn):
"""
Create a temporary certificate signed by the given CA, and with the given common name.
If cacert_file and ca_key_file are None, the certificate will be self-signed.
Args:
cacert_file -- file containing the CA certificate
ca_key_file -- file containing the CA private key
cn -- desired common name
Returns a namedtemporary file with the certificate and private key
"""
cert_req, pk2 = mk_request(1024, cn=cn)
if cacert_file and ca_key_file:
cacert = X509.load_cert(cacert_file)
pk1 = EVP.load_key(ca_key_file)
else:
cacert = None
pk1 = None
cert = mk_cert()
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
if cacert and pk1:
cert.set_issuer(cacert.get_issuer())
cert.sign(pk1, 'sha1')
else:
cert.set_issuer(cert.get_subject())
cert.sign(pk2, 'sha1')
certf = namedtmp()
certf.write(cert.as_pem())
certf.write(pk2.as_pem(None))
certf.flush()
return certf
if __name__ == '__main__':
cacert, cert, pk = mk_casigned_cert()
with open('cacert.crt', 'w') as f:
f.write(cacert.as_pem())
with open('cert.crt', 'w') as f:
f.write(cert.as_pem())
f.write(pk.as_pem(None))
# Sanity checks...
cac = X509.load_cert('cacert.crt')
print cac.verify(), cac.check_ca()
cc = X509.load_cert('cert.crt')
print cc.verify(cac.get_pubkey())
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
| 23.924623
| 87
| 0.710985
|
ef25471191ad1db593810b69150f45edb9dc331e
| 2,615
|
py
|
Python
|
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | 2
|
2021-08-03T17:32:09.000Z
|
2021-08-03T18:28:31.000Z
|
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | null | null | null |
WickContractions/ops/indexed.py
|
chrisculver/WickContractions
|
a36af32bdd049789faf42d24d168c4073fc45ed0
|
[
"MIT"
] | null | null | null |
from collections import deque
| 30.406977
| 111
| 0.549522
|
ef25c53ea4c0fb58041ed1cd6cded53b4e340d23
| 10,942
|
py
|
Python
|
v0/aia_eis_v0/ml_sl/rf/dt_main.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | 1
|
2022-03-02T12:57:19.000Z
|
2022-03-02T12:57:19.000Z
|
v0/aia_eis_v0/ml_sl/rf/dt_main.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | null | null | null |
v0/aia_eis_v0/ml_sl/rf/dt_main.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | null | null | null |
import copy
from utils.file_utils.dataset_reader_pack.ml_dataset_reader import get_TV_T_dataset, get_T_V_T_dataset
from ml_sl.rf.dt_0 import Node, save_node, load_node
from ml_sl.ml_data_wrapper import pack_list_2_list, single_point_list_2_list, reform_labeled_dataset_list
from ml_sl.ml_data_wrapper import split_labeled_dataset_list
from utils.file_utils.filename_utils import get_date_prefix
from ml_sl.ml_critrions import cal_accuracy, cal_kappa, cal_accuracy_on_2, cal_accuracy_on_3
label_list = [2, 4, 5, 6, 7, 8, 9]
# Import dataset (Training, validation, Test)
ml_dataset_pickle_file_path = '../../datasets/ml_datasets/normed'
tr_dataset, va_dataset, te_dataset = get_T_V_T_dataset(file_path=ml_dataset_pickle_file_path)
tr_va_dataset, test_dataset = get_TV_T_dataset(file_path=ml_dataset_pickle_file_path)
tr_label_list, tr_data_list = split_labeled_dataset_list(tr_dataset)
va_label_list, va_data_list = split_labeled_dataset_list(va_dataset)
tr_va_label_list, tr_va_data_list = split_labeled_dataset_list(tr_va_dataset)
te_label_list, te_data_list = split_labeled_dataset_list(te_dataset)
# --------------------- 1-No Pruning ---------------------
#------------- Train on tr, tested on va #-------------
# acc,kappa = dt_no_pruning(training_dataset=tr_dataset, validation_dataset=[], test_dataset=tr_dataset)
# print(acc,kappa) # --> 1.0 1.0
#------------- Train on tr, tested on va #-------------
# if __name__ == '__main__':
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# Running condition-1
# acc, kappa = dt_no_pruning(training_dataset, validation_dataset, test_dataset)
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
# Running condition-2
# acc, kappa = dt_no_pruning(training_dataset, validation_dataset=[], test_dataset=validation_dataset)
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
"""
Running condition-1
Train on [Training+validation]-dataset
Test on test-dataset
1-Accuracy: 0.45054945054945056, Kappa: 0.3173293323330833
2-Accuracy: 0.45054945054945056, Kappa: 0.3173293323330833
Running condition-2
Train on [Training]-dataset
Test on validation-dataset
1-Accuracy: 0.5319148936170213, Kappa: 0.42762247439800716
2-Accuracy: 0.5319148936170213, Kappa: 0.42762247439800716
"""
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# load_dt_no_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2,4,5,6,7,8,9])
# Decision Tree with no pruning: Accuracy on 1 = 0.4945054945054945, Accuracy on 2 = 0.5164835164835165,
# Accuracy on 3 = 0.6923076923076923, Kappa=0.3706209592542475
# --------------------- 1-No Pruning ---------------------
"""
EA-Revise
EA-Revise, DTGS no pruning / posterior pruning,DTGSFinal res
DT final config no pruning tr+va te
"""
# dtFinalRes()
"""
node = pickle.load(file) ModuleNotFoundError: No module named 'ml_sl'
Final res:
trVaAcc=0.9163568773234201, trVaKappa=0.897055384288296, trVaAK=1.813412261611716,
teAcc=0.4945054945054945, teKappa=0.3706209592542475, teAK=0.8651264537597421
"""
# --------------------- 2-Pruning ---------------------
# if __name__ == '__main__':
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# acc, kappa = dt_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2, 4, 5, 6, 7, 8, 9])
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
"""
1- Accuracy: 0.4835164835164835, Kappa: 0.3591549295774648
2- Accuracy: 0.4835164835164835, Kappa: 0.3591549295774648
"""
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# load_dt_pruning(test_dataset, label_list=[2,4,5,6,7,8,9])
# Decision Tree with pruning: Accuracy on 1 = 0.4835164835164835, Accuracy on 2 = 0.5054945054945055,
# Accuracy on 3 = 0.6703296703296703, Kappa = 0.3591549295774648
# --------------------- 2-Pruning ---------------------
| 46.961373
| 123
| 0.732681
|
ef29d7cb4df5849c15653808babb4473a2403757
| 874
|
py
|
Python
|
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
from SAGIRIBOT.basics.aio_mysql_excute import execute_sql
| 31.214286
| 105
| 0.662471
|
ef2afd3b3d3cc23390816b111f6a8ec32454a594
| 486
|
py
|
Python
|
setup.py
|
fmaida/caro-diario
|
adc5018f2ef716b49db39aa9189ab1e803fcd357
|
[
"MIT"
] | null | null | null |
setup.py
|
fmaida/caro-diario
|
adc5018f2ef716b49db39aa9189ab1e803fcd357
|
[
"MIT"
] | null | null | null |
setup.py
|
fmaida/caro-diario
|
adc5018f2ef716b49db39aa9189ab1e803fcd357
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'caro-diario',
packages = ['caro-diario'], # this must be the same as the name above
version = '0.1',
description = 'Diario',
author = 'Francesco Maida',
author_email = 'francesco.maida@gmail.com',
url = 'https://github.com/fmaida/caro-diario.git', # use the URL to the github repo
download_url = '', # I'll explain this in a second
keywords = ['diario', 'logging', 'esempio'], # arbitrary keywords
classifiers = [],
)
| 34.714286
| 85
| 0.67284
|
ef2c168f7b4d969663dc1ed93f01785a68c36dd1
| 3,695
|
py
|
Python
|
cVQE/operators/converters/tensoredop_distributor.py
|
gblazq/cVQE
|
5a566103c35696ec0cf2b016c38d71de696e0e29
|
[
"Apache-2.0"
] | 1
|
2021-09-16T12:43:21.000Z
|
2021-09-16T12:43:21.000Z
|
cVQE/operators/converters/tensoredop_distributor.py
|
gblazq/cVQE
|
5a566103c35696ec0cf2b016c38d71de696e0e29
|
[
"Apache-2.0"
] | null | null | null |
cVQE/operators/converters/tensoredop_distributor.py
|
gblazq/cVQE
|
5a566103c35696ec0cf2b016c38d71de696e0e29
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Guillermo Blázquez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from qiskit.aqua.operators.converters import ConverterBase
from qiskit.aqua.operators.list_ops import TensoredOp, SummedOp
from qiskit.aqua.operators.primitive_ops import PauliOp
| 43.988095
| 131
| 0.677402
|
ef313c50d5c6317ec48b8b4af0c2b6702fb01991
| 8,027
|
py
|
Python
|
tests/test_core_deformation.py
|
matmodlab/matmodlab2
|
97bb858e2b625cca5f3291db5d50bdbb6352e976
|
[
"BSD-3-Clause"
] | 6
|
2017-02-14T02:04:56.000Z
|
2022-02-03T04:53:32.000Z
|
tests/test_core_deformation.py
|
tjfulle/matmodlab2
|
97bb858e2b625cca5f3291db5d50bdbb6352e976
|
[
"BSD-3-Clause"
] | 10
|
2017-01-21T00:00:06.000Z
|
2017-01-22T07:39:44.000Z
|
tests/test_core_deformation.py
|
tjfulle/matmodlab2
|
97bb858e2b625cca5f3291db5d50bdbb6352e976
|
[
"BSD-3-Clause"
] | 3
|
2018-10-20T22:53:59.000Z
|
2022-01-13T07:17:24.000Z
|
# -*- coding: utf-8 -*-
"""
This file contains tests for tensor.py
"""
import sys
import pathlib
import pytest
import numpy as np
from testing_utils import isclose
# Ensure that 'matmodlab2' is imported from parent directory.
sys.path.insert(0, str(pathlib.Path(__file__).absolute().parent.parent))
import matmodlab2
import matmodlab2.core.deformation as df
deformation_measures_db = [
{"name": "Uniaxial Extension",
"eps": np.array([0.042857142857142857143,0,0,0,0,0]),
"depsdt": np.array([0.10000000000000000000,0,0,0,0,0]),
"subtests": [
{
"k": 2,
"u": np.array([1.0419761445034553738,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.095971486993739310740,0,0,0,0,0]),
"d": np.array([0.092105263157894736842,0,0,0,0,0]),
},
{
"k": 1,
"u": np.array([1.0428571428571428571,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10000000000000000000,0,0,0,0,0]),
"d": np.array([0.095890410958904109589,0,0,0,0,0]),
},
{
"k": 0,
"u": np.array([1.0437887715175541853,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10437887715175541853,0,0,0,0,0]),
"d": np.array([0.10000000000000000000,0,0,0,0,0]),
},
{
"k": -1,
"u": np.array([1.0447761194029850746,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10915571396747605257,0,0,0,0,0]),
"d": np.array([0.10447761194029850746,0,0,0,0,0]),
},
{
"k": -2,
"u": np.array([1.0458250331675944350,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.11438711300270564133,0,0,0,0,0]),
"d": np.array([0.10937500000000000000,0,0,0,0,0]),
},
],
},
{"name": "Uniaxial Extension with rotation",
"eps": np.array([0.026196877156206737235,0.016660265700936119908,0,0.020891312403896220150,0,0]),
"depsdt": np.array([-0.0045059468741139683829,0.10450594687411396838,0,0.063726469853100399588,0,0]),
"subtests": [
{
"k": 2,
"u": np.array([1.0256583576911247384,1.0163177868123306353,1.0000000000000000000,0.020461857461098139159,0,0]),
"dudt": np.array([-0.0056192451222061811013,0.10159073211594549184,0,0.061454775148472809312,0,0]),
"d": np.array([-0.0066876940055755266344,0.098792957163470263477,0,0.059274595960483676859,0,0]),
},
{
"k": 1,
"u": np.array([1.0261968771562067372,1.0166602657009361199,1.0000000000000000000,0.020891312403896220150,0,0]),
"dudt": np.array([-0.0045059468741139683829,0.10450594687411396838,0,0.063726469853100399588,0,0]),
"d": np.array([-0.0056693735828201687630,0.10155978454172427835,0,0.061415383576480024658,0,0]),
},
{
"k": 0,
"u": np.array([1.0267663449262200007,1.0170224265913341846,1.0000000000000000000,0.021345447796308002806,0,0]),
"dudt": np.array([-0.0032560207940279426371,0.10763489794578336117,0,0.066186651517750065998,0,0]),
"d": np.array([-0.0045260401459293278687,0.10452604014592932787,0,0.063731056011271402912,0,0]),
},
{
"k": -1,
"u": np.array([1.0273698716557383822,1.0174062477472466924,1.0000000000000000000,0.021826744302578140456,0,0]),
"dudt": np.array([-0.0018481668596687927090,0.11100388082714484528,0,0.068860299997538432155,0,0]),
"d": np.array([-0.0032383326989564664762,0.10771594463925497394,0,0.066244519079865882721,0,0]),
},
{
"k": -2,
"u": np.array([1.0280110311733133167,1.0178140019942811183,1.0000000000000000000,0.022338051955872830687,0,0]),
"dudt": np.array([-0.00025673980976010909772,0.11464385281246575042,0,0.071777050608761226760,0,0]),
"d": np.array([-0.0017829682784827673453,0.11115796827848276735,0,0.068982906840537447349,0,0]),
},
],
},
]
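# The tests below call vec_isclose(), which is not included in this excerpt. A plausible
# stand-in, assuming it compares two vectors component-wise with the imported isclose()
# and reports the label on mismatch, is sketched here.
def vec_isclose(name, a, b):
    """Return True if vectors a and b agree component-wise within isclose() tolerance."""
    for i, (x, y) in enumerate(zip(a, b)):
        if not isclose(x, y):
            print("{0} mismatch at component {1}: {2} != {3}".format(name, i, x, y))
            return False
    return True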
def test_deformation_measures_from_strain_dissertation_test():
""" Verify that we are converting from strain to D correctly. """
a = 0.5
t = 0.1
# Setup (inputs)
st = np.sin(np.pi * t)
ct = np.cos(np.pi * t)
sht = np.sinh(a * t)
eat = np.exp(a * t)
eps = np.array([a * t * np.cos(np.pi * t / 2.0) ** 2,
a * t * np.sin(np.pi * t / 2.0) ** 2,
0.0,
a * t * np.sin(np.pi * t) / 2.0,
0.0, 0.0])
depsdt = np.array([a / 2.0 * (1.0 + ct - np.pi * t * st),
a / 2.0 * (1.0 - ct + np.pi * t * st),
0.0,
a / 2.0 * (np.pi * t * ct + st),
0.0, 0.0])
# Setup (expected outputs)
d_g = np.array([(a + a * ct - np.pi * st * sht) / 2.0,
(a - a * ct + np.pi * st * sht) / 2.0,
0.0,
(a * st + np.pi * ct * sht) / 2.0,
0.0, 0.0])
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, 0)
assert vec_isclose("D", d, d_g)
# Teardown
pass
def test_deformation_measures_from_strain_dissertation_static():
""" Verify that we are converting from strain to D correctly. """
# Setup (inputs)
eps=np.array([2.6634453918413015230,0.13875241035650067478,0,0.60791403008229297100,0,0])
depsdt=np.array([-0.66687706806142212351,1.9745693757537298158,0,4.2494716756395844993,0,0])
# Setup (expected outputs)
d_g=np.array([-4.3525785227788080461,5.6602708304711157384,0,11.902909607738023219,0,0])
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, 0)
assert vec_isclose("D", d, d_g)
# Teardown
pass
| 38.042654
| 123
| 0.590133
|
ef3678c7e21e6c165bc6c6b597bc9cfc9cfa52bc
| 10,380
|
py
|
Python
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 97
|
2018-01-15T19:29:31.000Z
|
2022-03-11T00:27:34.000Z
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 10
|
2018-01-15T22:44:55.000Z
|
2022-02-18T09:44:10.000Z
|
examples/tutorial/example4.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 33
|
2018-01-15T19:34:23.000Z
|
2022-03-05T22:39:33.000Z
|
from trumania.core import circus
import trumania.core.population as population
import trumania.core.random_generators as gen
import trumania.core.operations as ops
import trumania.core.story as story
import trumania.components.time_patterns.profilers as profilers
import trumania.core.util_functions as util_functions
import trumania.components.db as DB
import pandas as pd
# each step?() function below implements one step of the fourth example of the
# tutorial documented at
# https://realimpactanalytics.atlassian.net/wiki/display/LM/Data+generator+tutorial
# this is essentially a modification of example3, with some supplementary
# features demonstrating persistence
if __name__ == "__main__":
util_functions.setup_logging()
step2()
| 34.832215
| 87
| 0.657225
|
ef377a0c8139bd037fffc10567802d319f904716
| 1,104
|
py
|
Python
|
Hackerrank/Python/class-1-dealing-with-complex-numbers.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | 1
|
2021-01-10T13:29:21.000Z
|
2021-01-10T13:29:21.000Z
|
Hackerrank/Python/class-1-dealing-with-complex-numbers.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | null | null | null |
Hackerrank/Python/class-1-dealing-with-complex-numbers.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | null | null | null |
import math
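# The script below uses a Complex class that is not part of this excerpt. A minimal,
# hypothetical implementation with the arithmetic and mod() operations the script expects
# is sketched here; the two-decimal output format is an assumption.
class Complex:
    def __init__(self, real, imaginary):
        self.real = real
        self.imaginary = imaginary

    def __add__(self, other):
        return Complex(self.real + other.real, self.imaginary + other.imaginary)

    def __sub__(self, other):
        return Complex(self.real - other.real, self.imaginary - other.imaginary)

    def __mul__(self, other):
        return Complex(self.real * other.real - self.imaginary * other.imaginary,
                       self.real * other.imaginary + self.imaginary * other.real)

    def __truediv__(self, other):
        denom = other.real ** 2 + other.imaginary ** 2
        return Complex((self.real * other.real + self.imaginary * other.imaginary) / denom,
                       (self.imaginary * other.real - self.real * other.imaginary) / denom)

    def mod(self):
        # modulus, returned as a Complex with zero imaginary part
        return Complex(math.sqrt(self.real ** 2 + self.imaginary ** 2), 0)

    def __str__(self):
        sign = '+' if self.imaginary >= 0 else '-'
        return '%.2f%s%.2fi' % (self.real, sign, abs(self.imaginary))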
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| 33.454545
| 130
| 0.588768
|
ef3d18dad9fb4f3ea7850ca0af729153b0fd6bb6
| 1,828
|
py
|
Python
|
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | 1
|
2019-10-22T11:33:23.000Z
|
2019-10-22T11:33:23.000Z
|
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | null | null | null |
hyperparameter_tuner/run_command_generator.py
|
chutien/zpp-mem
|
470dec89dda475f7272b876f191cef9f8266a6dc
|
[
"MIT"
] | null | null | null |
from itertools import product
from hyperparameter_tuner.single_parameter_generator import single_parameter_generator as sgen
if __name__ == '__main__':
commands = default_commands_generator()
for c in commands:
print(c)
| 46.871795
| 116
| 0.650438
|
ef3d7706ee027142a3cc848598e7a4e1a2e3f600
| 1,718
|
py
|
Python
|
utils/storage/redisPSCO/python/storage/storage_object.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | 3
|
2018-03-05T14:52:22.000Z
|
2019-02-08T09:58:24.000Z
|
utils/storage/redisPSCO/python/storage/storage_object.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | null | null | null |
utils/storage/redisPSCO/python/storage/storage_object.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2017 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''Redis Storage Object implementation for the PyCOMPSs Python Binding
@author: srodrig1
'''
import uuid
import storage.api
'''Add support for camelCase
'''
StorageObject = storage_object
| 28.163934
| 75
| 0.679278
|
ef3ec4855031980afb1650987b97c64ce63c1807
| 5,476
|
py
|
Python
|
origin_response_test.py
|
dnsinogeorgos/lambdas
|
4294089b311585c18e101e776aa2e8ca211413cd
|
[
"Apache-2.0"
] | null | null | null |
origin_response_test.py
|
dnsinogeorgos/lambdas
|
4294089b311585c18e101e776aa2e8ca211413cd
|
[
"Apache-2.0"
] | null | null | null |
origin_response_test.py
|
dnsinogeorgos/lambdas
|
4294089b311585c18e101e776aa2e8ca211413cd
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=C0114
import unittest
from origin_response import lambda_handler
event = {
"Records": [
{
"cf": {
"config": {"requestId": "thisfakeidisthisfakeidisthisfakeidis"},
"request": {"uri": ""},
"response": {"headers": {}, "status": 0},
}
}
]
}
if __name__ == "__main__":
unittest.main()
| 58.255319
| 974
| 0.626004
|
ef3f29141380c4970504779ca0adbe37edfcc48e
| 377
|
py
|
Python
|
lang/Python/abstract-type-2.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/abstract-type-2.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/abstract-type-2.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
| 17.136364
| 39
| 0.594164
|
ef4114aeaf1e0c3215bf5aee9d278bc0e2171dca
| 338
|
py
|
Python
|
apps/permissions/router.py
|
yhkl-dev/JAutoOps
|
e42342fc6d814813dcac2e0154cd5dfdc1adf4c1
|
[
"MIT"
] | null | null | null |
apps/permissions/router.py
|
yhkl-dev/JAutoOps
|
e42342fc6d814813dcac2e0154cd5dfdc1adf4c1
|
[
"MIT"
] | null | null | null |
apps/permissions/router.py
|
yhkl-dev/JAutoOps
|
e42342fc6d814813dcac2e0154cd5dfdc1adf4c1
|
[
"MIT"
] | null | null | null |
from rest_framework.routers import DefaultRouter
from .views import PermissionsViewset, GroupPermissionsViewset
permission_router = DefaultRouter()
permission_router.register(r'permissions', PermissionsViewset, basename="permissions")
permission_router.register(r'grouppermissions', GroupPermissionsViewset, basename="grouppermissions")
| 48.285714
| 101
| 0.866864
|
ef41254ab69ff27661576195222b554a1c94e4da
| 6,158
|
py
|
Python
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 90
|
2016-01-29T15:09:21.000Z
|
2022-03-08T15:08:57.000Z
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 27
|
2016-01-14T10:30:10.000Z
|
2022-03-24T08:00:31.000Z
|
src/inscriptis/model/canvas/__init__.py
|
rlskoeser/inscriptis
|
e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb
|
[
"Apache-2.0"
] | 20
|
2016-01-14T12:50:55.000Z
|
2022-03-04T07:26:30.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""Classes used for rendering (parts) of the canvas.
Every parsed :class:`~inscriptis.model.html_element.HtmlElement` writes its
textual content to the canvas which is managed by the following three classes:
- :class:`Canvas` provides the drawing board on which the HTML page is
serialized and annotations are recorded.
- :class:`~inscriptis.model.canvas.block.Block` contains the current line to
which text is written.
- :class:`~inscriptis.model.canvas.prefix.Prefix` handles indentation
and bullets that prefix a line.
"""
from inscriptis.annotation import Annotation
from inscriptis.html_properties import WhiteSpace, Display
from inscriptis.model.canvas.block import Block
from inscriptis.model.html_element import HtmlElement
from inscriptis.model.canvas.prefix import Prefix
| 38.248447
| 79
| 0.636733
|
ef4351fb100c957415ebe720f79b5a02ebc2c300
| 9,324
|
py
|
Python
|
tests/webtests/test_admin.py
|
zodman/ZoomFoundry
|
87a69f519a2ab6b63aeec0a564ce41259e64f88d
|
[
"MIT"
] | 8
|
2017-04-10T09:53:15.000Z
|
2020-08-16T09:53:14.000Z
|
tests/webtests/test_admin.py
|
zodman/ZoomFoundry
|
87a69f519a2ab6b63aeec0a564ce41259e64f88d
|
[
"MIT"
] | 49
|
2017-04-13T22:51:48.000Z
|
2019-08-15T22:53:25.000Z
|
tests/webtests/test_admin.py
|
zodman/ZoomFoundry
|
87a69f519a2ab6b63aeec0a564ce41259e64f88d
|
[
"MIT"
] | 12
|
2017-04-11T04:16:47.000Z
|
2019-08-10T21:41:54.000Z
|
# -*- coding: utf-8 -*-
"""
zoom.tests.webdriver_tests.test_admin
test admin app functions
"""
from zoom.testing.webtest import AdminTestCase
| 30.976744
| 88
| 0.569284
|
ef44efdf1df1a7a380310f517a87f13a57e2f804
| 1,832
|
py
|
Python
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 27
|
2021-07-14T17:12:29.000Z
|
2022-03-18T16:15:18.000Z
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 3
|
2021-08-29T11:22:04.000Z
|
2022-02-16T23:20:04.000Z
|
server/app.py
|
Catsvilles/Lofi
|
f3a783a5ba3e80e6c8f958990f6f09767d25a48e
|
[
"Apache-2.0"
] | 4
|
2021-07-25T09:55:09.000Z
|
2022-03-25T17:16:18.000Z
|
import json
import torch
from flask import Flask, request, jsonify
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from model.lofi2lofi_model import Decoder as Lofi2LofiDecoder
from model.lyrics2lofi_model import Lyrics2LofiModel
from server.lofi2lofi_generate import decode
from server.lyrics2lofi_predict import predict
device = "cpu"
app = Flask(__name__)
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=["30 per minute"]
)
lofi2lofi_checkpoint = "checkpoints/lofi2lofi_decoder.pth"
print("Loading lofi model...", end=" ")
lofi2lofi_model = Lofi2LofiDecoder(device=device)
lofi2lofi_model.load_state_dict(torch.load(lofi2lofi_checkpoint, map_location=device))
print(f"Loaded {lofi2lofi_checkpoint}.")
lofi2lofi_model.to(device)
lofi2lofi_model.eval()
lyrics2lofi_checkpoint = "checkpoints/lyrics2lofi.pth"
print("Loading lyrics2lofi model...", end=" ")
lyrics2lofi_model = Lyrics2LofiModel(device=device)
lyrics2lofi_model.load_state_dict(torch.load(lyrics2lofi_checkpoint, map_location=device))
print(f"Loaded {lyrics2lofi_checkpoint}.")
lyrics2lofi_model.to(device)
lyrics2lofi_model.eval()
| 29.548387
| 90
| 0.771288
|
ef473c6a7f8ab89bcd75652de804e2198dfb2d97
| 1,153
|
py
|
Python
|
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | 1
|
2021-08-09T07:22:25.000Z
|
2021-08-09T07:22:25.000Z
|
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | null | null | null |
cw-bitcoin-price.py
|
buraktokman/Crypto-Exchange-Data-Fetcher
|
23e6ba542ff7a862af3247db2c04c2c10a5f3edf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
Cryptowat.ch API
https://cryptowat.ch/docs/api
https://api.cryptowat.ch/markets/prices '''
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file_price = Path(__file__).parents[0] / 'data' / 'cryptowatch-bitcoin-price2.csv'
if __name__ == '__main__':
#main()
while True:
now = datetime.datetime.now()
while (now.second % 5):
now = datetime.datetime.now()
print(now.second)
time.sleep(0.5)
main()
| 26.813953
| 86
| 0.633998
|
ef477b67fc29e51e58555a187fcad861bf802178
| 3,516
|
py
|
Python
|
Actor_critic/actor_critic_test.py
|
aniketSanap/RL-session
|
68243121277c24509585f51fd01f53fe8d41f119
|
[
"MIT"
] | null | null | null |
Actor_critic/actor_critic_test.py
|
aniketSanap/RL-session
|
68243121277c24509585f51fd01f53fe8d41f119
|
[
"MIT"
] | null | null | null |
Actor_critic/actor_critic_test.py
|
aniketSanap/RL-session
|
68243121277c24509585f51fd01f53fe8d41f119
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""actor_critic.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17Gpya9yswf-xonOvKhoHpmQGhCYpq8x4
"""
# !pip install box2d-py
# !pip install gym[Box_2D]
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import gym
import os
agent = Agent()
agent.play()
torch.save(agent.ac_network.state_dict(), agent.MODEL_PATH)
| 32.859813
| 103
| 0.593003
|
ef488748bc20e35c68916d75dae55ef743e1069d
| 6,145
|
py
|
Python
|
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
python/orz/sta2json.py
|
ViewFaceCore/OpenRoleZoo
|
19cef3cdc5238374cedcf7068dc7a6ad8448c21b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: UTF-8
import os
import struct
from .sta import *
import json
import copy
import base64
from collections import OrderedDict
def unpack_obj(stream, **kwargs):
"""
Convert a stream (sta format) to an object (json format)
:param stream: Stream of binary sta file
:param workshop: path to write binary file
:param getway: the getway to all values
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return: unpacked object
"""
mark = struct.unpack('=b', stream.read(1))[0]
if mark == STA_NIL:
return unpack_nil(stream, **kwargs)
elif mark == STA_INT:
return unpack_int(stream, **kwargs)
elif mark == STA_FLOAT:
return unpack_float(stream, **kwargs)
elif mark == STA_STRING:
return unpack_string(stream, **kwargs)
elif mark == STA_BINARY:
return unpack_binary(stream, **kwargs)
elif mark == STA_LIST:
return unpack_list(stream, **kwargs)
elif mark == STA_DICT:
return unpack_dict(stream, **kwargs)
else:
raise Exception("Unsupported mark type: ", type(mark))
def sta2obj(sta_filename, **kwargs):
"""
Convert filename.sta to object
:param sta_filename: input sta filename
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
# kwargs = {}
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 0
obj = unpack_obj(stream, **kwargs)
return obj
def sta2json(sta_filename, json_filename=None, **kwargs):
"""
Convert filename.sta to filename.json.
:param sta_filename: input sta filename
:param json_filename: output json filename or path
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
filepath, filename_ext = os.path.split(sta_filename)
filename, ext = os.path.splitext(filename_ext)
if json_filename is None:
json_filename = os.path.join(filepath, filename + ".json")
if os.path.isdir(json_filename):
json_filename = os.path.join(json_filename, filename + ".json")
workshop, getway_ext = os.path.split(json_filename)
getway = os.path.splitext(getway_ext)[0]
if len(workshop) > 0 and not os.path.isdir(workshop):
raise Exception("%s/ is not a valid path." % workshop)
with open(json_filename, 'w') as ofile:
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
kwargs['workshop'] = workshop
kwargs['getway'] = getway
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 1
obj = unpack_obj(stream, **kwargs)
json.dump(obj, ofile, indent=2)
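# A short usage sketch for the converters defined above; the .sta file name is a
# hypothetical example, not part of the original module.
if __name__ == '__main__':
    obj = sta2obj('example.sta', binary_mode=0)              # unpack into a Python object
    sta2json('example.sta', 'example.json', binary_mode=1)   # write example.json next to the input
    print(json.dumps(obj, indent=2)[:200])                   # preview the unpacked structure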
| 28.449074
| 73
| 0.593979
|
ef4888a9795dbbe5df0abc36429c88521fbd3e99
| 1,494
|
py
|
Python
|
872 Leaf-Similar Trees.py
|
krishna13052001/LeetCode
|
cd6ec626bea61f0bd9e8493622074f9e69a7a1c3
|
[
"MIT"
] | 872
|
2015-06-15T12:02:41.000Z
|
2022-03-30T08:44:35.000Z
|
872 Leaf-Similar Trees.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 8
|
2015-06-21T15:11:59.000Z
|
2022-02-01T11:22:34.000Z
|
872 Leaf-Similar Trees.py
|
nadeemshaikh-github/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
[
"MIT"
] | 328
|
2015-06-28T03:10:35.000Z
|
2022-03-29T11:05:28.000Z
|
#!/usr/bin/python3
"""
Consider all the leaves of a binary tree. From left to right order, the values
of those leaves form a leaf value sequence.
For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9,
8).
Two binary trees are considered leaf-similar if their leaf value sequence is the
same.
Return true if and only if the two given trees with head nodes root1 and root2
are leaf-similar.
Note:
Both of the given trees will have between 1 and 100 nodes.
"""
# Definition for a binary tree node.
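# The TreeNode definition and solution body are not included in this excerpt. A minimal
# stand-in plus one straightforward approach (collect each tree's leaf sequence with a
# DFS and compare) is sketched below.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
        def leaves(node):
            # collect leaf values in left-to-right order
            if not node:
                return []
            if not node.left and not node.right:
                return [node.val]
            return leaves(node.left) + leaves(node.right)

        return leaves(root1) == leaves(root2)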
| 25.758621
| 80
| 0.566934
|
ef4f605e514f18c935ef699c3ca9417a54b457c9
| 2,465
|
py
|
Python
|
apollo/auth.py
|
sorinbiriescu/Apollo_backend
|
b6fb68a26487a138e7efd691e7fdffaa5042a155
|
[
"Apache-2.0"
] | null | null | null |
apollo/auth.py
|
sorinbiriescu/Apollo_backend
|
b6fb68a26487a138e7efd691e7fdffaa5042a155
|
[
"Apache-2.0"
] | null | null | null |
apollo/auth.py
|
sorinbiriescu/Apollo_backend
|
b6fb68a26487a138e7efd691e7fdffaa5042a155
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
from apollo.crud import query_first_user
from apollo.main import site_settings
from apollo.schemas import TokenData, UserModel
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="api/token")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
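# A minimal sketch of how the CryptContext above is typically used; the helper names are
# illustrative, not taken from the original module.
def get_password_hash(password: str) -> str:
    # bcrypt-hash a plaintext password for storage
    return pwd_context.hash(password)


def verify_password(plain_password: str, hashed_password: str) -> bool:
    # check a plaintext password against its stored hash
    return pwd_context.verify(plain_password, hashed_password)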
| 30.060976
| 103
| 0.710345
|
ef519f677beac77f2c2e144f66d4be64d1cbd341
| 200
|
py
|
Python
|
main.py
|
Gabriel-ino/Automated-Sticker-Hero
|
d76952cc35f051b7d9562912f0a063fed6f75068
|
[
"MIT"
] | null | null | null |
main.py
|
Gabriel-ino/Automated-Sticker-Hero
|
d76952cc35f051b7d9562912f0a063fed6f75068
|
[
"MIT"
] | null | null | null |
main.py
|
Gabriel-ino/Automated-Sticker-Hero
|
d76952cc35f051b7d9562912f0a063fed6f75068
|
[
"MIT"
] | null | null | null |
from App import App
from utils.get_screen_size import get_screen_size
if __name__ == "__main__":
app = App()
h, w, ch = get_screen_size()
while True:
app.proccessing(h, w, ch)
| 16.666667
| 49
| 0.655
|
ef53ba7f982e4f61582b4dfc595af89608ab9da3
| 3,695
|
py
|
Python
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
third_party/graphy/graphy/common_test.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common.py."""
import warnings
from graphy import common
from graphy import graphy_test
from graphy.backends import google_chart_api
if __name__ == '__main__':
graphy_test.main()
| 33.899083
| 74
| 0.707984
|
ef53e0e036cb078d36e154064142222b1dfe4d85
| 608
|
py
|
Python
|
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
projects/utils_func/fetch_data.py
|
blitty-codes/ml-proyects
|
97d41757cfb45209bbbb09e4c3b51e20c4328a30
|
[
"Apache-2.0"
] | null | null | null |
# Download the data you need
import os
import tarfile
import requests
| 27.636364
| 68
| 0.692434
|
ef54bb20c88dda93a302698251aa2e77667dc8a2
| 4,526
|
py
|
Python
|
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
xpython/builtins.py
|
pmp-p/x-python
|
e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a
|
[
"MIT"
] | null | null | null |
"""
A place to implement built-in functions.
We use the bytecode for these when doing cross-version interpreting
"""
from xpython.pyobj import Function, Cell, make_cell
from xdis import codeType2Portable, PYTHON_VERSION, IS_PYPY
# This code was originally written by Darius Bacon,
# but follows code from PEP 3115 listed below.
# Rocky Bernstein did the xdis adaptions and
# added a couple of bug fixes.
def build_class(opc, func, name, *bases, **kwds):
"""
Like built-in __build_class__() in bltinmodule.c, but running in the
byterun VM.
See also: PEP 3115: https://www.python.org/dev/peps/pep-3115/ and
https://mail.python.org/pipermail/python-3000/2007-March/006338.html
"""
# Parameter checking...
if not (isinstance(func, Function)):
raise TypeError("func must be a PyVM function")
if not isinstance(name, str):
raise TypeError("name is not a string")
metaclass = kwds.pop("metaclass", None)
if metaclass is None:
metaclass = type(bases[0]) if bases else type
if isinstance(metaclass, type):
metaclass = calculate_metaclass(metaclass, bases)
if hasattr(metaclass, "__prepare__"):
prepare = metaclass.__prepare__
namespace = prepare(name, bases, **kwds)
else:
namespace = {}
python_implementation = "PyPy" if IS_PYPY else "CPython"
if not (
opc.version == PYTHON_VERSION
and python_implementation == opc.python_implementation
):
# convert code to xdis's portable code type.
class_body_code = codeType2Portable(func_code(func))
else:
class_body_code = func.func_code
# Execute the body of func. This is the step that would go wrong if
# we tried to use the built-in __build_class__, because __build_class__
# does not call func, it magically executes its body directly, as we
# do here (except we invoke our PyVM instead of CPython's).
#
# This behavior when interpreting bytecode that isn't the same as
# the bytecode using in the running Python can cause a SEGV, specifically
# between Python 3.5 running 3.4 or earlier.
frame = func._vm.make_frame(
code=class_body_code,
f_globals=func.func_globals,
f_locals=namespace,
closure=func.__closure__,
)
# rocky: cell is the return value of a function where?
cell = func._vm.eval_frame(frame)
# Add any class variables that may have been added in running class_body_code.
# See test_attribute_access.py for a simple example that needs the update below.
namespace.update(frame.f_locals)
# If metaclass is builtin "type", it can't deal with a xpython.pyobj.Cell object
# but needs a builtin cell object. make_cell() can do this.
if "__classcell__" in namespace and metaclass == type:
namespace["__classcell__"] = make_cell(namespace["__classcell__"].get())
try:
cls = metaclass(name, bases, namespace)
except TypeError:
# For mysterious reasons the above can raise a:
# __init__() takes *n* positional arguments but *n+1* were given.
# In particular for:
# class G(Generic[T]):
# pass
import types
cls = types.new_class(name, bases, kwds, exec_body=lambda ns: namespace)
pass
if isinstance(cell, Cell):
cell.set(cls)
return cls
# From Pypy 3.6
# def find_metaclass(bases, namespace, globals, builtin):
# if '__metaclass__' in namespace:
# return namespace['__metaclass__']
# elif len(bases) > 0:
# base = bases[0]
# if hasattr(base, '__class__'):
# return base.__class__
# else:
# return type(base)
# elif '__metaclass__' in globals:
# return globals['__metaclass__']
# else:
# try:
# return builtin.__metaclass__
# except AttributeError:
# return type
def calculate_metaclass(metaclass, bases):
"Determine the most derived metatype."
winner = metaclass
for base in bases:
t = type(base)
if issubclass(t, winner):
winner = t
elif not issubclass(winner, t):
raise TypeError("metaclass conflict", winner, t)
return winner
| 32.328571
| 84
| 0.650685
|
ef58bac3885ae00f40f0903957d207828fe3e0c6
| 857
|
py
|
Python
|
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
config/object_detection_retinanet_config.py
|
kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--
|
5baacf4475f3679b96ea2001994a575ec0a72bf0
|
[
"Apache-2.0"
] | null | null | null |
# import the necessary packages
import os
# Set the dataset base path here
BASE_PATH = "/content/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--/dataset"
# build the path to the annotations and input images
ANNOT_PATH = os.path.sep.join([BASE_PATH, 'annotations'])
IMAGES_PATH = os.path.sep.join([BASE_PATH, 'images'])
# define the training/testing split
# If you only have a training dataset, set TRAIN_TEST_SPLIT = 1 here
TRAIN_TEST_SPLIT = 0.80
# build the path to the output training and test .csv files
TRAIN_CSV = os.path.sep.join([BASE_PATH, 'train.csv'])
TEST_CSV = os.path.sep.join([BASE_PATH, 'test.csv'])
# build the path to the output classes CSV files
CLASSES_CSV = os.path.sep.join([BASE_PATH, 'classes.csv'])
# build the path to the output predictions dir
OUTPUT_DIR = os.path.sep.join([BASE_PATH, 'predictions'])
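# A hypothetical sketch of how this config is typically consumed: shuffle the annotation
# rows and split them into the train/test CSVs according to TRAIN_TEST_SPLIT. The
# annotations file name and helper are assumptions, not part of the original config.
import random

def build_split(annotations_csv=os.path.sep.join([ANNOT_PATH, 'annotations.csv'])):
    with open(annotations_csv) as f:
        rows = [line for line in f if line.strip()]
    random.shuffle(rows)
    cut = int(len(rows) * TRAIN_TEST_SPLIT)
    with open(TRAIN_CSV, 'w') as f:
        f.writelines(rows[:cut])
    with open(TEST_CSV, 'w') as f:
        f.writelines(rows[cut:])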
| 35.708333
| 97
| 0.757293
|
ef593e9168b64350b18b0f9f56ed9f30d578e6cf
| 4,199
|
py
|
Python
|
CiTOCrawler/OC/script/static_lode.py
|
patmha/CiTOCrawler
|
6c5027f42aacc2d250305e5e877bc271470acde5
|
[
"BSD-3-Clause"
] | null | null | null |
CiTOCrawler/OC/script/static_lode.py
|
patmha/CiTOCrawler
|
6c5027f42aacc2d250305e5e877bc271470acde5
|
[
"BSD-3-Clause"
] | null | null | null |
CiTOCrawler/OC/script/static_lode.py
|
patmha/CiTOCrawler
|
6c5027f42aacc2d250305e5e877bc271470acde5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
import os
import requests
import codecs
import argparse
import re
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser("static_lode.py")
arg_parser.add_argument("-pu", "--prefix-url", dest="prefurl", required=True,
help="The prefix followed by a ':' plus the URL of the ontology to convert.")
arg_parser.add_argument("-o", "--output-dir", dest="output_dir", required=True,
help="The directory where to store the documentation files created.")
arg_parser.add_argument("-s", "--source-material-url", dest="source_material_url",
help="The directory that contains all the LODE related files for "
"presentation on the browser.")
arg_parser.add_argument("-l", "--lode-url", dest="lode_url",
default="http://eelst.cs.unibo.it/apps/LODE",
help="The URL where LODE is available.")
arg_parser.add_argument("-lang", "--language", dest="language", default="en",
help="The ISO code of the language used to retrieve the documentation "
"(default: 'en?).")
arg_parser.add_argument("-repl", "--string-replace", dest="string_replace",
help="A 'source->replace' regular expression for replacement of strings.")
args = arg_parser.parse_args()
all_ontologies_url = {}
split_input = args.prefurl.split(":", 1)
all_ontologies_url.update({split_input[0]: split_input[1]})
sl = StaticLODE(args.output_dir, all_ontologies_url, args.language,
args.source_material_url, args.lode_url, args.string_replace)
sl.create_documentation()
# How to call it for a specific ontology:
# python static_lode.py -pu fabio:http://purl.org/spar/fabio -o spar/ontology_documentations -s /static/lode
| 49.988095
| 112
| 0.635389
|
ef59c84efb2830bb4da68800485a32f52a474ab9
| 14,738
|
py
|
Python
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 20
|
2017-05-17T18:43:08.000Z
|
2021-02-13T16:20:53.000Z
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 8
|
2017-06-04T17:01:06.000Z
|
2022-03-17T12:43:32.000Z
|
src/c4/cmany/cmake.py
|
biojppm/cmany
|
b20c24169d60077122ae29a0c09526913340fd5c
|
[
"MIT"
] | 1
|
2017-06-04T13:09:19.000Z
|
2017-06-04T13:09:19.000Z
|
import re
import os
from collections import OrderedDict as odict
from .conf import USER_DIR
from .util import cacheattr, setcwd, runsyscmd, logdbg
from . import util
from . import err
_cache_entry = r'^(.*?)(:.*?)=(.*)$'
def loadvars(builddir):
"""if builddir does not exist or does not have a cache, returns an
empty odict"""
v = odict()
if builddir is None or not os.path.exists(builddir):
return v
c = os.path.join(builddir, 'CMakeCache.txt')
if os.path.exists(c):
with open(c, 'r') as f:
for line in f:
# logdbg("loadvars0", line.strip())
if not re.match(_cache_entry, line):
continue
ls = line.strip()
name = re.sub(_cache_entry, r'\1', ls)
vartype = re.sub(_cache_entry, r'\2', ls)[1:]
value = re.sub(_cache_entry, r'\3', ls)
# logdbg("loadvars1", name, vartype, value)
v[name] = CMakeCacheVar(name, value, vartype)
return v
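# A worked example of what the _cache_entry pattern extracts from a CMakeCache.txt line;
# the sample line is illustrative.
# >>> line = 'CMAKE_BUILD_TYPE:STRING=Release'
# >>> re.sub(_cache_entry, r'\1', line)        # -> 'CMAKE_BUILD_TYPE' (variable name)
# >>> re.sub(_cache_entry, r'\2', line)[1:]    # -> 'STRING'           (cache type)
# >>> re.sub(_cache_entry, r'\3', line)        # -> 'Release'          (value)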
# -----------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def _remove_invalid_args_from_sysinfo_cmd(cmd):
gotit = None
# remove compile commands args
for i, elm in enumerate(cmd):
if 'CMAKE_EXPORT_COMPILE_COMMANDS' in elm:
# can't strip out if compile commands is not given as one,
# because the command will become malformed when we remove
if elm not in ('-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF'):
raise Exception("malformed command")
gotit = i
if gotit is not None:
del cmd[gotit]
# remove architecture args
if '-A' in cmd:
i = cmd.index('-A')
del cmd[i+1]
del cmd[i]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# def get_toolchain_cache(toolchain):
# d = os.path.join(USER_DIR, 'toolchains', re.sub(os.sep, '+', toolchain))
# logdbg("toolchain cache: USER_DIR=", USER_DIR)
# logdbg("toolchain cache: d=", d)
# bd = os.path.join(d, 'build')
# logdbg("toolchain cache: bd=", bd)
# if not os.path.exists(d):
# os.makedirs(d)
# with setcwd(d):
# with open('main.cpp', 'w') as f:
# f.write("int main() {}")
# with open('CMakeLists.txt', 'w') as f:
# f.write("""
# cmake_minimum_required(VERSION 2.6)
# project(toolchain_test)
# add_executable(main main.cpp)
# """)
# if not os.path.exists(bd):
# os.makedirs(bd)
# with setcwd(bd):
# cmd = ['cmake', '-DCMAKE_TOOLCHAIN_FILE='+toolchain, '..']
# runsyscmd(cmd, echo_output=True)
# return loadvars(bd)
| 36.937343
| 125
| 0.519677
|
ef5a62962aed890737736832f581c39140877b07
| 2,130
|
py
|
Python
|
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | 5
|
2017-08-03T06:33:49.000Z
|
2021-08-06T13:20:57.000Z
|
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | null | null | null |
Python/Searching/2/quick_select.py
|
Tikam02/Data_Structure_Algorithms
|
7c17f744975a72fa42f0f3f892c0b7e041cdef0c
|
[
"MIT"
] | 6
|
2017-04-27T13:30:49.000Z
|
2020-11-01T20:28:55.000Z
|
#!/usr/bin/env python
__author__ = "bt3"
import random
''' The simplest way...'''
''' If you don't want to use Python's built-in features at all and
also want to select the pivot randomly'''
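# Hedged reconstruction: the bodies of quickSelect and quickSelectHard were not
# included in this record. The sketches below are a standard quickselect (k is a
# 0-based index into the sorted sequence) and a random-pivot, partition-based
# variant; they may differ from the original implementations.
def quickSelect(seq, k):
    """Return the k-th smallest element (0-based), simple recursive version."""
    pivot = seq[0]
    lo = [x for x in seq[1:] if x <= pivot]
    hi = [x for x in seq[1:] if x > pivot]
    if k < len(lo):
        return quickSelect(lo, k)
    if k == len(lo):
        return pivot
    return quickSelect(hi, k - len(lo) - 1)


def quickSelectHard(seq, k):
    """Return the k-th smallest element (0-based) using random pivots and
    in-place Lomuto partitioning on a copy of the sequence."""
    seq = list(seq)
    left, right = 0, len(seq) - 1
    while True:
        if left == right:
            return seq[left]
        p = random.randint(left, right)
        seq[p], seq[right] = seq[right], seq[p]
        store = left
        for i in range(left, right):
            if seq[i] < seq[right]:
                seq[i], seq[store] = seq[store], seq[i]
                store += 1
        seq[store], seq[right] = seq[right], seq[store]
        if k == store:
            return seq[store]
        if k < store:
            right = store - 1
        else:
            left = store + 1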
if __name__ == '__main__':
# Checking the Answer
seq = [10, 60, 100, 50, 60, 75, 31, 50, 30, 20, 120, 170, 200]
#seq = [3, 7, 2, 1, 4, 6, 5, 10, 9, 11]
# we want the middle element
k = len(seq) // 2
    # Note that this only works for odd-length arrays, since the median of an
    # even-length array is the mean of the two middle elements
print(quickSelect(seq, k))
print(quickSelectHard(seq, k))
import numpy
    print(numpy.median(seq))
| 23.932584
| 78
| 0.597653
|
ef5b7b88dd380eec142de24fd5621ee02381ea01
| 3,744
|
py
|
Python
|
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | 4
|
2018-02-06T21:15:31.000Z
|
2018-07-28T14:00:17.000Z
|
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | null | null | null |
RGB_extraction_maize_diversity.py
|
xiangjunli/Maize_Phenotype_Map
|
15765c1a9a58bdf5cfca5602e09e9cbe74d12b98
|
[
"BSD-3-Clause"
] | 2
|
2020-02-07T18:26:09.000Z
|
2020-10-16T15:52:56.000Z
|
import numpy as np
import cv2
import sys
import os
#######################RGB Image Data Analysis############################################################
###Should follow the data structure of image data: Genotype --> Replicates (Plants) --> Different Views --> Image captured by each Day###
# mfold defines the folder name that stores the data in our data structure
mfold = sys.argv[1]
# The ratio between pixel sizes at the further and closer zoom levels is 1:2.02; each pixel at the closer zoom level is 0.746 mm. This script generates values based on pixel counts.
# The binary function extracts green pixels using the threshold (2*G)/(R+B) > 1.15
# A helper extracts plant height, plant width and plant area pixel counts from the resulting mask (a hedged sketch follows below)
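# Hedged sketch: the original definitions of binary() and call_numeric() were not
# included in this record. Based on the comments above, binary() is assumed to crop
# the (2*G)/(R+B) ratio image to the plant region and keep pixels above 1.15 as
# white (255); call_numeric() (height, width, area and connected-component areas
# from the mask) remains elided here.
def binary(ratio_img, row_start, row_end, col_start, col_end, threshold=1.15):
    mask = np.zeros(ratio_img.shape, dtype=np.uint8)
    crop = ratio_img[row_start:row_end, col_start:col_end]
    mask[row_start:row_end, col_start:col_end] = np.where(crop > threshold, 255, 0)
    return mask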
whole = os.listdir(mfold)
# Two zoom levels were used on different days, so the plant images are analyzed separately for each zoom level
close = set([])
far = set([])
for i in range(1,27):
close.add('Day_'+str(i).zfill(3))
close.remove('Day_'+str(11).zfill(3))
for i in range(27,33):
far.add('Day_'+str(i).zfill(3))
far.add('Day_'+str(11).zfill(3))
# out is the file with extracted numeric values from RGB images
out = open('RGB_extraction.csv','w')
# this file records image files that fail to load, so the main loop can continue uninterrupted
error = open('RGB_extraction_error.csv','w')
out.write('PlantID'+'\t'+'Date'+'\t'+'View'+'\t'+'Plant Height'+'\t'+'Plant Width'+'\t'+'Projected Plant Area'+'\n')
views = ['VIS SV 0','VIS SV 90']
for j1 in sorted(whole):
if j1 == 'Genotype_ZL022':continue
for i1 in os.listdir('{0}/{1}'.format(mfold,j1)):
for v in views:
for d1 in sorted(os.listdir('{0}/{1}/{2}/{3}/'.format(mfold,j1,i1,v))):
nlist = [i1,d1.replace('.png','')]
myview = 'View'+v.replace('VIS SV ','')
na = [myview,'NA','NA','NA']
date = d1.replace('.png','')
try:
abc = cv2.imread('{0}/{1}/{2}/{3}/{4}'.format(mfold,j1,i1,v,d1))
abc = abc.astype(np.float)
imgreen = (2*abc[:,:,1])/(abc[:,:,0]+abc[:,:,2])
if date in close:
thresh = binary(imgreen,50,1950,335,2280)
elif date in far:
thresh = binary(imgreen,50,1450,815,1780)
cv2.imwrite('test.jpg',thresh)
thresh = cv2.imread("test.jpg",cv2.CV_LOAD_IMAGE_GRAYSCALE)
h,w,area,areas0 = call_numeric(thresh)
total = max(areas0)
k = areas0.index(total)
del areas0[k]
for i in areas0:
total -= i
nlist.append(myview)
if date in far:
nlist.append(str(float(h)*2.02))
nlist.append(str(float(w)*2.02))
nlist.append(str(float(total)))
else:
nlist.append(h)
nlist.append(w)
nlist.append(total)
except:
nlist.extend(na)
error.write(j1+':'+i1+':'+v+':'+d1+'\n')
out.write('\t'.join(nlist)+'\n')
out.close()
error.close()
| 32
| 174
| 0.626603
|
ef5c0e5ff1790c1367e3395cb63ad1ddf91375ef
| 4,620
|
py
|
Python
|
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | 10
|
2019-05-02T14:08:32.000Z
|
2021-03-15T16:07:19.000Z
|
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | null | null | null |
cgtools/skinning.py
|
tneumann/cgtools
|
8f77b6a4642fe79ac85b8449ebd3f72ea0e56032
|
[
"MIT"
] | 3
|
2019-05-02T14:08:33.000Z
|
2021-02-10T03:47:29.000Z
|
import numpy as np
from . import vector as V
def blend_skinning(pts, BW, rbms, method='lbs'):
"""
perform blend skinning of pts given blend weights BW and the 4x4 rigid body motions in rbms
pts should be an array of points, so the shape should be (num_points, 3)
    BW should be an array of blend weights, so the shape should be (num_points, num_rbms)
    where num_rbms gives the number of rigid body motion parts (joints)
    rbms should be an array of shape (num_rbms, 4, 4) - one rigid body motion for each column in BW
supported methods are "lbs" (linear blend skinning)
and "dq" (dual quaternion skinning)
"""
# TODO use masked arrays to accellerate?
if method == 'lbs':
transformed_pts = np.tensordot(V.hom(pts), rbms, axes=(1, 2))
if transformed_pts.shape[-1] == 4:
transformed_pts = V.dehom(transformed_pts)
return np.sum(BW[:,:,np.newaxis] * transformed_pts, axis=1)
elif method == 'dq':
rbms = np.asanyarray(rbms)
dqs = np.array(list(map(rbm_to_dualquat, rbms)))
return dq_skinning(pts, BW, dqs)
else:
raise ValueError("Unknown skinning method")
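# Hedged usage sketch (not part of the original module): skin two points with two
# joints using linear blend skinning. The second joint translates by (0, 1, 0); the
# second point is influenced equally by both joints, so it should move by (0, 0.5, 0).
if __name__ == '__main__':
    pts = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0]])
    BW = np.array([[1.0, 0.0],
                   [0.5, 0.5]])
    rbm_identity = np.eye(4)
    rbm_translate = np.eye(4)
    rbm_translate[:3, 3] = [0.0, 1.0, 0.0]
    rbms = np.stack([rbm_identity, rbm_translate])
    print(blend_skinning(pts, BW, rbms, method='lbs'))
    # expected: [[0. 0. 0.], [1. 0.5 0.]]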
| 37.868852
| 104
| 0.515368
|
ef5cca29cfc460b593d8a2ef7fb0d7625f148237
| 2,214
|
py
|
Python
|
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | 1
|
2019-12-10T12:27:33.000Z
|
2019-12-10T12:27:33.000Z
|
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | null | null | null |
methods/self_attention.py
|
uyplayer/machine_learning_notice
|
9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Team : uyplayer team
# Author uyplayer
# Date 2019/11/20 4:22
# Tool PyCharm
'''
https://blog.csdn.net/c9Yv2cf9I06K2A9E/article/details/79739287
https://msd.misuland.com/pd/13340603045208861
'''
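# Minimal sketch (not from the original file, which only collects reference links):
# scaled dot-product self-attention over a sequence of token embeddings, as
# described in the linked articles. Shapes and the softmax/scaling choices are
# standard assumptions.
import numpy as np


def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)


def self_attention(X, Wq, Wk, Wv):
    # X: (seq_len, d_model); Wq/Wk/Wv: (d_model, d_k)
    Q, K, V = X @ Wq, X @ Wk, X @ Wv
    scores = Q @ K.T / np.sqrt(K.shape[-1])  # (seq_len, seq_len) attention scores
    return softmax(scores) @ V               # weighted sum of values, (seq_len, d_k)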
| 41.773585
| 88
| 0.653117
|
ef5e5867ee1d6b8b8d8f0bd5472d8f25ae61b5ab
| 497
|
py
|
Python
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
"""
Write a Python program that reads a date (from 2016/1/1 to 2016/12/31) and prints the day of the week for that date.
Jan. 1, 2016, is Friday.
Note that 2016 is a leap year.
"""
from datetime import date
print("Input month and date(separated by a single space): ")
m, d = map(int, input().split())
weeks = {1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday", 7: "Sunday"}
w = date.isoweekday(date(2016, m, d))
print("Name of the date: ", weeks[w])
#Reference: w3resources
| 33.133333
| 106
| 0.668008
|
ef5e8dee6b61a5247d6e4659a6ab926d4b74a1e7
| 347
|
py
|
Python
|
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
test15.py
|
cherytony/test1
|
506ce4cab6f641beff817c81d7a616db29a7131d
|
[
"Apache-2.0"
] | null | null | null |
"""
nn
:
n(1n1000),nn(100),
:
n
1
9
cap
to
cat
card
two
too
up
boat
boot
boat
boot
cap
card
cat
to
too
two
up
"""
words = []
n = int(input())
for i in range(0, n):
    s = input()
    words.append(s)
words.sort()
for word in words:
    print(word)
| 8.069767
| 58
| 0.674352
|
ef5fbbee42c9df1a0ff003ab57c38b8bb1ccfe30
| 2,558
|
py
|
Python
|
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
0-EXP-TIRA-C10.py
|
webis-de/Luyckx2008
|
a7b2711a354a71ba326ddb1e495a8343091e4d8c
|
[
"Unlicense"
] | null | null | null |
import jsonhandler
from LuyckxFeatures import *
import timblClassification as timbl
import os
import numpy as np
from collections import Counter
dictPath = "c10"
jsonhandler.loadJson(dictPath)
jsonhandler.loadTraining()
candidates = jsonhandler.candidates
unknowns = jsonhandler.unknowns
authors = list()
uAuthors = list()
for cand in candidates:
a = author(cand)
for fileName in jsonhandler.trainings[cand]:
fName = '%s/%s/%s' % (dictPath, cand, fileName)
pName = '%s/%s/%s' % (dictPath, cand, os.path.splitext(fileName)[0] + '.mbsp')
a.addDoc(fName, pName)
authors.append(a)
for unknown in unknowns:
fName = '%s/unknown/%s' % (dictPath, unknown)
pName = '%s/unknown/%s' % (dictPath, os.path.splitext(unknown)[0] + '.mbsp')
a = author(os.path.splitext(unknown)[0])
a.addDoc(fName, pName)
uAuthors.append(a)
docs = getAllDocuments(authors + uAuthors)
globalFeatures = dict.fromkeys((docs[0].features.keys()))
accuracy = dict.fromkeys((docs[0].features.keys()))
predict = dict.fromkeys((docs[0].features.keys()))
for idk, key in enumerate(globalFeatures.keys()):
globalFeatures[key] = globalFeature(key, docs)
train_fName = '%s/%s_training.c5' % (dictPath, key)
test_fName = '%s/%s_test.c5' % (dictPath, key)
exportC5(getAllDocuments(authors), authors, globalFeatures[key], 50, train_fName)
exportC5(getAllDocuments(uAuthors), uAuthors, globalFeatures[key], 50, test_fName)
noFeatures = len(Counter(globalFeatures[key].chi2).most_common(50))
predict[key] = timbl.classify(train_fName, test_fName, noFeatures)
os.remove(train_fName)
os.remove(test_fName)
# jsonhandler.storeJson(unknowns, predict)
jsonhandler.loadGroundTruth()
with open('%s/results' % dictPath, 'w') as rHandle:
for key in globalFeatures.keys():
cMatrix = timbl.confusionMatrix(jsonhandler.trueAuthors, predict[key])
accuracy[key] = np.sum(np.diag(cMatrix)) / np.sum(cMatrix)
rHandle.write('%s \t %.4f \n' % (key, accuracy[key]))
| 38.179104
| 86
| 0.670837
|
ef6043c616af761fa9470ba29ff276fd15c95e0d
| 3,133
|
py
|
Python
|
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | 1
|
2020-08-01T13:42:26.000Z
|
2020-08-01T13:42:26.000Z
|
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | null | null | null |
bus.py
|
resc863/Kakao_Chatbot
|
fe4a038de323ad733cd49e69c7ceb283a36bef0c
|
[
"MIT"
] | 1
|
2021-08-24T14:02:32.000Z
|
2021-08-24T14:02:32.000Z
|
from bs4 import BeautifulSoup
from multiprocessing import Pool
import requests
if __name__ == "__main__":
print(bus())
| 27.243478
| 210
| 0.616981
|
ef60ce6fc063e157d7dfaad93f8114a633854b16
| 4,256
|
py
|
Python
|
model_training.py
|
PatriceC/MLProjectISDP2020
|
64e83824690ccde2714d915c70fb00b20aa66a42
|
[
"MIT"
] | 1
|
2021-01-23T01:04:00.000Z
|
2021-01-23T01:04:00.000Z
|
model_training.py
|
cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data
|
e8eefdf2e630a53e09f88550357b67732f2bccd0
|
[
"MIT"
] | null | null | null |
model_training.py
|
cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data
|
e8eefdf2e630a53e09f88550357b67732f2bccd0
|
[
"MIT"
] | 1
|
2021-01-19T16:57:27.000Z
|
2021-01-19T16:57:27.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 13:54:58 2020
@author: Patrice CHANOL & Corentin MORVAN--CHAUMEIL
"""
import numpy as np
import torch
import time
import visualisation
from datetime import datetime
def main(model, criterion, optimizer, scheduler, data_train_loader, data_test_loader, num_epochs, input_window, output_window, batch_size):
"""
    Train the model and compute the test loss.
Parameters
----------
model : TYPE
DESCRIPTION. model to train
criterion : TYPE
DESCRIPTION. criterion to compute
optimizer : TYPE
DESCRIPTION.
scheduler : TYPE
DESCRIPTION.
data_loader_train : TYPE
DESCRIPTION. train set
data_loader_test : TYPE
DESCRIPTION. test set
num_epochs : TYPE
DESCRIPTION. number of epoch to compute
input_window : TYPE
DESCRIPTION. input windonw length
output_window : TYPE
DESCRIPTION. output windonw length
batch_size : TYPE
DESCRIPTION. batch_size
Returns
-------
model : TYPE
DESCRIPTION. trained model
test_loss_list : TYPE
DESCRIPTION. test loss
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dateTimeObj = datetime.now()
    print('Training start: ', dateTimeObj.hour, 'H', dateTimeObj.minute)
test_loss_list = []
n_batches = len(data_train_loader)
    # Train the model for num_epochs epochs
    for epoch in range(1, num_epochs + 1):
        # Epoch timer
        epoch_start_time = time.time()
        dateTimeObj = datetime.now()
        print('Start of epoch', epoch, ':', dateTimeObj.hour, 'H', dateTimeObj.minute)
        # Put the model in training mode
        model.train()
        # Next fraction of the dataset at which to evaluate
        pourcentage = 0.
        # Test losses collected during the current evaluation pass
        test_loss_batch = []
        # Timer for the next 10% of the data
start_time = time.time()
for batch, ((day_of_week, serie_input), serie_output) in enumerate(data_train_loader):
# Initializing a gradient as 0 so there is no mixing of gradient among the batches
optimizer.zero_grad()
# Forward pass
output = model.forward(day_of_week.to(device), serie_input.float().to(device))
loss = criterion(output, serie_output.float().to(device))
# Propagating the error backward
loss.backward()
            # Clip the gradients when the model is a Transformer
if model.name_model == 'Transformer':
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
# Optimizing the parameters
optimizer.step()
            # Actual fraction of the dataset processed so far
            count_pourcentage = batch / n_batches
            # Evaluate each time another 10% of the dataset has been processed
            if count_pourcentage >= pourcentage:
                # Time spent on the last 10%
                T = time.time() - start_time
                # Switch the model to evaluation mode
model.eval()
with torch.no_grad():
for ((day_of_week_t, serie_input_t), serie_output_t) in data_test_loader:
output_t = model.forward(day_of_week_t.to(device), serie_input_t.float().to(device))
loss_t = criterion(output_t, serie_output_t.float().to(device))
test_loss_batch.append(loss_t.item())
test_loss = np.mean(test_loss_batch)
test_loss_list.append(test_loss)
print('-'*10)
print("Pourcentage: {}%, Test Loss : {}, Epoch: {}, Temps : {}s".format(round(100*pourcentage), test_loss, epoch, round(T)))
print('-'*10)
# Visualisation
visualisation.pred_vs_reality(model, input_window, output_window, epoch=epoch, pourcentage=round(100*pourcentage))
pourcentage += 0.1
start_time = time.time()
model.train()
        print('End of epoch: {}, epoch duration: {}s'.format(epoch, round(time.time() - epoch_start_time)))
visualisation.forecast(model, input_window, output_window, epoch=epoch)
scheduler.step()
model.save()
return model, test_loss_list
| 34.322581
| 140
| 0.608083
|
ef61b3b08001b19237e5f7463a25cc96b621c9fe
| 3,679
|
py
|
Python
|
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | 1
|
2021-01-30T11:50:54.000Z
|
2021-01-30T11:50:54.000Z
|
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | null | null | null |
process_data.py
|
johnnyp2587/fx-drqn
|
0ea8a4ad673a1883dd4630a69629c75c8f49148c
|
[
"MIT"
] | 2
|
2021-01-30T11:50:57.000Z
|
2021-02-04T15:43:54.000Z
|
import numpy as np
import pandas as pd
import datetime
if __name__=='__main__':
CreateFeature('EURUSD', 16, 1)
| 37.927835
| 113
| 0.580864
|
ef625fbf84f8e46aa31c085f3762960c2186790e
| 3,863
|
py
|
Python
|
benchmark.py
|
tgisaturday/minGPT
|
3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b
|
[
"MIT"
] | null | null | null |
benchmark.py
|
tgisaturday/minGPT
|
3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b
|
[
"MIT"
] | null | null | null |
benchmark.py
|
tgisaturday/minGPT
|
3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b
|
[
"MIT"
] | null | null | null |
import math
import os
from argparse import ArgumentParser
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.callbacks import XLAStatsMonitor
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
from mingpt.lr_decay import LearningRateDecayCallback
from mingpt.model import GPT
if __name__ == '__main__':
seed_everything(42)
parser = ArgumentParser()
parser = Trainer.add_argparse_args(parser)
parser.add_argument('--n_layer', default=22, type=int)
parser.add_argument('--n_head', default=16, type=int)
parser.add_argument('--n_embd', default=720, type=int)
parser.add_argument('--learning_rate', default=6e-4, type=float)
parser.add_argument('--block_size', default=128, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_workers', default=16, type=int)
args = parser.parse_args()
if not os.path.exists("input.txt"):
os.system("wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt")
dm = CharDataModule(args.batch_size, args.num_workers, args.block_size)
dm.setup()
model = GPT(
vocab_size=dm.train_dataset.vocab_size,
block_size=dm.train_dataset.block_size,
n_layer=args.n_layer,
n_head=args.n_head,
n_embd=args.n_embd,
learning_rate=args.learning_rate
)
lr_decay = LearningRateDecayCallback(
learning_rate=6e-4,
warmup_tokens=512 * 20,
final_tokens=2 * len(dm.train_dataset) * args.block_size
)
trainer = Trainer.from_argparse_args(
args,
max_epochs=5,
tpu_cores=8,
gradient_clip_val=1.0,
callbacks=[lr_decay, XLAStatsMonitor()],
)
trainer.fit(model, datamodule = dm )
| 36.443396
| 119
| 0.681077
|
ef62a93780f5d22fd2c5c963cb04b78649fda229
| 2,059
|
py
|
Python
|
weather.py
|
corgiclub/CorgiBot_telegram
|
a63d91a74ee497b9a405e93bd3b303367ef95268
|
[
"MIT"
] | null | null | null |
weather.py
|
corgiclub/CorgiBot_telegram
|
a63d91a74ee497b9a405e93bd3b303367ef95268
|
[
"MIT"
] | null | null | null |
weather.py
|
corgiclub/CorgiBot_telegram
|
a63d91a74ee497b9a405e93bd3b303367ef95268
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*
import requests
import json
| 44.76087
| 112
| 0.594463
|
ef63d9fcd4c7ced9c5506a721a486919e70bacc7
| 2,536
|
py
|
Python
|
paz/datasets/ferplus.py
|
niqbal996/paz
|
f27205907367415d5b21f90e1a1d1d1ce598e889
|
[
"MIT"
] | 300
|
2020-10-29T08:02:05.000Z
|
2022-03-30T21:47:32.000Z
|
paz/datasets/ferplus.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 30
|
2020-10-29T12:40:32.000Z
|
2022-03-31T14:06:35.000Z
|
paz/datasets/ferplus.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 62
|
2020-10-29T12:34:13.000Z
|
2022-03-29T05:21:45.000Z
|
import os
import numpy as np
from .utils import get_class_names
from ..abstract import Loader
from ..backend.image import resize_image
# IMAGES_PATH = '../datasets/fer2013/fer2013.csv'
# LABELS_PATH = '../datasets/fer2013/fer2013new.csv'
| 39.015385
| 79
| 0.613565
|
ef651d134e566a45ca23483fc6b3987d980d24af
| 863
|
py
|
Python
|
code/array/container-with-most-water.py
|
windsuzu/leetcode-python
|
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
|
[
"MIT"
] | 1
|
2021-09-29T11:05:07.000Z
|
2021-09-29T11:05:07.000Z
|
code/array/container-with-most-water.py
|
windsuzu/leetcode-python
|
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
|
[
"MIT"
] | null | null | null |
code/array/container-with-most-water.py
|
windsuzu/leetcode-python
|
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
|
[
"MIT"
] | 1
|
2021-09-29T11:06:32.000Z
|
2021-09-29T11:06:32.000Z
|
from typing import List
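# Hedged sketch: the original solution body was not included in this record. Below is
# the standard two-pointer approach to the "Container With Most Water" problem; it may
# differ from the repository's original code.
class Solution:
    def maxArea(self, height: List[int]) -> int:
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            # the area is bounded by the shorter wall times the distance between walls
            best = max(best, min(height[left], height[right]) * (right - left))
            # move the pointer at the shorter wall inward
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best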
| 30.821429
| 74
| 0.468134
|
ef68897796bf15cfbe41f5e79ff37ee0aa7a33e6
| 3,578
|
py
|
Python
|
src/python/DipSimUtilities.py
|
ndeybach/DipSim
|
091f147f933b000b6ab829ec7d10eef985c260b2
|
[
"MIT"
] | null | null | null |
src/python/DipSimUtilities.py
|
ndeybach/DipSim
|
091f147f933b000b6ab829ec7d10eef985c260b2
|
[
"MIT"
] | null | null | null |
src/python/DipSimUtilities.py
|
ndeybach/DipSim
|
091f147f933b000b6ab829ec7d10eef985c260b2
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""
MIT License
Copyright (c) 2020 Nils DEYBACH & Lo OUDART
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
It serves as a container for various utility functions that are useful in a variety of situations.
"""
from math import cos, sin, radians, degrees, acos, atan2, pi
from PySide2.QtCore import QRandomGenerator
from PySide2.QtGui import QVector3D, QColor, QQuaternion
######## NUMBER GENERATION #########
"""
Return randomly -1 or 1 as a random sign generator.
"""
######## ANGLES CONVERTIONS #########
"""
Returns rotated quaternion from a rotation (theta) applied to original
direction around specified axis.
"""
"""
Returns the quaternion rotation corresponding to a spherical position (following the
physics convention), assuming an initial (1,0,0) orientation.
phi, theta: angles in physics convention in degrees.
"""
"""
Converts an orientation (following the physics convention) to a quaternion representing
the rotation needed to make a vector follow that orientation.
"""
######## COLORS #########
"""
Returns a color from a 3D vector of angles.
phi, theta: angles in physics convention in radians.
"""
"""
Returns a random color.
"""
| 35.425743
| 149
| 0.734768
|
3226aa7f7ea523e5b462c538450fa0bfe4a22a9b
| 1,503
|
py
|
Python
|
clusterresults/rundr12xpdf10k.py
|
rohinkumar/CorrelCalc
|
d7887448af8d3dc3170c00c0aae6ee2561b8a3d5
|
[
"MIT"
] | null | null | null |
clusterresults/rundr12xpdf10k.py
|
rohinkumar/CorrelCalc
|
d7887448af8d3dc3170c00c0aae6ee2561b8a3d5
|
[
"MIT"
] | null | null | null |
clusterresults/rundr12xpdf10k.py
|
rohinkumar/CorrelCalc
|
d7887448af8d3dc3170c00c0aae6ee2561b8a3d5
|
[
"MIT"
] | null | null | null |
from correlcalc import *
bins = np.arange(0.002,0.062,0.002)
#corrdr12flcdmls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lcdm',weights='eq')
print("--------------------------------------------")
corrdr12flcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lcdm',weights=True)
print("--------------------------------------------")
#corrdr12flcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights='eq')
#print("--------------------------------------------")
#corrdr12olcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights='eq',geometry='open')
print("--------------------------------------------")
corrdr12flclsw=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights=True)
print("--------------------------------------------")
corrdr12flolsw=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights=True,geometry='open')
| 107.357143
| 204
| 0.685961
|
3227a055c835557ad7f0f841ab6676069d791695
| 10,965
|
py
|
Python
|
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torchvision import transforms
from PIL import Image
import numpy as np
import math
from sklearn.linear_model import LinearRegression
from .grid import Grid, grid_split
import torch.backends.cudnn as cudnn
'''
Global Constants:
TASK_NAME: Name of the verification task (deprecated)
PATH: The path of the model file. (Initialized in imagenet_verify)
mean, stdvar: The normalization parameters of the data. (Initialized in imagenet_verify, default mean=(0.4914,0.4822,0.4465) stdvar=(0.2023,0.1994,0.2010))
delta: The radius of the L-inf Ball. (Initialized in imagenet_verify, default 4/255)
significance, error: The significance and the error rate of the PAC-Model. (Initialized in imagenet_verify, default 0.001 and 0.01)
final_samples: The number of samples needed to calculate the final margin. (Initialized in imagenet_verify, default 1600, according to the default error rate and significance)
Batchsize: The batch size of the sampling procedure. (Initialized in imagenet_verify, default 200)
device: Which device to be utilised by Pytorch. (Initialized in imagenet_verify, default 'cuda')
model: The Pytorch Network to be verified. (Initialized in imagenet_verify)
pretrans: The torchvision transform to process the image. (Resize and Tensorize)
normalization_trans: The normalization transform to normalize the data. (Initialized in imagenet_verify)
sampling_budget: The sampling limit for each stepwise splitting. (Initialized in imagenet_verify)
init_grid: The Grid for Imagenet Data (224*224)
Functions:
grid_batch_sample: Grid-based Sampling for Scenario Optimization (Untargeted)
scenario_optimization: Main Verification Function (Focused Learning, Stepwise-Splitting)
imagenet_verify: Entry Function
'''
pretrans = transforms.Compose([transforms.Resize((224, 224)),
transforms.ToTensor(),
])
mean = (0.485, 0.456, 0.406)
stdvar = (0.229, 0.224, 0.225)
normalization_trans = transforms.Normalize(mean, stdvar)
sampling_budget = 20000
delta = 4/255
error = 1e-2
significance = 1e-3
Batchsize = 200
device = 'cuda'
init_grid = [Grid(0, 0, 224, 224)]
PATH = './models/imagenet_linf_4.pth'
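# Hedged usage sketch (not part of the original module): how the transforms above would
# typically be combined to turn an image file into a normalized batch for the network.
# The file path and function name are illustrative only.
def load_normalized_image(path):
    img = Image.open(path).convert('RGB')
    x = pretrans(img)                 # resize to 224x224 and convert to a [0, 1] tensor
    x = normalization_trans(x)        # channel-wise (x - mean) / stdvar
    return x.unsqueeze(0).to(device)  # add a batch dimension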
| 43.685259
| 188
| 0.639216
|
322854f1b6ad1bef2a63f035b0bf9ea507c22498
| 5,537
|
py
|
Python
|
src/main.py
|
ronikleyton/script-backup-switch-huawei
|
80c990afa3561c350823cb96e25174262d8d4ab1
|
[
"MIT"
] | null | null | null |
src/main.py
|
ronikleyton/script-backup-switch-huawei
|
80c990afa3561c350823cb96e25174262d8d4ab1
|
[
"MIT"
] | null | null | null |
src/main.py
|
ronikleyton/script-backup-switch-huawei
|
80c990afa3561c350823cb96e25174262d8d4ab1
|
[
"MIT"
] | null | null | null |
from telnetlib import Telnet
from exception.exceptions import *
from datetime import date
import time
import os
from dotenv import load_dotenv
import json
load_dotenv()
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
f = open(f'{ROOT_DIR}/equipamentos.json')
equipamentos = json.load(f)['equipamentos']
for switch in equipamentos:
try:
USER = os.environ.get('USER')
PASS = os.environ.get('PASS')
PORT_TELNET = os.environ.get('PORT_TELNET')
print(f"Iniciando Backup no Switch {switch['hostname']}")
equipamento = Equipamento(switch['hostname'],switch['ip'],PORT_TELNET,USER,PASS)
main(equipamento)
except:
pass
| 35.722581
| 106
| 0.641683
|
3228d6088055f54b7b82121a3d3e109e936942b3
| 1,623
|
py
|
Python
|
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | 5
|
2015-05-18T13:18:26.000Z
|
2020-01-14T08:24:08.000Z
|
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
PROJECT = 'musubi'
VERSION = '0.2'
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = 'Uh oh, we may need a new hard drive.'
setup(
name=PROJECT,
version=VERSION,
description='Musubi is a command-line DNSBL checker and MX toolkit.',
long_description=long_description,
author='Rob Cakebread',
author_email='cakebread@gmail.com',
url='https://github.com/cakebread/musubi',
download_url='https://github.com/cakebread/musubi/tarball/master',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Environment :: Console',
],
platforms=['Any'],
scripts=[],
provides=[],
install_requires=['requests', 'dnspython', 'IPy', 'distribute',
'cliff', 'cliff-tablib', 'gevent', 'greenlet'],
namespace_packages=[],
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'musubi = musubi.main:main'
],
'musubi.cli': [
'ips = musubi.ips:GetIPs',
'mx = musubi.mx:GetMX',
'spf = musubi.spf:GetSPF',
'scan = musubi.scan:Scan',
],
},
zip_safe=False,
)
| 29.509091
| 73
| 0.590265
|
3229164df79c432f6f7ad72e86350bc6d3ce6e18
| 1,048
|
py
|
Python
|
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | null | null | null |
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | 2
|
2021-05-21T09:09:23.000Z
|
2021-06-05T08:13:40.000Z
|
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
import click
from datetime import date
if __name__ == '__main__':
preprocess()
| 29.942857
| 101
| 0.621183
|
32298c15e29bc9b924d33fac9a984d4c8170430a
| 581
|
py
|
Python
|
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | 1
|
2021-02-15T19:14:44.000Z
|
2021-02-15T19:14:44.000Z
|
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | null | null | null |
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | null | null | null |
from time import sleep
from sys import stdout
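# Hedged sketch: the original main() body was not included in this record. Based on the
# imports and the file name ("barra-de-progresso" = progress bar), a simple text
# progress bar is assumed here.
def main():
    for pct in range(0, 101, 5):
        stdout.write('\rProgresso: [{:<20}] {:3d}%'.format('#' * (pct // 5), pct))
        stdout.flush()
        sleep(0.2)
    stdout.write('\n')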
if __name__ == "__main__":
main()
| 17.088235
| 50
| 0.504303
|
3229bb9f7088946e3efcc3fcbb6cba8d90bd5930
| 4,329
|
py
|
Python
|
models/show.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | null | null | null |
models/show.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | null | null | null |
models/show.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | 2
|
2020-07-16T22:02:13.000Z
|
2020-11-22T21:16:28.000Z
|
from datetime import datetime
from sqlalchemy import or_
from app import db
from .mixin import ModelMixin
| 30.921429
| 74
| 0.495033
|
322b0d39d0e86bb9ee65efcc180b2518cde85315
| 2,141
|
py
|
Python
|
backend/sponsors/migrations/0001_initial.py
|
marcoacierno/pycon
|
2b7b47598c4929769cc73e322b3fce2c89151e21
|
[
"MIT"
] | 56
|
2018-01-20T17:18:40.000Z
|
2022-03-28T22:42:04.000Z
|
backend/sponsors/migrations/0001_initial.py
|
marcoacierno/pycon
|
2b7b47598c4929769cc73e322b3fce2c89151e21
|
[
"MIT"
] | 2,029
|
2018-01-20T11:37:24.000Z
|
2022-03-31T04:10:51.000Z
|
backend/sponsors/migrations/0001_initial.py
|
marcoacierno/pycon
|
2b7b47598c4929769cc73e322b3fce2c89151e21
|
[
"MIT"
] | 17
|
2018-03-17T09:44:28.000Z
|
2021-12-27T19:57:35.000Z
|
# Generated by Django 2.2.4 on 2019-08-30 21:56
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
| 44.604167
| 182
| 0.611397
|
322bb384475b3968baa795c394e1297ef1e165d8
| 156
|
py
|
Python
|
__init__.py
|
sacherjj/python-AlienRFID
|
aaddd846d46cca533dca43c256890c072e8f5ec5
|
[
"MIT"
] | 1
|
2021-03-21T13:52:00.000Z
|
2021-03-21T13:52:00.000Z
|
__init__.py
|
sacherjj/python-AlienRFID
|
aaddd846d46cca533dca43c256890c072e8f5ec5
|
[
"MIT"
] | null | null | null |
__init__.py
|
sacherjj/python-AlienRFID
|
aaddd846d46cca533dca43c256890c072e8f5ec5
|
[
"MIT"
] | 2
|
2015-10-12T10:02:50.000Z
|
2020-03-09T13:30:12.000Z
|
from .alien_config import AlienConfig
from .alien_connection import AlienConnection
from .alien_tag import AlienTag
from .alien_tag_list import AlienTagList
| 39
| 45
| 0.878205
|
322bb4e6bc6b91b44404b73d00ac6be4830c39c7
| 658
|
py
|
Python
|
01_Hello_PGP/solution.py
|
3-24/id0-rsa.pub
|
633e974a330d0dc09d37e423168974b7fba69830
|
[
"MIT"
] | 1
|
2020-03-29T16:10:54.000Z
|
2020-03-29T16:10:54.000Z
|
01_Hello_PGP/solution.py
|
3-24/id0-rsa.pub
|
633e974a330d0dc09d37e423168974b7fba69830
|
[
"MIT"
] | null | null | null |
01_Hello_PGP/solution.py
|
3-24/id0-rsa.pub
|
633e974a330d0dc09d37e423168974b7fba69830
|
[
"MIT"
] | null | null | null |
from subprocess import run, PIPE
main()
| 24.37037
| 124
| 0.575988
|
322c0212f8148c0b38508aaf2672d99f9c4007b4
| 8,524
|
py
|
Python
|
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
import sys as _sys
import pandas as _pd
from apodeixi.testing_framework.a6i_unit_test import ApodeixiUnitTest
from apodeixi.util.formatting_utils import DictionaryFormatter
from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace
from apodeixi.text_layout.column_layout import ColumnWidthCalculator
if __name__ == "__main__":
# execute only if run as a script
main(_sys.argv)
| 51.660606
| 141
| 0.530737
|
322c5954da97025867a532a5c2f025836a221df3
| 944
|
py
|
Python
|
evolute/operators/mate.py
|
ysglh/evolute
|
ea868e5d04e6bb59760a9b6dec709303637b9f10
|
[
"MIT"
] | 174
|
2018-08-15T21:48:30.000Z
|
2022-03-13T01:34:48.000Z
|
evolute/operators/mate.py
|
ysglh/evolute
|
ea868e5d04e6bb59760a9b6dec709303637b9f10
|
[
"MIT"
] | null | null | null |
evolute/operators/mate.py
|
ysglh/evolute
|
ea868e5d04e6bb59760a9b6dec709303637b9f10
|
[
"MIT"
] | 27
|
2018-05-16T16:25:36.000Z
|
2021-11-02T20:51:38.000Z
|
import numpy as np
DefaultMate = RandomPickMate
| 20.977778
| 80
| 0.635593
|
322da51e0820f1bb72e55d0a9cb187b9bcde3c32
| 223
|
py
|
Python
|
LandingPage/forms.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
LandingPage/forms.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
LandingPage/forms.py
|
Mihai925/EduCoding-Legacy
|
7c6de105deb186c3442f8d7f9f1b9f99708f8fb6
|
[
"MIT"
] | null | null | null |
__author__ = 'varun'
from django import forms
| 22.3
| 52
| 0.713004
|
322e21d79121fc682dbbeaf19bfb0822ed607a7a
| 4,236
|
py
|
Python
|
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
Administration operations for the geo db.
"""
import os
import socket
import time
from pru.db.geo.geo_init import load_airspace, remove_all_sectors, tear_down
from pru.db.geo.geo_init import load_airports, remove_all_airports
from pru.db.geo.geo_init import load_user_airspace, remove_all_user_defined_sectors
from pru.db.common_init import create as create_db, DB_TYPE_GEO
from pru.db.geo.geo_init import create as create_geo_db
from pru.logger import logger
import pru.db.context as ctx
log = logger(__name__)
def remove_geo_db():
"""
Remove the db
"""
remove_all_sectors()
remove_all_airports()
remove_all_user_defined_sectors()
tear_down()
def create_geo_database():
"""
Create a geo db.
"""
log.info("Starting to create the geo db")
log.info("Waiting for the database to be ready")
log.info(f"Testing connection on host: {ctx.geo_db_hostname} and port {ctx.geo_db_port}")
    # We need to sleep and retry until the db wakes up
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
s.connect((ctx.geo_db_hostname, int(ctx.geo_db_port)))
s.close()
break
except socket.error as ex:
log.debug("Database not ready..")
time.sleep(5) # 5 seconds between tests
log.info("Geo database is now ready.")
if create_db(DB_TYPE_GEO):
if create_geo_db():
log.info("Geo database creation is complete.")
return True
else:
log.info("Failed to make the airspace db, could not create the tables.")
else:
log.info("Failed to make the airspace db, could not create the database.")
def initialise_airspace(sector_file_path, reset=False):
"""
Uses the provided file path to load the sectors file,
may be csv or geojson.
If no sectors file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these sectors to the sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(sector_file_path):
if reset:
remove_all_sectors()
load_airspace(sector_file_path, context, connection)
return True
else:
return (False, "Path not found " + sector_file_path)
def initialise_airports(airports_file_path, reset=False):
"""
Uses the provided file path to load an airports file,
must be csv.
If no airports file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these airports to the sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(airports_file_path):
if reset:
remove_all_airports()
load_airports(airports_file_path, context, connection)
return True
else:
return (False, "Path not found " + airports_file_path)
def initialise_user_airspace(user_sector_file_path, reset=False):
"""
Uses the provided file path to load the users sectors file,
may be csv or geojson.
If no sectors file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these sectors to the user sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(user_sector_file_path):
if reset:
remove_all_user_defined_sectors()
load_user_airspace(user_sector_file_path, context, connection)
return True
else:
return (False, "Path not found " + user_sector_file_path)
| 31.377778
| 93
| 0.674929
|
322f9af92fcd6688ac16683be314d7931fa1f2eb
| 4,040
|
py
|
Python
|
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | 1
|
2021-07-02T11:59:07.000Z
|
2021-07-02T11:59:07.000Z
|
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | null | null | null |
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | 1
|
2022-01-14T20:18:35.000Z
|
2022-01-14T20:18:35.000Z
|
from typing import List, Tuple
import easymunk as a
from easymunk import BB, Vec2d
| 28.652482
| 87
| 0.366832
|
32306c14bb390e41af15482d3244081bad57ece0
| 13,144
|
py
|
Python
|
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
# -*- coding: utf-8 -*-
import cffi
import ctypes
import numpy as np
import pandas as pd
from darshan.api_def_c import load_darshan_header
from darshan.discover_darshan import find_utils
from darshan.discover_darshan import check_version
API_def_c = load_darshan_header()
ffi = cffi.FFI()
ffi.cdef(API_def_c)
libdutil = None
libdutil = find_utils(ffi, libdutil)
def log_open(filename):
"""
Opens a darshan logfile.
Args:
filename (str): Path to a darshan log file
Return:
log handle
"""
b_fname = filename.encode()
handle = libdutil.darshan_log_open(b_fname)
log = {"handle": handle, 'modules': None, 'name_records': None}
return log
def log_close(log):
"""
Closes the logfile and releases allocated memory.
"""
libdutil.darshan_log_close(log['handle'])
#modules = {}
return
def log_get_job(log):
"""
Returns a dictionary with information about the current job.
"""
job = {}
jobrec = ffi.new("struct darshan_job *")
libdutil.darshan_log_get_job(log['handle'], jobrec)
job['uid'] = jobrec[0].uid
job['start_time'] = jobrec[0].start_time
job['end_time'] = jobrec[0].end_time
job['nprocs'] = jobrec[0].nprocs
job['jobid'] = jobrec[0].jobid
mstr = ffi.string(jobrec[0].metadata).decode("utf-8")
md = {}
for kv in mstr.split('\n')[:-1]:
k,v = kv.split('=', maxsplit=1)
md[k] = v
job['metadata'] = md
return job
def log_get_exe(log):
"""
Get details about the executable (path and arguments)
Args:
log: handle returned by darshan.open
Return:
        string: executable path and arguments
"""
exestr = ffi.new("char[]", 4096)
libdutil.darshan_log_get_exe(log['handle'], exestr)
return ffi.string(exestr).decode("utf-8")
def log_get_mounts(log):
"""
Returns a list of available mounts recorded for the log.
Args:
log: handle returned by darshan.open
"""
mntlst = []
mnts = ffi.new("struct darshan_mnt_info **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_mounts(log['handle'], mnts, cnt)
for i in range(0, cnt[0]):
mntlst.append((ffi.string(mnts[0][i].mnt_path).decode("utf-8"),
ffi.string(mnts[0][i].mnt_type).decode("utf-8")))
return mntlst
def log_get_modules(log):
"""
Return a dictionary containing available modules including information
about the contents available for each module in the current log.
Args:
log: handle returned by darshan.open
Return:
dict: Modules with additional info for current log.
"""
# use cached module index if already present
if log['modules'] != None:
return log['modules']
modules = {}
mods = ffi.new("struct darshan_mod_info **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_modules(log['handle'], mods, cnt)
for i in range(0, cnt[0]):
modules[ffi.string(mods[0][i].name).decode("utf-8")] = \
{'len': mods[0][i].len, 'ver': mods[0][i].ver, 'idx': mods[0][i].idx}
# add to cache
log['modules'] = modules
return modules
def log_get_name_records(log):
"""
    Return a dictionary resolving hash to string (typically a filepath).
Args:
log: handle returned by darshan.open
hash: hash-value (a number)
Return:
dict: the name records
"""
    # use cached name_records if already present
if log['name_records'] != None:
return log['name_records']
name_records = {}
nrecs = ffi.new("struct darshan_name_record **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_name_records(log['handle'], nrecs, cnt)
for i in range(0, cnt[0]):
name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
# add to cache
log['name_records'] = name_records
return name_records
def log_lookup_name_records(log, ids=[]):
"""
    Resolve a list of hashes to their name record strings (typically filepaths).
Args:
log: handle returned by darshan.open
hash: hash-value (a number)
Return:
dict: the name records
"""
name_records = {}
#cids = ffi.new("darshan_record_id *") * len(ids)
whitelist = (ctypes.c_ulonglong * len(ids))(*ids)
whitelist_cnt = len(ids)
whitelistp = ffi.from_buffer(whitelist)
nrecs = ffi.new("struct darshan_name_record **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_filtered_name_records(log['handle'], nrecs, cnt, ffi.cast("darshan_record_id *", whitelistp), whitelist_cnt)
for i in range(0, cnt[0]):
name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
# add to cache
log['name_records'] = name_records
return name_records
def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='dict'):
"""
Returns a dictionary holding a dxt darshan log record.
Args:
log: Handle returned by darshan.open
mod_name (str): Name of the Darshan module
mod_type (str): String containing the C type
Return:
dict: generic log record
Example:
        The typical darshan log record provides two arrays, one for integer counters
and one for floating point counters:
>>> darshan.log_get_dxt_record(log, "DXT_POSIX", "struct dxt_file_record **")
{'rank': 0, 'read_count': 11, 'read_segments': array([...]), ...}
"""
modules = log_get_modules(log)
#name_records = log_get_name_records(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
if r < 1:
return None
filerec = ffi.cast(mod_type, buf)
clst = []
rec['id'] = filerec[0].base_rec.id
rec['rank'] = filerec[0].base_rec.rank
rec['hostname'] = ffi.string(filerec[0].hostname).decode("utf-8")
#rec['filename'] = name_records[rec['id']]
wcnt = filerec[0].write_count
rcnt = filerec[0].read_count
rec['write_count'] = wcnt
rec['read_count'] = rcnt
rec['write_segments'] = []
rec['read_segments'] = []
size_of = ffi.sizeof("struct dxt_file_record")
segments = ffi.cast("struct segment_info *", buf[0] + size_of )
for i in range(wcnt):
seg = {
"offset": segments[i].offset,
"length": segments[i].length,
"start_time": segments[i].start_time,
"end_time": segments[i].end_time
}
rec['write_segments'].append(seg)
for i in range(rcnt):
i = i + wcnt
seg = {
"offset": segments[i].offset,
"length": segments[i].length,
"start_time": segments[i].start_time,
"end_time": segments[i].end_time
}
rec['read_segments'].append(seg)
if mode == "pandas":
rec['read_segments'] = pd.DataFrame(rec['read_segments'])
rec['write_segments'] = pd.DataFrame(rec['write_segments'])
return rec
def log_get_generic_record(log, mod_name, mod_type, mode='numpy'):
"""
Returns a dictionary holding a generic darshan log record.
Args:
log: Handle returned by darshan.open
mod_name (str): Name of the Darshan module
mod_type (str): String containing the C type
Return:
dict: generic log record
Example:
        The typical darshan log record provides two arrays, one for integer counters
and one for floating point counters:
>>> darshan.log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
{'counters': array([...], dtype=int64), 'fcounters': array([...])}
"""
modules = log_get_modules(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
if r < 1:
return None
rbuf = ffi.cast(mod_type, buf)
rec['id'] = rbuf[0].base_rec.id
rec['rank'] = rbuf[0].base_rec.rank
clst = []
for i in range(0, len(rbuf[0].counters)):
clst.append(rbuf[0].counters[i])
rec['counters'] = np.array(clst, dtype=np.int64)
cdict = dict(zip(counter_names(mod_name), rec['counters']))
flst = []
for i in range(0, len(rbuf[0].fcounters)):
flst.append(rbuf[0].fcounters[i])
rec['fcounters'] = np.array(flst, dtype=np.float64)
fcdict = dict(zip(fcounter_names(mod_name), rec['fcounters']))
if mode == "dict":
rec = {'counters': cdict, 'fcounter': fcdict}
if mode == "pandas":
rec = {
'counters': pd.DataFrame(cdict, index=[0]),
'fcounters': pd.DataFrame(fcdict, index=[0])
}
return rec
def counter_names(mod_name, fcnts=False):
"""
Returns a list of available counter names for the module.
By default only integer counter names are listed, unless fcnts is set to
true in which case only the floating point counter names are listed.
Args:
mod_name (str): Name of the module to return counter names.
fcnts (bool): Switch to request floating point counters instead of integer. (Default: False)
Return:
list: Counter names as strings.
"""
if mod_name == 'MPI-IO':
mod_name = 'MPIIO'
names = []
i = 0
if fcnts:
F = "f_"
else:
F = ""
end = "{0}_{1}NUM_INDICES".format(mod_name.upper(), F.upper())
var_name = "{0}_{1}counter_names".format(mod_name.lower(), F.lower())
while True:
try:
var = getattr(libdutil, var_name)
except:
var = None
if not var:
return None
name = ffi.string(var[i]).decode("utf-8")
if name == end:
break
names.append(name)
i += 1
return names
def fcounter_names(mod_name):
"""
Returns a list of available floating point counter names for the module.
Args:
mod_name (str): Name of the module to return counter names.
Return:
        list: Available floating point counter names as strings.
"""
return counter_names(mod_name, fcnts=True)
def log_get_bgq_record(log):
"""
Returns a darshan log record for BG/Q.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "BG/Q", "struct darshan_bgq_record **")
def log_get_hdf5_file_record(log):
"""
Returns a darshan log record for an HDF5 file.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "H5F", "struct darshan_hdf5_file **")
def log_get_hdf5_dataset_record(log):
"""
Returns a darshan log record for an HDF5 dataset.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "H5D", "struct darshan_hdf5_dataset **")
def log_get_lustre_record(log):
"""
Returns a darshan log record for Lustre.
Args:
log: handle returned by darshan.open
"""
modules = log_get_modules(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules['LUSTRE']['idx'], buf)
if r < 1:
return None
rbuf = ffi.cast("struct darshan_lustre_record **", buf)
rec['id'] = rbuf[0].base_rec.id
rec['rank'] = rbuf[0].base_rec.rank
clst = []
for i in range(0, len(rbuf[0].counters)):
clst.append(rbuf[0].counters[i])
rec['counters'] = np.array(clst, dtype=np.int64)
cdict = dict(zip(counter_names('LUSTRE'), rec['counters']))
# FIXME
ostlst = []
for i in range(0, cdict['LUSTRE_STRIPE_WIDTH']):
print(rbuf[0].ost_ids[i])
rec['ost_ids'] = np.array(ostlst, dtype=np.int64)
print(rec['ost_ids'])
sys.exit()
if mode == "dict":
rec = {'counters': cdict, 'fcounter': fcdict}
if mode == "pandas":
rec = {
'counters': pd.DataFrame(cdict, index=[0]),
'fcounters': pd.DataFrame(fcdict, index=[0])
}
return rec
def log_get_mpiio_record(log):
"""
Returns a darshan log record for MPI-IO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "MPI-IO", "struct darshan_mpiio_file **")
def log_get_pnetcdf_record(log):
"""
Returns a darshan log record for PnetCDF.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "PNETCDF", "struct darshan_pnetcdf_file **")
def log_get_posix_record(log):
"""
    Returns a darshan log record for POSIX.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
def log_get_stdio_record(log):
"""
Returns a darshan log record for STDIO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **")
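# Hedged usage sketch (not part of the original module): typical flow for reading a
# Darshan log with the functions above. The log path is illustrative only.
if __name__ == "__main__":
    log = log_open("example.darshan")
    print(log_get_job(log))          # job metadata (uid, nprocs, start/end times, ...)
    print(log_get_modules(log))      # available modules with version and index
    rec = log_get_posix_record(log)  # first POSIX record (counters as numpy arrays)
    if rec is not None:
        print(dict(zip(counter_names("POSIX"), rec["counters"])))
    log_close(log)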
| 24.295749
| 137
| 0.614197
|
32316e929a4d5ae59c28e0cfefeaa04b18e91623
| 1,017
|
py
|
Python
|
authApp/views/userDetailView.py
|
juan-skill/django_vue_bank
|
109f3b84086f4520a5220c311d9d3403a7adc3a2
|
[
"MIT"
] | null | null | null |
authApp/views/userDetailView.py
|
juan-skill/django_vue_bank
|
109f3b84086f4520a5220c311d9d3403a7adc3a2
|
[
"MIT"
] | null | null | null |
authApp/views/userDetailView.py
|
juan-skill/django_vue_bank
|
109f3b84086f4520a5220c311d9d3403a7adc3a2
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework_simplejwt.backends import TokenBackend
from rest_framework.permissions import IsAuthenticated
from authApp.models.user import User
from authApp.serializers.userSerializer import UserSerializer
| 39.115385
| 80
| 0.738446
|
32363a369f2abd8123a3c352cf5267f2cd8f6e3e
| 882
|
py
|
Python
|
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
import time
if __name__ == "__main__":
main()
| 25.941176
| 73
| 0.538549
|
32364b003eb60db5ffb76e4251c347561207ed8b
| 1,397
|
py
|
Python
|
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | 8
|
2021-04-08T21:57:55.000Z
|
2022-03-12T00:50:38.000Z
|
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from .models import PhotoAlbum, VideoAlbum
from blog.utils import get_pagination_page
| 32.488372
| 105
| 0.689334
|
3236d1e8e71e93e12b492398d92736947474b9fb
| 2,134
|
py
|
Python
|
test/test_post.py
|
enjoy233/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 1,321
|
2015-02-16T13:19:42.000Z
|
2022-03-25T15:03:58.000Z
|
test/test_post.py
|
fru1tw4ter/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 64
|
2015-07-03T12:30:08.000Z
|
2022-03-01T00:55:50.000Z
|
test/test_post.py
|
fru1tw4ter/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 551
|
2015-02-22T11:21:40.000Z
|
2022-03-25T13:22:13.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import unittest
import os
import json
from zhihu import Post
from test_utils import TEST_DATA_PATH
| 34.419355
| 77
| 0.638238
|
32370b765a15f6662dcf75810cbf2bc84feab958
| 69
|
py
|
Python
|
tensorflow_toolkit/lpr/lpr/__init__.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 256
|
2020-09-09T03:27:57.000Z
|
2022-03-30T10:06:06.000Z
|
tensorflow_toolkit/lpr/lpr/__init__.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 604
|
2020-09-08T12:29:49.000Z
|
2022-03-31T21:51:08.000Z
|
tensorflow_toolkit/lpr/lpr/__init__.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 160
|
2020-09-09T14:06:07.000Z
|
2022-03-30T14:50:48.000Z
|
from tfutils.helpers import import_transformer
import_transformer()
| 17.25
| 46
| 0.869565
|
3239f81ec2f0770c90334bbc02e94fc7a5de13e9
| 354
|
py
|
Python
|
torch_lazy/nn/__init__.py
|
simaki/torch-lazy
|
e3ce23b118bdf36a019c029a67bf5ec84f89a4d7
|
[
"BSD-3-Clause"
] | null | null | null |
torch_lazy/nn/__init__.py
|
simaki/torch-lazy
|
e3ce23b118bdf36a019c029a67bf5ec84f89a4d7
|
[
"BSD-3-Clause"
] | 18
|
2021-04-01T08:24:48.000Z
|
2022-03-28T20:18:28.000Z
|
torch_lazy/nn/__init__.py
|
simaki/torch-lazy
|
e3ce23b118bdf36a019c029a67bf5ec84f89a4d7
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T19:29:12.000Z
|
2021-07-22T19:29:12.000Z
|
from .modules.linear import LazyBilinear
from .modules.mlp import MLP
from .modules.mlp import LazyMLP
from .modules.normalization import LazyBatchNorm
from .modules.normalization import LazyBatchNorm1d
from .modules.normalization import LazyBatchNorm2d
from .modules.normalization import LazyBatchNorm3d
from .modules.normalization import LazyLayerNorm
| 39.333333
| 50
| 0.864407
|
323a9cf1657540b38e66a69c1561146bd14bceb9
| 874
|
py
|
Python
|
functest/lmfunctest.py
|
mitsuba-rei/lightmetrica-v3
|
db5b7d5a9a245fb7c0d25124433c38d09b62813e
|
[
"MIT"
] | 1
|
2019-11-20T13:24:58.000Z
|
2019-11-20T13:24:58.000Z
|
functest/lmfunctest.py
|
mitsuba-rei/lightmetrica-v3
|
db5b7d5a9a245fb7c0d25124433c38d09b62813e
|
[
"MIT"
] | null | null | null |
functest/lmfunctest.py
|
mitsuba-rei/lightmetrica-v3
|
db5b7d5a9a245fb7c0d25124433c38d09b62813e
|
[
"MIT"
] | null | null | null |
import sys
import json
import numpy as np
import imageio
from argparse import Namespace
def loadenv(config_path):
"""Load configuration file of Lightmetrica environment"""
# Open config file
with open(config_path) as f:
config = json.load(f)
# Add root directory and binary directory to sys.path
if config['path'] not in sys.path:
sys.path.insert(0, config['path'])
if config['bin_path'] not in sys.path:
sys.path.insert(0, config['bin_path'])
return Namespace(**config)
# Environment configuration
env = loadenv('.lmenv')
def save(path, img):
"""Save image"""
imageio.imwrite(path, np.clip(np.power(img, 1/2.2) * 256, 0, 255).astype(np.uint8))
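# Illustrative sketch (not part of the original file): a tiny demonstration of
# save() on a synthetic float image in [0, 1]; the shape and file name are
# arbitrary assumptions.
def demo_save(output_path='demo.png'):
    img = np.linspace(0.0, 1.0, 64 * 64 * 3).reshape(64, 64, 3)
    save(output_path, img)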
| 26.484848
| 87
| 0.662471
|
323ae527f5aea6328f8ca830f729b3e6114a8c51
| 503
|
py
|
Python
|
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | 2
|
2021-06-01T02:31:06.000Z
|
2021-06-01T02:39:45.000Z
|
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | null | null | null |
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | null | null | null |
s1 = getArray()
s2 = getArray()
s = merge(s1, s2)
output(s)
| 13.236842
| 48
| 0.508946
|
323b7d2cb5ec3fee745d90ccfecbe50bdd67fcc2
| 1,276
|
py
|
Python
|
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
import csv
import json
from pprint import pprint
import os
stockData = ['RIO']
for i in range(0,len(stockData)):
csvfile = open(stockData[i]+'.csv', 'r')
fieldnames = ("NetworkTime","StockID","Open","High", "Low", "Close", "Adj Close", "Volume")
reader = csv.DictReader( csvfile, fieldnames)
data = open(stockData[i]+'.json', 'w')
data.write('[\n')
for row in reader:
data.write('{ \n' \
+ '"MoteTimestamp": "%s",' %row['NetworkTime'] \
+ '\n"MoteID": %s,' %row['StockID'] \
+ '\n "StockData":{' \
+ '\n "OpenPrice": %s,' %row['Open'] \
+ '\n "HighPrice": %s,' %row['High'] \
+ '\n "LowPrice": %s,' %row['Low'] \
+ '\n "ClosePrice": %s,' %row['Close'] \
+ '\n "Adj Close": %s,' %row['Adj Close'] \
+ '\n "VolumeNumber": %s' %row['Volume'] \
+ '\n }' \
+ '\n},\n'
)
data.close()
with open(stockData[i]+'.json', 'rb+') as filehandle:
filehandle.seek(-3, os.SEEK_END)
filehandle.truncate()
filehandle.close()
with open(stockData[i]+'.json', 'a') as filehandle:
filehandle.write("\n]")
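# Hedged alternative sketch (not part of the original script): the same CSV-to-JSON
# conversion expressed with json.dump, which avoids hand-built strings and the
# seek/truncate step for the trailing comma. Note that numeric fields stay as
# strings here, unlike the unquoted values the original writes.
def csv_to_json(symbol):
    fields = ("NetworkTime", "StockID", "Open", "High", "Low", "Close", "Adj Close", "Volume")
    with open(symbol + '.csv', 'r') as csvfile:
        reader = csv.DictReader(csvfile, fields)
        records = [{
            "MoteTimestamp": row['NetworkTime'],
            "MoteID": row['StockID'],
            "StockData": {
                "OpenPrice": row['Open'],
                "HighPrice": row['High'],
                "LowPrice": row['Low'],
                "ClosePrice": row['Close'],
                "Adj Close": row['Adj Close'],
                "VolumeNumber": row['Volume'],
            },
        } for row in reader]
    with open(symbol + '.json', 'w') as out:
        json.dump(records, out, indent=2)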
| 29.674419
| 95
| 0.462382
|
323d0642bd0b2e71b6ea4028021ab212c0e0889f
| 700
|
py
|
Python
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 13
|
2021-04-13T14:27:58.000Z
|
2022-02-09T18:32:37.000Z
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 3
|
2021-06-03T20:27:34.000Z
|
2021-06-04T06:24:18.000Z
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 2
|
2021-06-03T20:29:54.000Z
|
2021-08-13T22:28:59.000Z
|
import requests
| 24.137931
| 95
| 0.547143
|
323d1294966a8fc8cdc72a192c1cd2b6b80bbc84
| 1,431
|
py
|
Python
|
lib/tools/tools_watch_cub.py
|
galena503/SCR
|
d5b6581808b4f2fac775e7ff48b3eef548164ca1
|
[
"MIT"
] | null | null | null |
lib/tools/tools_watch_cub.py
|
galena503/SCR
|
d5b6581808b4f2fac775e7ff48b3eef548164ca1
|
[
"MIT"
] | null | null | null |
lib/tools/tools_watch_cub.py
|
galena503/SCR
|
d5b6581808b4f2fac775e7ff48b3eef548164ca1
|
[
"MIT"
] | null | null | null |
import time,sys,os
import subprocess
st = 0
cmd = ['tasklist','/fo','csv']
subs = set('')
# win
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
while True:
if st == 0:
st = 1
time.sleep(1)
#for sub_pid in subs:
# ps_line = line.split(',').replace('\"','')
# if str(sub_pid) == :
# print(str(sub_pid) + '')
#print(os.getpid())
# log = popen_obj.returncode
#print(log)
#print(type(popen_obj.communicate()))
#print(popen_obj.communicate())
| 23.459016
| 81
| 0.533892
|
323de0cd069365ae5cc57c4534ae993e3a17cc39
| 7,616
|
py
|
Python
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
vkuznet/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 8
|
2015-08-14T04:01:32.000Z
|
2021-06-03T00:56:42.000Z
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 162
|
2015-01-07T21:34:47.000Z
|
2021-10-13T09:42:41.000Z
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 16
|
2015-01-22T15:27:29.000Z
|
2021-04-28T09:23:28.000Z
|
#!/usr/bin/env python
"""
DBS 3 Migrate REST model unittests
The DBS3 Migration Service must be stopped before executing the unittest. In addition, take care
that no instance is running on the same DB. Else the single unittests can happen to fail due to
race conditions with DBS3 Migration Service.
"""
from dbsserver_t.utils.DBSRestApi import DBSRestApi
from dbsserver_t.utils.DBSDataProvider import DBSBlockDataProvider, create_child_data_provider
from dbsserver_t.utils.TestTools import expectedFailure
from itertools import chain
import os
import socket
import unittest
if __name__ == "__main__":
SUITE = unittest.TestLoader().loadTestsFromTestCase(DBSMigrateModel_t)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| 55.591241
| 125
| 0.689207
|
323e018247ff04ecd6fd2937c2a4145cd45afc55
| 844
|
py
|
Python
|
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# from distutils.core import setup
# import py2exe
# import sys
import os
del os.link
# sys.setrecursionlimit(5000)
def readme():
    # Assumed helper (not present in this snippet): setup() below passes readme()
    # as long_description, so a README reader is sketched here; the file name is
    # an assumption.
    with open('README.md') as f:
        return f.read()
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(name='varta-chat',
version='1.0',
description='Audio Chat framework',
long_description=readme(),
url='https://github.com/sgang007/audio_chat_client',
author='Shubhojyoti Ganguly',
author_email='shubho.important@gmail.com',
license='MIT',
packages=find_packages(),
install_requires=required,
entry_points={
'console_scripts': [
'varta = client.__main__:key_listener',
]
},
include_package_data=True,
zip_safe=True)
| 23.444444
| 58
| 0.64455
|
323e28eb5aa06c996913613c2bfc7c17a0e85d7c
| 2,334
|
py
|
Python
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 13
|
2018-09-25T13:29:08.000Z
|
2018-12-10T11:04:38.000Z
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 23
|
2018-09-17T20:31:44.000Z
|
2018-12-14T11:21:52.000Z
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 1
|
2018-09-25T15:56:32.000Z
|
2018-09-25T15:56:32.000Z
|
#
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import unittest
from kglib.kgcn_tensorflow.examples.diagnosis.diagnosis import diagnosis_example
if __name__ == "__main__":
# This handles the fact that additional arguments are supplied by our py_test definition
# https://stackoverflow.com/a/38012249
unittest.main(argv=['ignored-arg'])
| 42.436364
| 163
| 0.707798
|
324140adbf8ce6a27b7f51c371562021ff506dae
| 1,668
|
py
|
Python
|
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | 1
|
2021-06-16T02:33:57.000Z
|
2021-06-16T02:33:57.000Z
|
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | null | null | null |
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | null | null | null |
# copy of Python 3.5 implementation - probably not needed
def gcd(a, b):
"""Greatest common divisor"""
return _gcd_internal(abs(a), abs(b))
def _gcd_internal(a, b):
"""Greatest common divisor internal"""
# Impl. notes: Euclidean algorithm, both a and b are non-negative
# There exists faster algorithm (which uses division by 2, which is faster)
# -> Stein's algorithm https://en.wikipedia.org/wiki/Binary_GCD_algorithm
# print a, b
if a == b:
return a
if b == 1:
return 1
if a == 0 or b == 0:
return max(a, b)
return gcd(b, a % b)
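# Hedged sketch (not part of the original file) of the faster alternative the
# comment above refers to: Stein's binary GCD, which replaces modulo with
# shifts and subtraction.
def gcd_stein(a, b):
    """Greatest common divisor via Stein's (binary GCD) algorithm."""
    a, b = abs(a), abs(b)
    if a == 0:
        return b
    if b == 0:
        return a
    shift = 0
    while (a | b) & 1 == 0:  # both even: factor out a shared power of two
        a >>= 1
        b >>= 1
        shift += 1
    while a & 1 == 0:  # make a odd
        a >>= 1
    while b != 0:
        while b & 1 == 0:
            b >>= 1
        if a > b:
            a, b = b, a
        b -= a
    return a << shift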
| 32.076923
| 93
| 0.607914
|
32426c09b1bd20f92239fee3f6494dab7ae72789
| 2,477
|
py
|
Python
|
BASS_2_OM_testOnSyntheticData.py
|
oliviermirat/BASS
|
fe595fdc60795b09bb6c264b6da914a6e8e0c415
|
[
"MIT"
] | 1
|
2020-10-10T11:20:32.000Z
|
2020-10-10T11:20:32.000Z
|
BASS_2_OM_testOnSyntheticData.py
|
oliviermirat/BASS
|
fe595fdc60795b09bb6c264b6da914a6e8e0c415
|
[
"MIT"
] | null | null | null |
BASS_2_OM_testOnSyntheticData.py
|
oliviermirat/BASS
|
fe595fdc60795b09bb6c264b6da914a6e8e0c415
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(1, './GR_BASS/BASS_only_original/')
sys.path.insert(1, './GR_BASS/')
import bass as md
import numpy as np
import sys
import bassLibrary as bl
# BASS algorithm parameters
eps = 0.1
p_d = 0.2
Jthr = 0.15
seed = 0
# Creating synthetic data for process BASS on and to learn the GMM model
nbClasses = 5
classNames = ['a', 'b', 'c', 'd', 'e']
nbInstDataAnalyze = 4000
probElemDictAppear = 0.05
[dataToAnalyze1, dataForLearn] = bl.createSyntheticDataSet(nbClasses, nbInstDataAnalyze, [[3, 2, 1, 0], [0, 1, 2, 3]], [probElemDictAppear, probElemDictAppear])
l = int(len(dataToAnalyze1)/4)
lengths_data1 = np.array([l, l, l, l])
# Learning the model with the data previously created
model_fit = md.GMM_model(nbClasses)
model_fit.solve(dataForLearn)
# Launch BASS on the synthetic data previously created
posteriorProb1 = bl.getPosteriorProbabilities(dataToAnalyze1, lengths_data1, model_fit)
[P_w1, nbInstances1, w_dict1] = bl.launchBASS(posteriorProb1, lengths_data1, model_fit, eps, p_d, Jthr, seed)
[transmat_, stationary_probs_, a, b, c] = bl.launchMarkovianCompare(posteriorProb1, lengths_data1, model_fit, eps, p_d, Jthr, seed, w_dict1, classNames, 0, {'nameOfFile' : 'syntheticDataTest'})
# Comparing different datasets with different amounts of insertions
for idx, probElemDictAppear2 in enumerate([0.1, 0.05]):
print("Comparing two different dataset with SAME amounts of insertions. Probability: ", probElemDictAppear2)
[dataToAnalyze2, dataForLearn2] = bl.createSyntheticDataSet(nbClasses, nbInstDataAnalyze, [[3, 2, 1, 0], [0, 1, 2, 3]], [probElemDictAppear2, probElemDictAppear2])
l = int(len(dataToAnalyze2)/4)
lengths_data2 = np.array([l, l, l, l])
posteriorProb2 = bl.getPosteriorProbabilities(dataToAnalyze2, lengths_data2, model_fit)
[P_w2, nbInstances2, w_dict2] = bl.launchBASS(posteriorProb2, lengths_data2, model_fit, eps, p_d, Jthr, seed)
w_thr = 1e-4
p_ins = 0.2
mu = 1.0
H_beta_fac = 0
Sigma = dataToAnalyze1.shape[1]
std = 0.05
params = np.array([eps,p_d,p_ins, mu, w_thr,H_beta_fac, Jthr, Sigma, std], dtype =float)
bl.compareTwoBASSresults(w_dict1, w_dict2, params, model_fit, dataToAnalyze1, lengths_data1, dataToAnalyze2, lengths_data2, {'nameOfFile' : 'syntheticDataTest'}, classNames, str(idx)) # TODO: change compareTwoBASSresults for it to accept the posterior probabilities posteriorProb1 and posteriorProb2 instead of the data dataToAnalyze1 and dataToAnalyze2
| 43.45614
| 355
| 0.749697
|
3242f191734e1ec3faebeb7b0fb07f008db4254c
| 108
|
py
|
Python
|
auth/view/resource/create_reset_password_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
auth/view/resource/create_reset_password_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
auth/view/resource/create_reset_password_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, EmailStr
| 18
| 44
| 0.814815
|
3247a207cdb1e57a605f9bb8949d6c37632fda73
| 3,707
|
py
|
Python
|
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
#! /bin/env python
"""
Examples
========
**Rectilinear**
Create a rectilinear grid that is 2x3::
(0) --- (1) --- (2)
| | |
| | |
| [0] | [1] |
| | |
| | |
(3) --- (4) --- (5)
Numbers in parens are node IDs, and numbers in square brackets are
cell IDs.
>>> g = RectilinearMap ([0, 2], [0, 1, 2])
>>> g.get_x ()
array([ 0., 1., 2., 0., 1., 2.])
>>> g.get_y ()
array([ 0., 0., 0., 2., 2., 2.])
Node 1 is shared by both cell 0, and 1; node 5 only is part of cell 1.
>>> g.get_shared_cells (1)
[0, 1]
>>> g.get_shared_cells (5)
[1]
Point (.5, 1.) is contained only within cell 0.
>>> g.is_in_cell (.5, 1., 0)
True
>>> g.is_in_cell (.5, 1., 1)
False
Point (1., 1.) is on a border and so is contained by both cells.
>>> g.is_in_cell (1, 1., 0)
True
>>> g.is_in_cell (1, 1., 1)
True
"""
from shapely.geometry import Point, asLineString, asPoint, asPolygon
from pymt.grids import (
Rectilinear,
Structured,
UniformRectilinear,
Unstructured,
UnstructuredPoints,
)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 23.916129
| 84
| 0.555705
|
3247e08ee12a7d9414679491f0b3e5ad060be2e8
| 27,447
|
py
|
Python
|
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
import pygame
from conteudo import Conteudo, Nave, Tiro
import random
| 47.651042
| 134
| 0.603709
|
32485d3d2f97d8719c9ad7891c585aced9f9c6ac
| 1,308
|
py
|
Python
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 75
|
2022-01-18T02:17:57.000Z
|
2022-03-24T02:30:04.000Z
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 73
|
2022-01-18T03:01:27.000Z
|
2022-03-27T16:41:38.000Z
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 3
|
2022-01-18T22:47:06.000Z
|
2022-01-25T02:03:53.000Z
|
import inspect
import typing
from di.api.dependencies import CacheKey
from di.dependant import Dependant, Marker
from xpresso._utils.typing import Protocol
from xpresso.binders.api import SupportsExtractor, SupportsOpenAPI
T = typing.TypeVar("T", covariant=True)
| 26.693878
| 70
| 0.683486
|
3248e7edee7a47a71c97765cef8dd8859b78769c
| 3,698
|
py
|
Python
|
test/test_grid_to_triple.py
|
NCAR/geocat-f2py
|
fee07e680f61ca2ebfbb33f1554d9d85271fa32a
|
[
"Apache-2.0"
] | 4
|
2021-02-20T20:02:11.000Z
|
2021-11-24T13:35:32.000Z
|
test/test_grid_to_triple.py
|
NCAR/geocat-f2py
|
fee07e680f61ca2ebfbb33f1554d9d85271fa32a
|
[
"Apache-2.0"
] | 27
|
2020-12-07T17:00:05.000Z
|
2022-03-24T16:42:17.000Z
|
test/test_grid_to_triple.py
|
NCAR/geocat-f2py
|
fee07e680f61ca2ebfbb33f1554d9d85271fa32a
|
[
"Apache-2.0"
] | 4
|
2021-01-07T01:50:11.000Z
|
2021-07-07T13:05:42.000Z
|
import sys
import unittest as ut
import numpy as np
import xarray as xr
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.f2py import grid_to_triple
else:
from geocat.f2py import grid_to_triple
# Size of the grids
ny = 2
mx = 3
# Nominal input
data = np.asarray([2.740655, 2.745848, 4.893587, 2.965059, 1.707929,
0.746007]).reshape((ny, mx))
# Missing value = np.nan input
data_nan = data.copy()
data_nan[0, 1] = np.nan
data_nan[1, 2] = np.nan
# Missing value = -99 input
data_msg = data_nan.copy()
data_msg[np.isnan(data_msg)] = -99
# Coordinates
x = np.asarray([1.0, 3.0, 5.0])
y = np.asarray([2.0, 4.0])
# Expected output
out_expected = np.asarray([1, 3, 5, 1, 3, 5, 2, 2, 2, 4, 4, 4, 2.740655, 2.745848, 4.893587, 2.965059, 1.707929, 0.746007])\
.reshape((3, ny * mx))
out_expected_msg = np.asarray([1, 5, 1, 3, 2, 2, 4, 4, 2.740655, 4.893587, 2.965059, 1.707929])\
.reshape((3, 4))
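# Illustrative check (not part of the original test file): the expected output above
# uses the "triple" layout -- row 0 repeats x for every y, row 1 repeats each y
# across x, and row 2 is the flattened data -- which can be reconstructed with numpy.
_xx, _yy = np.meshgrid(x, y)
assert np.allclose(np.stack([_xx.ravel(), _yy.ravel(), data.ravel()]), out_expected)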
| 32.156522
| 124
| 0.635479
|
3249b98ec0603abf9f97a5033a897bd1e2965b76
| 440
|
py
|
Python
|
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 27.5
| 89
| 0.584091
|
324db02ef7101b8e262f2ae0d6adf964eaf48e55
| 1,252
|
py
|
Python
|
scripts/pegasus/build_test_sample_spm_no_bos.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 50,404
|
2019-09-26T09:55:55.000Z
|
2022-03-31T23:07:49.000Z
|
scripts/pegasus/build_test_sample_spm_no_bos.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 13,179
|
2019-09-26T10:10:57.000Z
|
2022-03-31T23:17:08.000Z
|
scripts/pegasus/build_test_sample_spm_no_bos.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 13,337
|
2019-09-26T10:49:38.000Z
|
2022-03-31T23:06:17.000Z
|
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus
# 1. pip install sentencepiece
#
# 2. wget https://raw.githubusercontent.com/google/sentencepiece/master/data/botchan.txt
# 3. build
import sentencepiece as spm
# pegasus:
# 1. no bos
# 2. eos_id is 1
# 3. unk_id is 2
# build a sample spm file accordingly
spm.SentencePieceTrainer.train('--input=botchan.txt --model_prefix=test_sentencepiece_no_bos --bos_id=-1 --unk_id=2 --eos_id=1 --vocab_size=1000')
# 4. now update the fixture
# mv test_sentencepiece_no_bos.model ../../tests/fixtures/
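# Hedged verification sketch (not part of the original script): reload the model just
# trained and confirm the special-token ids requested above. The keyword-argument
# constructor is an assumption about the installed sentencepiece version.
sp = spm.SentencePieceProcessor(model_file='test_sentencepiece_no_bos.model')
assert sp.bos_id() == -1  # no bos
assert sp.eos_id() == 1
assert sp.unk_id() == 2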
| 36.823529
| 148
| 0.761182
|
3252c61f7a71dbc22f9e4a1f7ba0cf98c90f9ea0
| 8,931
|
py
|
Python
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 21
|
2019-07-25T08:39:56.000Z
|
2020-12-14T09:59:06.000Z
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 1
|
2019-08-05T03:23:54.000Z
|
2019-08-05T03:24:39.000Z
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 15
|
2019-07-31T13:37:14.000Z
|
2021-09-28T19:01:27.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Running inference for sequence classification on various datasets (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import numpy as np
from scipy.special import softmax
import torch
from torch.utils.data import (DataLoader, SequentialSampler, TensorDataset)
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_dataset import (compute_metrics, convert_examples_to_features,
output_modes, processors, InputExample)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
}
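# Hedged sketch (not part of this snippet): the usual pattern in these example scripts
# for turning a --model_type choice into concrete classes; the function and argument
# names are assumptions for illustration only.
def load_model_for_inference(model_type, model_name_or_path, num_labels, device):
    config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
    config = config_class.from_pretrained(model_name_or_path, num_labels=num_labels)
    tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
    model = model_class.from_pretrained(model_name_or_path, config=config)
    model.to(device)
    model.eval()
    return config, tokenizer, model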
if __name__ == "__main__":
main()
| 46.759162
| 163
| 0.665659
|
3254729c0575b8bd980f42074c2cb939b0ad6cf0
| 1,382
|
py
|
Python
|
problems/p012.py
|
10jmellott/ProjectEuler
|
eb84d129bbc37ba10ad7814ad2138d81568e0085
|
[
"Unlicense"
] | null | null | null |
problems/p012.py
|
10jmellott/ProjectEuler
|
eb84d129bbc37ba10ad7814ad2138d81568e0085
|
[
"Unlicense"
] | null | null | null |
problems/p012.py
|
10jmellott/ProjectEuler
|
eb84d129bbc37ba10ad7814ad2138d81568e0085
|
[
"Unlicense"
] | null | null | null |
"""<a href="https://projecteuler.net/problem=12" class="title-custom-link">Highly divisible triangular number</a>
The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle
number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
from utils.oeis import triangular_numbers
from utils.fibonacci import trial_division
from utils.fibonacci import factors_to_dictionary
def main():
"""Solves this problem
Utilizes [A000005](http://oeis.org/A000005) which is solved via a
lemma to Euler's Totient Function
Returns:
Integer: Solution to this problem
"""
i = 1
divisors = 0
while divisors <= 500:
triangle = triangular_numbers(i)
prime_factors = trial_division(triangle)
prime_factors = factors_to_dictionary(prime_factors)
divisors = 1
for k, v in prime_factors.items():
divisors = divisors * (v + 1)
i = i + 1
return triangular_numbers(i - 1)
| 32.139535
| 113
| 0.664978
|
3255418e552bf21eec558aa0897845fa6583a29c
| 4,984
|
py
|
Python
|
u3s2m1ass1-pt6/code/rpg_queries.py
|
LambdaTheda/lambdata-Unit3
|
b44b20f2f3e28d2b17613660ddb562afe4825686
|
[
"MIT"
] | null | null | null |
u3s2m1ass1-pt6/code/rpg_queries.py
|
LambdaTheda/lambdata-Unit3
|
b44b20f2f3e28d2b17613660ddb562afe4825686
|
[
"MIT"
] | null | null | null |
u3s2m1ass1-pt6/code/rpg_queries.py
|
LambdaTheda/lambdata-Unit3
|
b44b20f2f3e28d2b17613660ddb562afe4825686
|
[
"MIT"
] | 1
|
2020-05-11T04:33:24.000Z
|
2020-05-11T04:33:24.000Z
|
import sqlite3
import os
#DB_FILEPATH = "data/chinook.db"
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "rpg_db.sqlite3")
conn = sqlite3.connect(DB_FILEPATH)
conn.row_factory = sqlite3.Row
print(type(conn)) #> <class 'sqlite3.Connection'>
curs = conn.cursor()
print(type(curs)) #> <class 'sqlite3.Cursor'>
query = """SELECT
count(DISTINCT character_id) as character_count
FROM charactercreator_character"""
# query1 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_cleric"""
# query2 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_fighter"""
# query3 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_mage"""
# query4 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_thief"""
queries_combined = """SELECT
count(distinct c.character_ptr_id) as total_clerics
,count(distinct f.character_ptr_id) as total_fighters
,count(distinct m.character_ptr_id) as total_mages
,count(distinct n.mage_ptr_id) as total_necromancers
,count(distinct t.character_ptr_id) as total_thieves
FROM charactercreator_character ccc
LEFT JOIN charactercreator_fighter f
ON ccc.character_id = f.character_ptr_id
LEFT JOIN charactercreator_cleric c
ON ccc.character_id= c.character_ptr_id
LEFT JOIN charactercreator_mage m
ON ccc.character_id = m.character_ptr_id
LEFT JOIN charactercreator_necromancer n
ON ccc.character_id = n.mage_ptr_id
LEFT JOIN charactercreator_thief t
ON ccc.character_id = t.character_ptr_id"""
query5 = """SELECT
count(DISTINCT item_id ) as total_item
FROM armory_item"""
query6 = """SELECT
count(DISTINCT item_ptr_id) as weapons
FROM armory_weapon"""
query7 = """SELECT
count(DISTINCT item_id) - count(DISTINCT item_ptr_id) as total_non_weapons
FROM armory_item, armory_weapon"""
query8 = """SELECT item_id
, count(DISTINCT item_id) as item
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20
"""
query9 = """SELECT cci.character_id
, count(DISTINCT aw.item_ptr_id) as number_of_weapons
FROM charactercreator_character_inventory as cci
LEFT JOIN armory_item as ai ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon as aw ON ai.item_id = aw.item_ptr_id
GROUP BY character_id
LIMIT 20"""
query10 = """SELECT avg(total_items) as avg_items
FROM (
-- row per character = 302
SELECT
c.character_id
,c.name
--,ci.item_id
,count(distinct ci.item_id) as total_items
FROM charactercreator_character c
LEFT JOIN charactercreator_character_inventory ci
ON c.character_id = ci.character_id
GROUP BY c.character_id
) subz"""
query11 = """SELECT avg(weapon_count) as avg_weapon
FROM (
SELECT
cci.character_id
,count(DISTINCT aw.item_ptr_id) as weapon_count
FROM charactercreator_character_inventory cci
LEFT JOIN armory_item ai ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon aw ON ai.item_id = aw.item_ptr_id
GROUP BY 1
) subz"""
print("----------")
result = curs.execute(query).fetchone()
print("RESULTS FOR CHARACTERCREATOR_CHARACTER", result)
print(result["character_count"])
# print("-------------")
# result1 = curs.execute(query1).fetchone()
# print("Results for charactercreator_cleric", result1)
# print(result1["character_ptr_count"])
# print("---------")
# result2 = curs.execute(query2).fetchone()
# print("Results for charactercreator_fighter", result2)
# print(result2["character_ptr_count"])
# print("---------")
# result3 = curs.execute(query3).fetchone()
# print("Results for charactercreator_mage", result3)
# print(result3["character_ptr_count"])
# print('--------')
# result4 = curs.execute(query4).fetchone()
# print("Results for charactercreator_thief", result4)
# print(result4["character_ptr_count"])
# print("-------------")
# result5 = curs.execute(query5).fetchone()
# print("Results for total Items", result5)
# print(result5["total_item"])
result_queries = curs.execute(queries_combined).fetchall()
print("Results of each specific subclass", result_queries)
result6 = curs.execute(query6).fetchone()
print("Results for total weapons", result6)
print(result6["weapons"])
print("---------")
result7 = curs.execute(query7).fetchone()
print("Results for total non weapons", result7)
print(result7["total_non_weapons"])
print("---------")
result8 = curs.execute(query8).fetchall()
for rw in result8:
print(rw[0], rw[1])
print("---------")
result9 = curs.execute(query9).fetchall()
for rw in result9:
print(rw['character_id'], rw['number_of_weapons'])
print("---------")
result10 = curs.execute(query10).fetchone()
print("Average item per character", result10)
print(result10["avg_items"])
print("---------")
result11= curs.execute(query11).fetchone()
print("Average weapon per character", result11)
print(result11["avg_weapon"])
print("---------")
| 30.576687
| 85
| 0.731742
|
3256173ee4e9a424745cf36c9f1ac6cf9bf2bc08
| 7,872
|
py
|
Python
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | 5
|
2019-04-25T00:19:56.000Z
|
2020-09-02T01:24:40.000Z
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | 6
|
2020-05-23T23:17:59.000Z
|
2022-02-17T21:50:46.000Z
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python3
# ISC License
#
# Copyright (c) 2019, Bryon Vandiver
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from json import dumps
import os
import csv
CSV_LOCATION = os.path.join(os.path.abspath(os.path.dirname(__file__)), 's1c88.csv')
op0s, op1s, op2s = [None] * 0x100, [None] * 0x100, [None] * 0x100
CONDITIONS = {
'C': 'cpu.reg.flag.c',
'NC': '!cpu.reg.flag.c',
'Z': 'cpu.reg.flag.z',
'NZ': '!cpu.reg.flag.z',
'V': 'cpu.reg.flag.v',
'NV': '!cpu.reg.flag.v',
'M': 'cpu.reg.flag.n',
'P': '!cpu.reg.flag.n',
'LT': 'cpu.reg.flag.n != cpu.reg.flag.v',
'LE': '(cpu.reg.flag.n != cpu.reg.flag.v) || cpu.reg.flag.z',
'GT': '(cpu.reg.flag.n == cpu.reg.flag.v) && !cpu.reg.flag.z',
'GE': 'cpu.reg.flag.n == cpu.reg.flag.v',
'F0': 'cpu.reg.flag.f0',
'F1': 'cpu.reg.flag.f1',
'F2': 'cpu.reg.flag.f2',
'F3': 'cpu.reg.flag.f3',
'NF0': '!cpu.reg.flag.f0',
'NF1': '!cpu.reg.flag.f1',
'NF2': '!cpu.reg.flag.f2',
'NF3': '!cpu.reg.flag.f3',
}
ARGUMENTS = {
'A': (8, False, False, 'a'),
'B': (8, False, False, 'b'),
'L': (8, False, False, 'l'),
'H': (8, False, False, 'h'),
'BR': (8, False, False, 'br'),
'SC': (8, False, False, 'sc'),
'EP': (8, False, False, 'ep'),
'XP': (8, False, False, 'xp'),
'YP': (8, False, False, 'yp'),
'NB': (8, False, False, 'nb'),
'BA': (16, False, False, 'ba'),
'HL': (16, False, False, 'hl'),
'IX': (16, False, False, 'ix'),
'IY': (16, False, False, 'iy'),
'SP': (16, False, False, 'sp'),
'PC': (16, False, False, 'pc'),
'#nn': (8, True, False, 'imm8'),
'rr': (8, True, False, 'imm8'),
'#mmnn': (16, True, False, 'imm16'),
'qqrr': (16, True, False, 'imm16'),
'[kk]': (16, True, True, 'vect'), # Special
'[hhll]': (-1, True, True, 'ind16'),
'[HL]': (-1, True, True, 'absHL'),
'[IX]': (-1, True, True, 'absIX'),
'[IY]': (-1, True, True, 'absIY'),
'[BR:ll]': (-1, True, True, 'absBR'),
'[SP+dd]': (-1, True, True, 'indDSP'),
'[IX+dd]': (-1, True, True, 'indDIX'),
'[IY+dd]': (-1, True, True, 'indDIY'),
'[IX+L]': (-1, True, True, 'indIIX'),
'[IY+L]': (-1, True, True, 'indIIY'),
}
OPERATIONS = {
'INC': (8, 'ReadWrite'),
'DEC': (8, 'ReadWrite'),
'SLA': (8, 'ReadWrite'),
'SLL': (8, 'ReadWrite'),
'SRA': (8, 'ReadWrite'),
'SRL': (8, 'ReadWrite'),
'RL': (8, 'ReadWrite'),
'RLC': (8, 'ReadWrite'),
'RR': (8, 'ReadWrite'),
'RRC': (8, 'ReadWrite'),
'CPL': (8, 'ReadWrite'),
'NEG': (8, 'ReadWrite'),
'LD': (8, 'Write', 'Read'),
'ADD': (8, 'ReadWrite', 'Read'),
'ADC': (8, 'ReadWrite', 'Read'),
'SUB': (8, 'ReadWrite', 'Read'),
'SBC': (8, 'ReadWrite', 'Read'),
'AND': (8, 'ReadWrite', 'Read'),
'OR': (8, 'ReadWrite', 'Read'),
'XOR': (8, 'ReadWrite', 'Read'),
'CP': (8, 'Read', 'Read'),
'BIT': (8, 'Read', 'Read'),
'CALL': (16, 'Read'),
'CARS': (8, 'Read'),
'CARL': (16, 'Read'),
'JRS': (8, 'Read'),
'JRL': (16, 'Read'),
'JP': (8, 'Read'),
'INT': (8, 'Read'),
'RETE': (8,),
'PUSH': (-1, 'Read'),
'POP': (-1, 'Write'),
'EX': (-1, 'ReadWrite', 'ReadWrite'),
'SWAP': (8, 'ReadWrite')
}
# Generate switch table
with open(CSV_LOCATION, 'r') as csvfile:
spamreader = csv.reader(csvfile)
next(spamreader)
for row in spamreader:
code, cycles0, op0, arg0_1, arg0_2, cycles1, op1, arg1_1, arg1_2, cycles2, op2, arg2_1, arg2_2 = row
code = int(code, 16)
if op0 != 'undefined':
op0s[code] = format(cycles0, op0, arg0_1, arg0_2)
if op1 != 'undefined':
op1s[code] = format(cycles1, op1, arg1_1, arg1_2)
if op2 != 'undefined':
op2s[code] = format(cycles2, op2, arg2_1, arg2_2)
print ("int inst_advance(Machine::State& cpu) {")
print ("\tswitch (cpu_imm8(cpu)) {")
dump_table(op0s, '\t')
print ("\tcase 0xCE:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op1s, '\t\t')
print ("\t\t}")
print ("\tcase 0xCF:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op2s, '\t\t')
print ("\t\t}")
print ("\t}")
print ("}")
| 32.528926
| 123
| 0.506225
|
325927f14aed5b03fe28e7161da22ac9db1b0f2b
| 15,364
|
py
|
Python
|
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
#
# Copyright (C) 2012
# ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""Test logging utilities
* Provide logging with standardized prefixes:
. time : self, if notime = 0
. verbosity level : self, if noVLevel = 0
. test case ID : self, if noTestId = 0
. message text : argument msgString, the actual text to log
* All append_log statements with a verbosity level equal to or lower than the
test case verbosity level get logged.
* The logging is written to stdout and, if a file name is provided, to a file as well.
* It is also possible to append other files to the test logging file.
* Best practise is to use the following verbosity levels for the append_log
argument:
-v 0 Log test result
-v 1 Log test title
-v 2 Log errors
-v 3 Log info
-v 4 Log error details
-v 5 Log info details
-v 6 Log debug
-v 7 Log debug details
"""
################################################################################
# System imports
import sys
import time
import common as cm
################################################################################
# Functions
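# Hedged sketch (not part of the original module): a minimal append_log along the
# lines described in the module docstring above. The names, defaults, and prefix
# format are assumptions; the real definitions were stripped from this record.
def append_log(level, msg_string, test_level=11, test_id='', file_name=None,
               no_time=0, no_v_level=0, no_test_id=0):
    if level > test_level:
        return  # only log at or below the test case verbosity level
    prefix = ''
    if no_time == 0:
        prefix += time.strftime('%Y-%m-%d %H:%M:%S') + ' - '
    if no_v_level == 0:
        prefix += '(%d) ' % level
    if no_test_id == 0:
        prefix += test_id + ' '
    line = prefix + msg_string
    print(line)
    if file_name is not None:
        with open(file_name, 'a') as fp:
            fp.write(line + '\n')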
| 48.466877
| 164
| 0.570034
|
325b56ca169aa22d3b3e5e502acb535b1e7a8a46
| 868
|
py
|
Python
|
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
import re
| 27.125
| 67
| 0.59447
|
325b89ab7374be326978f10a334f001191bd3ead
| 1,971
|
py
|
Python
|
application/models/basemodel.py
|
ahmedsadman/festive
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | 2
|
2020-10-19T23:26:23.000Z
|
2020-10-20T02:14:10.000Z
|
application/models/basemodel.py
|
ahmedsadman/fest-management-api
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | null | null | null |
application/models/basemodel.py
|
ahmedsadman/fest-management-api
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | 1
|
2021-08-04T15:45:29.000Z
|
2021-08-04T15:45:29.000Z
|
from sqlalchemy import func
from application import db
from application.helpers.error_handlers import ServerError
| 33.982759
| 78
| 0.597666
|
325ca5543e9808ec6039d4cf69192bb2bde47b8f
| 522
|
py
|
Python
|
tests/core/resource_test_base.py
|
alteia-ai/alteia-python-sdk
|
27ec7458334334ed6a1edae52cb25d5ce8734177
|
[
"MIT"
] | 11
|
2020-12-22T14:39:21.000Z
|
2022-02-18T16:34:34.000Z
|
tests/core/resource_test_base.py
|
alteia-ai/alteia-python-sdk
|
27ec7458334334ed6a1edae52cb25d5ce8734177
|
[
"MIT"
] | 1
|
2021-08-05T14:21:12.000Z
|
2021-08-09T13:22:55.000Z
|
tests/core/resource_test_base.py
|
alteia-ai/alteia-python-sdk
|
27ec7458334334ed6a1edae52cb25d5ce8734177
|
[
"MIT"
] | null | null | null |
import os
from unittest.mock import patch
import alteia
from tests.alteiatest import AlteiaTestBase
| 27.473684
| 89
| 0.726054
|
325dd1dcfd3afeca98237f91ac72ec8dacd09a26
| 137
|
py
|
Python
|
scripts/viterbi.py
|
Tereshchenkolab/digitize-ecg-cli
|
fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1
|
[
"MIT"
] | 6
|
2021-06-12T08:20:33.000Z
|
2022-03-01T15:32:35.000Z
|
scripts/viterbi.py
|
Tereshchenkolab/ecg-digitize
|
fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1
|
[
"MIT"
] | null | null | null |
scripts/viterbi.py
|
Tereshchenkolab/ecg-digitize
|
fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1
|
[
"MIT"
] | null | null | null |
from ecgdigitize.signal.extraction.viterbi import *
if __name__ == "__main__":
print(list(interpolate(Point(0,0), Point(5,5))))
| 27.4
| 56
| 0.70073
|
325fc49ee449fcf77d594c853f23436486f7b300
| 2,711
|
py
|
Python
|
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Dict
import boto3
import pytest
from s3fs import S3FileSystem
from peakina.io.s3.s3_fetcher import S3Fetcher
| 33.8875
| 94
| 0.693471
|
3262d7cd59e5780cbf71323fcb7c77c193d6904e
| 324
|
py
|
Python
|
testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py
|
danielcamilo13/testemunhoWEB
|
46825e31123058fa6ee21e4e71e9e0bedde32bb4
|
[
"bzip2-1.0.6"
] | 1
|
2019-12-03T01:37:13.000Z
|
2019-12-03T01:37:13.000Z
|
testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py
|
danielcamilo13/testemunhoWEB
|
46825e31123058fa6ee21e4e71e9e0bedde32bb4
|
[
"bzip2-1.0.6"
] | 11
|
2020-06-06T01:28:35.000Z
|
2022-03-12T00:16:34.000Z
|
testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py
|
danielcamilo13/testemunhoWEB
|
46825e31123058fa6ee21e4e71e9e0bedde32bb4
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-12-02 05:19
from django.db import migrations
| 18
| 47
| 0.58642
|