| hexsha (string, 40–40) | size (int64, 5–2.06M) | ext (11 classes) | lang (1 class) | max_stars_repo_path (string, 3–251) | max_stars_repo_name (string, 4–130) | max_stars_repo_head_hexsha (string, 40–78) | max_stars_repo_licenses (list, 1–10) | max_stars_count (int64, 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (string, 24 ⌀) | max_stars_repo_stars_event_max_datetime (string, 24 ⌀) | max_issues_repo_path (string, 3–251) | max_issues_repo_name (string, 4–130) | max_issues_repo_head_hexsha (string, 40–78) | max_issues_repo_licenses (list, 1–10) | max_issues_count (int64, 1–116k ⌀) | max_issues_repo_issues_event_min_datetime (string, 24 ⌀) | max_issues_repo_issues_event_max_datetime (string, 24 ⌀) | max_forks_repo_path (string, 3–251) | max_forks_repo_name (string, 4–130) | max_forks_repo_head_hexsha (string, 40–78) | max_forks_repo_licenses (list, 1–10) | max_forks_count (int64, 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (string, 24 ⌀) | max_forks_repo_forks_event_max_datetime (string, 24 ⌀) | content (string, 1–1.05M) | avg_line_length (float64, 1–1.02M) | max_line_length (int64, 3–1.04M) | alphanum_fraction (float64, 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 9d364f1bcfe20f4a4dd1ba1db0e841b93b086bab | 1,672 | py | Python | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | ["Apache-2.0"] | 1 | 2019-04-02T11:12:52.000Z | 2019-04-02T11:12:52.000Z | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | ["Apache-2.0"] | null | null | null | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | ["Apache-2.0"] | null | null | null |
import os
from time import sleep
import requests
querysub0 = 'https://linkeddata1.calcul.u-psud.fr/sparql?default-graph-uri=&query=construct%7B+%3Fs+%3Fp+%3Fo%7D+where+%7B+%0D%0Aselect+distinct+%3Fs+%3Fp+%3Fo+where+%7B%0D%0A%7B%0D%0A%3Fs1+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs+.%0D%0A%3Fs2+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs+.%0D%0A%3Fs+++%3Fp+++%3Fo.%0D%0A%7D+Union%7B%0D%0A%3Fs+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs3+.%0D%0A%3Fs4+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs3+.%0D%0A%3Fs+++%3Fp+++%3Fo.%0D%0A%7D+Union%7B%0D%0A%3Fs7+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs5+.%0D%0A%3Fs+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs5+.%0D%0A%3Fs+++%3Fp+++%3Fo.%7D+%0D%0A%7D%0D%0ALimit+10000+offset+'
querysub1 = '+%7D%0D%0A&format=text%2Fplain&timeout=0'
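# NOTE: the download helper is not included in this snippet; the sketch below is
# an assumed minimal implementation inferred from the querysub0/querysub1 URL
# fragments above (pages of 10000 triples fetched per offset, saved as .nt files).
def download_big_file(counter):
    offset = counter * 10000
    url = querysub0 + str(offset) + querysub1
    response = requests.get(url, timeout=300)
    with open('part{}.nt'.format(counter), 'wb') as out_file:
        out_file.write(response.content)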
for counter in range(1, 700, 1):
    sleep(10)  # sleep to let the server breathe between requests
    download_big_file(counter)
print("making yagoMovies.nt ...")
os.system('find . -name "*.nt" -size -15 -delete')  # drop near-empty downloads
os.system("cat *.nt > a.ntt")
os.system("rm *.nt")
os.system("mv a.ntt yagoMovies.nt")
print("yagoMovies.nt is created. Have fun!")
| 59.714286 | 819 | 0.692584 |
| 9d3825f401efd886ac45bc856e7732ecfff783b3 | 1,418 | py | Python | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | ["MIT"] | 4 | 2018-02-06T14:41:26.000Z | 2020-03-19T14:16:05.000Z | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | ["MIT"] | null | null | null | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | ["MIT"] | null | null | null |
'''
Created on Apr 12, 2011
@author: svenkratz
'''
import TestGestures
import Protractor3D.Protractor3D
from Protractor3D.Protractor3D import *
triangle = triplify(TestGestures.Triangle)
print(triangle)
circle1 = triplify(TestGestures.Circle_1)
circle2 = triplify(TestGestures.Circle_2)
rectangle = triplify(TestGestures.Rectangle)
p_triangle = Protractor3D(triangle)
p_circle1 = Protractor3D(circle1)
p_circle2 = Protractor3D(circle2)
p_rectangle = Protractor3D(rectangle)
#print p_circle1.trace
#
#print "Trace", p_triangle.trace
#print "Resampled", p_triangle.resampled
#print "Scaled", p_triangle.scaled
#print "Centered", p_triangle.centered
#print "Template", p_triangle.template
print "========== Evaluations =============="
Protractor3D.DEBUG = 5
gesturesAndNames = [(p_triangle,"Triangle"), (p_circle1,"Circle1"), ( p_circle2, "Circle2") , (p_rectangle, "Rectangle")]
while gesturesAndNames != []:
    gesture = gesturesAndNames[0]
    templates = gesturesAndNames[1:]
    gesturesAndNames = templates
    if len(templates) != 0:
        for t in templates:
            print("=======================================")
            print("Results for", gesture[1], " <---> ", t[1])
            gesture[0].protractor3D_classify(gesture[0].template, t[0].template)
| 22.507937 | 121 | 0.693935 |
| 9d3874299d6c36b60cba6fdb324222e4353364ea | 481 | py | Python | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | ["BSD-3-Clause"] | null | null | null | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | ["BSD-3-Clause"] | 2 | 2022-01-14T04:50:58.000Z | 2022-02-28T22:31:06.000Z | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2021-03-24
# @Filename: test_hal.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import pytest
from hal import __version__
pytestmark = [pytest.mark.asyncio]
| 20.913043 | 74 | 0.706861 |
| 9d3a4036188d6088bc1ce4cfe8dfff01c0a9fdb1 | 490 | py | Python | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | ["BSD-3-Clause"] | null | null | null | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | ["BSD-3-Clause"] | null | null | null | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | ["BSD-3-Clause"] | null | null | null |
import numpy as np
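# crab_fuel() is used below but its definition is not included in this snippet.
# A minimal sketch, assuming the standard day-7 part-2 cost where moving n steps
# costs 1 + 2 + ... + n (the n-th triangular number):
def crab_fuel(distances):
    return distances * (distances + 1) // 2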
if __name__ == '__main__':
with open('input.txt') as f:
pin = np.array([int(x) for x in f.read().split(',')])
distances = np.abs(pin[None, :] - np.arange(pin.max() + 1)[:, None])
total_fuel = np.sum(distances, axis=1)
print(f'Solution 1: {total_fuel.min()}')
distances_v2 = crab_fuel(distances)
total_fuel_v2 = np.sum(distances_v2, axis=1)
print(f'Solution 2: {total_fuel_v2.min()}')
| 25.789474 | 72 | 0.608163 |
| 9d3ac14e7019eef027448de09382a14cd8c888c7 | 352 | py | Python | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | 3 | 2022-02-08T18:03:44.000Z | 2022-03-18T18:10:43.000Z | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | 19 | 2020-05-11T15:36:35.000Z | 2022-02-08T11:26:40.000Z | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | ["MIT"] | null | null | null |
# Generated by Django 2.2.10 on 2020-05-18 10:49
from django.db import migrations
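# The Migration class body is truncated in this record. A minimal sketch of what
# a Django migration named 0033_delete_work conventionally contains; the
# dependency below is a hypothetical placeholder, not the repository's actual one:
class Migration(migrations.Migration):

    dependencies = [
        ("core", "0032_previous_migration"),  # hypothetical dependency name
    ]

    operations = [
        migrations.DeleteModel(name="Work"),
    ]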
| 19.555556 | 63 | 0.616477 |
| 9d3b2ee3ee8d1f5868d497f89b1766382405982d | 16,114 | py | Python | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | ["MIT"] | null | null | null | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | ["MIT"] | null | null | null | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import numpy as np
from torchvision import datasets, transforms
import logging
import random
import torch
# Settings for a multiplicative linear congruential generator (aka Lehmer
# generator) suggested in 'Random Number Generators: Good
# Ones are Hard to Find' by Park and Miller.
MLCG_MODULUS = 2**(31) - 1
MLCG_MULTIPLIER = 16807
# Default quantiles for federated evaluations.
DEFAULT_QUANTILES = (0.0, 0.25, 0.5, 0.75, 1.0)
def mnist_iid(dataset, num_users):
"""
Sample I.I.D. client data from MNIST dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
num_items = int(len(dataset) / num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items,
replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users
def mnist_noniid(dataset, num_users):
"""
Sample non-I.I.D client data from MNIST dataset
:param dataset:
:param num_users:
:return:
"""
    # 60,000 training imgs --> 300 imgs/shard X 200 shards
num_shards, num_imgs = 200, 300
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
labels = dataset.train_labels.numpy()
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
# divide and assign 2 shards/client
for i in range(num_users):
rand_set = set(np.random.choice(idx_shard, 2, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
return dict_users
def mnist_noniid_unequal(dataset, num_users):
"""
Sample non-I.I.D client data from MNIST dataset s.t clients
have unequal amount of data
:param dataset:
:param num_users:
:returns a dict of clients with each clients assigned certain
number of training imgs
"""
# 60,000 training imgs --> 50 imgs/shard X 1200 shards
num_shards, num_imgs = 1200, 50
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
labels = dataset.train_labels.numpy()
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
# Minimum and maximum shards assigned per client:
min_shard = 1
max_shard = 30
# Divide the shards into random chunks for every client
# s.t the sum of these chunks = num_shards
random_shard_size = np.random.randint(min_shard, max_shard + 1,
size=num_users)
random_shard_size = np.around(random_shard_size /
sum(random_shard_size) * num_shards)
random_shard_size = random_shard_size.astype(int)
# Assign the shards randomly to each client
if sum(random_shard_size) > num_shards:
for i in range(num_users):
# First assign each client 1 shard to ensure every client has
            # at least one shard of data
rand_set = set(np.random.choice(idx_shard, 1, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
random_shard_size = random_shard_size - 1
# Next, randomly assign the remaining shards
for i in range(num_users):
if len(idx_shard) == 0:
continue
shard_size = random_shard_size[i]
if shard_size > len(idx_shard):
shard_size = len(idx_shard)
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
else:
for i in range(num_users):
shard_size = random_shard_size[i]
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
if len(idx_shard) > 0:
# Add the leftover shards to the client with minimum images:
shard_size = len(idx_shard)
# Add the remaining shard to the client with lowest data
k = min(dict_users, key=lambda x: len(dict_users.get(x)))
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[k] = np.concatenate(
(dict_users[k], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
return dict_users
def cifar_iid(dataset, num_users, args):
"""
Sample I.I.D. client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
num_items = int(len(dataset) / num_users)
    # dict_users maps each client id (0..num_users-1) to its set of sample
    # indices; all_idxs starts out holding every index in the dataset.
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items,
replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users
def imagenet_noniid(dataset, num_users, args, class_num=2):
"""
Sample non-I.I.D client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return:
"""
    # num_shards / num_imgs are set as in the CIFAR case below, but ImageNet
    # classes are unevenly sized, so idxs are sliced with per-class counts.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Shard layout:
    # idx_shard -> one shard per (client, class) pair, e.g. 2 classes x 100 clients = 200 shards
    # num_imgs  -> images per shard: len(dataset) / num_users / class_num
num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num)
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
# labels = dataset.train_labels.numpy()
labels = np.array(dataset.targets)
# sort labels
idxs = np.argsort(labels)
class_count = [0 for i in range(num_shards)]
for i in labels:
class_count[i] += 1
accumulate_class_count = [0 for i in range(num_shards)]
for c in range(num_shards):
if c==0:
accumulate_class_count[c] = class_count[0]
else:
accumulate_class_count[c] = accumulate_class_count[c-1] + class_count[c]
idx_shuffle = np.random.permutation(idx_shard)
client_class_set = []
for i in range(num_users):
user_class_set = idx_shuffle[i*class_num:(i+1)*class_num]
client_class_set.append(user_class_set)
for class_seed in user_class_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[accumulate_class_count[class_seed] -class_count[class_seed] :accumulate_class_count[class_seed]]), axis=0)
return dict_users,client_class_set
def cifar_noniid(dataset, num_users, args, class_num=2):
"""
Sample non-I.I.D client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return:
"""
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Shard layout:
    # idx_shard -> one shard per (client, class) pair, e.g. 2 classes x 100 clients = 200 shards
    # num_imgs  -> images per shard: len(dataset) / num_users / class_num
num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num)
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
# labels = dataset.train_labels.numpy()
labels = np.array(dataset.targets)
#sort_index = np.argsort(labels)
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
user_classs_dict = []
# divide and assign
for i in range(num_users):
        # randomly pick class_num (e.g. 2 of the 200) shards for this client
rand_set = set(np.random.choice(idx_shard, class_num, replace=False))
if class_num > 1 and i != num_users-1:
while dataset.targets[idxs[list(rand_set)[1] * num_imgs]] == dataset.targets[idxs[list(rand_set)[0] *num_imgs]]:
rand_set = set(np.random.choice(idx_shard, class_num, replace=False))
#print(dataset.targets[idxs[list(rand_set)[1] * num_imgs]])
#print(dataset.targets[idxs[list(rand_set)[0] * num_imgs]])
#print('\t')
user_classs_dict.append(rand_set)
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
# for data_idx, j in enumerate(dict_users[i]):
# print(i, data_idx, dataset.targets[int(j)])
return dict_users, user_classs_dict
if __name__ == '__main__':
dataset_train = datasets.MNIST('./data/mnist/', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),
(0.3081,))
]))
num = 100
d = mnist_noniid(dataset_train, num)
| 37.561772 | 151 | 0.608353 |
| 9d3ca477c6b29581c9b909f6a0a67fb1fa79ccca | 2,502 | py | Python | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | ["MIT"] | 3 | 2020-05-23T14:31:35.000Z | 2020-11-12T12:56:08.000Z | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | ["MIT"] | null | null | null | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | ["MIT"] | null | null | null |
#! /usr/bin/python3
# Author: gaurav512
''' Script written to scrape basic information about a
Codeforces profile given the user id
Usage: Enter the user id as a command line argument OR as input
after running the following in a terminal: python3 codeforces.py [userid]
'''
import requests, bs4, sys
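# main() is called below but not defined in this snippet. A minimal sketch of
# what it plausibly does given the docstring (fetch the profile page for the
# given user id and print basic info); the CSS selector is an assumption about
# the page layout, which may change:
def main():
    user_id = sys.argv[1] if len(sys.argv) > 1 else input('Enter userid: ')
    res = requests.get('https://codeforces.com/profile/{}'.format(user_id))
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    info = soup.select_one('.info')  # assumed container of basic profile info
    if info is not None:
        print(info.get_text(' ', strip=True))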
if __name__ == '__main__':
main()
| 30.144578 | 136 | 0.691847 |
| 9d3e62b9c9792273ad0f8b50076e62ff7aa9fb5b | 566 | py | Python | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | ["BSD-3-Clause"] | 12 | 2017-04-13T06:49:42.000Z | 2019-11-19T09:27:43.000Z | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | ["BSD-3-Clause"] | 56 | 2017-09-05T16:00:57.000Z | 2020-11-20T18:02:58.000Z | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | ["BSD-3-Clause"] | 5 | 2017-10-08T16:55:40.000Z | 2020-09-22T14:04:53.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_combinator.py
#
# Copyright 2020 QuatroPe
#
# This file is part of ProperImage (https://github.com/quatrope/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/quatrope/ProperImage/blob/master/LICENSE.txt
#
"""
test_combinator module from ProperImage
for analysis of astronomical images
Written by Bruno SANCHEZ
PhD of Astronomy - UNC
bruno@oac.unc.edu.ar
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Of 301
"""
| 17.151515 | 76 | 0.740283 |
| 9d3f7a7d27e1b7136efc12dc236457c627b3164e | 1,025 | py | Python | ch09-linear_model/src/score_card.py | ahitboyZBW/book-ml-sem | 73208e7e492c9cbe82c4aaa6459a41e3ac1317be | ["MIT"] | 137 | 2020-10-26T11:11:46.000Z | 2022-03-29T01:21:22.000Z | ch09-linear_model/src/score_card.py | zengzhongjie/book-ml-sem | 5d452a427db5ee65538d968ba5b938af013bb87c | ["MIT"] | 4 | 2021-01-18T08:57:04.000Z | 2021-07-29T02:39:00.000Z | ch09-linear_model/src/score_card.py | zengzhongjie/book-ml-sem | 5d452a427db5ee65538d968ba5b938af013bb87c | ["MIT"] | 46 | 2020-10-26T11:11:57.000Z | 2022-03-08T00:15:32.000Z |
'''
Parameters
----------
df : DataFrame encoded with WOE values, scored together with logit
logit : a fitted sklearn logistic-regression model

Returns
-------
Score

Example:
    df = cal_score(df, logit)
'''
| 21.808511 | 77 | 0.559024 |
| 9d41431a104dca3b80f9642ad172c2f1314cf033 | 3,790 | py | Python | Tools/ecl_ekf/batch_process_logdata_ekf.py | lgarciaos/Firmware | 26dba1407bd1fbc65c23870a22fed904afba6347 | ["BSD-3-Clause"] | 4,224 | 2015-01-02T11:51:02.000Z | 2020-10-27T23:42:28.000Z | Tools/ecl_ekf/batch_process_logdata_ekf.py | choudhary0parivesh/Firmware | 02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244 | ["BSD-3-Clause"] | 11,736 | 2015-01-01T11:59:16.000Z | 2020-10-28T17:13:38.000Z | Tools/ecl_ekf/batch_process_logdata_ekf.py | choudhary0parivesh/Firmware | 02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244 | ["BSD-3-Clause"] | 11,850 | 2015-01-02T14:54:47.000Z | 2020-10-28T16:42:47.000Z |
#! /usr/bin/env python3
"""
Runs process_logdata_ekf.py on the .ulg files in the supplied directory. ulog files are skipped
from the analysis if a corresponding .pdf file already exists (unless the overwrite flag is set).
"""
# -*- coding: utf-8 -*-
import argparse
import os, glob
from process_logdata_ekf import process_logdata_ekf
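# main() is invoked below but not part of this snippet. A minimal sketch that
# matches the module docstring (glob .ulg files, skip ones that already have a
# .pdf unless --overwrite is given); argument names are assumptions:
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('directory', help='directory containing .ulg log files')
    parser.add_argument('--overwrite', action='store_true',
                        help='reprocess logs that already have a .pdf report')
    args = parser.parse_args()
    for ulog_file in sorted(glob.glob(os.path.join(args.directory, '*.ulg'))):
        if not args.overwrite and os.path.isfile(ulog_file + '.pdf'):
            continue  # a report already exists for this log
        process_logdata_ekf(ulog_file)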
if __name__ == '__main__':
main()
| 43.563218 | 125 | 0.656201 |
| 9d429d9ff49854612f73350299d50ebaeb16c00a | 1,468 | py | Python | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | ["MIT"] | null | null | null | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | ["MIT"] | null | null | null | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | ["MIT"] | 1 | 2021-03-29T13:15:02.000Z | 2021-03-29T13:15:02.000Z |
import inspect
import warnings
from pathlib import Path
| 30.583333 | 92 | 0.632834 |
| 9d438aadf58244488ff98e5078d8104573590578 | 3,099 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | ["Apache-2.0"] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | ["Apache-2.0"] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | ["Apache-2.0"] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z |
# Python
import logging
from os import path
# Abstract
from genie.abstract import Lookup
# Parser
from genie.libs import parser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# unicon
from unicon.eal.dialogs import Statement, Dialog
log = logging.getLogger(__name__)
def save_device_information(device, **kwargs):
"""Install the commit packages. This is for IOSXR devices.
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
True: Result is PASSED
False: Result is PASSX
Raises:
None
Example:
>>> save_device_information(device=Device())
"""
# Checking the config-register has 0x2
# if not configure 0x2
# RP/0/RSP1/CPU0:PE1#admin config-register 0x2
if device.is_ha:
conn = device.active
else:
conn = device
    # Install commit (when there are packages to bring up features)
# from admin prompt
conn.admin_execute('install commit')
def get_default_dir(device):
""" Get the default directory of this device
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
default_dir (`str`): Default directory of the system
Raises:
Exception
Example:
>>> get_default_dir(device=device)
"""
try:
lookup = Lookup.from_device(device)
parsed_dict = lookup.parser.show_platform.Dir(device=device).parse()
if ":" in parsed_dict['dir']['dir_name']:
default_dir = parsed_dict['dir']['dir_name']
else:
default_dir = ''
except SchemaEmptyParserError as e:
raise Exception("No output when executing 'dir' command") from e
except Exception as e:
raise Exception("Unable to execute 'dir' command") from e
# Return default_dir to caller
log.info("Default directory on '{d}' is '{dir}'".format(d=device.name,
dir=default_dir))
return default_dir
def configure_replace(device, file_location, timeout=60, file_name=None):
"""Configure replace on device
Args:
device (`obj`): Device object
file_location (`str`): File location
timeout (`int`): Timeout value in seconds
file_name (`str`): File name
Returns:
None
Raises:
pyATS Results
"""
if file_name:
file_location = '{}{}'.format(
file_location,
file_name)
try:
# check if file exist
device.execute.error_pattern.append('.*Path does not exist.*')
device.execute("dir {}".format(file_location))
except Exception:
raise Exception("File {} does not exist".format(file_location))
dialog = Dialog([
Statement(pattern=r'\[no\]',
action='sendline(y)',
loop_continue=True,
continue_timer=False)])
device.configure("load {}\ncommit replace".format(file_location),
timeout=timeout, reply=dialog)
| 26.042017 | 77 | 0.601162 |
| 9d4487b1ae1496a3f2089388dee11fd461798de0 | 2,933 | py | Python | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | ["MIT"] | 42 | 2019-08-03T18:04:47.000Z | 2022-02-28T14:24:56.000Z | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | ["MIT"] | 88 | 2019-10-03T23:11:12.000Z | 2022-03-30T05:28:44.000Z | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | ["MIT"] | 3 | 2019-09-03T17:19:39.000Z | 2021-12-27T16:53:44.000Z |
import matplotlib.pyplot as plt
import numpy as np
from labellines import labelLines
# # Trying to get interpolation to work but getting error:
# # ValueError: The number of derivatives at boundaries does not match: expected 1, got 0+0
# from scipy.interpolate import make_interp_spline, BSpline
# n_users = np.array([100, 10000, 1000000])
# bw_case8 = np.array([1, 1.5, 98.1])
# # 300 represents number of points to make between T.min and T.max
# n_users_new = np.linspace(n_users.min(), n_users.max(), 300)
# spl8 = make_interp_spline(n_users, bw_case8, k=3) # type: BSpline
# bw_case8_smooth = spl8(n_users_new)
# plt.plot(n_users_new, bw_case8_smooth, label='case 8', linewidth=2)
n_users = [100, 10000, 1000000]
bw_case1 = [1, 1, 1]
bw_case2 = [97.7, 9.5*1000, 935.7*1000]
bw_case3 = [49.3, 4.8*1000, 476.8*1000]
bw_case4 = [1, 1.5, 98.1]
bw_case5 = [10.7, 978, 95.5*1000]
bw_case6 = [21.5, 1.9*1000, 190.9*1000]
bw_case7 = [3.9, 284.8, 27.8*1000]
bw_case8 = [1, 1.5, 98.1]
plt.xlim(100, 10**6)
plt.ylim(1, 10**6)
plt.plot(n_users, bw_case1, label='case 1', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case2, label='case 2', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case3, label='case 3', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case4, label='case 4', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case5, label='case 5', linewidth=4)
plt.plot(n_users, bw_case6, label='case 6', linewidth=4)
plt.plot(n_users, bw_case7, label='case 7', linewidth=4)
plt.plot(n_users, bw_case8, label='case 8', linewidth=4)
#labelLines(plt.gca().get_lines(),zorder=0)
case1 = "Case 1. Only receiving messages meant for you [naive case]"
case2 = "Case 2. Receiving messages for everyone [naive case]"
case3 = "Case 3. All private messages go over one discovery topic [naive case]"
case4 = "Case 4. All private messages partitioned into shards [naive case]"
case5 = "Case 5. Case 4 + All messages passed through bloom filter"
case6 = "Case 6. Case 5 + Benign duplicate receives"
case7 = "Case 7. Case 6 + Mailserver case under good conditions with small bloom fp and mostly offline"
case8 = "Case 8. Waku - No metadata protection with bloom filter and one node connected; static shard"
plt.xlabel('number of users (log)')
plt.ylabel('mb/day (log)')
plt.legend([case1, case2, case3, case4, case5, case6, case7, case8], loc='upper left')
plt.xscale('log')
plt.yscale('log')
plt.axhspan(0, 10, facecolor='0.2', alpha=0.2, color='blue')
plt.axhspan(10, 30, facecolor='0.2', alpha=0.2, color='green')
plt.axhspan(30, 100, facecolor='0.2', alpha=0.2, color='orange')
plt.axhspan(100, 10**6, facecolor='0.2', alpha=0.2, color='red')
#plt.axvspan(0, 10**2+3, facecolor='0.2', alpha=0.5)
#plt.axvspan(10**4, 10**4+10**2, facecolor='0.2', alpha=0.5)
#plt.axvspan(10**6, 10**6+10**4, facecolor='0.2', alpha=0.5)
#for i in range(0, 5):
# plt.axhspan(i, i+.2, facecolor='0.2', alpha=0.5)
plt.show()
| 41.309859 | 103 | 0.703034 |
| 9d44910e8c82debe9ba07f0a00ed736a65d972a9 | 2,000 | py | Python | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | ["MIT"] | null | null | null | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | ["MIT"] | null | null | null | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | ["MIT"] | null | null | null |
# import the necessary packages
import argparse
import cv2
import numpy as np
from polydomino.colordescriptor import ColorDescriptor
from polydomino.searcher import Searcher
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-i",
"--index",
required=True,
help="Path to where the computed index will be stored",
)
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
ap.add_argument(
"-fm", "--features", required=True, help="Method to get features of pics",
)
ap.add_argument(
"-sm", "--searcher", required=True, help="Method to search pics",
)
# ap.add_argument("-r", "--result-path", required=True, help="Path to the result path")
args = vars(ap.parse_args())
# initialize the image descriptor
cd = ColorDescriptor((8, 12, 3))
# load the query image and describe it
query = cv2.imread(args["query"])
if args["features"] == "color-moments":
features = cd.color_moments(query)
elif args["features"] == "hsv-describe":
features = cd.hsv_describe(query)
elif args["features"] == "gray-matrix":
features = cd.gray_matrix(query)
elif args["features"] == "humoments":
features = cd.humoments(query)
elif args["features"] == "ahash":
features = cd.ahash(query)
elif args["features"] == "phash":
features = cd.phash(query)
elif args["features"] == "dhash":
features = cd.dhash(query)
elif args["features"] == "mse":
features = cd.mse(query)
elif args["features"] == "hog":
features = cd.hog(query)
else:
print("Sorry, we don't support this method.")
exit(1)
# perform the search
method = args["searcher"]
searcher = Searcher(args["index"])
results = searcher.search(features, method)
print(results)
# display the query
cv2.namedWindow("Query", 0)
cv2.resizeWindow("Query", 640, 480)
cv2.imshow("Query", query)
# loop over the results
for (score, resultID) in results:
result = cv2.imread(resultID)
cv2.imshow("Result", result)
cv2.waitKey(0)
| 30.30303 | 87 | 0.6935 |
| 9d44ee135064ae4d96e5b84d0ccf61816f50cfb1 | 1,253 | py | Python | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | ["BSD-4-Clause"] | null | null | null | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | ["BSD-4-Clause"] | null | null | null | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | ["BSD-4-Clause"] | null | null | null |
import click
from irekua_dev_tools.utils import load_config
from irekua_dev_tools.utils import get_working_directory
from irekua_dev_tools.utils import load_environment_variables
from irekua_dev_tools.utils import load_repository_info
from . import git
from . import dev
from . import config
from . import db
from .extra import clean
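# The click group that the add_command() calls below attach to is not shown in
# this record. A minimal sketch, assuming a plain @click.group entry point:
@click.group()
def cli():
    """irekua development tools command line interface."""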
cli.add_command(dev.cli)
cli.add_command(git.cli)
cli.add_command(config.cli)
cli.add_command(db.cli)
cli.add_command(clean)
| 29.833333 | 73 | 0.751796 |
| 9d451d7664d2140e40043248faa30a6b327e59ee | 2,880 | py | Python | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | ["MIT"] | null | null | null | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | ["MIT"] | 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | ["MIT"] | 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z |
from optimism.JaxConfig import *
from optimism import MinimizeScalar
from optimism.test import TestFixture
from optimism.material import J2Plastic
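# The scalar objective f is referenced below but elided from this record. A
# hypothetical placeholder so the snippet is self-contained; the repository's
# actual test objective is not shown here:
def f(x):
    return (x - 1.0) ** 2  # smooth scalar function with its minimum at x = 1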
df = jacfwd(f)
if __name__ == '__main__':
TestFixture.unittest.main()
| 34.698795 | 95 | 0.515278 |
| 9d46c2badf319d174f35513f77f2237bac4308e9 | 2,709 | py | Python | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | ["MIT"] | 101 | 2015-02-08T22:20:11.000Z | 2022-03-21T18:56:42.000Z | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | ["MIT"] | 23 | 2016-11-30T08:33:21.000Z | 2021-01-26T12:11:12.000Z | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | ["MIT"] | 27 | 2015-01-03T06:49:45.000Z | 2021-12-28T03:30:54.000Z |
# -*- coding: utf-8 -*-
"""
import datetime
from anima import defaults
defaults.timing_resolution = datetime.timedelta(minutes=10)
from anima.ui import SET_PYSIDE2
SET_PYSIDE2()
from anima.ui.widgets.review import APPROVE, REQUEST_REVISION
from anima.ui import review_dialog
review_dialog.UI(review_type=REQUEST_REVISION)
"""
from anima.ui.lib import QtCore, QtWidgets
from anima.ui.base import ui_caller, AnimaDialogBase
def UI(app_in=None, executor=None, **kwargs):
"""
:param app_in: A Qt Application instance, which you can pass to let the UI
be attached to the given applications event process.
:param executor: Instead of calling app.exec_ the UI will call this given
function. It also passes the created app instance to this executor.
"""
return ui_caller(app_in, executor, ReviewDialog, **kwargs)
| 29.769231 | 80 | 0.655592 |
| 9d47cbe33f2156eddf7fcd553e506425ed8d1607 | 12,737 | py | Python | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | ["Apache-2.0"] | 1 | 2022-01-20T14:57:30.000Z | 2022-01-20T14:57:30.000Z | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | ["Apache-2.0"] | null | null | null | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | ["Apache-2.0"] | null | null | null |
import math
import re
from itertools import permutations
from logging import getLogger
from typing import Tuple, Union
from rpy2 import robjects
from rpy2.rinterface_lib.embedded import RRuntimeError
from z3 import BitVecVal
from .. import util, results
from ..decider import RowNumberInfo
from ..program import LineInterpreter
from ..tyrell.interpreter import InterpreterError
logger = getLogger('squares.interpreter')
| 42.885522 | 176 | 0.544241 |
| 9d4857e094a5401228d6f2b6484e13982abb69b9 | 7,869 | py | Python | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | ["MIT"] | 1 | 2020-11-18T15:27:53.000Z | 2020-11-18T15:27:53.000Z | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | ["MIT"] | null | null | null | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
------ What is this file? ------
This script targets the istanbul_airbnb_raw.csv file. It cleans the .csv
file in order to prepare it for further analysis
"""
#%% --- Import Required Packages ---
import os
import pathlib
from pathlib import Path # To wrap around filepaths
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import iqr
from src.helper_functions.data_preparation_helper_functions import sample_and_read_from_df
from src.helper_functions.data_preparation_helper_functions import report_null_values
#%% --- Set proper directory to assure integration with doit ---
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
#%% --- Import Data ---
import_fp = Path("../../data/raw/istanbul_airbnb_raw.csv")
airbnb = pd.read_csv(import_fp, encoding='utf-8-sig')
#%% --- Get a general sense of the datasets ---
# Shape of the data
print(airbnb.shape) # 16251 rows, 16 cols
# First few lines
print(airbnb.head())
#Not much info, let's print the columns
airbnb_columns = airbnb.columns
#%% --- Clean the dataset: Relevant - Irrelevant Columns ---
airbnb_unwanted_columns = ["neighbourhood_group", "last_review", "number_of_reviews",
"minimum_nights",
"reviews_per_month",
"calculated_host_listings_count",
"availability_365"]
#Drop unwanted columns
airbnb.drop(columns = airbnb_unwanted_columns,
axis = 1,
inplace = True)
# Check shape now
print(airbnb.shape) # 16251 rows, 9 cols
#%% --- Clean the dataset: Further Troubleshooting ---
#I want to be able to randomly take n samples from each dataset and then print
#them in a clean format to see the potential problems.
#If I had something to test for, I'd strive for a representative sample size
#while sampling. However, I think the best approach here is to print what I can
#read, because I don't have any computational measure to test for:
sample_and_read_from_df(airbnb, 20)
#SPOTTED PROBLEMS:
# dataframe airbnb column "neighbourhood" is not properly formatted:
# Formatting fixes
# should actually be called "district_tr"
# There should be an accompanying "district_eng" column.
#%% --- Fix column naming ---
#I can use either dataframe.columns attribute to assign new columns
#or i can pass a dictionary with old names/new names into dataframe.rename()
airbnb_columns_in_english = ["listing_id", "name", "host_id", "host_name", "district_eng",
"latitude", "longitude", "room_type", "price"]
airbnb.columns = airbnb_columns_in_english
#%% --- One-off fix for districts named "Eyup" ---
eyup_mask = airbnb.loc[:,"district_eng"] == "Eyup"
airbnb.loc[eyup_mask, "district_eng"] = "Eyupsultan"
#%% --- Add a new "district_tr" column
airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_eng"].str.lower().str.capitalize()
#I will be using df.map() method, so i'll need two dataframes: one for existing values - tr values
#and one for exixsting values - eng values
unique_districts_tr_corrected = ["Kadıköy", "Fatih", "Tuzla", "Gaziosmanpaşa",
                                 "Üsküdar", "Adalar", "Sarıyer", "Arnavutköy",
                                 "Silivri", "Çatalca", "Küçükçekmece", "Beyoğlu",
                                 "Şile", "Kartal", "Şişli", "Beşiktaş", "Kağıthane",
                                 "Esenyurt", "Bahçelievler", "Avcılar", "Başakşehir",
                                 "Sultangazi", "Maltepe", "Sancaktepe", "Beykoz",
                                 "Büyükçekmece", "Bakırköy", "Pendik", "Bağcılar",
                                 "Esenler", "Beylikdüzü", "Ümraniye", "Eyüpsultan",
                                 "Çekmeköy", "Ataşehir", "Sultanbeyli", "Zeytinburnu",
                                 "Güngören", "Bayrampaşa"]
unique_districts_eng_corrected = ["Kadikoy", "Fatih", "Tuzla", "Gaziosmanpasa",
"Uskudar", "Adalar", "Sariyer", "Arnavutkoy",
"Silivri", "Catalca", "Kucukcekmece", "Beyoglu",
"Sile", "Kartal", "Sisli", "Besiktas", "Kagithane",
"Esenyurt", "Bahcelievler", "Avcilar", "Basaksehir",
"Sultangazi", "Maltepe", "Sancaktepe", "Beykoz",
"Buyukcekmece", "Bakirkoy", "Pendik", "Bagcilar",
"Esenler", "Beylikduzu", "Umraniye", "Eyupsultan",
"Cekmekoy", "Atasehir", "Sultanbeyli", "Zeytinburnu",
"Gungoren", "Bayrampasa"]
airbnb_unique_districts_dict_tr = dict(zip(unique_districts_eng_corrected, unique_districts_tr_corrected))
airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_tr"].map(airbnb_unique_districts_dict_tr)
#%% --- EDA: Explore Missing Values ---
#Let's check null values first
null_report = report_null_values(airbnb)
#We have so few missing values, dropping them won't affect our quality at all.
# Let's do exactly that.
airbnb.dropna(axis = 0,
inplace = True)
#%% --- EDA: Explore Datatype agreement ---
#Now, let's check data type agreement for each column.
data_types = airbnb.dtypes
# The data types with "object" warrant further investigation
#They could just be strings, but mixed data types also show as "object"
# Let's select "object" data types and query once again.
airbnb_dtype_object_only = airbnb.select_dtypes(include = ["object"])
print(airbnb_dtype_object_only.columns)
#As all the column names seem to accomodate only strings, we can be
#pretty sure that showing up as object is correct behavior.
#%% --- EDA - Explore Outliers in price ---
fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb.loc[:,"price"],
bins = 20)
#Our histogram is very wonky. It's obvious that there are some issues. Let's see:
# It doesn't make sense for a airbnb room to cost 0 liras. That's for sure.
print(airbnb.loc[:,"price"].sort_values().head(20))
#What about maxes?
print(airbnb.loc[:,"price"].sort_values(ascending = False).head(30))
#There are some very high maxes, that's for sure. Let's try to make heads and tails of
#what these houses are:
possible_outliers = airbnb.sort_values(by = "price",
axis = 0,
ascending = False).head(30)
# A qualitative analysis of such houses shows that there really appears to be a problem
#with pricing. Let's calculate the IQR to drop the outliers:
#Calculate the iqr
price_iqr = iqr(airbnb.loc[:,"price"], axis = 0)
#Calculate q3 and q1
q1 = airbnb["price"].quantile(0.25)
q3 = airbnb["price"].quantile(0.75)
#Create min and max mask
min_mask = airbnb.loc[:,"price"] >= q1 - (1.5 * price_iqr)
max_mask = airbnb.loc[:,"price"] <= q3 + (1.5 * price_iqr)
#Combine masks
combined_mask = min_mask & max_mask
#Create subset
airbnb_within_iqr = airbnb.loc[combined_mask]
fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb_within_iqr.loc[:,"price"],
bins = 20)
#Alright, limiting our data to an IQR appears to omit a whole lot of data.
#I am sure that some of the outliers we have are errors of entry.
#However, the only ones that we can conclusively prove are the entries that are rated at 0.
#We'll drop these
#Create a mask for zeros
zero_mask = (airbnb.loc[:,"price"] > 0)
#Filter using the mask
airbnb = airbnb.loc[zero_mask,:]
#%% --- Export Data ---
export_fp = Path("../../data/processed/istanbul_airbnb_processed.csv")
airbnb.to_csv(export_fp,
encoding='utf-8-sig',
index = False)
| 38.199029 | 106 | 0.641632 |
| 9d4ac45e3a86ef95dc9b84f578aa4f83f679c9b6 | 3,695 | py | Python | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | ["MIT"] | 44 | 2019-08-30T02:51:59.000Z | 2022-03-15T13:47:18.000Z | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | ["MIT"] | 21 | 2019-09-01T16:17:22.000Z | 2022-02-01T15:47:55.000Z | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | ["MIT"] | 16 | 2019-09-01T01:40:09.000Z | 2022-03-15T17:12:28.000Z |
import time
import select
import queue
import atexit
import sys
import logging
from networkdevice import ShureNetworkDevice
from channel import chart_update_list, data_update_list
# from mic import WirelessMic
# from iem import IEM
NetworkDevices = []
DeviceMessageQueue = queue.Queue()
# @atexit.register
# atexit.register(on_exit)
# signal.signal(signal.SIGTERM, on_exit)
# signal.signal(signal.SIGINT, on_exit)
| 29.56 | 97 | 0.603518 |
| 9d4cf41e0ad2b23397f4ee9bbfa792895ce345d0 | 277 | py | Python | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | ["MIT"] | null | null | null | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | ["MIT"] | null | null | null | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | ["MIT"] | null | null | null |
import enum
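# The Status enum referenced below is missing from this record. A minimal
# sketch that makes the module-level aliases work; the member values are assumed:
class Status(enum.Enum):
    QUEUED = 'queued'
    STARTED = 'started'
    FINISHED = 'finished'
    ERROR = 'error'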
QUEUED = Status.QUEUED
STARTED = Status.STARTED
FINISHED = Status.FINISHED
ERROR = Status.ERROR
| 17.3125 | 26 | 0.67148 |
| 9d4d51c8583e8a9e583bab2100d07d40d0fad696 | 977 | py | Python | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | ["MIT"] | null | null | null | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | ["MIT"] | null | null | null | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | ["MIT"] | null | null | null |
"""Simulation 1:1:1 comptition binding"""
import numpy as np
from high_accuracy_binding_equations import *
# We can choose to work in a common unit, typically nM or uM; as long as all
# numbers are in the same unit, the result is valid. We assume uM for all
# concentrations below.
# First, let's simulate a few single points from three different systems.
# p, l and i are protein, ligand and inhibitor concentrations respectively.
# kdpl is the dissociation constant (KD) of the protein-ligand interaction.
# kdpi is the dissociation constant (KD) of the protein-inhibitor interaction.
# We can either expand the dictionary with ** as shown in the example with
# system1, or we can pass arguments to competition_pl with the following
# signature: competition_pl(p, l, i, kdpl, kdpi)
system1={"p":1, "l":2, "i":10, "kdpl":0.1, "kdpi":10}
pl_conc=competition_pl(**system1)
print(f"pl_conc = {round(pl_conc,4)}, fraction ligand bound = {round(pl_conc/system1['l'],4)}")
| 36.185185 | 95 | 0.745138 |
| 9d50b18aa63e6f3b4b6406ced31f91d878b8ae26 | 773 | py | Python | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | ["MIT"] | 1 | 2021-06-30T08:58:49.000Z | 2021-06-30T08:58:49.000Z | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | ["MIT"] | null | null | null | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | ["MIT"] | null | null | null |
import numpy as np
from pathlib import Path
import sys
if __name__ == '__main__':
# absolute path
my_path = Path(__file__).parent.resolve().expanduser()
main_path = my_path.parent.parent
seed = 0
nlat = 10
alpha = 1.0
beta = 6.0
gamma = 1.0
epochs = 100
# cmd template
cmd = f'python main.py btcvae_mnist_{epochs}ep/z{nlat}_a{alpha}_b{beta}_g{gamma}_s{seed} -s {seed} ' \
f'--checkpoint-every 25 -d mnist -e {epochs} -b 64 --lr 0.0005 ' \
f'-z {nlat} -l btcvae --btcvae-A {alpha} --btcvae-B {beta} --btcvae-G {gamma} ' \
f'--no-test\n'
    unnormalized_beta = beta * nlat  # computed but not used when writing the script
    with open(my_path / f'train_beta{beta}.sh', 'w') as f:
        f.write(cmd)
| 28.62963 | 107 | 0.564036 |
| 9d5197f8d1796538860fe2f3fb98a1af46c8ef38 | 3,331 | py | Python | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | ["MIT"] | 231 | 2019-09-25T13:30:00.000Z | 2022-03-26T08:00:47.000Z | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | ["MIT"] | 11 | 2019-10-01T14:50:15.000Z | 2022-02-23T10:35:47.000Z | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | ["MIT"] | 36 | 2019-09-30T16:14:48.000Z | 2022-03-19T19:59:30.000Z |
##########################################################################
#
# Unit tests (pytest) for load.py
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import simfin as sf
from simfin.datasets import iter_all_datasets
##########################################################################
# Test configuration.
# Set data directory.
sf.set_data_dir(data_dir='~/simfin_data/')
# Load API key or use default 'free' if key-file doesn't exist.
sf.load_api_key(path='~/simfin_api_key.txt', default_key='free')
# Set number of days before refreshing data from SimFin server.
refresh_days = 30
##########################################################################
# Helper functions.
def _create_kwargs(variant, market):
"""
Create a dict with keyword args for sf.load() functions that take
variant, market and refresh_days as kwargs.
"""
kwargs = \
{
'variant': variant,
'market': market,
'refresh_days': refresh_days,
}
return kwargs
##########################################################################
# Test functions.
def test_load():
"""Test simfin.bulk.load()"""
for dataset, variant, market in iter_all_datasets():
sf.load(dataset=dataset,
variant=variant,
market=market,
refresh_days=refresh_days)
def test_load_income():
"""Test simfin.bulk.load_income()"""
for dataset, variant, market in iter_all_datasets(datasets='income'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_income(**kwargs)
sf.load_income_banks(**kwargs)
sf.load_income_insurance(**kwargs)
def test_load_balance():
"""Test simfin.bulk.load_balance()"""
for dataset, variant, market in iter_all_datasets(datasets='balance'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_balance(**kwargs)
sf.load_balance_banks(**kwargs)
sf.load_balance_insurance(**kwargs)
def test_load_cashflow():
"""Test simfin.bulk.load_cashflow()"""
for dataset, variant, market in iter_all_datasets(datasets='cashflow'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_cashflow(**kwargs)
sf.load_cashflow_banks(**kwargs)
sf.load_cashflow_insurance(**kwargs)
def test_load_shareprices():
"""Test simfin.bulk.load_shareprices()"""
for dataset, variant, market in iter_all_datasets(datasets='shareprices'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_shareprices(**kwargs)
def test_load_companies():
"""Test simfin.bulk.load_companies()"""
for dataset, variant, market in iter_all_datasets(datasets='companies'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_companies(**kwargs)
def test_load_industries():
"""Test simfin.bulk.load_industries()"""
sf.load_industries(refresh_days=refresh_days)
##########################################################################
| 31.424528 | 78 | 0.576403 |
| 9d5477ef2956d3615e64662b0ab23440b2cbff69 | 447 | py | Python | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | ["MIT"] | 36 | 2018-05-08T20:54:21.000Z | 2022-02-24T09:15:58.000Z | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | ["MIT"] | 4 | 2021-06-24T11:59:29.000Z | 2022-02-01T15:51:30.000Z | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | ["MIT"] | 3 | 2021-02-14T13:00:26.000Z | 2021-12-10T08:55:17.000Z |
import sys
from plum import Dispatcher
B = sys.modules[__name__] # Allow both import styles.
dispatch = Dispatcher() # This dispatch namespace will be used everywhere.
from .generic import *
from .shaping import *
from .linear_algebra import *
from .random import *
from .numpy import *
from .types import *
from .control_flow import *
# Fix namespace issues with `B.bvn_cdf` simply by setting it explicitly.
B.bvn_cdf = B.generic.bvn_cdf
| 22.35 | 75 | 0.756152 |
| 9d556827bb836c6e6f6530ec156f0777935a5dea | 1,514 | py | Python | async_nbgrader/apps/exportapp.py | IllumiDesk/async_nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | ["Apache-2.0"] | 2 | 2021-06-23T17:58:22.000Z | 2021-09-27T10:00:01.000Z | async_nbgrader/apps/exportapp.py | IllumiDesk/async-nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | ["Apache-2.0"] | 6 | 2021-06-17T21:40:24.000Z | 2021-11-11T17:48:15.000Z | async_nbgrader/apps/exportapp.py | IllumiDesk/async-nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | ["Apache-2.0"] | 2 | 2021-06-10T18:16:22.000Z | 2021-06-17T02:52:45.000Z |
# coding: utf-8
from nbgrader.api import Gradebook
from nbgrader.apps import ExportApp as BaseExportApp
from traitlets import Instance
from traitlets import Type
from traitlets import default
from ..plugins import CanvasCsvExportPlugin
from ..plugins import CustomExportPlugin
aliases = {
"log-level": "Application.log_level",
"db": "CourseDirectory.db_url",
"to": "CanvasCsvExportPlugin.to",
"canvas_import": "CanvasCsvExportPlugin.canvas_import",
"exporter": "ExportApp.plugin_class",
"assignment": "CanvasCsvExportPlugin.assignment",
"student": "CanvasCsvExportPlugin.student",
"course": "CourseDirectory.course_id",
}
flags = {}
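# The ExportApp subclass that consumes these aliases and flags is truncated in
# this record. A minimal sketch of its likely shape, assuming the app swaps the
# default exporter for the Canvas CSV plugin via a configurable trait:
class ExportApp(BaseExportApp):
    aliases = aliases
    flags = flags

    plugin_class = Type(
        CanvasCsvExportPlugin,
        help="The plugin class used for exporting grades.",
    ).tag(config=True)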
| 27.527273 | 78 | 0.703435 |
| 9d55833e8ac84841e916071829ab4546156cae04 | 2,810 | py | Python | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | ["Apache-2.0"] | 17 | 2019-12-04T03:14:56.000Z | 2022-03-27T07:05:19.000Z | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | ["Apache-2.0"] | 1 | 2020-04-17T07:32:30.000Z | 2020-04-17T07:32:30.000Z | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | ["Apache-2.0"] | 5 | 2020-03-16T10:43:03.000Z | 2021-07-14T14:43:49.000Z |
"""
django_ocr_server/conf.py
+++++++++++++++++++++++++
The settings manager for **django_ocr_server**.
Usage:
.. code-block:: python
from django_ocr_server.conf import ocr_settings
# Next line will print a value of **OCR_TESSERACT_LANG**
# using the variable from the Django's *settings.py* file
# if the variable is set there.
# Or the default value of **OCR_TESSERACT_LANG** from
# *django_ocr_server/default_settings.py* otherwise.
print(ocr_settings.OCR_TESSERACT_LANG)
| Author: shmakovpn <shmakovpn@yandex.ru>
| Date: 2021-01-20
"""
from typing import List
from datetime import timedelta
from django.conf import settings as _s
import django_ocr_server.default_settings as _ds
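# The DjangoOcrSettings class is not included in this record. A minimal sketch
# matching the behaviour the docstring describes (prefer a value from Django's
# settings.py, fall back to django_ocr_server/default_settings.py):
class DjangoOcrSettings:
    def __getattr__(self, name: str):
        if hasattr(_s, name):
            return getattr(_s, name)
        return getattr(_ds, name)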
ocr_settings: DjangoOcrSettings = DjangoOcrSettings()
"""The instance of settings manager of **django_ocr_server**"""
| 29.893617 | 79 | 0.687544 |
| 9d56f0959997626e16345a92ca50c1b01d2ed5e6 | 105 | py | Python | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | ["Apache-2.0"] | null | null | null | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | ["Apache-2.0"] | null | null | null | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | ["Apache-2.0"] | null | null | null |
from django.apps import AppConfig
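# The AppConfig subclass is truncated in this record. A minimal sketch of the
# conventional config for a Django app named "sfm"; the class name is an assumption:
class SfmConfig(AppConfig):
    name = 'sfm'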
| 17.5 | 33 | 0.704762 |
| 9d5735cba5c6faf4bc0915b6d346541d85cbb4ac | 15,960 | py | Python | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | ["MIT"] | 15 | 2021-01-15T01:54:26.000Z | 2022-03-31T16:00:52.000Z | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | ["MIT"] | 2 | 2021-07-21T22:42:09.000Z | 2021-11-22T06:39:20.000Z | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | ["MIT"] | 6 | 2021-01-16T04:07:17.000Z | 2022-02-23T02:11:49.000Z |
import math
import numpy as np
from openeye import oechem
from torsion.inchi_keys import get_torsion_oeatom_list, get_torsion_oebond
def GetPairwiseDistanceMatrix(icoords, jcoords):
'''
    input: two sets of coordinates, icoords and jcoords, each a list
    of OEDoubleArray(3) containing x, y, and z components
output:
xij - the x component of the distance matrix
yij - the y component of the distance matrix
zij - the z component of the distance matrix
rij - the distance matrix
rij2 - square of the distance matrix
'''
nullRet = [None, None, None, None, None]
ni = len(icoords)
nj = len(jcoords)
try:
iArrayX = np.array([c[0] for c in icoords])
iArrayY = np.array([c[1] for c in icoords])
iArrayZ = np.array([c[2] for c in icoords])
iArrayX = np.repeat(iArrayX, nj)
iArrayY = np.repeat(iArrayY, nj)
iArrayZ = np.repeat(iArrayZ, nj)
iArrayX = iArrayX.reshape(ni, nj)
iArrayY = iArrayY.reshape(ni, nj)
iArrayZ = iArrayZ.reshape(ni, nj)
jArrayX = np.array([c[0] for c in jcoords])
jArrayY = np.array([c[1] for c in jcoords])
jArrayZ = np.array([c[2] for c in jcoords])
jArrayX = np.repeat(jArrayX, ni)
jArrayY = np.repeat(jArrayY, ni)
jArrayZ = np.repeat(jArrayZ, ni)
jArrayX = jArrayX.reshape(nj, ni)
jArrayY = jArrayY.reshape(nj, ni)
jArrayZ = jArrayZ.reshape(nj, ni)
jArrayX = np.transpose(jArrayX)
jArrayY = np.transpose(jArrayY)
jArrayZ = np.transpose(jArrayZ)
ijArrayX = jArrayX - iArrayX
ijArrayY = jArrayY - iArrayY
ijArrayZ = jArrayZ - iArrayZ
rijArraySq = (ijArrayX * ijArrayX) + (ijArrayY * ijArrayY) + (ijArrayZ * ijArrayZ)
rijArray = np.sqrt(rijArraySq)
return ijArrayX, ijArrayY, ijArrayZ, rijArray, rijArraySq
    except Exception:
return nullRet
def GetThetaIJKMatrix(iCoords, jCoords, kCoords):
'''
Using the given input, calculates a matrix of angles ijk
iCoords -> OEDoubleArray containing x, y, and z component of the reference coordinate
    jCoords -> list of N OEDoubleArrays, each OEDoubleArray is of size 3
    kCoords -> list of M OEDoubleArrays, each OEDoubleArray is of size 3
return a N-by-M matrix of angle theta_ijk
'''
jiArrayX, jiArrayY, jiArrayZ, rjiArray, rjiArraySq \
= GetPairwiseDistanceMatrix(jCoords, iCoords)
jkArrayX, jkArrayY, jkArrayZ, rjkArray, rjkArraySq \
= GetPairwiseDistanceMatrix(jCoords, kCoords)
if jCoords == kCoords:
rjkArray = np.eye(len(jCoords)) + np.sqrt(rjkArraySq)
else:
rjkArray = np.sqrt(rjkArraySq)
if jCoords == iCoords:
rjiArray = np.eye(len(jCoords)) + np.sqrt(rjiArraySq)
else:
rjiArray = np.sqrt(rjiArraySq)
jiArrayX = jiArrayX / rjiArray
jiArrayY = jiArrayY / rjiArray
jiArrayZ = jiArrayZ / rjiArray
jkArrayX = jkArrayX / rjkArray
jkArrayY = jkArrayY / rjkArray
jkArrayZ = jkArrayZ / rjkArray
dotProduct = (jiArrayX * jkArrayX) + (jiArrayY * jkArrayY) + (jiArrayZ * jkArrayZ)
dotProduct = np.select([dotProduct <= -1.0, dotProduct >= 1.0, np.abs(dotProduct) < 1.0],
[-0.999, 0.999, dotProduct])
theta_ijk = np.arccos(dotProduct)
return theta_ijk
def GetThetaIJKLMatrix(mol, iAtoms, jAtom, kAtom, lAtoms, transform=True):
'''
Using the given input, calculates a matrix of torsion angles around jk
jAtom, kAtom -> OEAtombase, middle two atoms of the torsion
iAtoms -> list of N OEAtombase
lAtoms -> list of M OEAtombase
return a N-by-M matrix of angle theta_ijkl
'''
torsions = []
for iAtom in iAtoms:
for lAtom in lAtoms:
tor_angle = oechem.OEGetTorsion(mol, iAtom, jAtom, kAtom, lAtom)
if not transform:
torsions.append(tor_angle)
else:
torsions.append((math.pi + tor_angle) / 4.0)
theta_ijkl = np.array(torsions)
theta_ijkl = theta_ijkl.reshape(len(iAtoms), len(lAtoms))
return theta_ijkl
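# Note on transform=True above: each torsion in [-pi, pi] is remapped via
# (pi + angle) / 4 into [0, pi/2], a compact range for symmetry-function input.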
| 40.507614
| 111
| 0.553446
|
9d5757c4a8bf60547e9dd883852158e386888c4b
| 6,785
|
py
|
Python
|
recommendation/recommendation.py
|
Jackson-Y/Machine-Learning
|
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
|
[
"MIT"
] | 4
|
2017-08-17T02:11:45.000Z
|
2017-09-25T00:46:13.000Z
|
recommendation/recommendation.py
|
Jackson-Y/Machine-Learning
|
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
|
[
"MIT"
] | null | null | null |
recommendation/recommendation.py
|
Jackson-Y/Machine-Learning
|
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
|
[
"MIT"
] | null | null | null |
""" Candidate generation & LTR Learning to Ranking"""
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
from operator import itemgetter
from math import sqrt
import pandas as pd
import pymysql
from sklearn.model_selection import train_test_split
# from sklearn.metrics.pairwise import pairwise_distances
# from sklearn.metrics import mean_squared_error
FLAGS = None
def main(_):
"""main function"""
user_cf = UserBasedCF(20, 10)
user_cf.load_data()
user_cf.calc_user_similarity()
    recommended_articles = user_cf.recommendation(FLAGS.uid)
    print(recommended_articles[0:10])
    out = PrintArticles(recommended_articles[0:10])
out.output()
# user_cf.evaluate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--uid",
type=int,
default=80871,
help="The user who is going to be recommended articles."
)
parser.add_argument(
"--n",
type=int,
default=10,
help="Number of recommended articles."
)
FLAGS, unparsed = parser.parse_known_args()
print("{} {}".format(sys.argv[0], unparsed))
print(FLAGS)
main(FLAGS)
| 36.875
| 115
| 0.592336
|
9d59344dd6f980db538f0cd26f71a979f4b914e4
| 1,592
|
py
|
Python
|
orchestration/dags/twitter_streaming.py
|
amommendes/tweetstream
|
ef09928a4f3344210c597388332d18a53149bb41
|
[
"Apache-2.0"
] | null | null | null |
orchestration/dags/twitter_streaming.py
|
amommendes/tweetstream
|
ef09928a4f3344210c597388332d18a53149bb41
|
[
"Apache-2.0"
] | null | null | null |
orchestration/dags/twitter_streaming.py
|
amommendes/tweetstream
|
ef09928a4f3344210c597388332d18a53149bb41
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.python_operator import PythonOperator
from tweetstream.consumers.twitter_streaming import TwitterStreamingConsumer
from tweetstream.clients.spark import SparkClient
default_args = {
"owner": "tweeetstream",
"depends_on_past": False,
"start_date": days_ago(1),
"email": ["tweetstream@team.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
dag = DAG(
dag_id="twitter_streaming",
default_args=default_args,
description="Tweets Streaming Consumer",
schedule_interval=timedelta(days=1),
)
start_job_task = PythonOperator(
dag=dag,
task_id="start_streaming",
python_callable=main,
execution_timeout=None,
)
| 30.615385
| 105
| 0.692839
|
9d59ba6b91ae4d068be41f7bfb3634b177f8ade2
| 217
|
py
|
Python
|
tests/expr/expr09.py
|
ktok07b6/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 83
|
2015-11-30T09:59:13.000Z
|
2021-08-03T09:12:28.000Z
|
tests/expr/expr09.py
|
jesseclin/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 4
|
2017-02-10T01:43:11.000Z
|
2020-07-14T03:52:25.000Z
|
tests/expr/expr09.py
|
jesseclin/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 11
|
2016-11-18T14:39:15.000Z
|
2021-02-23T10:05:20.000Z
|
from polyphony import testbench
test()
| 16.692308
| 38
| 0.654378
|
9d5d5a4039dbeb89722961536cacebbce65b4ec3
| 1,059
|
py
|
Python
|
setup.py
|
fg1/ipynb_format
|
58dc276fca4f1fbb179d7e84ce41d59663d011c2
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
fg1/ipynb_format
|
58dc276fca4f1fbb179d7e84ce41d59663d011c2
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
fg1/ipynb_format
|
58dc276fca4f1fbb179d7e84ce41d59663d011c2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
from setuptools import setup, find_packages
from codecs import open
with open('README.rst', 'r', 'utf-8') as fd:
long_description = fd.read()
setup(name='ipynb_format',
version='0.1.1',
description='A code formatter for python code in ipython notebooks',
long_description=long_description,
url='https://github.com/fg1/ipynb_format',
author='fg1',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
keywords='ipython notebook',
packages=find_packages(),
install_requires=['yapf'],
entry_points={
'console_scripts': [
'ipynb_format=ipynb_format:cli',
],
}, )
| 31.147059
| 74
| 0.588291
|
9d5e11c9180f5fb664452c5f269722fdf9e6a4db
| 140
|
py
|
Python
|
Homework/Homework2/2_3.py
|
404nofound/CS521-Info-Str-Python
|
2ead691c519f29713419e79d600a2d2a1c87d1c1
|
[
"Apache-2.0"
] | null | null | null |
Homework/Homework2/2_3.py
|
404nofound/CS521-Info-Str-Python
|
2ead691c519f29713419e79d600a2d2a1c87d1c1
|
[
"Apache-2.0"
] | null | null | null |
Homework/Homework2/2_3.py
|
404nofound/CS521-Info-Str-Python
|
2ead691c519f29713419e79d600a2d2a1c87d1c1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
feet = eval(input("Enter a value for feet: "))
meter = feet * 0.305
print (feet, "feet is %.4f meters" %(meter))
| 17.5
| 46
| 0.585714
|
9d5e75f8cb60c04470b0412ae613020592c5aad7
| 16,072
|
py
|
Python
|
nn/units/ceecnet.py
|
feevos/ceecnet
|
9dc76f8cd16d44b264cae8c5846eefb8fcf6162d
|
[
"BSD-3-Clause",
"MIT"
] | 45
|
2020-09-07T01:19:44.000Z
|
2022-03-15T14:44:20.000Z
|
nn/units/ceecnet.py
|
xautdestiny/ceecnet
|
897cd5c128ffd85369732d1cac82a6ddc7643afa
|
[
"MIT",
"BSD-3-Clause"
] | 10
|
2020-10-02T10:14:47.000Z
|
2021-10-19T09:34:14.000Z
|
nn/units/ceecnet.py
|
xautdestiny/ceecnet
|
897cd5c128ffd85369732d1cac82a6ddc7643afa
|
[
"MIT",
"BSD-3-Clause"
] | 14
|
2020-09-29T02:46:18.000Z
|
2021-09-27T07:13:47.000Z
|
from mxnet import gluon
from mxnet.gluon import HybridBlock
from ceecnet.nn.layers.conv2Dnormed import *
from ceecnet.utils.get_norm import *
from ceecnet.nn.layers.attention import *
# ======= Definitions for CEEC unit v2 (replace concatenations with Fusion) ========================
# -------------------------------------- helper functions -------------------------------------------
# -------------------------------------------------------------------------------------------------------------------
| 45.659091
| 186
| 0.66059
|
9d6069e2ba0263497aa9f814cd08018989e4473f
| 1,928
|
py
|
Python
|
reviewboard/reviews/search_indexes.py
|
znick/reviewboard
|
f32320b267efcdf2feff1661eabe57f99ef490a7
|
[
"MIT"
] | 1
|
2018-08-23T09:19:02.000Z
|
2018-08-23T09:19:02.000Z
|
reviewboard/reviews/search_indexes.py
|
klpyang/reviewboard
|
d7dabf36e5b492f18048dd7084026bf99d6933c5
|
[
"MIT"
] | null | null | null |
reviewboard/reviews/search_indexes.py
|
klpyang/reviewboard
|
d7dabf36e5b492f18048dd7084026bf99d6933c5
|
[
"MIT"
] | 1
|
2021-11-23T15:25:44.000Z
|
2021-11-23T15:25:44.000Z
|
from django.db.models import Q
from haystack import indexes
from reviewboard.reviews.models import ReviewRequest
| 39.346939
| 76
| 0.705394
|
19b2caec75b18b0aa3e0597b5caa0b0c55ce8cad
| 7,365
|
py
|
Python
|
gpss/transaction.py
|
martendo/gpss.py
|
52c6781bd8a65b651381ed11da9e31ddfae6e313
|
[
"MIT"
] | 2
|
2021-11-28T08:48:02.000Z
|
2022-03-09T16:19:06.000Z
|
gpss/transaction.py
|
martendo/gpss.py
|
52c6781bd8a65b651381ed11da9e31ddfae6e313
|
[
"MIT"
] | null | null | null |
gpss/transaction.py
|
martendo/gpss.py
|
52c6781bd8a65b651381ed11da9e31ddfae6e313
|
[
"MIT"
] | null | null | null |
from .statement import Statement, StatementType
from .event import Event
from ._helpers import debugmsg, simulation_error
| 41.610169
| 81
| 0.509029
|
19b32c34ea299311dabdf3d678344f668cb1f1a4
| 234
|
py
|
Python
|
stone/config.py
|
ichengplus/mpmatrix
|
41cf1ac48abe9aef1b92f1174157608a60e30da0
|
[
"Apache-2.0"
] | null | null | null |
stone/config.py
|
ichengplus/mpmatrix
|
41cf1ac48abe9aef1b92f1174157608a60e30da0
|
[
"Apache-2.0"
] | 3
|
2021-03-10T16:23:59.000Z
|
2022-02-13T12:02:00.000Z
|
stone/config.py
|
ichengplus/mpmatrix
|
41cf1ac48abe9aef1b92f1174157608a60e30da0
|
[
"Apache-2.0"
] | null | null | null |
REDIS_URL = "redis://redis:6379/0"
DEBUG = True
TESTING = False
JOBS = [
{
'id': 'actoken_refresh',
'func': 'actoken:refresh',
'args': None,
'trigger': 'interval',
'seconds': 7000
}
]
| 15.6
| 34
| 0.504274
|
19b3b12f916bfa71763e1f5555965f2dffc3a223
| 727
|
py
|
Python
|
utils/update.py
|
adavila0703/warehouse-hub
|
29778d605e372d6d6b41d05a3637edefb047f0bc
|
[
"MIT"
] | null | null | null |
utils/update.py
|
adavila0703/warehouse-hub
|
29778d605e372d6d6b41d05a3637edefb047f0bc
|
[
"MIT"
] | null | null | null |
utils/update.py
|
adavila0703/warehouse-hub
|
29778d605e372d6d6b41d05a3637edefb047f0bc
|
[
"MIT"
] | null | null | null |
from shutil import copy, copytree, rmtree
import pathlib
import os
import time
def update():
"""Update is a script to auto update all the files that the user is using"""
print('Warehouse Hub is updating, do not close this window...')
time.sleep(3)
print('Applying patch...')
time.sleep(1)
copy('C:/warehousehub/warehousehub.exe', pathlib.Path().absolute())
rmtree(f'{pathlib.Path().absolute()}/templates')
copytree('C:/warehousehub/templates', f'{pathlib.Path().absolute()}/templates')
print('Patch Completed!')
print('Warehouse Hub is restarting, please wait...')
os.system(f'cmd /c "{pathlib.Path().absolute()}/warehousehub.exe"')
if __name__ == '__main__':
update()
| 25.068966
| 83
| 0.672627
|
19b3f6aeb28dd07d2770e4ea600d2a99c0c06e65
| 3,134
|
py
|
Python
|
train_video.py
|
jacke121/MBMD
|
2daf5edb4fb40ee652baead4f9332ca00fa111a5
|
[
"MIT"
] | 220
|
2018-09-17T15:42:54.000Z
|
2021-09-13T13:14:22.000Z
|
train_video.py
|
jacke121/MBMD
|
2daf5edb4fb40ee652baead4f9332ca00fa111a5
|
[
"MIT"
] | 12
|
2018-09-19T09:30:42.000Z
|
2019-07-01T04:03:51.000Z
|
train_video.py
|
jacke121/MBMD
|
2daf5edb4fb40ee652baead4f9332ca00fa111a5
|
[
"MIT"
] | 60
|
2018-09-18T00:29:50.000Z
|
2021-02-22T03:55:19.000Z
|
import functools
import tensorflow as tf
from core import trainer_video, input_reader
from core.model_builder import build_man_model
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
import os
'''
lijun's code
modify bb to conv1*2 and conv3*2
L2 normalization to match
'''
os.environ["CUDA_VISIBLE_DEVICES"]="0"
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('train_dir', 'model/dump',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', 'model/ssd_mobilenet_video.config',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_string('image_root', '/home/xiaobai/Documents/ILSVRC2014_DET_train/image/ILSVRC2014_DET_train',
'Root path to input images')
flags.DEFINE_string('video_root', '/home/xiaobai/Documents/ILSVRC2015/',
'Root path to input videos')
flags.DEFINE_string('image_tfrecord', './train_seq.record',
'Path to image tfrecord.')
flags.DEFINE_string('video_tfrecord', './train_vid.record',
'Path to video tfrecord')
FLAGS = flags.FLAGS
def get_configs_from_pipeline_file():
"""Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.
Reads training config from file specified by pipeline_config_path flag.
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
model_config = pipeline_config.model.ssd
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
return model_config, train_config, input_config
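# Usage sketch (added): the three configs drive the trainer, with paths taken
# from the FLAGS defined above.
# model_config, train_config, input_config = get_configs_from_pipeline_file()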
if __name__ == '__main__':
# update moving average
tf.app.run()
| 35.613636
| 128
| 0.744735
|
19b4fbf622ea3b5c2b94266b63984fdd1ea1e133
| 460
|
py
|
Python
|
config.py
|
icewing1996/biaffine-parser
|
f5a4ece7ba9a087d81b76dd6a8ea6aa7d90c6c82
|
[
"MIT"
] | 1
|
2019-04-02T14:42:20.000Z
|
2019-04-02T14:42:20.000Z
|
config.py
|
icewing1996/biaffine-parser
|
f5a4ece7ba9a087d81b76dd6a8ea6aa7d90c6c82
|
[
"MIT"
] | null | null | null |
config.py
|
icewing1996/biaffine-parser
|
f5a4ece7ba9a087d81b76dd6a8ea6aa7d90c6c82
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 15.862069
| 24
| 0.558696
|
19b7ef31e8ac32e464e2b7f9641c6ad98cd6de46
| 3,301
|
py
|
Python
|
conf_dblp.py
|
AmiraKetfi/ScientificProductScraper
|
c700fb579ac47266e76ec834ccbd8674abeaff50
|
[
"MIT"
] | 4
|
2018-04-04T12:10:59.000Z
|
2020-02-22T17:26:14.000Z
|
conf_dblp.py
|
AmiraKetfi/ScientificProductScraper
|
c700fb579ac47266e76ec834ccbd8674abeaff50
|
[
"MIT"
] | null | null | null |
conf_dblp.py
|
AmiraKetfi/ScientificProductScraper
|
c700fb579ac47266e76ec834ccbd8674abeaff50
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:01:40 2018
@author: pc
"""
import scholarly,re,urllib.request,nltk
import bs4 as bs
# =============================================================================
# Problem: the last conferences do not get added
# =============================================================================
# pass
url_deb='https://dblp.uni-trier.de/db/conf/'
url_deb2='http://dblp.uni-trier.de/db/conf/3dim/3dimpvt2012.html'
url_deb3='http://dblp.uni-trier.de/db/conf/3dpvt/'
#Timeline_of_conferences(url_deb2)
publication_conf_dblp(url_deb3)
#find_ComputerScienceConferences_Workshops_names_DBLP(url_deb)
| 38.383721
| 124
| 0.499546
|
19b8ce0aa97bf71df30c5a8e086263306534c4c7
| 4,540
|
py
|
Python
|
src/robot.py
|
FROG3160/FRC2018-ARWING
|
6635274d79839ea92d8591af2c8e51f8e1112ec1
|
[
"MIT"
] | 1
|
2019-01-15T00:47:16.000Z
|
2019-01-15T00:47:16.000Z
|
src/robot.py
|
FROG3160/FRC2018-ARWING
|
6635274d79839ea92d8591af2c8e51f8e1112ec1
|
[
"MIT"
] | 18
|
2018-02-15T01:07:03.000Z
|
2018-04-10T00:25:59.000Z
|
src/robot.py
|
FROG3160/FRC2018-ARWING
|
6635274d79839ea92d8591af2c8e51f8e1112ec1
|
[
"MIT"
] | 4
|
2018-01-31T01:53:44.000Z
|
2018-02-16T00:30:14.000Z
|
#!/usr/bin/env python3
"""
Main code for Robot
"""
import wpilib
import robotmap
from wpilib import Joystick
from subsystems.drivetrain import DriveTrain as Drive
from subsystems.grabber import cubeGrabber
from subsystems.elevator import Elevator
from subsystems.climber import Climber
from subsystems.autonomous import Autonomous
from wpilib.sendablechooser import SendableChooser
# from robotpy_ext.common_drivers.navx import AHRS
if __name__ == "__main__":
wpilib.run(Robot)
| 32.661871
| 83
| 0.652643
|
19b94d7c9d394f09ecf7228b67004f998dd55522
| 1,764
|
py
|
Python
|
api/attomized_avm.py
|
johncoleman83/attom_python_client
|
2fad572162f481a71cccf6003da4cbd8ec4477d4
|
[
"MIT"
] | null | null | null |
api/attomized_avm.py
|
johncoleman83/attom_python_client
|
2fad572162f481a71cccf6003da4cbd8ec4477d4
|
[
"MIT"
] | null | null | null |
api/attomized_avm.py
|
johncoleman83/attom_python_client
|
2fad572162f481a71cccf6003da4cbd8ec4477d4
|
[
"MIT"
] | 1
|
2020-11-20T19:28:36.000Z
|
2020-11-20T19:28:36.000Z
|
#!/usr/bin/env python3
"""
ATTOM API
https://api.developer.attomdata.com
"""
import requests
from urllib.parse import quote, urlencode
from api import api
PATH = "attomavm/detail"
def get_avm_by_address(number_street, city_state):
"""
API request to get attomavm/detail
"""
params = urlencode(
{
"address1": number_street,
"address2": city_state,
}
)
url = "{}/{}?{}".format(api.ATTOM_URL, PATH, params)
r = requests.get(url, headers=api.headers)
return r.json()
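# Minimal usage sketch (hypothetical address; assumes a valid API key in
# api.headers):
# avm = get_avm_by_address("123 Main St", "Denver, CO")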
| 27.138462
| 77
| 0.620181
|
19b9c7cf12ec5b8b173b1bc2764d7bfc2577385f
| 7,064
|
py
|
Python
|
idmap/models.py
|
tkhyn/django-idmap
|
383124fc4bd537d053f9d4c0d02a498f66831baa
|
[
"BSD-2-Clause"
] | 1
|
2021-04-24T16:35:15.000Z
|
2021-04-24T16:35:15.000Z
|
idmap/models.py
|
tkhyn/django-idmap
|
383124fc4bd537d053f9d4c0d02a498f66831baa
|
[
"BSD-2-Clause"
] | null | null | null |
idmap/models.py
|
tkhyn/django-idmap
|
383124fc4bd537d053f9d4c0d02a498f66831baa
|
[
"BSD-2-Clause"
] | 1
|
2021-02-27T14:45:48.000Z
|
2021-02-27T14:45:48.000Z
|
import django
from django.db import models
from django.db.models.base import ModelBase
from django.utils import six
from .manager import IdMapManager
from . import tls # thread local storage
META_VALUES = {
'use_strong_refs': False,
'multi_db': False
}
| 33.799043
| 80
| 0.58876
|
19bbd9ee5d1a69e647b6029452a9fd29e645da59
| 1,345
|
py
|
Python
|
test_search_in_rotated_sorted_array.py
|
jaebradley/leetcode.py
|
64634cc7d0e975ddd163f35acb18cc92960b8eb5
|
[
"MIT"
] | null | null | null |
test_search_in_rotated_sorted_array.py
|
jaebradley/leetcode.py
|
64634cc7d0e975ddd163f35acb18cc92960b8eb5
|
[
"MIT"
] | 2
|
2019-11-13T19:55:49.000Z
|
2019-11-13T19:55:57.000Z
|
test_search_in_rotated_sorted_array.py
|
jaebradley/leetcode.py
|
64634cc7d0e975ddd163f35acb18cc92960b8eb5
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from search_in_rotated_sorted_array import Solution
| 37.361111
| 72
| 0.684758
|
19bd0b651a92c3989a6dcd3e14655ea86b1f4a83
| 2,501
|
py
|
Python
|
pyrfu/pyrf/ts_skymap.py
|
ablotekar/irfu-python
|
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
|
[
"MIT"
] | 2
|
2020-11-27T11:35:42.000Z
|
2021-07-17T11:08:10.000Z
|
pyrfu/pyrf/ts_skymap.py
|
ablotekar/irfu-python
|
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
|
[
"MIT"
] | 1
|
2021-12-04T07:55:48.000Z
|
2021-12-10T12:45:27.000Z
|
pyrfu/pyrf/ts_skymap.py
|
ablotekar/irfu-python
|
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
|
[
"MIT"
] | 2
|
2021-07-17T11:08:12.000Z
|
2021-07-18T18:41:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
import xarray as xr
__author__ = "Louis Richard"
__email__ = "louisr@irfu.se"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def ts_skymap(time, data, energy, phi, theta, **kwargs):
r"""Creates a skymap of the distribution function.
Parameters
----------
time : ndarray
List of times.
data : ndarray
Values of the distribution function.
energy : ndarray
Energy levels.
phi : ndarray
Azimuthal angles.
theta : ndarray
Elevation angles.
Other Parameters
---------------
**kwargs
Hash table of keyword arguments with :
* energy0 : ndarray
Energy table 0 (odd time indices).
* energy1 : ndarray
Energy table 1 (even time indices).
* esteptable : ndarray
Time series of the stepping table between energies (burst).
Returns
-------
out : xarray.Dataset
Skymap of the distribution function.
"""
energy0, energy1, esteptable = [None] * 3
energy0_ok, energy1_ok, esteptable_ok = [False] * 3
if energy is None:
if "energy0" in kwargs:
energy0, energy0_ok = [kwargs["energy0"], True]
if "energy1" in kwargs:
energy1, energy1_ok = [kwargs["energy1"], True]
if "esteptable" in kwargs:
esteptable, esteptable_ok = [kwargs["esteptable"], True]
if not energy0_ok and not energy1_ok and not esteptable_ok:
raise ValueError("Energy input required")
energy = np.tile(energy0, (len(esteptable), 1))
energy[esteptable == 1] = np.tile(energy1,
(int(np.sum(esteptable)), 1))
if phi.ndim == 1:
phi = np.tile(phi, (len(time), 1))
out_dict = {"data": (["time", "idx0", "idx1", "idx2"], data),
"phi": (["time", "idx1"], phi), "theta": (["idx2"], theta),
"energy": (["time", "idx0"], energy), "time": time,
"idx0": np.arange(energy.shape[1]),
"idx1": np.arange(phi.shape[1]), "idx2": np.arange(len(theta))}
out = xr.Dataset(out_dict)
if energy0_ok:
out.attrs["energy0"] = energy0
if energy1_ok:
out.attrs["energy1"] = energy1
if energy0_ok:
out.attrs["esteptable"] = esteptable
return out
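# Usage sketch with synthetic shapes (added; data must be time x energy x phi
# x theta, and a 1-D phi is tiled per time step):
# t = np.arange(2)
# vdf = ts_skymap(t, np.ones((2, 3, 4, 5)),
#                 np.tile(np.r_[1.0, 2.0, 3.0], (2, 1)),
#                 np.zeros(4), np.zeros(5))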
| 26.892473
| 79
| 0.562575
|
19be0f2de874f8b441c89b5d8fd8cac69393789a
| 2,037
|
py
|
Python
|
src/log_utils.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 38
|
2021-08-28T06:01:25.000Z
|
2022-03-03T03:23:23.000Z
|
src/log_utils.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 14
|
2021-11-15T12:30:34.000Z
|
2022-03-30T14:03:16.000Z
|
src/log_utils.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 9
|
2021-10-19T23:45:07.000Z
|
2021-12-20T07:45:37.000Z
|
'''
Author: Alex Wong <alexw@cs.ucla.edu>
If you use this code, please cite the following paper:
A. Wong, and S. Soatto. Unsupervised Depth Completion with Calibrated Backprojection Layers.
https://arxiv.org/pdf/2108.10531.pdf
@inproceedings{wong2021unsupervised,
title={Unsupervised Depth Completion with Calibrated Backprojection Layers},
author={Wong, Alex and Soatto, Stefano},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={12747--12756},
year={2021}
}
'''
import os
import torch
import numpy as np
from matplotlib import pyplot as plt
def log(s, filepath=None, to_console=True):
'''
Logs a string to either file or console
Arg(s):
s : str
string to log
filepath
output filepath for logging
to_console : bool
log to console
'''
if to_console:
print(s)
if filepath is not None:
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
with open(filepath, 'w+') as o:
o.write(s + '\n')
else:
with open(filepath, 'a+') as o:
o.write(s + '\n')
def colorize(T, colormap='magma'):
'''
Colorizes a 1-channel tensor with matplotlib colormaps
Arg(s):
T : torch.Tensor[float32]
1-channel tensor
colormap : str
matplotlib colormap
'''
cm = plt.cm.get_cmap(colormap)
shape = T.shape
# Convert to numpy array and transpose
if shape[0] > 1:
T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)))
else:
T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)), axis=-1)
# Colorize using colormap and transpose back
color = np.concatenate([
np.expand_dims(cm(T[n, ...])[..., 0:3], 0) for n in range(T.shape[0])],
axis=0)
color = np.transpose(color, (0, 3, 1, 2))
# Convert back to tensor
return torch.from_numpy(color.astype(np.float32))
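# Usage sketch (synthetic tensor, added for illustration):
# x = torch.rand(2, 1, 4, 4)   # N x 1 x H x W
# rgb = colorize(x)            # N x 3 x H x W, values in [0, 1]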
| 26.802632
| 92
| 0.60972
|
19c214d222aa500c556609e883b1ff02ba286869
| 788
|
py
|
Python
|
add-two-numbers/add-two-numbers.py
|
shaurya-src/code-leet
|
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
|
[
"MIT"
] | null | null | null |
add-two-numbers/add-two-numbers.py
|
shaurya-src/code-leet
|
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
|
[
"MIT"
] | null | null | null |
add-two-numbers/add-two-numbers.py
|
shaurya-src/code-leet
|
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
| 28.142857
| 98
| 0.497462
|
19c251bd8c7eb79b25c470c6951dca0f932a8918
| 2,834
|
py
|
Python
|
likedtweets.py
|
PoliTwit1984/Politwitverse
|
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
|
[
"MIT"
] | 3
|
2022-01-05T07:12:14.000Z
|
2022-02-19T20:58:25.000Z
|
likedtweets.py
|
PoliTwit1984/Politwitverse
|
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
|
[
"MIT"
] | 25
|
2022-01-05T08:23:59.000Z
|
2022-02-07T01:25:39.000Z
|
likedtweets.py
|
PoliTwit1984/Politwitverse
|
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
|
[
"MIT"
] | 1
|
2022-02-01T22:39:57.000Z
|
2022-02-01T22:39:57.000Z
|
import time
import re
import tweepy
import preprocessor as p
import config
import string
consumer_key = config.consumer_key
consumer_secret = config.consumer_secret
access_token = config.access_token
access_token_secret = config.access_token_secret
bearer_token = config.bearer_token
username = config.username
password = config.password
def clean_text(text):
"""
Function to clean the text.
Parameters:
text: the raw text as a string value that needs to be cleaned
Returns:
cleaned_text: the cleaned text as string
"""
# convert to lower case
cleaned_text = text.lower()
# remove HTML tags
html_pattern = re.compile('<.*?>')
cleaned_text = re.sub(html_pattern, '', cleaned_text)
# remove punctuations
cleaned_text = cleaned_text.translate(
str.maketrans('', '', string.punctuation))
return cleaned_text.strip()
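# Illustrative call (hypothetical input):
# clean_text("<b>Hello, World!</b>")   # -> "hello world"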
client = tweepy.Client(bearer_token=bearer_token)
list_id = "1467207384011526144" # all missouri legislators
response = client.get_list_members(list_id, max_results = 100)
users = response.data
metadata = response.meta
next_token = metadata.get("next_token")
print(next_token)
while next_token is not None:
    for user in users:
        # Use 'line' rather than 'string' so the imported string module
        # (needed by clean_text) is not shadowed.
        line = str(user.name) + "," + str(user.id) + "," + str(user.username) + "\n"
        with open('moleglistmembership.txt', 'a') as f:
            f.write(line)
    response = client.get_list_members(list_id, pagination_token=next_token, max_results=100)
    users = response.data
    metadata = response.meta
    next_token = metadata.get("next_token")
    print(next_token)
# The loop above exits before writing the final page it fetched, so flush it here.
for user in users:
    line = str(user.name) + "," + str(user.id) + "," + str(user.username) + "\n"
    with open('moleglistmembership.txt', 'a') as f:
        f.write(line)
# tweet_text = tweet.text
# tweet_clean_text = clean_tweets(tweet.text)
# tweet_created_at = tweet.created_at
# tweet_clean_text = clean_text(tweet_clean_text)
# print(tweet_clean_text)
# print('\n')
# print(tweet_created_at)
# print('\n')
# print('-----------------------------------------------------------------')
# with open('molegmembership.txt', 'a') as f:
# f.write(tweet_clean_text)
# f.write('\n')
# response = client.get_list_tweets(list_id, max_results=100)
| 26.485981
| 97
| 0.677135
|
19c32bbd1169664ffd8d06d663183110a2d5e53c
| 391
|
py
|
Python
|
src/app/migrations/0004_history_missed.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 2
|
2018-05-22T13:47:09.000Z
|
2018-05-23T12:45:05.000Z
|
src/app/migrations/0004_history_missed.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 21
|
2018-05-08T12:56:46.000Z
|
2020-06-05T18:59:38.000Z
|
src/app/migrations/0004_history_missed.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 4
|
2018-05-04T15:00:59.000Z
|
2019-02-13T02:39:07.000Z
|
# Generated by Django 2.0.3 on 2018-03-14 09:59
from django.db import migrations, models
| 20.578947
| 59
| 0.595908
|
19c43d42b7108f348940b9fd8fc9fb33a8830e2c
| 2,112
|
py
|
Python
|
audclass.py
|
theunafraid/audiofeedback-prevention
|
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
|
[
"Apache-2.0"
] | 1
|
2022-01-20T08:30:20.000Z
|
2022-01-20T08:30:20.000Z
|
audclass.py
|
theunafraid/audiofeedback-prevention
|
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
|
[
"Apache-2.0"
] | null | null | null |
audclass.py
|
theunafraid/audiofeedback-prevention
|
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.gen_batch_ops import batch
from model import AudioClass
from qrnn import QRNN
from numpy.random import seed
from numpy.random import randn
from random import randint
from lstmfcn import LSTM_FCN
import librosa
import os
if __name__ == "__main__":
main()
| 26.734177
| 78
| 0.588542
|
19c4cd4bbbc8cea75d64211787db92e8b023d09a
| 3,135
|
py
|
Python
|
pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
from pytanga.components import AbstractComponent
| 32.319588
| 94
| 0.500797
|
19c79aebe6cccec71cf534b0497f44d1a8496883
| 4,127
|
py
|
Python
|
python_implementation/matriz/quadrada.py
|
SousaPedro11/algoritmos
|
86a3601912778d120b9ec8094267c26a7eb6d153
|
[
"MIT"
] | null | null | null |
python_implementation/matriz/quadrada.py
|
SousaPedro11/algoritmos
|
86a3601912778d120b9ec8094267c26a7eb6d153
|
[
"MIT"
] | null | null | null |
python_implementation/matriz/quadrada.py
|
SousaPedro11/algoritmos
|
86a3601912778d120b9ec8094267c26a7eb6d153
|
[
"MIT"
] | null | null | null |
import math
from typing import List, Tuple
if __name__ == '__main__':
solucao_problema()
| 33.282258
| 100
| 0.628544
|
19c7ee6d99159a70af01f16f7e183bb9ec3972a5
| 7,017
|
py
|
Python
|
app_mongo.py
|
emmpets/MongoProject
|
cbef19b590503825158909703125b34c1bf536ec
|
[
"Apache-2.0"
] | null | null | null |
app_mongo.py
|
emmpets/MongoProject
|
cbef19b590503825158909703125b34c1bf536ec
|
[
"Apache-2.0"
] | null | null | null |
app_mongo.py
|
emmpets/MongoProject
|
cbef19b590503825158909703125b34c1bf536ec
|
[
"Apache-2.0"
] | null | null | null |
from pymongo import MongoClient
import pymongo
from datetime import datetime,time
import time
from bson.code import Code
mongo_client=MongoClient('mongodb://localhost:27017/')
db=mongo_client.mydb
db_col=db.things
dbc = mongo_client.mydb.things
print mongo_client
print(db)
print("connected")
data = dbc.find()
ans=True
while ans:
print("""
1.How many unique users are there?
2.How many tweets (%) did the top 10 users (measured by the number of messages) publish?
3.What was the earliest and latest date (YYYY-MM-DD HH:MM:SS) that a message was published?
4.What is the mean time delta between all messages?
5.What is the mean length of a message?
    6.What are the 10 most common unigrams within the messages?
    7.What are the 10 most common bigrams within the messages?
8.What is the average number of hashtags (#) used within a message?
10.Exit/Quit
""")
ans = raw_input("What would you like to do? ")
if ans == "1":
print "The summary of all unique users is: ", first_querry()
elif ans == "2":
print("The percentage of the ALL messages of top ten user"), second_querry(), "%",
elif ans == "3":
print"The last message published on:", third_querry()[0]
print"The earliest message published on:", third_querry()[1]
elif ans == "4":
print"The mean time delta between all messages is :", fourth_querry()
elif ans == "5":
print"The mean length of the messages is :", fifth_querry(data)
elif ans == "6":
print"The 10 most common unigrams within the messages are:", sixth_querry()
elif ans == "7":
print"The 10 most common bigrams within the messages are:", seventh_querry()
elif ans == "8":
print"The average number of hashtags (#) used within a message is:", eight_querry(data)
elif ans == "9":
ninth_querry()
elif ans == "10":
print("\n Goodbye")
ans = None
else:
print("\n Not Valid Choice Try again")
| 30.11588
| 110
| 0.584865
|
19c9e0f683fb12bcf45633873b78ecba612bb09f
| 7,399
|
py
|
Python
|
theseus/util/serialize.py
|
shiplift/theseus
|
9324d67e6e0c6b93a7734a5531838c5a909a1424
|
[
"0BSD"
] | null | null | null |
theseus/util/serialize.py
|
shiplift/theseus
|
9324d67e6e0c6b93a7734a5531838c5a909a1424
|
[
"0BSD"
] | null | null | null |
theseus/util/serialize.py
|
shiplift/theseus
|
9324d67e6e0c6b93a7734a5531838c5a909a1424
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
serialize
provide means to persist and recreate the currently known
set of W_Tags and all shapes and transformations reachable
from there.
The rmarshal modules is used for serialization; the format is
marshal_proto = (
int, # number of shapes
[ # shape list
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
],
{
(str, int) : # name arity
int #id
}
)
The serialized tree is written to a '.docked' files
"""
import os.path
from rpython.rlib.streamio import open_file_as_stream
from rpython.rlib.rmarshal import get_marshaller, get_unmarshaller
from rpython.rlib.debug import debug_start, debug_stop, debug_print
from theseus.model import W_Tag
from theseus.shape import in_storage_shape, CompoundShape
marshal_proto = (
int, # number of shapes
[ # shape list
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
],
{
(str, int) : # name arity
int #id
}
)
marshaller = get_marshaller(marshal_proto)
unmarshaller = get_unmarshaller(marshal_proto)
def punch_shape(s, registry):
"""
Punch a shape to a tuple for marshalling.
See slurp_shapes, configure_shapes for inverse.
Format is
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
"""
if s == in_storage_shape:
return (0, ('', 0), [], {}, {})
else:
assert isinstance(s, CompoundShape)
my_index = registry.index(s)
hist = {}
for (index, shape), count in s._hist.items():
shape_id = registry.index(shape)
hist[(index, shape_id)] = count
trans = {}
for (index, shape), to_shape in s.transformation_rules.items():
shape_id = registry.index(shape)
to_shape_id = registry.index(to_shape)
trans[(index, registry.index(shape))] = registry.index(to_shape)
punchee = (
registry.index(s),
(s._tag.name, s._tag.arity()),
[registry.index(subshape) for subshape in s._structure],
hist,
trans
)
return punchee
def recreate_shape(shape_desc, tags, registry):
"""
Recreate a shape from its punched format; see punch_shape.
Does not handle history and transformations.
See configure_shape(s).
"""
id, tag, structure_ids = shape_desc
structure = [None] * len(structure_ids)
for structure_index, sub_id in enumerate(structure_ids):
assert sub_id < id
subshape = registry[sub_id]
assert subshape is not None
structure[structure_index] = subshape
return CompoundShape(tags[tag], structure)
def configure_shape(shape, hist, trans, registry):
"""
Reconfigure a shape from its punched format; see punch_shape.
Does _only_ handle history and transformations.
See configure_shapes.
"""
assert isinstance(shape, CompoundShape)
shape._hist = {}
for (index, s_id), count in hist.items():
k = (index, registry[s_id])
shape._hist[k] = count
shape.transformation_rules = {}
for (index, s_id), to_s_id in trans.items():
k = (index, registry[s_id])
shape.transformation_rules[k] = registry[to_s_id]
def configure_shapes(shapes, registry):
"""
Reconfigure all shapes.
Does _only_ handle history and transformations.
See configure_shapes.
"""
for id, _tag, _structure_ids, hist, trans in shapes:
if id == 0: continue # in_storage_shape, no configure
configure_shape(registry[id], hist, trans, registry)
def slurp_registry(shapes, registry, tags_slurp, tags):
"""
Slurp all shapes from their punched format (see punch_shape)
not including history or transformation
"""
known_ids = [0]
for default_id in tags_slurp.values():
known_ids.append(default_id)
for id, tag, structure_ids, _hist, _trans in shapes:
if id in known_ids: continue
assert registry[id] is None
registry[id] = recreate_shape((id, tag, structure_ids), tags, registry)
def punch_tags(tags):
"""
Punch all tags into marshallable format:
(
int, # number of shapes
[ # shape list
],
{
(str, int) : # name arity
int #id
}
)
"""
reg = [in_storage_shape] + CompoundShape._shapes
punch_reg = [punch_shape(s, reg) for s in reg]
res = {}
for key, value in tags.items():
res[key] = reg.index(value.default_shape)
return (len(punch_reg), punch_reg, res)
def slurp_tags(un_tags):
"""
Slurp all tags from their punched format (see punch_tag).
Recursively slurps shapes and then configures them.
"""
num_shapes, shapes_slurp, tags_slurp = un_tags
registry = [None] * num_shapes
registry[0] = in_storage_shape
tags = {}
for (name, arity), default_id in tags_slurp.items():
tag = W_Tag(name, arity)
tags[(name, arity)] = tag
registry[default_id] = tag.default_shape
slurp_registry(shapes_slurp, registry, tags_slurp, tags)
configure_shapes(shapes_slurp, registry)
return tags
def come_up(basename):
"""
Bring up previously marshalled Tags, shapes and transformations
from '.docked' file un-marshalling, slurping and replacement of
current Tags.
"""
from theseus.shape import CompoundShape
# later
# from os import stat
# statres = stat(path)
debug_start("theseus-come-up")
path = basename + '.docked'
if not os.path.exists(path):
return
try:
f = open_file_as_stream(path, buffering=0)
except OSError as e:
os.write(2, "Error(come_up)%s -- %s\n" % (os.strerror(e.errno), path))
return
try:
res = unmarshaller(f.readall())
finally:
f.close()
del CompoundShape._shapes[:]
W_Tag.tags.clear()
new_tags = slurp_tags(res)
for key, value in new_tags.items():
W_Tag.tags[key] = value
debug_stop("theseus-come-up")
def settle(basename):
"""
Settle Tags, shapes and transformations to a '.docked' file
punching and marshalling all current Tags.
"""
debug_start("theseus-settle")
path = basename + '.docked'
buf = []
marshaller(buf, punch_tags(W_Tag.tags))
try:
f = open_file_as_stream(path, mode="w", buffering=0)
except OSError as e:
os.write(2, "Error(settle)%s -- %s\n" % (os.strerror(e.errno), path))
return
try:
f.write(''.join(buf))
finally:
f.close()
debug_stop("theseus-settle")
| 27.403704
| 79
| 0.592783
|
19cc7f391c49230cd25af4f7949e261ca27ffe2b
| 1,359
|
py
|
Python
|
external_scripts/run2.py
|
AAS97/tokenizRE
|
0186a2b533edaa0045b16b0b111b9637248e5046
|
[
"MIT"
] | null | null | null |
external_scripts/run2.py
|
AAS97/tokenizRE
|
0186a2b533edaa0045b16b0b111b9637248e5046
|
[
"MIT"
] | null | null | null |
external_scripts/run2.py
|
AAS97/tokenizRE
|
0186a2b533edaa0045b16b0b111b9637248e5046
|
[
"MIT"
] | null | null | null |
from web3 import Web3, HTTPProvider
import json
import os
w3 = Web3(HTTPProvider("http://127.0.0.1:7545",
request_kwargs={'timeout': 60}))
print(f"Web3 is connected : {w3.isConnected()}")
accounts = w3.eth.accounts
# ------------------------------- get contract ------------------------------- #
abi_path = "./vapp/src/contracts/"
with open(os.path.join(abi_path, 'TokenHolderPayer.json'), "r") as file:
property_contract_compiled = json.load(file)
property_contract_abi = property_contract_compiled['abi']
contract_address = "0xE5972821D1218120C4E98986A3eEc997931690b4"
property_contract = w3.eth.contract(address=contract_address, abi=property_contract_abi)
# ------------------- buy some token from realestate agent ------------------- #
amount = 500
# Allow token to be sent
property_contract.functions.increaseAllowance(accounts[1], amount).transact({'from':accounts[0], 'gas': 420000, 'gasPrice': 21000})
balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"initial balance {balance}")
tx_hash = property_contract.functions.transferFrom(accounts[0], accounts[1], 500).transact({'from':accounts[1], 'gas': 420000, 'gasPrice': 21000})
receipt = w3.eth.waitForTransactionReceipt(tx_hash)
balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"final balance {balance}")
| 37.75
| 146
| 0.693893
|
19cc949ad53b4cdcbc1b975b94608f7737a43f64
| 825
|
py
|
Python
|
main.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
main.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
main.py
|
cltl-students/hamersma-agression-causes
|
11cbfd94031a0a3c84a27afa20d8a539acdab609
|
[
"MIT"
] | null | null | null |
from preprocessing import preprocess
from approach1_rulebased import get_predictions_rulebased
from approach2_machine_learning import get_predictions_ml
from bertopic_clustering import cluster_precursors
def main():
    '''Main function for command-line use: preprocesses the input to generate embeddings, detects aggression
    clauses using the provided approaches, extracts features and labels from the training data and features
    from the input data, trains a model, classifies the test data with it, and evaluates the predictions
    against the gold labels from the input.'''
inputfile = 'sample_input.xls'
preprocess(inputfile)
get_predictions_rulebased()
get_predictions_ml()
### only clusters with enough data, else everything in outlier cluster
cluster_precursors()
if __name__ == '__main__':
main()
| 43.421053
| 118
| 0.778182
|
19cd9765a8c1a72e36854304f427fda7349e31d0
| 8,985
|
pyw
|
Python
|
12th project cs/Gui pages.pyw
|
Jatin-Ya/ChatApp-12th-project-
|
77ced9b18205728334f4370fbce8d74687bc5373
|
[
"Apache-2.0"
] | null | null | null |
12th project cs/Gui pages.pyw
|
Jatin-Ya/ChatApp-12th-project-
|
77ced9b18205728334f4370fbce8d74687bc5373
|
[
"Apache-2.0"
] | null | null | null |
12th project cs/Gui pages.pyw
|
Jatin-Ya/ChatApp-12th-project-
|
77ced9b18205728334f4370fbce8d74687bc5373
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
import threading
import sql_manager as ss
g=GUI()
| 35.654762
| 111
| 0.575292
|
19cea24c1060f2d6ff7113c23c57266d177697db
| 1,528
|
py
|
Python
|
19/19a.py
|
jamOne-/adventofcode2018
|
d51c01578ae7e4f30824c4f6ace66958491c1ed4
|
[
"MIT"
] | null | null | null |
19/19a.py
|
jamOne-/adventofcode2018
|
d51c01578ae7e4f30824c4f6ace66958491c1ed4
|
[
"MIT"
] | null | null | null |
19/19a.py
|
jamOne-/adventofcode2018
|
d51c01578ae7e4f30824c4f6ace66958491c1ed4
|
[
"MIT"
] | null | null | null |
import sys
OPERATIONS = {
'addr': lambda a, b, c, registers: registers[a] + registers[b],
'addi': lambda a, b, c, registers: registers[a] + b,
'mulr': lambda a, b, c, registers: registers[a] * registers[b],
'muli': lambda a, b, c, registers: registers[a] * b,
'banr': lambda a, b, c, registers: registers[a] & registers[b],
'bani': lambda a, b, c, registers: registers[a] & b,
'borr': lambda a, b, c, registers: registers[a] | registers[b],
'bori': lambda a, b, c, registers: registers[a] | b,
'setr': lambda a, b, c, registers: registers[a],
'seti': lambda a, b, c, registers: a,
'grir': lambda a, b, c, registers: 1 if a > registers[b] else 0,
'gtri': lambda a, b, c, registers: 1 if registers[a] > b else 0,
'gtrr': lambda a, b, c, registers: 1 if registers[a] > registers[b] else 0,
'eqir': lambda a, b, c, registers: 1 if a == registers[b] else 0,
'eqri': lambda a, b, c, registers: 1 if registers[a] == b else 0,
'eqrr': lambda a, b, c, registers: 1 if registers[a] == registers[b] else 0
}
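# Example (added): every op maps (a, b, c, registers) to the value destined
# for register c, e.g.
# OPERATIONS['addr'](0, 1, 2, [5, 7, 0, 0])   # -> 12 (r0 + r1)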
print(solve(sys.stdin))
| 36.380952
| 77
| 0.633508
|
19d1e57d2a97ef66002ffa2d6966b97f5f533bee
| 1,878
|
py
|
Python
|
examples/websocket_echo_server.py
|
HMXHIU/VeryPowerfulAgents
|
06abd52776aeaf701637533f760176459c9c361c
|
[
"MIT"
] | 2
|
2021-11-30T16:14:01.000Z
|
2022-03-04T09:20:51.000Z
|
examples/websocket_echo_server.py
|
HMXHIU/VeryPowerfulAgents
|
06abd52776aeaf701637533f760176459c9c361c
|
[
"MIT"
] | 8
|
2021-02-10T15:43:49.000Z
|
2021-02-10T16:00:16.000Z
|
examples/websocket_echo_server.py
|
HMXHIU/VeryPowerfulAgents
|
06abd52776aeaf701637533f760176459c9c361c
|
[
"MIT"
] | 1
|
2021-01-01T12:31:48.000Z
|
2021-01-01T12:31:48.000Z
|
from aiohttp import web
from agents import Agent
if __name__ == "__main__":
webserver = WebServer("127.0.0.1", 8080, "/ws")
| 30.786885
| 86
| 0.510117
|
19d3c2532fdc242dd0fdaf80342fa01cfdf2a61d
| 5,401
|
py
|
Python
|
janaganana/tables.py
|
deshetti/janaganana
|
f29ced95fc9f8b98f77560d9afdbd999510dd497
|
[
"MIT"
] | 11
|
2017-02-16T20:45:54.000Z
|
2021-12-31T01:08:40.000Z
|
janaganana/tables.py
|
deshetti/janaganana
|
f29ced95fc9f8b98f77560d9afdbd999510dd497
|
[
"MIT"
] | 18
|
2017-02-15T20:24:29.000Z
|
2022-03-29T21:54:36.000Z
|
janaganana/tables.py
|
deshetti/janaganana
|
f29ced95fc9f8b98f77560d9afdbd999510dd497
|
[
"MIT"
] | 13
|
2017-02-16T20:45:25.000Z
|
2020-09-23T21:40:57.000Z
|
from wazimap.data.tables import FieldTable
# Define our tables so the data API can discover them.
# Household tables
FieldTable(['rural population'], universe='Population', table_per_level=False)
FieldTable(['area', 'sex'], universe='Population', table_per_level=False)
FieldTable(['census_year', 'measure'], universe='A2-Decadal Variation', table_per_level=False)
FieldTable(['census_year', 'sex_vis'], universe='VISUAL', table_per_level=False)
FieldTable(['area', 'sex', 'literacy'], universe='Population', table_per_level=False)
FieldTable(['area','village_town_comparison'], universe='A3APPENDIX', table_per_level=False)
FieldTable(['religion', 'area', 'sex'], universe='Religion', table_per_level=False)
FieldTable(['age', 'area', 'sex'], universe='Age', table_per_level=False)
FieldTable(['village_town_measures','area'], universe='A1-', table_per_level=False)
FieldTable(['education', 'area', 'sex'], universe='Education', table_per_level=False)
FieldTable(['houseless_population','area', 'sex'], universe='A7-Houseless', table_per_level=False)
FieldTable(['sc_houseless_population','area', 'sex'], universe='A8-SC_Houseless', table_per_level=False)
FieldTable(['st_houseless_population','area', 'sex'], universe='A9-ST_Houseless', table_per_level=False)
FieldTable(['village_measures','population_range'], universe='A3-Inhabited Villages', table_per_level=False)
FieldTable(['maritalstatus', 'area', 'sex'], universe='Relation', table_per_level=False)
FieldTable(['workertype','age_group','area','sex'], universe='B1-Workerstype', table_per_level=False)
FieldTable(['sc_workertype','age_group','area','sex'], universe='B1SC-Workerstype', table_per_level=False)
FieldTable(['st_workertype','age_group','area','sex'], universe='B1ST-Workerstype', table_per_level=False)
FieldTable(['workers', 'area', 'workerssex'], universe='Workers', table_per_level=False)
FieldTable(['workertype','education_level', 'area', 'sex'], universe='B3', table_per_level=False)
FieldTable(['education_level', 'area', 'sex_vis'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_workertype','education_level', 'area', 'sex'], universe='B3SC', table_per_level=False)
FieldTable(['st_workertype','education_level', 'area', 'sex'], universe='B3ST', table_per_level=False)
FieldTable(['nonworkertype', 'age_group','area','sex'], universe='B13', table_per_level=False)
FieldTable(['nonworkertype_vis', 'age_group','area','sex'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_nonworkertype', 'age_group','area','sex'], universe='B13SC', table_per_level=False)
FieldTable(['st_nonworkertype', 'age_group','area','sex'], universe='B13ST', table_per_level=False)
FieldTable(['religion','nonworkertype','age_group','area', 'sex'], universe='B14', table_per_level=False)
FieldTable(['religion','area', 'sex'], universe='C1', table_per_level=False)
FieldTable(['religious_community','area', 'sex'], universe='C1APPENDIX', table_per_level=False)
FieldTable(['age_group','marital_status','area', 'sex'], universe='C2', table_per_level=False)
FieldTable(['religion','marital_status','area', 'sex'], universe='C3', table_per_level=False)
FieldTable(['mother_tongue_vis','area', 'sex'], universe='VISUAL', table_per_level=False)
FieldTable(['disability','age_group','area', 'sex'], universe='c20', table_per_level=False)
FieldTable(['mother_tongue','area', 'sex'], universe='c16', table_per_level=False)
FieldTable(['educational_institution','age','area', 'sex'], universe='c10', table_per_level=False)
FieldTable(['sc_educational_institution','age','area', 'sex'], universe='c10sc', table_per_level=False)
FieldTable(['st_educational_institution','age','area', 'sex'], universe='c10st', table_per_level=False)
FieldTable(['economic_activity','age','area', 'sex'], universe='c12', table_per_level=False)
FieldTable(['marriage_duration','age','area', 'sex'], universe='c4', table_per_level=False)
FieldTable(['parity','age','area'], universe='F1', table_per_level=False)
FieldTable(['sc_parity','age','area'], universe='F1sc', table_per_level=False)
FieldTable(['st_parity','age','area'], universe='F1st', table_per_level=False)
FieldTable(['parity_vis','age','area'], universe='VISUAL', table_per_level=False)
FieldTable(['surviving_children','age','area'], universe='F5', table_per_level=False)
FieldTable(['sc_surviving_children','age','area'], universe='F5SC', table_per_level=False)
FieldTable(['st_surviving_children','age','area'], universe='F5ST', table_per_level=False)
FieldTable(['household_size','area'], universe='HH1', table_per_level=False)
FieldTable(['household_size_vis','area'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_household_size','area'], universe='HH1SC', table_per_level=False)
FieldTable(['st_household_size','area'], universe='HH1ST', table_per_level=False)
FieldTable(['household_workers','workers_in_household','area'], universe='HH11', table_per_level=False)
FieldTable(['household_size','available_for_work','area'], universe='HH12', table_per_level=False)
FieldTable(['sevenyearsandabove','literates_in_household','area'], universe='HH08', table_per_level=False)
FieldTable(['age','area', 'head','household_marital_status'], universe='HH06', table_per_level=False)
FieldTable(['houseless_households','area'], universe='HH02', table_per_level=False)
FieldTable(['households_size','aged_persons','area'], universe='HH05', table_per_level=False)
| 66.679012
| 108
| 0.755601
|
19d4df790639614b567c8829dbce219210c26642
| 585
|
py
|
Python
|
src/weekly-reset.py
|
SlimeeGameS/VirginityBot
|
a1745893f21a16112bbf775fb2aff199c14dbbbb
|
[
"CC0-1.0"
] | null | null | null |
src/weekly-reset.py
|
SlimeeGameS/VirginityBot
|
a1745893f21a16112bbf775fb2aff199c14dbbbb
|
[
"CC0-1.0"
] | 14
|
2020-03-26T01:02:31.000Z
|
2021-03-24T23:48:44.000Z
|
src/weekly-reset.py
|
SlimeeGameS/VirginityBot
|
a1745893f21a16112bbf775fb2aff199c14dbbbb
|
[
"CC0-1.0"
] | 2
|
2020-08-09T19:08:41.000Z
|
2021-05-12T17:44:28.000Z
|
import os
import asyncio
import logging
from pony.orm import *
import logger
from database import start_orm, get_biggest_virgin, Guild, Virgin
logger = logging.getLogger('virginity-bot')
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 18.870968
| 65
| 0.729915
|
19d525875da360fb20fb2929a08fff78176398d0
| 1,165
|
py
|
Python
|
hardhat/recipes/racket.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/racket.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/racket.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
import os
import shutil
from .base import GnuRecipe
| 31.486486
| 73
| 0.572532
|
19d5619a8ce652fe7933c1843f9585227eb325de
| 3,257
|
py
|
Python
|
lichess-gist.py
|
swimmy4days/lichess-gist
|
b70e605345f789e032291253df506384ccbaa270
|
[
"MIT"
] | null | null | null |
lichess-gist.py
|
swimmy4days/lichess-gist
|
b70e605345f789e032291253df506384ccbaa270
|
[
"MIT"
] | null | null | null |
lichess-gist.py
|
swimmy4days/lichess-gist
|
b70e605345f789e032291253df506384ccbaa270
|
[
"MIT"
] | null | null | null |
import os
import sys
import berserk
from github import Github, InputFileContent, Gist
SEPARATOR = "."
PADDING = {"puzzle": 0, "crazyhouse": 0, "chess960": 0,
"kingOfTheHill": 0, "threeCheck": 2, "antichess": 0, "atomic": 0, "horde": 0, "racingKings": 0,
"ultraBullet": 0, "blitz": 1, "classical": 1, "rapid": 0, "bullet": 0, "correspondence": 3}
emojis = {"puzzle": "", "crazyhouse": "", "chess960": "960",
"kingOfTheHill": "", "threeCheck": "3", "antichess": "", "atomic": "", "horde": "", "racingKings": "",
"ultraBullet": "", "blitz": "", "classical": "", "rapid": "", "bullet": "", "correspondence": ""}
ENV_VAR_GIST_ID = "GIST_ID"
ENV_VAR_GITHUB_TOKEN = "GH_TOKEN"
ENV_VAR_LICHESS_USERNAME = "LICHESS_USERNAME"
REQUIRED_ENVS = [
ENV_VAR_GIST_ID,
ENV_VAR_GITHUB_TOKEN,
ENV_VAR_LICHESS_USERNAME
]
if __name__ == "__main__":
# test with python lichess-gist.py test <gist> <github-token> <user>
if len(sys.argv) > 1:
os.environ[ENV_VAR_GIST_ID] = sys.argv[2]
os.environ[ENV_VAR_GITHUB_TOKEN] = sys.argv[3]
os.environ[ENV_VAR_LICHESS_USERNAME] = sys.argv[4]
main()
# %%
| 31.317308
| 118
| 0.612834
|
19d5e02630a84a1866bbfe9f9deb571cc98a96cc
| 951
|
py
|
Python
|
alembic/versions/60c735df8d2f_.py
|
brouberol/grand-cedre
|
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
|
[
"BSD-3-Clause"
] | null | null | null |
alembic/versions/60c735df8d2f_.py
|
brouberol/grand-cedre
|
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
|
[
"BSD-3-Clause"
] | 22
|
2019-09-03T20:08:42.000Z
|
2022-03-11T23:58:02.000Z
|
alembic/versions/60c735df8d2f_.py
|
brouberol/grand-cedre
|
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
|
[
"BSD-3-Clause"
] | null | null | null |
"""empty message
Revision ID: 60c735df8d2f
Revises: 88bb7e12da60
Create Date: 2019-09-06 08:27:03.082097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "60c735df8d2f"
down_revision = "88bb7e12da60"
branch_labels = None
depends_on = None
| 27.171429
| 84
| 0.690852
|
19d5e29e652c7abc55afdd0fed0c5112571018a1
| 3,640
|
py
|
Python
|
python/genre_classifier.py
|
nscharrenberg/Aliran
|
628de0476b8f8b413a6fdddf5392c590e8b27654
|
[
"MIT"
] | null | null | null |
python/genre_classifier.py
|
nscharrenberg/Aliran
|
628de0476b8f8b413a6fdddf5392c590e8b27654
|
[
"MIT"
] | null | null | null |
python/genre_classifier.py
|
nscharrenberg/Aliran
|
628de0476b8f8b413a6fdddf5392c590e8b27654
|
[
"MIT"
] | null | null | null |
import scipy.io.wavfile as wav
import numpy as np
import os
import pickle
import random
import operator
from python_speech_features import mfcc
dataset = []
training_set = []
test_set = []
# Get the distance between feature vectors
# Find Neighbors
# Identify the Nearest Neighbor (Genres)
# Model Evaluation to get the accuracy
# Extract features from the audio files and store them in a model file
# Load in the Dataset
if __name__ == '__main__':
print('Starting....')
local_filename = "dataset.aliran"
extracting = False
if extracting:
print('Extracting Features...')
print('Building Model...')
extract_features(local_filename)
print('Loading Dataset...')
load_dataset(local_filename, 0.66, training_set, test_set)
print('Making a prediction...')
print('(This may take a few minutes)')
predictions = []
for x in range(len(test_set)):
predictions.append(nearest_genre(get_neighbors(training_set, test_set[x], 5)))
accuracy = get_accuracy(test_set, predictions)
print('Prediction Accuracy is:')
print(accuracy)
| 26.376812
| 102
| 0.613462
|
19d94ed3daa7c3c452d53a4b890d6a26c3139991
| 1,653
|
py
|
Python
|
run.py
|
dkosilov/reconciler_anchor_salesforce
|
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
|
[
"Xnet",
"X11"
] | 1
|
2020-09-22T11:49:07.000Z
|
2020-09-22T11:49:07.000Z
|
run.py
|
dkosilov/reconciler_anchor_salesforce
|
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
|
[
"Xnet",
"X11"
] | null | null | null |
run.py
|
dkosilov/reconciler_anchor_salesforce
|
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
|
[
"Xnet",
"X11"
] | null | null | null |
import argparse
from libs.data_model import AnchorNorthstarDataframe, SalesForceDataframe, \
AnchorSalesforceAccountsDataframe, AnchorSalesforceContactsDataframe
from libs.utils import save_dataframes_to_excel
parser = argparse.ArgumentParser(description='Reconcile accounts and contacts between Anchor and Salesforce')
parser.add_argument('-a', '--anchor-file', help='Path to Anchor Excel workbook', required=True)
parser.add_argument('-n', '--northstar-file', help='Path to Northstar Excel workbook', required=True)
parser.add_argument('-s', '--salesforce-file', help='Path to Salesforce Excel workbook', required=True)
parser.add_argument('-t', '--account-name-match-ratio-threshold', type=int,
help='Account names with specified (or above) similarity ratio will be used for joining Anchor and '
'Salesforce account data. Number between 0 and 100.', default=75)
parser.add_argument('-r', '--result-file',
help='Path to result Excel workbook. The file will have 2 spreadsheets for accounts and '
'contacts reconciliation', required=True)
args = parser.parse_args()
anchor_ns = AnchorNorthstarDataframe(args.anchor_file, args.northstar_file)
salesforce = SalesForceDataframe(args.salesforce_file)
anchor_sf_accounts = AnchorSalesforceAccountsDataframe(anchor_ns, salesforce, args.account_name_match_ratio_threshold)
anchor_sf_contacts = AnchorSalesforceContactsDataframe(anchor_ns, salesforce)
save_dataframes_to_excel(args.result_file, {'Accounts': anchor_sf_accounts.df, 'Contacts': anchor_sf_contacts.df},
wrap_text=False)
| 57
| 120
| 0.754991
|
19db3143b0967735343ec7fb40012d028a989ea5
| 1,650
|
py
|
Python
|
billrelease.py
|
arby36/BillAi
|
e5c10c35279a1669d218439671e03bc17acb7fdc
|
[
"MIT"
] | null | null | null |
billrelease.py
|
arby36/BillAi
|
e5c10c35279a1669d218439671e03bc17acb7fdc
|
[
"MIT"
] | null | null | null |
billrelease.py
|
arby36/BillAi
|
e5c10c35279a1669d218439671e03bc17acb7fdc
|
[
"MIT"
] | null | null | null |
bill()
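# (bill() is defined earlier in the original file; its definition was
#  stripped from this dump)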
| 27.966102
| 139
| 0.527273
|
19ddf831a5a3b46c86717f74ec094bf9d7bcc0cd
| 757
|
py
|
Python
|
homeworks/hw05/tests/q3b1.py
|
cwakamiya/ieor135
|
084490380f265225927d11b43d948c1206b0aab8
|
[
"Apache-2.0"
] | 28
|
2020-06-15T23:53:36.000Z
|
2022-03-19T09:27:02.000Z
|
homeworks/hw05/tests/q3b1.py
|
cwakamiya/ieor135
|
084490380f265225927d11b43d948c1206b0aab8
|
[
"Apache-2.0"
] | 4
|
2020-06-24T22:20:31.000Z
|
2022-02-28T01:37:36.000Z
|
homeworks/hw05/tests/q3b1.py
|
cwakamiya/ieor135
|
084490380f265225927d11b43d948c1206b0aab8
|
[
"Apache-2.0"
] | 78
|
2020-06-19T09:41:01.000Z
|
2022-02-05T00:13:29.000Z
|
test = { 'name': 'q3b1',
'points': 2,
'suites': [ { 'cases': [ { 'code': '>>> 4 <= '
"sum(list(X1.describe().loc['mean'])) "
'<= 9\n'
'True',
'hidden': False,
'locked': False},
{ 'code': '>>> len(X1) == 768\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 47.3125
| 86
| 0.211361
|
19e32a5576ac8d30a109ed4090fee43e0912beb9
| 3,050
|
py
|
Python
|
scanpy/api/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/api/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/api/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | 1
|
2019-02-18T07:39:59.000Z
|
2019-02-18T07:39:59.000Z
|
"""Scanpy's high-level API provides an overview of all features relevant to pratical use::
import scanpy.api as sc
.. raw:: html
<h3>Preprocessing tools</h3>
Filtering of highly-variable genes, batch-effect correction, per-cell (UMI) normalization, preprocessing recipes.
.. raw:: html
<h4>Basic Preprocessing</h4>
.. autosummary::
:toctree: .
pp.filter_cells
pp.filter_genes
pp.filter_genes_dispersion
pp.log1p
pp.pca
pp.normalize_per_cell
pp.regress_out
pp.scale
pp.subsample
.. raw:: html
<h4>Recipes</h4>
.. autosummary::
:toctree: .
pp.recipe_zheng17
pp.recipe_weinreb16
.. raw:: html
   <h3>Machine Learning and Statistics tools</h3>
.. raw:: html
<h4>Visualization</h4>
.. autosummary::
:toctree: .
tl.pca
tl.tsne
tl.diffmap
tl.draw_graph
.. raw:: html
<h4>Branching trajectories and pseudotime, clustering, differential expression</h4>
.. autosummary::
:toctree: .
tl.aga
tl.louvain
tl.dpt
tl.rank_genes_groups
.. raw:: html
<h4>Simulations</h4>
.. autosummary::
:toctree: .
tl.sim
.. raw:: html
<h3>Generic methods</h3>
.. raw:: html
<h4>Reading and Writing</h4>
.. autosummary::
:toctree: .
read
write
read_10x_h5
.. raw:: html
<h4>Data Structures</h4>
.. autosummary::
:toctree: .
AnnData
DataGraph
.. raw:: html
<h3>Plotting</h3>
.. raw:: html
<h4>Generic plotting with AnnData</h4>
.. autosummary::
:toctree: .
pl.scatter
pl.violin
pl.ranking
.. raw:: html
<h4>Plotting tool results</h4>
Methods that extract and visualize tool-specific annotation in an AnnData object.
.. raw:: html
<h5>Visualization</h5>
.. autosummary::
:toctree: .
pl.pca
pl.pca_loadings
pl.pca_scatter
pl.pca_variance_ratio
pl.tsne
pl.diffmap
pl.draw_graph
.. raw:: html
<h5>Branching trajectories and pseudotime, clustering, differential expression</h5>
.. autosummary::
:toctree: .
pl.aga
pl.aga_graph
pl.aga_path
pl.louvain
pl.dpt
pl.dpt_scatter
pl.dpt_groups_pseudotime
pl.dpt_timeseries
pl.rank_genes_groups
pl.rank_genes_groups_violin
.. raw:: html
<h5>Simulations</h5>
.. autosummary::
:toctree: .
pl.sim
.. raw:: html
<h4>Builtin datasets</h4>
Simple functions that provide annotated datasets for benchmarking. See
`here <https://scanpy.readthedocs.io/en/latest/examples.html>`_ for extensively
documented tutorials and use cases.
All of these functions return an Annotated Data object.
.. autosummary::
:toctree: .
datasets.paul15
datasets.toggleswitch
datasets.krumsiek11
datasets.blobs
datasets.moignard15
"""
from .. import __version__
from .. import settings
from .. import logging
from . import tl
tools = tl
from . import pl
plotting = pl
from . import pp
preprocessing = pp
from ..readwrite import read, read_10x_h5, write, read_params, write_params
from . import datasets
from ..data_structs import AnnData, DataGraph
from .. import utils
| 14.95098
| 113
| 0.679016
|
19e36b29ee592d089dc07f0b81f9a1312e103cce
| 34,894
|
py
|
Python
|
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
|
yihuajack/EdgeBERT
|
a51ae7557187e3251f4b11bc13ef9cbd336019ff
|
[
"Apache-2.0"
] | 8
|
2021-11-01T01:38:04.000Z
|
2022-03-20T16:03:39.000Z
|
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
|
yihuajack/EdgeBERT
|
a51ae7557187e3251f4b11bc13ef9cbd336019ff
|
[
"Apache-2.0"
] | 1
|
2021-11-19T08:04:02.000Z
|
2021-12-19T07:21:48.000Z
|
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
|
yihuajack/EdgeBERT
|
a51ae7557187e3251f4b11bc13ef9cbd336019ff
|
[
"Apache-2.0"
] | 5
|
2021-11-19T07:52:44.000Z
|
2022-02-10T08:23:19.000Z
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_albert import AlbertPreTrainedModel, AlbertLayerNorm, AlbertLayerGroup
from .modeling_bert import BertEmbeddings
from .modeling_highway_bert import BertPooler
import numpy as np
| 46.963661
| 148
| 0.611366
|
19e3c7e8cb0d8e13048dc4a21c8f8d2b1867724a
| 1,809
|
py
|
Python
|
tests/test_sar.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 8
|
2021-05-18T02:22:03.000Z
|
2021-09-11T02:49:04.000Z
|
tests/test_sar.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 1
|
2021-04-26T04:38:35.000Z
|
2021-04-26T04:38:35.000Z
|
tests/test_sar.py
|
chris-angeli-rft/cloud-custodian
|
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
|
[
"Apache-2.0"
] | 1
|
2021-11-10T02:28:47.000Z
|
2021-11-10T02:28:47.000Z
|
# Copyright 2020 Kapil Thangavelu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest
| 35.470588
| 74
| 0.616363
|
19e3cc99b66e2939b99c81e570efb9afd33fa23d
| 5,773
|
py
|
Python
|
rovina.py
|
Pandoro/tools
|
631c6036cb74dc845668fd912588fd31aae46f8b
|
[
"MIT"
] | 1
|
2019-04-22T16:38:03.000Z
|
2019-04-22T16:38:03.000Z
|
rovina.py
|
afcarl/tools-Pandoro
|
631c6036cb74dc845668fd912588fd31aae46f8b
|
[
"MIT"
] | 2
|
2018-03-13T10:49:48.000Z
|
2018-03-13T10:54:01.000Z
|
rovina.py
|
afcarl/tools-Pandoro
|
631c6036cb74dc845668fd912588fd31aae46f8b
|
[
"MIT"
] | 2
|
2018-03-08T19:40:10.000Z
|
2018-06-11T14:43:49.000Z
|
import json
import os
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
import cv2
import numpy as np
from tqdm import *
import dataset_utils
| 37.245161
| 161
| 0.625498
|
19e85b96640382129fd31d8131a6692e41afddf9
| 4,952
|
py
|
Python
|
gpgLabs/GPR/GPRlab1.py
|
victortocantins/gpgLabs
|
310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12
|
[
"MIT"
] | null | null | null |
gpgLabs/GPR/GPRlab1.py
|
victortocantins/gpgLabs
|
310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12
|
[
"MIT"
] | null | null | null |
gpgLabs/GPR/GPRlab1.py
|
victortocantins/gpgLabs
|
310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.constants import mu_0, epsilon_0
import matplotlib.pyplot as plt
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interact, interactive, IntSlider, widget, FloatText, FloatSlider, fixed
from .Wiggle import wiggle, PrimaryWave, ReflectedWave
import requests
from io import BytesIO
########################################
# DOWNLOAD FUNCTIONS
########################################
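# (reconstructed sketch: the download helpers were stripped in this dump; the
#  requests/BytesIO/PIL imports above suggest something like the hypothetical
#  helper below)
def downloadImage(url):
    # Fetch an image over HTTP and decode it with PIL.
    response = requests.get(url)
    return Image.open(BytesIO(response.content))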
########################################
# WIDGETS
########################################
########################################
# FUNCTIONS
########################################
| 24.636816
| 94
| 0.544628
|
19ebb2a3f5203d8e575a8e0bab417177a0a48924
| 5,010
|
py
|
Python
|
third_party/unidecode/x0bd.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 39
|
2015-06-10T23:18:07.000Z
|
2021-10-21T04:29:06.000Z
|
third_party/unidecode/x0bd.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 2
|
2016-08-22T12:38:10.000Z
|
2017-01-26T18:37:33.000Z
|
third_party/unidecode/x0bd.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 26
|
2015-06-10T22:09:15.000Z
|
2021-06-27T15:45:15.000Z
|
data = (
'bols', # 0x00
'bolt', # 0x01
'bolp', # 0x02
'bolh', # 0x03
'bom', # 0x04
'bob', # 0x05
'bobs', # 0x06
'bos', # 0x07
'boss', # 0x08
'bong', # 0x09
'boj', # 0x0a
'boc', # 0x0b
'bok', # 0x0c
'bot', # 0x0d
'bop', # 0x0e
'boh', # 0x0f
'bwa', # 0x10
'bwag', # 0x11
'bwagg', # 0x12
'bwags', # 0x13
'bwan', # 0x14
'bwanj', # 0x15
'bwanh', # 0x16
'bwad', # 0x17
'bwal', # 0x18
'bwalg', # 0x19
'bwalm', # 0x1a
'bwalb', # 0x1b
'bwals', # 0x1c
'bwalt', # 0x1d
'bwalp', # 0x1e
'bwalh', # 0x1f
'bwam', # 0x20
'bwab', # 0x21
'bwabs', # 0x22
'bwas', # 0x23
'bwass', # 0x24
'bwang', # 0x25
'bwaj', # 0x26
'bwac', # 0x27
'bwak', # 0x28
'bwat', # 0x29
'bwap', # 0x2a
'bwah', # 0x2b
'bwae', # 0x2c
'bwaeg', # 0x2d
'bwaegg', # 0x2e
'bwaegs', # 0x2f
'bwaen', # 0x30
'bwaenj', # 0x31
'bwaenh', # 0x32
'bwaed', # 0x33
'bwael', # 0x34
'bwaelg', # 0x35
'bwaelm', # 0x36
'bwaelb', # 0x37
'bwaels', # 0x38
'bwaelt', # 0x39
'bwaelp', # 0x3a
'bwaelh', # 0x3b
'bwaem', # 0x3c
'bwaeb', # 0x3d
'bwaebs', # 0x3e
'bwaes', # 0x3f
'bwaess', # 0x40
'bwaeng', # 0x41
'bwaej', # 0x42
'bwaec', # 0x43
'bwaek', # 0x44
'bwaet', # 0x45
'bwaep', # 0x46
'bwaeh', # 0x47
'boe', # 0x48
'boeg', # 0x49
'boegg', # 0x4a
'boegs', # 0x4b
'boen', # 0x4c
'boenj', # 0x4d
'boenh', # 0x4e
'boed', # 0x4f
'boel', # 0x50
'boelg', # 0x51
'boelm', # 0x52
'boelb', # 0x53
'boels', # 0x54
'boelt', # 0x55
'boelp', # 0x56
'boelh', # 0x57
'boem', # 0x58
'boeb', # 0x59
'boebs', # 0x5a
'boes', # 0x5b
'boess', # 0x5c
'boeng', # 0x5d
'boej', # 0x5e
'boec', # 0x5f
'boek', # 0x60
'boet', # 0x61
'boep', # 0x62
'boeh', # 0x63
'byo', # 0x64
'byog', # 0x65
'byogg', # 0x66
'byogs', # 0x67
'byon', # 0x68
'byonj', # 0x69
'byonh', # 0x6a
'byod', # 0x6b
'byol', # 0x6c
'byolg', # 0x6d
'byolm', # 0x6e
'byolb', # 0x6f
'byols', # 0x70
'byolt', # 0x71
'byolp', # 0x72
'byolh', # 0x73
'byom', # 0x74
'byob', # 0x75
'byobs', # 0x76
'byos', # 0x77
'byoss', # 0x78
'byong', # 0x79
'byoj', # 0x7a
'byoc', # 0x7b
'byok', # 0x7c
'byot', # 0x7d
'byop', # 0x7e
'byoh', # 0x7f
'bu', # 0x80
'bug', # 0x81
'bugg', # 0x82
'bugs', # 0x83
'bun', # 0x84
'bunj', # 0x85
'bunh', # 0x86
'bud', # 0x87
'bul', # 0x88
'bulg', # 0x89
'bulm', # 0x8a
'bulb', # 0x8b
'buls', # 0x8c
'bult', # 0x8d
'bulp', # 0x8e
'bulh', # 0x8f
'bum', # 0x90
'bub', # 0x91
'bubs', # 0x92
'bus', # 0x93
'buss', # 0x94
'bung', # 0x95
'buj', # 0x96
'buc', # 0x97
'buk', # 0x98
'but', # 0x99
'bup', # 0x9a
'buh', # 0x9b
'bweo', # 0x9c
'bweog', # 0x9d
'bweogg', # 0x9e
'bweogs', # 0x9f
'bweon', # 0xa0
'bweonj', # 0xa1
'bweonh', # 0xa2
'bweod', # 0xa3
'bweol', # 0xa4
'bweolg', # 0xa5
'bweolm', # 0xa6
'bweolb', # 0xa7
'bweols', # 0xa8
'bweolt', # 0xa9
'bweolp', # 0xaa
'bweolh', # 0xab
'bweom', # 0xac
'bweob', # 0xad
'bweobs', # 0xae
'bweos', # 0xaf
'bweoss', # 0xb0
'bweong', # 0xb1
'bweoj', # 0xb2
'bweoc', # 0xb3
'bweok', # 0xb4
'bweot', # 0xb5
'bweop', # 0xb6
'bweoh', # 0xb7
'bwe', # 0xb8
'bweg', # 0xb9
'bwegg', # 0xba
'bwegs', # 0xbb
'bwen', # 0xbc
'bwenj', # 0xbd
'bwenh', # 0xbe
'bwed', # 0xbf
'bwel', # 0xc0
'bwelg', # 0xc1
'bwelm', # 0xc2
'bwelb', # 0xc3
'bwels', # 0xc4
'bwelt', # 0xc5
'bwelp', # 0xc6
'bwelh', # 0xc7
'bwem', # 0xc8
'bweb', # 0xc9
'bwebs', # 0xca
'bwes', # 0xcb
'bwess', # 0xcc
'bweng', # 0xcd
'bwej', # 0xce
'bwec', # 0xcf
'bwek', # 0xd0
'bwet', # 0xd1
'bwep', # 0xd2
'bweh', # 0xd3
'bwi', # 0xd4
'bwig', # 0xd5
'bwigg', # 0xd6
'bwigs', # 0xd7
'bwin', # 0xd8
'bwinj', # 0xd9
'bwinh', # 0xda
'bwid', # 0xdb
'bwil', # 0xdc
'bwilg', # 0xdd
'bwilm', # 0xde
'bwilb', # 0xdf
'bwils', # 0xe0
'bwilt', # 0xe1
'bwilp', # 0xe2
'bwilh', # 0xe3
'bwim', # 0xe4
'bwib', # 0xe5
'bwibs', # 0xe6
'bwis', # 0xe7
'bwiss', # 0xe8
'bwing', # 0xe9
'bwij', # 0xea
'bwic', # 0xeb
'bwik', # 0xec
'bwit', # 0xed
'bwip', # 0xee
'bwih', # 0xef
'byu', # 0xf0
'byug', # 0xf1
'byugg', # 0xf2
'byugs', # 0xf3
'byun', # 0xf4
'byunj', # 0xf5
'byunh', # 0xf6
'byud', # 0xf7
'byul', # 0xf8
'byulg', # 0xf9
'byulm', # 0xfa
'byulb', # 0xfb
'byuls', # 0xfc
'byult', # 0xfd
'byulp', # 0xfe
'byulh', # 0xff
)
| 19.343629
| 20
| 0.436128
|
19ecf7e3e7e37b889b168edf93836870ccf82afb
| 55
|
py
|
Python
|
social/backends/azuread.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 1,987
|
2015-01-01T16:12:45.000Z
|
2022-03-29T14:24:25.000Z
|
social/backends/azuread.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 731
|
2015-01-01T22:55:25.000Z
|
2022-03-10T15:07:51.000Z
|
virtual/lib/python3.6/site-packages/social/backends/azuread.py
|
dennismwaniki67/awards
|
80ed10541f5f751aee5f8285ab1ad54cfecba95f
|
[
"MIT"
] | 1,082
|
2015-01-01T16:27:26.000Z
|
2022-03-22T21:18:33.000Z
|
from social_core.backends.azuread import AzureADOAuth2
| 27.5
| 54
| 0.890909
|
19ed8ee16410261911df594fb0af9ff20f20ca7e
| 6,556
|
py
|
Python
|
pystitchy/grid.py
|
iht/Stitchy-Studio
|
f7faf846d7ce498ef5945caaff2b09f9108e2919
|
[
"MIT"
] | 1
|
2021-02-28T17:27:16.000Z
|
2021-02-28T17:27:16.000Z
|
pystitchy/grid.py
|
iht/Stitchy-Studio
|
f7faf846d7ce498ef5945caaff2b09f9108e2919
|
[
"MIT"
] | null | null | null |
pystitchy/grid.py
|
iht/Stitchy-Studio
|
f7faf846d7ce498ef5945caaff2b09f9108e2919
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2012 Israel Herraiz <isra@herraiz.org>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import wx
import numpy
from numpy import zeros
| 33.968912
| 107
| 0.585265
|
19eeab362b9fb0e2d6b801d0a756ec8fc09dd20a
| 2,837
|
py
|
Python
|
usps_tools/exceptions.py
|
pedrovagner/usps-tools
|
6a241fda35db6590684a534b9c3cf78a589ea09d
|
[
"MIT"
] | null | null | null |
usps_tools/exceptions.py
|
pedrovagner/usps-tools
|
6a241fda35db6590684a534b9c3cf78a589ea09d
|
[
"MIT"
] | null | null | null |
usps_tools/exceptions.py
|
pedrovagner/usps-tools
|
6a241fda35db6590684a534b9c3cf78a589ea09d
|
[
"MIT"
] | null | null | null |
import traceback
from typing import Optional
from .i18n import _
| 27.813725
| 114
| 0.601692
|
19effa59bdd92c4854c56be758df2693cacdcb3d
| 1,158
|
py
|
Python
|
scraper/engine.py
|
pesya/scraper
|
c088dc3dc613fec94e297ac71302d2305b44b14c
|
[
"BSD-3-Clause"
] | null | null | null |
scraper/engine.py
|
pesya/scraper
|
c088dc3dc613fec94e297ac71302d2305b44b14c
|
[
"BSD-3-Clause"
] | null | null | null |
scraper/engine.py
|
pesya/scraper
|
c088dc3dc613fec94e297ac71302d2305b44b14c
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import csv
import json
import requests
from parsel import Selector
from scraper.parser import get_features_from_item
start_url = 'http://www.world-art.ru/animation/rating_top.php'
SIGN_STDOUT = '-'
FORMAT_CSV = 'csv'
FORMAT_JL = 'jl'
def parse(url: str, out_path: str, out_format: str):
"""
gets link and returns the response
"""
response = requests.get(url)
assert response.status_code == 200, f'bad status code: {response.status_code}'
response_html = Selector(response.text)
links_to_films = response_html.xpath('//td[@class="review"]/a[@class="review"]/@href').getall()
out_file = sys.stdout if out_path == SIGN_STDOUT else open(out_path, 'w', buffering=1, newline='')
for link in links_to_films:
item_response = requests.get(link)
        assert item_response.status_code == 200, f'bad status code: {item_response.status_code}'
item = get_features_from_item(item_response)
        if out_format == FORMAT_CSV:
            item_writer = csv.writer(out_file, delimiter=' ', quotechar=',', quoting=csv.QUOTE_MINIMAL)
            item_writer.writerow(item.values())
        elif out_format == FORMAT_JL:
            # (reconstructed: a FORMAT_JL branch is implied by the constant above
            #  but absent here; one JSON object per line is assumed)
            out_file.write(json.dumps(item, ensure_ascii=False) + '\n')
    if out_file is not sys.stdout:  # don't close the interpreter's stdout
        out_file.close()
return
| 28.243902
| 103
| 0.69171
|
19f24b3bd880d9e6bed48acb8886bc868d4be2dd
| 14,592
|
py
|
Python
|
gui/python/photogate/photogate/photogate_app.py
|
iorodeo/photogate_software
|
c9a97dc3da644fe093397dd11024825ba0d79519
|
[
"Apache-2.0"
] | 1
|
2020-07-23T19:02:50.000Z
|
2020-07-23T19:02:50.000Z
|
gui/python/photogate/photogate/photogate_app.py
|
iorodeo/photogate_software
|
c9a97dc3da644fe093397dd11024825ba0d79519
|
[
"Apache-2.0"
] | null | null | null |
gui/python/photogate/photogate/photogate_app.py
|
iorodeo/photogate_software
|
c9a97dc3da644fe093397dd11024825ba0d79519
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import sys
import serial.tools.list_ports
from PyQt4 import QtCore
from PyQt4 import QtGui
from photogate_ui import Ui_PhotogateMainWindow
from photogate_serial import PhotogateDevice
from photogate_serial import getListOfPorts
import dependency_hack
try:
import scipy.io
HAVE_SCIPY_IO = True
except ImportError:
HAVE_SCIPY_IO = False
# Utility functions
# -----------------------------------------------------------------------------
def getPhotogateTimes(photogateDict):
entryTime = uSecToSec(float(photogateDict['entryTime']))
exitTime = uSecToSec(float(photogateDict['exitTime']))
timeInGate = exitTime - entryTime
return entryTime, exitTime, timeInGate
def getTimeBetweenGates(photogateList):
entryTime0 = uSecToSec(float(photogateList[0]['entryTime']))
entryTime1 = uSecToSec(float(photogateList[1]['entryTime']))
timeBetween = entryTime1 - entryTime0
return timeBetween
def autoAddFileExtension(fileName,autoExt):
fileNameBase, fileNameExt = os.path.splitext(fileName)
if not fileNameExt:
# Only add extension if there isn't one already
fileName = '{0}{1}'.format(fileNameBase,autoExt)
return fileName
def uSecToSec(value):
return (1.0e-6)*value
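# (the PhotogateMainWindow class instantiated below was stripped in this
#  dump; it is defined in the original module)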
def runPhotogateApp():
app = QtGui.QApplication(sys.argv)
mainWindow = PhotogateMainWindow()
mainWindow.main()
app.exec_()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
runPhotogateApp()
| 37.22449
| 94
| 0.614172
|
19f364dac17ba32accfedb9fef8b6459dc8369f0
| 114
|
py
|
Python
|
playground/step2/test1.py
|
jhson989/jhML
|
eb8b76d3b47df858e82cd971bb32794e12de4747
|
[
"Apache-2.0"
] | null | null | null |
playground/step2/test1.py
|
jhson989/jhML
|
eb8b76d3b47df858e82cd971bb32794e12de4747
|
[
"Apache-2.0"
] | null | null | null |
playground/step2/test1.py
|
jhson989/jhML
|
eb8b76d3b47df858e82cd971bb32794e12de4747
|
[
"Apache-2.0"
] | null | null | null |
from core import Variable
from operation import *
a = Variable(2)
b = square(a)
c = square(b)
print(c.data)
| 8.769231
| 25
| 0.675439
|
19f3a16361bc6bc5804201c9d2a4c2d8f966a4a0
| 106
|
py
|
Python
|
sharepoint/__init__.py
|
nessalc/python-sharepoint
|
d264cf5be56c8f9f619a4f72fd039c167cd01ba8
|
[
"MIT"
] | 1
|
2019-06-03T03:16:44.000Z
|
2019-06-03T03:16:44.000Z
|
sharepoint/__init__.py
|
nessalc/python-sharepoint
|
d264cf5be56c8f9f619a4f72fd039c167cd01ba8
|
[
"MIT"
] | null | null | null |
sharepoint/__init__.py
|
nessalc/python-sharepoint
|
d264cf5be56c8f9f619a4f72fd039c167cd01ba8
|
[
"MIT"
] | null | null | null |
name = 'sharepoint'
from .sharepoint import SharePointSite
__author__='James Classen'
__version__='0.0.2'
| 21.2
| 38
| 0.792453
|
19f6250f9d15cae4fb338cfbac1c36e435b2c1ca
| 3,188
|
py
|
Python
|
third_party/nkata/tests/transformvideo_test.py
|
google/offline-content-packager
|
5a023eeeed4973e452309b434a59ce745487fdd6
|
[
"Apache-2.0"
] | 32
|
2016-05-31T13:01:46.000Z
|
2022-03-18T11:17:36.000Z
|
third_party/nkata/tests/transformvideo_test.py
|
google/offline-content-packager
|
5a023eeeed4973e452309b434a59ce745487fdd6
|
[
"Apache-2.0"
] | null | null | null |
third_party/nkata/tests/transformvideo_test.py
|
google/offline-content-packager
|
5a023eeeed4973e452309b434a59ce745487fdd6
|
[
"Apache-2.0"
] | 29
|
2016-06-08T18:11:00.000Z
|
2021-09-28T04:14:34.000Z
|
# Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import makedirs
from os.path import dirname
from os.path import isdir
from os.path import join
import shutil
import tempfile
import unittest
import jinja2
from scripts.transformations import VideoTransformation
import yaml
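# (the VideoTransformation TestCase class was stripped in this dump;
#  unittest.main() below discovers and runs it in the original file)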
if __name__ == "__main__":
unittest.main()
| 32.20202
| 78
| 0.69542
|
19f6287b8eec32e7e9ec1cec1c39636f68949b75
| 2,308
|
py
|
Python
|
src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py
|
feiphoon/mpns-pipeline
|
d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6
|
[
"MIT"
] | 1
|
2022-03-28T10:46:58.000Z
|
2022-03-28T10:46:58.000Z
|
src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py
|
feiphoon/mpns-pipeline
|
d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6
|
[
"MIT"
] | null | null | null |
src/E_get_mpns_v8_analyses_answers/query_mpns_v8_name_relationships.py
|
feiphoon/mpns-pipeline
|
d34a8609dc4cb04ccc3f5c9b79a52bfeecdb38f6
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from pyspark.sql import SparkSession, functions as f
from pyspark.sql.dataframe import DataFrame
# Monkeypatch in case I don't use Spark 3.0
# (reconstructed: the shim below was stripped in this dump; it is the
#  conventional pre-Spark-3.0 DataFrame.transform helper)
def transform(self, f):
    return f(self)

DataFrame.transform = transform
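# (hypothetical sketch: the stripped query function presumably read the name
#  relationships dataset with Spark and wrote a summarised result; the parquet
#  format and the name_type column are assumptions, not the original logic)
def query_mpns_v8_name_relationships(input_filepath, output_filepath, sample_run=False):
    spark = SparkSession.builder.appName("query_mpns_v8_name_relationships").getOrCreate()
    df = spark.read.parquet(str(Path(input_filepath)))
    # Example query: count name records per relationship type.
    summary = df.groupBy("name_type").agg(f.count("*").alias("count"))
    if sample_run:
        summary.show()  # print the result for the sample/demo run
    summary.write.mode("overwrite").parquet(str(Path(output_filepath)))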
# Sample/demo purposes
mpns_v8_name_relationships_filepath: str = (
"data/analysis/mpns/sample_mpns_v8/name_relationships/"
)
output_filepath: str = "data/analysis/mpns/sample_mpns_v8/name_relationships/query/"
query_mpns_v8_name_relationships(
input_filepath=mpns_v8_name_relationships_filepath,
output_filepath=output_filepath,
sample_run=True,
)
# # Real data
# mpns_v8_name_relationships_filepath: str = (
# "data/analysis/mpns/mpns_v8/name_relationships/"
# )
# output_filepath: str = "data/analysis/mpns/mpns_v8/name_relationships/query/"
# query_mpns_v8_name_relationships(
# input_filepath=mpns_v8_name_relationships_filepath,
# output_filepath=output_filepath,
# sample_run=False,
# )
| 30.368421
| 86
| 0.747834
|
19f803a96ec5d364efd732c5edf09bf82c3dfe31
| 124
|
py
|
Python
|
library_homework/my_project/my_program.py
|
Tommy3121173/tommy
|
429aefb377f84a1d49e85f825a32ac2c160ebc85
|
[
"MIT"
] | null | null | null |
library_homework/my_project/my_program.py
|
Tommy3121173/tommy
|
429aefb377f84a1d49e85f825a32ac2c160ebc85
|
[
"MIT"
] | null | null | null |
library_homework/my_project/my_program.py
|
Tommy3121173/tommy
|
429aefb377f84a1d49e85f825a32ac2c160ebc85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 16:56:57 2018
@author: tommy_mizuki
"""
import my_library
my_library.my_func(1, 2)  # qualify with the module name; a bare my_func would raise NameError
| 11.272727
| 35
| 0.645161
|
19f8e4fcaecd9a3968eed26a324bf80026d1583f
| 246
|
py
|
Python
|
algorithm/python/BAEKJOON_1436.py
|
cjsrhd94/TIL
|
b91bab7d99d10c63f91af0790cb28ec3d228b68b
|
[
"MIT"
] | 1
|
2021-08-19T06:23:00.000Z
|
2021-08-19T06:23:00.000Z
|
algorithm/python/BAEKJOON_1436.py
|
cjsrhd94/TIL
|
b91bab7d99d10c63f91af0790cb28ec3d228b68b
|
[
"MIT"
] | null | null | null |
algorithm/python/BAEKJOON_1436.py
|
cjsrhd94/TIL
|
b91bab7d99d10c63f91af0790cb28ec3d228b68b
|
[
"MIT"
] | null | null | null |
n = int(input())
count = 0
number = 0
while True:
    if '666' in str(number):  # count every number that contains '666'
        count += 1
        if count == n:  # the n-th such number is the answer
            print(number)
            break
number += 1
| 24.6
| 68
| 0.565041
|
19f91845aaff11955f6b430aa3684474c464bf80
| 3,599
|
py
|
Python
|
cacheTraceAnalysis/plot/reqRate.py
|
Thesys-lab/cacheWorkloadAnalysisOSDI20
|
cfc5bbb5c8d909571546c78c247561c9db449469
|
[
"Apache-2.0"
] | 6
|
2020-11-12T07:51:02.000Z
|
2022-03-27T20:20:01.000Z
|
cacheTraceAnalysis/plot/reqRate.py
|
Thesys-lab/InMemoryCachingWorkloadAnalysis
|
5f6f9f7e29a164478f3fc28eb64c170bbbafdec7
|
[
"Apache-2.0"
] | null | null | null |
cacheTraceAnalysis/plot/reqRate.py
|
Thesys-lab/InMemoryCachingWorkloadAnalysis
|
5f6f9f7e29a164478f3fc28eb64c170bbbafdec7
|
[
"Apache-2.0"
] | 1
|
2021-12-31T01:16:09.000Z
|
2021-12-31T01:16:09.000Z
|
""" plot request rate
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--trace", type=str, help="trace path")
ap.add_argument("--type", type=str, default="cnt", help="plot type")
ap.add_argument("--window", type=int, default=60, help="the size of window in sec")
p = ap.parse_args()
plot_req_rate(TwrShortBinTraceReader(p.trace), p.window, plot_type=(p.type, ))
| 38.698925
| 159
| 0.689358
|
19f928aecd4ff7011c0373aab909dea8913438c6
| 1,373
|
py
|
Python
|
config.py
|
ShallweJohn/MonsterBlog
|
f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5
|
[
"MIT"
] | null | null | null |
config.py
|
ShallweJohn/MonsterBlog
|
f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5
|
[
"MIT"
] | 3
|
2021-03-18T20:36:25.000Z
|
2021-09-07T23:54:49.000Z
|
config.py
|
ShallweJohn/MonsterBlog
|
f3bd0bdab99af6ba06b7b8fb0eaa6770115fc9c5
|
[
"MIT"
] | null | null | null |
import os
import redis
import logging
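# (the Config classes referenced in the mapping below were stripped in this
#  dump; minimal assumed versions are sketched here)
class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY', 'dev')
    LOG_LEVEL = logging.DEBUG


class DevelopmentConfig(Config):
    DEBUG = True


class ProductionConfig(Config):
    DEBUG = False
    LOG_LEVEL = logging.WARNING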
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| 19.614286
| 81
| 0.699927
|
19fac7af0c83f21b636a9b1fa9c53ac1705d1cfb
| 5,097
|
py
|
Python
|
utils.py
|
sjenni/DeepBilevel
|
9db6c9d81188e891104677a7ffc4b045421fb097
|
[
"MIT"
] | 8
|
2019-10-23T12:16:13.000Z
|
2020-11-16T02:20:28.000Z
|
utils.py
|
sjenni/DeepBilevel
|
9db6c9d81188e891104677a7ffc4b045421fb097
|
[
"MIT"
] | null | null | null |
utils.py
|
sjenni/DeepBilevel
|
9db6c9d81188e891104677a7ffc4b045421fb097
|
[
"MIT"
] | 4
|
2020-02-06T14:54:47.000Z
|
2020-10-25T03:03:04.000Z
|
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def montage_tf(imgs, num_h, num_w):
"""Makes a montage of imgs that can be used in image_summaries.
Args:
imgs: Tensor of images
num_h: Number of images per column
num_w: Number of images per row
Returns:
A montage of num_h*num_w images
"""
imgs = tf.unstack(imgs)
img_rows = [None] * num_h
for r in range(num_h):
img_rows[r] = tf.concat(axis=1, values=imgs[r * num_w:(r + 1) * num_w])
montage = tf.concat(axis=0, values=img_rows)
return tf.expand_dims(montage, 0)
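def remove_missing(var_list, model_path):
    # (reconstructed: this helper was stripped in this dump; the sketch mirrors
    #  tf-slim's ignore_missing_vars handling, which matches how it is called
    #  in assign_from_checkpoint_fn below)
    reader = pywrap_tensorflow.NewCheckpointReader(model_path)
    if isinstance(var_list, dict):
        var_dict = var_list
    else:
        var_dict = {var.op.name: var for var in var_list}
    available_vars = {}
    for var in var_dict:
        if reader.has_tensor(var):
            available_vars[var] = var_dict[var]
        else:
            logging.warning('Variable %s missing in checkpoint %s', var, model_path)
    return available_vars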
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
      checkpoint to the corresponding variables to initialize. If empty or None,
it would return no_op(), None.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
if ignore_missing_vars:
var_list = remove_missing(var_list, model_path)
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
    def callback(session):
        # (reconstructed: the callback body was stripped in this dump; this is
        #  the conventional tf-slim restore callback)
        saver.restore(session, model_path)
    return callback
def get_variables_to_train(trainable_scopes=None):
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if trainable_scopes is None:
variables_to_train = tf.trainable_variables()
else:
scopes = [scope.strip() for scope in trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
print('Variables to train: {}'.format([v.op.name for v in variables_to_train]))
return variables_to_train
| 36.148936
| 84
| 0.673141
|
19fb9cf0c33a5df90f7ff935997f2b53b4510673
| 1,234
|
py
|
Python
|
paralleldomain/model/annotation/polygon_2d.py
|
parallel-domain/pd-sdk
|
20e3d052a5cb612a2dd84bda7b1b5487a6a60edc
|
[
"Apache-2.0"
] | 10
|
2021-11-17T17:23:49.000Z
|
2022-03-18T09:51:23.000Z
|
paralleldomain/model/annotation/polygon_2d.py
|
parallel-domain/pd-sdk
|
20e3d052a5cb612a2dd84bda7b1b5487a6a60edc
|
[
"Apache-2.0"
] | 3
|
2021-12-02T17:16:20.000Z
|
2022-01-07T12:47:13.000Z
|
paralleldomain/model/annotation/polygon_2d.py
|
parallel-domain/pd-sdk
|
20e3d052a5cb612a2dd84bda7b1b5487a6a60edc
|
[
"Apache-2.0"
] | 2
|
2022-03-09T07:03:54.000Z
|
2022-03-23T15:53:48.000Z
|
from dataclasses import dataclass
from typing import List
from paralleldomain.model.annotation.common import Annotation
from paralleldomain.model.annotation.polyline_2d import Polyline2D
| 29.380952
| 114
| 0.691248
|
19fd46480858b4a1d5b5836cc3a46a14d32272f9
| 828
|
py
|
Python
|
tests/backup_bsps.py
|
LaudateCorpus1/bsp_tool
|
e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19
|
[
"MIT"
] | null | null | null |
tests/backup_bsps.py
|
LaudateCorpus1/bsp_tool
|
e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19
|
[
"MIT"
] | null | null | null |
tests/backup_bsps.py
|
LaudateCorpus1/bsp_tool
|
e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19
|
[
"MIT"
] | null | null | null |
import os
import shutil
import sys
from maplist import installed_games
backup_dir = "F:/bsps"
if len(sys.argv) == 2:
backup_dir = sys.argv[1]
print(f"Making backups in '{backup_dir}'")
i = 0
for base_dir, game_dir in installed_games:
i += 1
print(f"Backing up ({i}/{len(installed_games)}) {game_dir}...")
for map_dir in installed_games[(base_dir, game_dir)]:
src_dir = os.path.join(base_dir, game_dir, map_dir)
dest_dir = os.path.join(backup_dir, game_dir, map_dir)
os.makedirs(dest_dir, exist_ok=True)
try: # note the missed file(s) and continue
shutil.copytree(src_dir, dest_dir, dirs_exist_ok=True)
except shutil.Error as err:
print(f"*** ERROR *** {err}")
except FileNotFoundError as err:
print(f"*** ERROR *** {err}")
| 30.666667
| 67
| 0.642512
|
19fe235467f017d20a959660a872441f0b170a74
| 770
|
py
|
Python
|
infobip_channels/whatsapp/models/response/get_templates.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
infobip_channels/whatsapp/models/response/get_templates.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
infobip_channels/whatsapp/models/response/get_templates.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from pydantic import AnyHttpUrl
from infobip_channels.core.models import CamelCaseModel, ResponseBase
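# (hypothetical sketch: the response models were stripped in this dump; the
#  field names below are illustrative assumptions, not the SDK's actual schema)
class TemplateResponse(CamelCaseModel):
    id: Optional[str] = None
    name: Optional[str] = None
    language: Optional[str] = None


class GetTemplatesResponseOK(ResponseBase):
    templates: Optional[List[TemplateResponse]] = None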
| 19.25
| 69
| 0.711688
|
19ff517f6d368213182e5f5031c40842eae17a49
| 1,391
|
py
|
Python
|
examples/server.py
|
fhamborg/Giveme5W
|
b5f49712654ab466e605716b4cd9f8dce9bcdd88
|
[
"Apache-2.0"
] | 16
|
2018-03-28T11:20:11.000Z
|
2020-09-17T19:39:25.000Z
|
examples/server.py
|
fhamborg/Giveme5W
|
b5f49712654ab466e605716b4cd9f8dce9bcdd88
|
[
"Apache-2.0"
] | 3
|
2018-03-15T10:17:29.000Z
|
2018-05-16T13:14:28.000Z
|
examples/server.py
|
fhamborg/Giveme5W
|
b5f49712654ab466e605716b4cd9f8dce9bcdd88
|
[
"Apache-2.0"
] | 6
|
2018-05-08T12:53:51.000Z
|
2021-09-25T03:21:02.000Z
|
import logging
from flask import Flask, request, jsonify
from extractor.document import Document
from extractor.five_w_extractor import FiveWExtractor
app = Flask(__name__)
log = logging.getLogger(__name__)
host = None
port = 5000
debug = False
options = None
extractor = FiveWExtractor()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
log.setLevel(logging.DEBUG)
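# (reconstructed: the route handlers and the run() helper were stripped in
#  this dump; run() is assumed to start the Flask development server with the
#  settings above)
def run():
    app.run(host=host, port=port, debug=debug)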
if __name__ == "__main__":
run()
| 26.245283
| 82
| 0.675054
|
19ffa347e490ab19819ef9b329ffa153417391c5
| 826
|
py
|
Python
|
CPAC/utils/tests/test_symlinks.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 125
|
2015-03-04T09:14:46.000Z
|
2022-03-29T07:46:12.000Z
|
CPAC/utils/tests/test_symlinks.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 1,018
|
2015-01-04T16:01:29.000Z
|
2022-03-31T19:23:09.000Z
|
CPAC/utils/tests/test_symlinks.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 117
|
2015-01-10T08:05:52.000Z
|
2022-01-18T05:16:51.000Z
|
import os
import tempfile
import pkg_resources as p
from CPAC.utils.symlinks import create_symlinks
mocked_outputs = \
p.resource_filename(
"CPAC",
os.path.join(
'utils',
'tests',
'test_symlinks-outputs.txt'
)
)
| 19.666667
| 55
| 0.579903
|
c200bfcfb3506f7b5d5aa61e676f674b8d4fef20
| 14,488
|
py
|
Python
|
web/migrations/0001_initial.py
|
jmason-ebi/pdx
|
aec38d74a78c907041332f4623c01047f45f3f0a
|
[
"Apache-2.0"
] | null | null | null |
web/migrations/0001_initial.py
|
jmason-ebi/pdx
|
aec38d74a78c907041332f4623c01047f45f3f0a
|
[
"Apache-2.0"
] | null | null | null |
web/migrations/0001_initial.py
|
jmason-ebi/pdx
|
aec38d74a78c907041332f4623c01047f45f3f0a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-09 15:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
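# (reconstructed stub: the Migration class body was stripped in this dump; an
#  initial migration has this shape, with the generated operations omitted)
class Migration(migrations.Migration):

    initial = True

    dependencies = []

    operations = []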
| 50.835088
| 143
| 0.574752
|
c202c2c6ef86a127b7a659f1ab70e457fb054b54
| 4,799
|
py
|
Python
|
dserve/__init__.py
|
JIC-CSB/dserve
|
5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e
|
[
"MIT"
] | null | null | null |
dserve/__init__.py
|
JIC-CSB/dserve
|
5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e
|
[
"MIT"
] | null | null | null |
dserve/__init__.py
|
JIC-CSB/dserve
|
5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e
|
[
"MIT"
] | null | null | null |
"""Script for running the dserve server."""
import os
from flask import (
Flask,
jsonify,
send_file,
abort,
request,
)
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
@app.route("/overlays")  # (decorator reconstructed from the self href below; stripped in this dump)
def overlay_root():
overlays = app._dataset.access_overlays()
content = {
"_links": {
"self": {"href": "/overlays"}},
}
for overlay_name in overlays.keys():
value = {"href": "/overlays/{}".format(overlay_name)}
content["_links"][overlay_name] = value
return jsonify(content)
@app.route("/overlays/<overlay_name>")  # (decorator reconstructed; stripped in this dump)
def specific_overlay(overlay_name):
overlays = app._dataset.access_overlays()
try:
overlay = overlays[overlay_name]
except KeyError:
abort(404)
return jsonify(overlay)
@app.route("/overlays/<overlay_name>", methods=["PUT"])  # (decorator reconstructed; the PUT method is an assumption)
def create_new_overlay(overlay_name):
empty_overlay = app._dataset.empty_overlay()
try:
app._dataset.persist_overlay(overlay_name, empty_overlay)
except IOError:
abort(409)
return "", 201
| 25.526596
| 77
| 0.600542
|
c203136ec3038930bc5926aaf959f30e095e46a5
| 1,610
|
py
|
Python
|
kkutil/security.py
|
kaka19ace/kkutils
|
1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1
|
[
"MIT"
] | 1
|
2015-12-13T18:42:52.000Z
|
2015-12-13T18:42:52.000Z
|
kkutil/security.py
|
kaka19ace/kkutil
|
1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1
|
[
"MIT"
] | null | null | null |
kkutil/security.py
|
kaka19ace/kkutil
|
1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
util regex tool
refs:
http://www.symantec.com/connect/articles/detection-sql-injection-and-cross-site-scripting-attacks
"""
import re
INJECTION_REGEX = re.compile(
r"(%27)|(\')|(\-\-)|(%23)|(#)|" # Regex for detection of SQL meta-characters
r"\w*((%27)|(\'))\s+((%6F)|o|(%4F))((%72)|r|(%52))\s*|" # Modified regex for detection of SQL meta-characters eg: ' or 1 = 1' detect word 'or',
r"((%3D)|(=))[^\n]*((%27)|(\')|(\-\-)|(%3B)|(;))" # Regex for typical SQL Injection attack eg: '= 1 --'
r"((%27)|(\'))union|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))select|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))insert|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))update|" # Regex for detecting SQL Injection with the UNION keyword
r"((%27)|(\'))drop", # Regex for detecting SQL Injection with the UNION keyword
re.IGNORECASE
)
CSS_ATTACK_REGEX = re.compile(r"((%3C)|<)((%2F)|/)*[a-z0-9%]+((%3E)|>)", re.IGNORECASE)
CSS_IMG_SRC_ATTACK_REGEX = re.compile(
r"((%3C)|<)((%69)|i|(%49))((%6D)|m|(%4D))((%67)|g|(%47))[^\n]+((%3E)|>)",
re.IGNORECASE
)
CSS_PARANOID_ATTACK_REGEX = re.compile(r"((%3C)|<)[^\n]+((%3E)|>)", re.IGNORECASE)
| 35
| 148
| 0.608075
|
c204bfd19101390dbf534e7049d9b49aef3685e3
| 1,520
|
py
|
Python
|
update_eeprom_rc.py
|
rkojedzinszky/thermo-sensor
|
f0b5aa6dbf231b566e00a683c5bb1551569d2463
|
[
"BSD-3-Clause"
] | 2
|
2019-04-25T17:38:02.000Z
|
2020-03-03T22:50:04.000Z
|
update_eeprom_rc.py
|
rkojedzinszky/thermo-sensor
|
f0b5aa6dbf231b566e00a683c5bb1551569d2463
|
[
"BSD-3-Clause"
] | null | null | null |
update_eeprom_rc.py
|
rkojedzinszky/thermo-sensor
|
f0b5aa6dbf231b566e00a683c5bb1551569d2463
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
REGISTERS = {
'IOCFG2': 0x00,
'IOCFG1': 0x01,
'IOCFG0': 0x02,
'FIFOTHR': 0x03,
'SYNC1': 0x04,
'SYNC0': 0x05,
'PKTLEN': 0x06,
'PKTCTRL1': 0x07,
'PKTCTRL0': 0x08,
'ADDR': 0x09,
'CHANNR': 0x0A,
'FSCTRL1': 0x0B,
'FSCTRL0': 0x0C,
'FREQ2': 0x0D,
'FREQ1': 0x0E,
'FREQ0': 0x0F,
'MDMCFG4': 0x10,
'MDMCFG3': 0x11,
'MDMCFG2': 0x12,
'MDMCFG1': 0x13,
'MDMCFG0': 0x14,
'DEVIATN': 0x15,
'MCSM2': 0x16,
'MCSM1': 0x17,
'MCSM0': 0x18,
'FOCCFG': 0x19,
'BSCFG': 0x1A,
'AGCCTRL2': 0x1B,
'AGCCTRL1': 0x1C,
'AGCCTRL0': 0x1D,
'WOREVT1': 0x1E,
'WOREVT0': 0x1F,
'WORCTRL': 0x20,
'FREND1': 0x21,
'FREND0': 0x22,
'FSCAL3': 0x23,
'FSCAL2': 0x24,
'FSCAL1': 0x25,
'FSCAL0': 0x26,
'RCCTRL1': 0x27,
'RCCTRL0': 0x28,
'FSTEST': 0x29,
'PTEST': 0x2A,
'AGCTEST': 0x2B,
'TEST2': 0x2C,
'TEST1': 0x2D,
'TEST0': 0x2E,
'PATABLE': 0x3E,
}
if __name__ == '__main__':
import sys
import re
with open('eeprom', 'r+b') as fh:
fh.seek(20)
for line in sys.stdin:
            if re.match(r'^\s*#', line):
                continue
            m = re.match(r'(?P<reg>\w+)\s+(?P<value>[0-9a-fA-F]+)', line)
            if not m:
                continue
            m = m.groupdict()
            # write register index then value as raw bytes; bytearray works on
            # both Python 2 and 3, where chr() would be text-only on Python 3
            fh.write(bytearray([REGISTERS[m['reg']], int(m['value'], 16)]))
fh.write(b"\xff" * (512 - fh.tell()))
| 20.540541
| 72
| 0.484211
|
c205b5f889cdcc188c5b89c3efa9505bfb938fe3
| 384
|
py
|
Python
|
UsefulLink.py
|
qyu6/TAILab
|
6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7
|
[
"Apache-2.0"
] | 1
|
2022-01-10T15:14:55.000Z
|
2022-01-10T15:14:55.000Z
|
UsefulLink.py
|
qyu6/TAILab
|
6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7
|
[
"Apache-2.0"
] | null | null | null |
UsefulLink.py
|
qyu6/TAILab
|
6c3e7a7e2e49f7c673ab46b90c1568a96cce75b7
|
[
"Apache-2.0"
] | null | null | null |
'''
@func: module to store useful links
@create:2021.10.20
'''
| 25.6
| 74
| 0.65625
|