hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1d4408048c2582c035650ab9faddb5edccff6fd | 2,498 | py | Python | city_scrapers_core/extensions/status.py | jtotoole/city-scrapers-core | 0c091d91bf8883c6f361a19fbb055abc3b306835 | [
"MIT"
] | null | null | null | city_scrapers_core/extensions/status.py | jtotoole/city-scrapers-core | 0c091d91bf8883c6f361a19fbb055abc3b306835 | [
"MIT"
] | null | null | null | city_scrapers_core/extensions/status.py | jtotoole/city-scrapers-core | 0c091d91bf8883c6f361a19fbb055abc3b306835 | [
"MIT"
] | null | null | null | from datetime import datetime
import pytz
from scrapy import signals
RUNNING = "running"
FAILING = "failing"
STATUS_COLOR_MAP = {RUNNING: "#44cc11", FAILING: "#cb2431"}
STATUS_ICON = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="144" height="20">
<linearGradient id="b" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<clipPath id="a">
<rect width="144" height="20" rx="3" fill="#fff"/>
</clipPath>
<g clip-path="url(#a)">
<path fill="#555" d="M0 0h67v20H0z"/>
<path fill="{color}" d="M67 0h77v20H67z"/>
<path fill="url(#b)" d="M0 0h144v20H0z"/>
</g>
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110">
<text x="345" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">{status}</text>
<text x="345" y="140" transform="scale(.1)">{status}</text>
<text x="1045" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">{date}</text>
<text x="1045" y="140" transform="scale(.1)">{date}</text>
</g>
</svg>
""" # noqa
| 34.694444 | 108 | 0.6249 |
b1d542377c13c57ca40f0aad4217a57a0a2f3e27 | 5,438 | py | Python | tests/test_filters.py | maniospas/pygrank | a92f6bb6d13553dd960f2e6bda4c041a8027a9d1 | [
"Apache-2.0"
] | 19 | 2019-10-07T14:42:40.000Z | 2022-03-24T15:02:02.000Z | tests/test_filters.py | maniospas/pygrank | a92f6bb6d13553dd960f2e6bda4c041a8027a9d1 | [
"Apache-2.0"
] | 13 | 2021-08-25T12:54:37.000Z | 2022-03-05T03:31:34.000Z | tests/test_filters.py | maniospas/pygrank | a92f6bb6d13553dd960f2e6bda4c041a8027a9d1 | [
"Apache-2.0"
] | 4 | 2019-09-25T09:54:51.000Z | 2020-12-09T00:11:21.000Z | import networkx as nx
import pygrank as pg
import pytest
from .test_core import supported_backends
| 39.693431 | 136 | 0.685914 |
b1d70b532712bd846f1a70f021a50fadff6b4449 | 9,816 | py | Python | src/preprocess.py | vkola-lab/ajpa2021 | 67a76ae184b4c9c40c9bc104c8d87ffa5ea69d91 | [
"MIT"
] | null | null | null | src/preprocess.py | vkola-lab/ajpa2021 | 67a76ae184b4c9c40c9bc104c8d87ffa5ea69d91 | [
"MIT"
] | null | null | null | src/preprocess.py | vkola-lab/ajpa2021 | 67a76ae184b4c9c40c9bc104c8d87ffa5ea69d91 | [
"MIT"
] | null | null | null | import os
import PIL
from PIL import Image
from PIL import Image, ImageStat
PIL.Image.MAX_IMAGE_PIXELS = 10000000000
import numpy as np
import cv2
import openslide
import time
PATCH_SIZE = 224
STRIDE = 224
DOWN_SIZE = 508
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Preprocessing')
parser.add_argument('--path_ori', type=str, default='/scratch2/zheng/ajpa2021-master/data/OSUWMC/', help='path to dataset where images store')
parser.add_argument('--path_mask', type=str, default='/scratch2/zheng/ajpa2021-master/data/OSUWMC_MASK/', help='path to dataset where masks (if possible) store')
parser.add_argument('--m', action='store_true', default=False, help='preprocess masks if possible')
parser.add_argument('--g', action='store_true', default=False, help='preprocess data at global level')
parser.add_argument('--p', action='store_true', default=False, help='preprocess data at patch level')
args = parser.parse_args()
if args.m:
preprocess_mask(args.path_ori, args.path_mask)
elif args.g:
preprocess_global(args.path_ori)
elif args.p:
preprocess_patch(args.path_ori)
| 49.326633 | 165 | 0.599124 |
b1d7b3ea3f8d942998560e953fec761fcb002a45 | 2,433 | py | Python | procgen.py | tredfern/rdl2021-tutorial | 18f992c9c09ab18ee8e2927cf53d707c251d4948 | [
"MIT"
] | null | null | null | procgen.py | tredfern/rdl2021-tutorial | 18f992c9c09ab18ee8e2927cf53d707c251d4948 | [
"MIT"
] | null | null | null | procgen.py | tredfern/rdl2021-tutorial | 18f992c9c09ab18ee8e2927cf53d707c251d4948 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Trevor Redfern
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from __future__ import annotations
from typing import Tuple, Iterator, List, TYPE_CHECKING
import random
import tcod
from game_map import GameMap
import tile_types
if TYPE_CHECKING:
from entity import Entity
| 25.610526 | 95 | 0.628442 |
b1d821122ad47a7fa47c073b2ce27f383a3871d3 | 1,492 | py | Python | examples/plot_simulate_bo.py | pmdaly/supereeg | 750f55db3cbfc2f3430e879fecc7a1f5407282a6 | [
"MIT"
] | 1 | 2018-12-10T01:38:48.000Z | 2018-12-10T01:38:48.000Z | examples/plot_simulate_bo.py | pmdaly/supereeg | 750f55db3cbfc2f3430e879fecc7a1f5407282a6 | [
"MIT"
] | null | null | null | examples/plot_simulate_bo.py | pmdaly/supereeg | 750f55db3cbfc2f3430e879fecc7a1f5407282a6 | [
"MIT"
] | 1 | 2019-06-25T21:34:12.000Z | 2019-06-25T21:34:12.000Z | # -*- coding: utf-8 -*-
"""
=============================
Simulating a brain object
=============================
In this example, we demonstrate the simulate_bo function.
First, we'll load in some example locations. Then we'll simulate 1
brain object specifying a noise parameter and the correlational structure
of the data (a toeplitz matrix). We'll then subsample 10 locations from the
original brain object.
"""
# Code source: Lucy Owen & Andrew Heusser
# License: MIT
import supereeg as se
from supereeg.helpers import _corr_column
import numpy as np
# simulate 100 locations
locs = se.simulate_locations(n_elecs=100)
# simulate brain object
bo = se.simulate_bo(n_samples=1000, sample_rate=100, cov='random', locs=locs, noise =.1)
# sample 10 locations, and get indices
sub_locs = locs.sample(90, replace=False).sort_values(['x', 'y', 'z']).index.values.tolist()
# index brain object to get sample patient
bo_sample = bo[: ,sub_locs]
# plot sample patient locations
bo_sample.plot_locs()
# plot sample patient data
bo_sample.plot_data()
# make model from brain object
r_model = se.Model(data=bo, locs=locs)
# predict
bo_s = r_model.predict(bo_sample, nearest_neighbor=False)
# find indices for reconstructed locations
recon_labels = np.where(np.array(bo_s.label) != 'observed')
# find correlations between predicted and actual data
corrs = _corr_column(bo.get_data().as_matrix(), bo_s.get_data().as_matrix())
# index reconstructed correlations
corrs[recon_labels].mean()
| 27.127273 | 92 | 0.731233 |
b1d8a19c3055e7f0d5aa484065ba5f44c533be7b | 420 | py | Python | poc/classes/AuxSTRuleOfInference.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 4 | 2021-11-08T10:09:46.000Z | 2021-11-13T22:25:46.000Z | poc/classes/AuxSTRuleOfInference.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2020-09-04T13:02:09.000Z | 2021-06-16T07:07:44.000Z | poc/classes/AuxSTRuleOfInference.py | bookofproofs/fpl | 527b43b0f8bb3d459ee906e5ed8524a676ce3a2c | [
"MIT"
] | 1 | 2021-11-08T10:10:12.000Z | 2021-11-08T10:10:12.000Z | from poc.classes.AuxSTBlockWithSignature import AuxSTBlockWithSignature
from poc.classes.AuxSymbolTable import AuxSymbolTable
| 38.181818 | 81 | 0.785714 |
b1d8cc75992fcd005adcc90ea90aa099fbd29007 | 5,031 | py | Python | examples/fmanipulator.py | mateusmoutinho/python-cli-args | 40b758db808e96b3c12a3e0a87b6904660e90d9b | [
"MIT"
] | null | null | null | examples/fmanipulator.py | mateusmoutinho/python-cli-args | 40b758db808e96b3c12a3e0a87b6904660e90d9b | [
"MIT"
] | null | null | null | examples/fmanipulator.py | mateusmoutinho/python-cli-args | 40b758db808e96b3c12a3e0a87b6904660e90d9b | [
"MIT"
] | null | null | null | from io import TextIOWrapper
from typing import IO, Text
from cli_args_system import Args
from cli_args_system import Args, FlagsContent
from sys import exit
HELP = """this is a basic file manipulator to demonstrate
args_system usage with file flags
-------------------flags----------------------------
-join: join the files passed and save in the --out flag
-replace: replace the text on file and save in the --out flag
if there is no out flag, it will save in the same file
-remove: remove the given text in the file
-------------------usage----------------------------
$ python3 fmanipulator.py -join a.txt b.txt -out c.txt
will join the content on a.txt and b.txt, and save in c.txt
$ python3 fmanipulator.py a.txt -replace a b
will replace the char a for char b in the a.txt file
$ python3 fmanipulator.py a.txt -replace a b -out b.txt
will replace the char a for char b and save in b.txt
$ python3 fmanipulator.py a.txt -r test
will remove the text: test in the file a.txt
$ python3 fmanipulator.py a.txt -r test -out b.txt
will remove the text: test in the file a.txt and save in b.txt"""
def exit_with_mensage(mensage:str):
"""kills the aplcation after printing the mensage \n
mensage: the mensage to print"""
print(mensage)
exit(1)
def get_file_text(args:Args) ->str:
"""returns the file text of args[0] (argv[0]) \n
args:The args Object"""
try:
with open(args[0],'r') as f:
return f.read()
except (FileNotFoundError,IndexError):
#if doenst find the file text,kilss the aplcation
exit_with_mensage(mensage='no file')
def get_out_wraper(args:Args,destroy_if_dont_find=True)->TextIOWrapper or None:
"""returns the out wraper of out[0] flag\n
args: The args Object \n
destroy_if_dont_find: if True it will destroy the aplication
if doesnt find out[0] flag"""
out = args.flags_content('out','o','out-file','outfile','out_file')
if out.filled():
return open(out[0],'w')
else:
#check if is to destroy
if destroy_if_dont_find:
exit_with_mensage(mensage='not out file')
def write_text_in_out_file_or_same_file(text:str,args:Args):
"""write text in out flag if exist,
otherwhise write on same file args(0)\n
text: the text to write \n
args: The args Object \n
"""
out = get_out_wraper(args,destroy_if_dont_find=False)
#if out is not passed it replace in the same file
if out is None:
open(args[0],'w').write(text)
else:
#otherwise write in the out file
out.write(text)
def join_files(join:FlagsContent,args:Args):
"""join the files of join flag, in the out flag content
join: the join FlagsContent \n
args: The args Object"""
if len(join) < 2:
print('must bee at least 2 files')
exit(1)
full_text = ''
#make a iteration on join flag
for file_path in join:
try:
#try to open and add in the full text, the content of
#file path
with open(file_path,'r') as file:
full_text+=file.read()
except FileNotFoundError:
print(f'file {file_path} not exist')
exit(1)
#write the changes in the out file
get_out_wraper(args).write(full_text)
def replace_elements(replace:FlagsContent,args:Args):
"""replace in file (args[0) with replace[0] to replace[1]
replace: the replace FlagsContent
args: The args Object
"""
if len(replace) != 2:
exit_with_mensage(mensage='must bee two elements to replace')
#get the file of args[0]
file = get_file_text(args)
#make the replace
replaced_text = file.replace(replace[0],replace[1])
write_text_in_out_file_or_same_file(text=replaced_text,args=args)
def remove_text(remove:FlagsContent,args:Args):
"""this function remove the text in passed in the remove flags \n
remove: the remove FlagsContent \n
args: The args Object """
if not remove.filled():
exit_with_mensage('not text to remove')
text_file = get_file_text(args)
#goes in a iteration in remove flags
for text in remove:
text_file = text_file.replace(text,'')
write_text_in_out_file_or_same_file(text=text_file,args=args)
if __name__ == '__main__':
#construct the args
args = Args(convert_numbers=False)
#for help flag
help = args.flags_content('h','help')
if help.exist():
print(HELP);exit(0)
join = args.flags_content('join','j')
#if join flag exist, call the join_files
if join.exist():
join_files(join,args)
replace = args.flags_content('replace','substitute')
#if replace flag exist call the replace_elements function
if replace.exist():
replace_elements(replace,args)
remove = args.flags_content('r','remove','pop')
#if remove flag exist call the remove_text
if remove.exist():
remove_text(remove,args)
| 29.25 | 79 | 0.650566 |
b1d9ea1eac536432c7382cf5532afaf25887bbe6 | 276 | py | Python | test/test_main.py | KY64/python-starter-template | 6ba734cec57668db6246e85bf0c324ff04359482 | [
"MIT"
] | null | null | null | test/test_main.py | KY64/python-starter-template | 6ba734cec57668db6246e85bf0c324ff04359482 | [
"MIT"
] | 2 | 2021-09-24T12:57:15.000Z | 2021-09-24T19:55:01.000Z | test/test_main.py | KY64/python-starter-template | 6ba734cec57668db6246e85bf0c324ff04359482 | [
"MIT"
] | null | null | null | import unittest
from src.main import substract, add
| 23 | 36 | 0.655797 |
b1da25a95c5118697812a66adf7849f4cbae7363 | 441 | py | Python | usage.py | nicogetaz/cfile | 305a8e5fd133e4fd36d8958ede4627b008d4664a | [
"MIT"
] | 45 | 2017-11-17T04:44:29.000Z | 2022-03-30T12:30:17.000Z | usage.py | nicogetaz/cfile | 305a8e5fd133e4fd36d8958ede4627b008d4664a | [
"MIT"
] | 4 | 2019-03-25T15:43:26.000Z | 2021-02-09T12:26:03.000Z | usage.py | nicogetaz/cfile | 305a8e5fd133e4fd36d8958ede4627b008d4664a | [
"MIT"
] | 18 | 2017-10-12T13:24:00.000Z | 2021-12-09T05:29:54.000Z | import cfile as C
hello = C.cfile('hello.c')
hello.code.append(C.sysinclude('stdio.h'))
hello.code.append(C.blank())
hello.code.append(C.function('main', 'int',).add_param(C.variable('argc', 'int')).add_param(C.variable('argv', 'char', pointer=2)))
body = C.block(innerIndent=3)
body.append(C.statement(C.fcall('printf').add_arg(r'"Hello World!\n"')))
body.append(C.statement('return 0'))
hello.code.append(body)
print(str(hello))
| 40.090909 | 132 | 0.693878 |
b1daaac896ddc4849cfd241b1e6031646b780a40 | 156 | py | Python | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-5/exercicio5-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-5/exercicio5-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-5/exercicio5-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | # Exerccio 5.2 - Modifique o programa para exibir os nmeros de 50 a 100.
i = 50
print('\n')
while i <= 100:
print('%d' % i)
i += 1
print('\n') | 14.181818 | 74 | 0.570513 |
b1dab95c84aa79d34c26b83dc05fe89c1233edca | 14,141 | py | Python | scripts/BaxterArmClient.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | 4 | 2017-11-11T18:16:22.000Z | 2018-11-08T13:31:09.000Z | scripts/BaxterArmClient.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | null | null | null | scripts/BaxterArmClient.py | mkrizmancic/qlearn_baxter | 0498315212cacb40334cbb97a858c6ba317f52a3 | [
"MIT"
] | 2 | 2019-09-04T12:28:58.000Z | 2021-09-27T13:02:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CREDIT:
Main layout of this file was done by Lucija Kopic (graduation thesis)
at Faculty of Electrical Engineering and Computing, University of Zagreb.
"""
import math
from threading import Thread
import actionlib
import tf
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Pose
from baxter_moveit_config.msg import baxterAction, baxterGoal
from baxter_interface import Limb
import Errors
from Util import *
# Comments beginning with "noinspection" are PyCharm auto-generated comments
# noinspection PyMethodMayBeStatic,PyUnusedLocal,PyNoneFunctionAssignment,PyRedundantParentheses,PyTypeChecker
if __name__ == '__main__':
rospy.init_node('Baxter_Client', disable_signals=True)
try:
client = BaxterArmClient()
client.start()
except rospy.ROSInterruptException:
rospy.loginfo('Terminating baxter_client.')
| 42.851515 | 119 | 0.592391 |
b1dbac0f835a64d0cbdbae3be2827e7023234d2d | 107 | py | Python | Factorial.py | conbopbi/TEchMasterHK | 81452694de6b5d46a51bdf1eceb7a4346b93cc85 | [
"MIT"
] | null | null | null | Factorial.py | conbopbi/TEchMasterHK | 81452694de6b5d46a51bdf1eceb7a4346b93cc85 | [
"MIT"
] | null | null | null | Factorial.py | conbopbi/TEchMasterHK | 81452694de6b5d46a51bdf1eceb7a4346b93cc85 | [
"MIT"
] | null | null | null | import math
n=int(input('Nhap mot so:'))
output=math.factorial(n)
print('Giai thua cua ',n,' la: ',output) | 21.4 | 40 | 0.682243 |
b1dbc2b8aaeac4063785ede18a17f1f56b8d7356 | 86 | py | Python | flytekit/__init__.py | flytehub/flytekit | f8f53567594069b29fcd3f99abd1da71a5ef0e22 | [
"Apache-2.0"
] | null | null | null | flytekit/__init__.py | flytehub/flytekit | f8f53567594069b29fcd3f99abd1da71a5ef0e22 | [
"Apache-2.0"
] | null | null | null | flytekit/__init__.py | flytehub/flytekit | f8f53567594069b29fcd3f99abd1da71a5ef0e22 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import flytekit.plugins
__version__ = '0.3.1'
| 17.2 | 38 | 0.813953 |
b1dc9ba592a6ef41c372eaa2cd477c8b9c68c9a0 | 7,289 | py | Python | src/Navigate.py | Qu-Xiangjun/CQU_NK_Research_Project | 8634ce3496801610bc94aa3a424bcd9cff8d042e | [
"MIT"
] | 1 | 2021-04-14T12:52:47.000Z | 2021-04-14T12:52:47.000Z | src/Navigate.py | Qu-Xiangjun/CQU_NK_Research_Project | 8634ce3496801610bc94aa3a424bcd9cff8d042e | [
"MIT"
] | null | null | null | src/Navigate.py | Qu-Xiangjun/CQU_NK_Research_Project | 8634ce3496801610bc94aa3a424bcd9cff8d042e | [
"MIT"
] | null | null | null | """
@Author: Qu Xiangjun
@Time: 2021.01.26
@Describe:
"""
import socket
import time
from threading import Thread
import threading
import numpy as np
# python3.8.0 64python 3232DLL
from ctypes import *
from Navigation_help import *
from Can_frame_help import *
VCI_USBCAN2 = 4 # USBCAN-2AUSBCAN-2CCANalyst-II
STATUS_OK = 1
# CAN
# CAN
CanDLLName = './ControlCAN.dll' # DLL
canDLL = windll.LoadLibrary('./ControlCAN.dll')
# Linuxpython3 python3.8.0.py
#canDLL = cdll.LoadLibrary('./libcontrolcan.so')
| 33.589862 | 89 | 0.522568 |
b1dd89557115038bb1f6354ded5195f9ead07ccf | 233 | py | Python | list02/exer_02.py | pedrolucas27/exercising-python | 4b30bbce6b860fb617baf4600d8da83b68023e82 | [
"MIT"
] | null | null | null | list02/exer_02.py | pedrolucas27/exercising-python | 4b30bbce6b860fb617baf4600d8da83b68023e82 | [
"MIT"
] | null | null | null | list02/exer_02.py | pedrolucas27/exercising-python | 4b30bbce6b860fb617baf4600d8da83b68023e82 | [
"MIT"
] | null | null | null | #Faa um Programa que pea um valor e mostre na tela se o valor positivo ou negativo.
valor = int(input("Dgite um nmero:"))
if valor < 0:
print("O nmero",valor," negativo!")
else:
print("O nmero",valor," positivo!") | 33.285714 | 88 | 0.682403 |
b1de66542e990852570d0825e181d49c32975991 | 48 | py | Python | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | f'{1:.5d} {2:3.5d} {3:3d} {"spam":>20} {4:<#d}'
| 24 | 47 | 0.395833 |
b1dea7e1058a7eee3b72428c420020b2fdd458a2 | 6,346 | py | Python | ansys/dpf/core/operators/math/make_one_on_comp.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 18 | 2021-10-16T10:38:29.000Z | 2022-03-29T11:26:42.000Z | ansys/dpf/core/operators/math/make_one_on_comp.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 79 | 2021-10-11T23:18:54.000Z | 2022-03-29T14:53:14.000Z | ansys/dpf/core/operators/math/make_one_on_comp.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 5 | 2021-11-29T18:35:37.000Z | 2022-03-16T16:49:21.000Z | """
make_one_on_comp
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
| 29.654206 | 89 | 0.578159 |
b1e15b56feda70e36690890e29f9ba4efcc55495 | 8,716 | py | Python | cheshire3/web/www_utils.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 3 | 2015-08-02T09:03:28.000Z | 2017-12-06T09:26:14.000Z | cheshire3/web/www_utils.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 5 | 2015-08-17T01:16:35.000Z | 2015-09-16T21:51:27.000Z | cheshire3/web/www_utils.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 6 | 2015-05-17T15:32:20.000Z | 2020-04-22T08:43:16.000Z | #
# Program: www_utils.py
# Version: 0.10
# Description:
# Generic search functions for Cheshire 3
#
# Language: Python
# Author: John Harrison <john.harrison@liv.ac.uk>
# Date: 19 December 2007
#
# Copyright: © University of Liverpool 2005-2007
#
# Version History:
# 0.01 - 13/04/2005 - JH - Ported from Cheshire II compatible scripts
# 0.02 - 14/06/2005 - JH - Improved CGI encoding/decoding
# - Mixed phrase and plain term searching handled
# (e.g. wyndham "science fiction" triffids)
# 0.03 - 17/10/2005 - JH - File logger class added
# keeps all logs for a single request in mem until complete, then flushes to file
# - html_encode() added to allow display of raw SGML in the browser
# 0.04 - 26/01/2006 - JH - Modifications to cgiReplacements
# 0.05 - 31/01/2006 - JH - More tweaks to cgiReplacement characters
# - Speech marks handled sensibly in exact or /string searches
# 0.06 - 27/02/2006 - JH - Booleans extracted first in generate_cqlQuery() - debugs 'NOT' searches
# 0.07 - 04/01/2007 - JH - Check for noComponents moved out of generic generate_cqlQuery function
# - Allow limit to collection
# 0.08 - 25/01/2007 - JH - Mods to allow date searching - decode < > etc from form
# 0.09 - 07/09/2007 - JH - renamed: wwwSearch.py --> www_utils.py
# 0.10 - 19/12/2007 - JH - handling of form character set implemented
# - can handle multiple indexes to be specified in fieldidx
# multiple indexes combine with or/relevant/proxinfo
#
import re
import time
import urlparse
from urllib import unquote
def parse_url(url):
u"""Parse a URL to split it into its component parts."""
bits = urlparse.urlsplit(url)
print bits
transport = bits[0]
uphp = bits[1].split('@')
user = ''
passwd = ''
if len(uphp) == 2:
(user, passwd) = uphp.pop(0).split(':')
hp = uphp[0].split(':')
host = hp[0]
if len(hp) == 2:
port = int(hp[1])
else:
# Require subclass to default
port = 0
dirname, filename = bits[2].rsplit('/', 1)
# params = map(lambda x: x.split('='), bits[3].split('&'))
params = [x.split('=') for x in bits[3].split('&')]
try:
params = dict(params)
except ValueError:
params = {}
anchor = bits[4]
return (transport, user, passwd, host, port, dirname, filename, params, anchor)
phraseRe = re.compile('".*?"')
cgiReplacements = {
#'%': '%25',
'+': '%2B',
' ': '%20',
'<': '%3C',
'>': '%3E',
'#': '%23',
'{': '%7B',
'}': '%7D',
'|': '%7C',
'"': '%22',
"'": '%27',
'^': '%5E',
'~': '%7E',
'[': '%5B',
']': '%5D',
'`': '%60',
';': '%3B',
'/': '%2F',
'?': '%3F',
':': '%3A',
'@': '%40',
'=': '%3D',
'&': '%26',
'$': '%24'
#'=': "%3D",
#'\n\t': "%0A",
#',': "%2C",
#'\'': "%27",
#'/': "%2F",
#'"': "%22",
#'@': "%40",
#'#': "%23",
#'{': "%7B",
#'}': "%7D",
#'[': "%5B",
#']': "%5D",
#'\\': "%5C",
#';': "%3B"
}
#- end cgi_encode
#- end cgi_decode
rawSgmlReplacements = {'<': '<'
,'>': '>'
,"'": '''
,'"': '"'
}
#- end html_encode
#- end multiReplace
#- end read_file()
#- end write_file()
#- end class FileLogger ---------------------------------------------------
| 27.495268 | 128 | 0.519504 |
b1e3076f57089de6bfe7eeff45ef0b802cbca8fa | 5,057 | py | Python | superviselySDK/supervisely_lib/geometry/bitmap_base.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 40 | 2019-05-05T08:08:18.000Z | 2021-10-17T00:07:58.000Z | superviselySDK/supervisely_lib/geometry/bitmap_base.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 8 | 2019-06-13T06:00:08.000Z | 2021-07-24T05:25:33.000Z | superviselySDK/supervisely_lib/geometry/bitmap_base.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 6 | 2019-07-30T06:36:27.000Z | 2021-06-03T11:57:36.000Z | # coding: utf-8
import numpy as np
from supervisely_lib.geometry.constants import DATA, ORIGIN
from supervisely_lib.geometry.geometry import Geometry
from supervisely_lib.geometry.point_location import PointLocation
from supervisely_lib.geometry.rectangle import Rectangle
from supervisely_lib.imaging.image import resize_inter_nearest, restore_proportional_size
# TODO: rename to resize_bitmap_and_origin
def translate(self, drow, dcol):
translated_origin = self.origin.translate(drow, dcol)
return self.__class__(data=self.data, origin=translated_origin)
def fliplr(self, img_size):
flipped_mask = np.flip(self.data, axis=1)
flipped_origin = PointLocation(row=self.origin.row, col=(img_size[1] - flipped_mask.shape[1] - self.origin.col))
return self.__class__(data=flipped_mask, origin=flipped_origin)
def flipud(self, img_size):
flipped_mask = np.flip(self.data, axis=0)
flipped_origin = PointLocation(row=(img_size[0] - flipped_mask.shape[0] - self.origin.row), col=self.origin.col)
return self.__class__(data=flipped_mask, origin=flipped_origin)
def scale(self, factor):
new_rows = round(self._data.shape[0] * factor)
new_cols = round(self._data.shape[1] * factor)
mask = self._resize_mask(self.data, new_rows, new_cols)
origin = self.origin.scale(factor)
return self.__class__(data=mask, origin=origin)
| 40.782258 | 120 | 0.686969 |
b1e369fb08913d130b89cec1f5483abc5621f780 | 851 | py | Python | src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 63 | 2020-04-20T16:31:16.000Z | 2022-03-29T01:05:35.000Z | src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 5 | 2020-04-21T11:31:39.000Z | 2022-03-24T13:42:56.000Z | src/secml/ml/classifiers/gradients/mixin_classifier_gradient_sgd.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 8 | 2020-04-21T09:16:42.000Z | 2022-02-23T16:28:43.000Z | """
.. module:: CClassifierGradientSGDMixin
:synopsis: Mixin for SGD classifier gradients.
.. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from secml.array import CArray
from secml.ml.classifiers.gradients import CClassifierGradientLinearMixin
| 26.59375 | 80 | 0.680376 |
b1e811f35a54f18497c53bcd1b57a0f2b90a05d1 | 455 | py | Python | src/epyodbc/constructs/base_class.py | kingspp/epyodbc | 601ea659c243e7128f946fed264a095f82b25f8a | [
"MIT"
] | null | null | null | src/epyodbc/constructs/base_class.py | kingspp/epyodbc | 601ea659c243e7128f946fed264a095f82b25f8a | [
"MIT"
] | null | null | null | src/epyodbc/constructs/base_class.py | kingspp/epyodbc | 601ea659c243e7128f946fed264a095f82b25f8a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
| **@created on:** 9/4/20,
| **@author:** prathyushsp,
| **@version:** v0.0.1
|
| **Description:**
|
|
| **Sphinx Documentation Status:**
"""
from abc import ABCMeta, abstractmethod
import json
| 18.2 | 61 | 0.586813 |
b1e81c6b8160d531e4161ae5339f729307b428e9 | 1,891 | py | Python | madlib4.py | Leorodr501/Mad-Libs | 0d619908ad4b1f73365e86345dd4023f9aa7f72d | [
"MIT"
] | null | null | null | madlib4.py | Leorodr501/Mad-Libs | 0d619908ad4b1f73365e86345dd4023f9aa7f72d | [
"MIT"
] | null | null | null | madlib4.py | Leorodr501/Mad-Libs | 0d619908ad4b1f73365e86345dd4023f9aa7f72d | [
"MIT"
] | null | null | null | adjective = input("Enter an adjective: ")
adjective = input("Enter another adjective: ")
noun = input("Enter a noun: ")
noun = input("Enter another noun: ")
plural_noun = input("Enter a plural noun: ")
game = input("Enter a game: ")
plural_noun = input("Enter another plural noun: ")
verb = input("Enter a verb ending in 'ing': ")
verb = input("Enter another verb ending in 'ing': ")
plural_noun = input("Enter another plural noun: ")
verb = input("Enter another verb ending in 'ing': ")
noun = input("Enter another noun: ")
plant = input("Enter a plant; ")
body_part = input("Enter a part of the body: ")
place = input("Enter a place: ")
verb = input("Enter another verb ending in 'ing': ")
adjective = input("Enter another adjective: ")
number = input("Enter a number <100: ")
plural_noun = input("Enter another plural noun: ")
print("A vacation is when you take a trip to some " + adjective + " place"),
print(" with you " + adjective + " family.")
print("Usually you go to some place thatis near a/an " + noun ),
print(" or up on a/an " + noun + ".")
print("A good vacation place is one where you can ride " + plural_noun ),
print(" or play " + game + " or go hunting for " + plural_noun + ".")
print("I like to spend my time " + verb + " or " + verb + ".")
print("When parents go on a vacation, they spend their time eating three " + plural_noun + " a day,")
print(" and fathers play golf, and mothers sit around " + verb + ".")
print("Last summer, my little brother fell in a/an " + noun + " and got poison " + plant ),
print(" all over his " + body_part + ".")
print("My family is going to go to (the) " + place + ", and I will practice " + verb + ".")
print("Parents need vacations more than kids because parents are always very " + adjective + " and because they have to work " + number + " hours every day all year making enough " + plural_noun + " to pay for the vacation.")
| 51.108108 | 225 | 0.668429 |
b1ea115bb0ded34de6025f440ccc8b24a383a6be | 5,854 | py | Python | proto_5/connect/__main__.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | proto_5/connect/__main__.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | 6 | 2021-03-19T12:06:56.000Z | 2022-03-12T00:23:09.000Z | proto_5/connect/__main__.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | import argparse
import os
import logging
import sys
from types import SimpleNamespace
from .util.print_tree import print_tree
def is_connect_script_file(filename):
    """Return True when the final path component of *filename* is exactly CONNECT."""
    _head, tail = os.path.split(filename)
    return tail == "CONNECT"
def is_card_file(filename):
    """Return True for card files: any ``.yaml`` path, or the exact name CONNECT."""
    if filename == "CONNECT":
        return True
    return filename.endswith('.yaml')
def is_connect_file(filename):
    """Return True if *filename* is recognised by any connect-file predicate."""
    return is_card_file(filename) or is_connect_script_file(filename)
def parse_connect_files(card_dir):
    """Recursively collect the paths of all connect files under *card_dir*."""
    return [
        os.path.join(dirpath, name)
        for dirpath, _dirnames, filenames in os.walk(card_dir)
        for name in filenames
        if is_connect_file(name)
    ]
class CardTree:
    """In-memory tree of cards keyed by directory path components.

    NOTE(review): methods reference ``self.DirNode``, ``self.CardNode`` and a
    module-level ``Card`` that are not defined in this fragment -- presumably
    declared elsewhere (or lost in an edit); confirm before relying on them.
    """

    class Node:
        """Base tree node; stores only its display name."""

        def __init__(self, name):
            self._name = name

    def __init__(self):
        # Root directory node representing the card directory itself.
        self._root_node = self.DirNode(".")

    def _ensure_child_dir_node(self, parent_dir, nodename):
        """Return the child dir node named *nodename*, creating it if absent."""
        if nodename not in parent_dir.children:
            parent_dir.children[nodename] = self.DirNode(nodename)
        return parent_dir.children[nodename]

    def _get_card_dir_node(self, card_dir):
        """Walk (and lazily build) the directory-node chain for *card_dir*."""
        # [1:] drops the first path component -- presumably the root that
        # _root_node already represents; confirm against callers.
        dir_parts = card_dir.split(os.sep)[1:]
        path_node = self._root_node
        for dirname in dir_parts:
            path_node = self._ensure_child_dir_node(path_node, dirname)
        return path_node

    def add_card_node(self, card):
        """Insert *card* under its directory node, keyed by its card name."""
        dir_node = self._get_card_dir_node(card.card_dir)
        dir_node.children[card.card_name] = self.CardNode(card)

    def get_card(self, card_path):
        """Look up a card node by full card path; return None when missing."""
        dir_node = self._get_card_dir_node(Card.to_card_dir(card_path))
        return dir_node.children.get(Card.to_card_name(card_path), None)
def apply_connect_script(card_dir, script_file, card_tree):
    """Parse *script_file* and apply each ``relate`` command to *card_tree*.

    Script format (as implemented below): a line whose stripped content is
    exactly ``relate`` introduces a command; the next three lines are its
    arguments.  All other lines are skipped.
    """
    # NOTE(review): the handle is never closed; `with open(...)` would be safer.
    script_lines = open(script_file).readlines()
    line_index = 0
    relate_cmds = []
    while line_index < len(script_lines):
        line = script_lines[line_index]
        if line.strip() == "relate":
            # Collect the three argument lines that follow the marker.
            cmd = tuple([arg.strip()
                for arg in script_lines[line_index+1: line_index+4]])
            relate_cmds.append(cmd)
            line_index = line_index + 4
        else:
            line_index = line_index + 1
    # NOTE(review): `data_to_card_path` and `apply_relate_cmd` are not defined
    # in this fragment -- presumably imported/defined elsewhere; confirm.
    script_card_dir = data_to_card_path(card_dir, os.path.dirname(script_file))
    for relate_cmd in relate_cmds:
        apply_relate_cmd(script_card_dir, relate_cmd, card_tree)
# Command-line entry point: one positional data directory plus a debug flag.
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", help="The directory containing our data")
parser.add_argument("--log-debug", help="Log debug information",
    default=False, action="store_true")
# NOTE(review): `main` is not defined in this fragment -- presumably the
# driver that builds the card tree from args.data_dir; confirm upstream.
sys.exit(main(parser.parse_args()))
b1ea3252892ba4485f0734ae364981f2c8a20c18 | 35,322 | py | Python | arcadia.py | ntw1103/arcadia | bfefc433a97c13739c9c1b329e0b9af63dba9d1b | [
"BSD-2-Clause"
] | 1 | 2018-12-11T04:36:53.000Z | 2018-12-11T04:36:53.000Z | arcadia.py | ntw1103/arcadia | bfefc433a97c13739c9c1b329e0b9af63dba9d1b | [
"BSD-2-Clause"
] | null | null | null | arcadia.py | ntw1103/arcadia | bfefc433a97c13739c9c1b329e0b9af63dba9d1b | [
"BSD-2-Clause"
] | 1 | 2018-12-11T04:37:35.000Z | 2018-12-11T04:37:35.000Z | # -*- coding: utf-8 -*-
#!/usr/bin/env python2
import fileinput
import json
import random
import socket
import thread #
import urllib2 #
import collections
import time
import traceback
import re
from decimal import Decimal as D
from datetime import datetime
import MySQLdb #
import math
username = 'Arcadia2'
oper_key = 'ee82aeb94474fcc21f05061043cb4' #This is not the actual key
#weather_api="http://api.openweathermap.org/data/2.5/weather?q="
#weather_api="http://api.openweathermap.org/data/2.5/find?q="
weather_api="http://api.openweathermap.org/data/2.5/find?APPID=c61b24ac1edeb6837b377df=" # This is not the actual key
API_KEY="&APPID=c61b24ac1edeb6837b377df" #This is not the actual key
CHANNEL = "#main"
# TODO: @Arcadia.msg_register(name: str, requires_auth: bool, secret: bool, floodrule: str)
# @Arcadia.data_register(....)
# would inspect function for name and arguments, and get docstring, to add as a response to !help.
# would add the decoratee function as a msg or data callback.
# IDEA: floodrule is similar to unrealircd +f channel flag: [*]<attempts>:<seconds>
# TODO: Log !sudo commands to #opers (unless it's issued from #opers), or to file, or to /helpops or /globops or /chatops
# Failed attempts should be logged unobtrusively, to file
# IDEA: Allow user to pipe the output of a command to another channel they are in.
"""class clue:
db=database()
db.connect()
def get_location:
db.connect()
db.run("SELECT * FROM `locations` ORDER BY rand() LIMIT 1")
db.disconnect()
def add_location:
def get_weapon:
def add_weapon:"""
def ago(past_timestamp):
    """
    Return a human-readable string saying how long ago *past_timestamp* was.

    The result uses at most two units out of days/hours/minutes/seconds,
    for example:
        5h30m ago
        3d5h ago
        2m30s ago

    Fix over the original: a timestamp less than one second in the past
    (or in the future) now yields "0s ago" instead of the malformed " ago"
    that joining an empty list produced.
    """
    time_diff = int(time.time() - past_timestamp)
    if time_diff <= 0:
        # Guard: "".join([]) + " ago" would otherwise return " ago".
        return "0s ago"
    parts = []
    for unit_seconds, unit_name in ((86400, "d"), (3600, "h"), (60, "m"), (1, "s")):
        count, time_diff = divmod(time_diff, unit_seconds)
        if count > 0:
            parts.append(str(count))
            parts.append(unit_name)
        if len(parts) >= 4:  # cap at two value/unit pairs, as documented
            break
    return "".join(parts) + " ago"
"""class user_tracker:
db = database()
db.connect()
#m = db.escape(message)
def check_name(name):
clean_name = self.db.escape(name)
#print "Checking: " + word[:-4]
query = "SELECT COUNT(*) from `users` WHERE `username` = '"+clean_name+"'"
row = db.run(query)
#print row[0]
if(row[0] ==1):
return 1
else:
return 0
def insert_name(name):
clean_name = self.db.escape(name)
query = "INSERT INTO `users` VALUES('"+clean_name+"','9')"
row = db.run(query)"""
"""class trivia_engine:
db = database()
db.connect()
#m = db.escape(message)
state = 0
question_id =0
players =[]
def restart(self):
self.players =[]
self.state =0
def question(self,message,nick):
if self.state != 0:
return "A question was already asked: " +self.state
m = self.db.escape(message)
m = m.replace ( '\r\n', '' )
parts = m.split(' ')
bleh = parts[1::]
#one_line = ' '.join([str(i) for i in bleh])[:-4:]
#question = "Who was luke skywalker's father?"
db = database()
db.connect()
m = db.escape(message)
query ="SELECT count(*)FROM `trivia`;"
row = db.run(query)
count = row[0]
next_id = str(random.randint(0,count-1))
self.question_id = next_id
print("Next_ID:"+ next_id)
query ="SELECT `question` FROM `trivia` WHERE id="+next_id+";"
row = db.run(query)
self.state = row[0] # set the state to the current question.
return self.state+" "+nick
def answer(self,message,nick):
if self.state ==0:
return "The game hasn't started yet."
else:
if "darth vader" in message:
return "Good job."
else:
db = database()
db.connect()
m = db.escape(message)
query ="SELECT `id` from `trivia` WHERE `answer` LIKE '%"+m[3:-4:]+"%'"
print query
row = db.run(query)
if row is None:
return "I'm sorry, wrong answer."
print("id: " + str(row[0]) +"Current question id: "+ str(self.question_id))
if(str(row[0]) == str(self.question_id)):
self.state =0
return "Good Job!"
else:
return "Wrong answer"
return "I'm sorry, wrong answer."
"""
db = database()
db.connect()
word = db.escape(dirty_word)
#print "Checking: " + word[:-4]
query = "SELECT COUNT(*) from `badwords`"
query = query + " WHERE `words` = '" + word[:-4]+"'"
query = query + " OR `words` = '"+word[:-4]+".'"
query = query + " OR `words` = '"+word[:-4]+",'"
query = query + " OR `words` = '"+word[:-4]+"!'"
row = db.run(query)
#print row[0]
if(row[0] ==1):
return 1
else:
return 0
print('In get_verse')
db = database()
db.connect()
m = db.escape(message.msgtxt)
m = m.replace ('\r\n', '').replace("\\r\\n", '')
parts = m.split(' ')
for part in parts:
print(part)
print "Length: " + str(len(parts))
#parts.pop(1)
is_range = all(i.isdigit() for i in parts[-3:])
if is_range:
chapt = parts[-3]
start = parts[-2]
end = parts[-1]
book = " ".join(parts[1:-3])
else:
chapt = parts[-2]
start = end = parts[-1]
book = " ".join(parts[1:-2])
start = int(start)
end = int(end)
response = ""
if end - start > 7:
end = start + 7
response += "[Limited verses: %s to %s] " % (start, end)
print "Start: " + str(start) + " , END " + str(end)
for i in range(start,end+1):
query = "SELECT `VerseText` from `BibleKJV` WHERE `book`='" +book+"' AND `Chapter`='"+chapt+"' AND `Verse`='"+str(i)+"'"
print query
row = db.run(query)
if row is None:
return 'Verse(s) not found. Format example (looking up 1 Corinthians 3:18-20): ".bible 1 Corinthians 3 18 20" -- Another (looking up John 3:16): ".bible John 3 16"'
if len(row) > 0:
print "Multiverse"
for i in row:
print "verse: " + i
response += " " + i
else:
response = row[0]
print "response" + response
return response
#def procss_data(server,data,message,nick,b,trivia,last):
def whois(bot, target_nick, cb, target_codes=None):
    """
    :param bot:
        An Arcadia instance.
    :param target_nick:
        The nick we have to whois. WHOIS responses from the server pertaining to
        any other nick are ignored.
    :param cb:
        A callable taking two strings, first is the WHOIS RPL numeric code,
        second is the string the server put at the end.
        If the server responds:
        ```
        :domainname.com 671 username1 username2 :is using a Secure Connection
        ```
        `cb` gets:
        ```
        cb("671", ":is using a Secure Connection")
        ```
    :param target_codes:
        The WHOIS RPL numeric codes we should watch out for, others will be ignored.
        If argument is not passed, we'll watch out for all known WHOIS numerics.
    """
    if target_codes is None:
        target_codes = [
            "311", # hostname (start)
            "379", # modes
            "378", # "is connecting from"
            "307", # "is identified"
            "319", # channels
            "312", # server in the net user is connected to
            "313", # server operator
            "310", # available for help
            "671", # using ssl
            "320", # is root
            "317", # idle time and signon time
            "318", # end
            "401" # no such user
        ]
    # NOTE(review): `whois_cb` is not defined anywhere in this fragment -- the
    # adapter that filters raw server lines down to `cb` appears to be missing
    # (possibly lost in an edit).  As written this line raises NameError.
    bot.callbacks.append(whois_cb)
    # The nick is sent twice; presumably to request the extended reply that
    # includes idle time (RPL 317) -- confirm against the server's WHOIS docs.
    bot.server.sendPacket(" WHOIS {0} {0}".format(target_nick))
# ---------------------------------------------------------------------------
# Interactive operator console (Python 2 syntax: print statements, raw_input,
# the legacy `thread` module).  `Arcadia`, `oper_key` and `CHANNEL` are
# defined earlier in this file.
# ---------------------------------------------------------------------------
running=1
storage = []
arcadia = Arcadia()
try:
    # Run the bot's network loop on a background thread so the console below
    # stays responsive.
    thread.start_new_thread( arcadia.Arcadia_run, () )
except:
    traceback.print_exc()
    print "Error: unable to start thread"
try:
    while 1:
        user_input = raw_input("enter something: ")
        if user_input.startswith("#EVAL#"):
            # SECURITY NOTE(review): evaluates arbitrary console input.
            # Acceptable only because this is the operator's own terminal;
            # never feed it remote input.
            user_input = user_input[len("#EVAL#"):]
            try:
                try:
                    result = eval(user_input)
                    print "Eval result:"
                    print result
                    print "-----"
                except SyntaxError:
                    # Statements (not expressions) raise SyntaxError in
                    # eval(); fall back to exec for those.
                    exec(user_input)
                    print "(Executed)"
            except:
                traceback.print_exc()
        else:
            if "/join" in user_input:
                arcadia.server.sendPacket ( 'JOIN %s' % (user_input[6::]) )
                print("trying to")
            elif user_input == "/start":
                arcadia.server.sendPacket ( 'OPER ntw1103 '+ oper_key)
                brain = arcadia.brain
                # NOTE(review): `global` at module level is a no-op; storage
                # is already a module-level name.
                global storage
                storage = brain.load()
                brain.clean(storage)
            else:
                # Default: relay the console line to the main channel.
                arcadia.server.sendPacket ( 'PRIVMSG %s : %s' % (CHANNEL, ' ' +user_input) )
                print user_input
except KeyboardInterrupt:
    running = "stopped"
print "done"
| 43.233782 | 188 | 0.46832 |
b1edfb7e986ee60ac0da1a869a4e400f7398c3fe | 1,492 | py | Python | app/display_modules/ags/tests/test_tasks.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | app/display_modules/ags/tests/test_tasks.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | app/display_modules/ags/tests/test_tasks.py | MetaGenScope/metagenscope-server | 609cd57c626c857c8efde8237a1f22f4d1e6065d | [
"MIT"
] | null | null | null | """Test suite for Average Genome Size tasks."""
from app.display_modules.ags.ags_tasks import boxplot, ags_distributions
from app.samples.sample_models import Sample
from app.tool_results.microbe_census.tests.factory import create_microbe_census
from tests.base import BaseTestCase
| 38.25641 | 79 | 0.635389 |
b1ee21b42e49b37ad9977b9259b77f5d847cdf1c | 491 | py | Python | tsim/serialization/__init__.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | 2 | 2021-04-24T06:48:13.000Z | 2022-01-25T02:38:44.000Z | tsim/serialization/__init__.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | null | null | null | tsim/serialization/__init__.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | null | null | null | """Global serialization configuration."""
from importlib import import_module
import os
def configure_serialization():
    """Import every non-private sibling module and run its ``configure()``."""
    package_dir = os.path.dirname(os.path.abspath(__file__))
    for filename in os.listdir(package_dir):
        if filename.startswith('_') or not filename.endswith('.py'):
            continue
        stem, _ext = os.path.splitext(filename)
        import_module(f'.{stem}', 'tsim.serialization').configure()
b1eeecae89c5a75d2089876662644291654428d3 | 4,678 | py | Python | windows/winobject/handle.py | 1orenz0/PythonForWindows | f3de7b528b020b45ac6a871c975006fc1db1c3b0 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T16:50:31.000Z | 2021-06-22T16:50:31.000Z | windows/winobject/handle.py | killvxk/PythonForWindows | b253bc5873e7d97087ed22f2753b51fc6880ec18 | [
"BSD-3-Clause"
] | null | null | null | windows/winobject/handle.py | killvxk/PythonForWindows | b253bc5873e7d97087ed22f2753b51fc6880ec18 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T12:58:27.000Z | 2021-05-12T12:58:27.000Z | import os
import ctypes
import windows
from windows import winproxy
from windows.generated_def import windef
from windows.generated_def.winstructs import *
# Remove this ?
current_process_pid = os.getpid()
def enumerate_handles():
    """Return the system-wide handle table as a list of handle entries.

    Queries NtQuerySystemInformation with information class 16
    (SystemHandleInformation per the NT native API).  The first call uses a
    deliberately small buffer purely to learn the required size -- its
    expected length-mismatch failure is swallowed -- then the query is
    repeated with the reported size plus 0x1000 bytes of slack to absorb
    handles created in between the two calls.
    """
    size_needed = ULONG()
    size = 0x1000
    buffer = ctypes.c_buffer(size)
    try:
        # Probe call: expected to fail while filling `size_needed` with the
        # byte count actually required.
        winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
    except WindowsError as e:
        pass
    size = size_needed.value + 0x1000
    buffer = ctypes.c_buffer(size)
    winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
    # NOTE(review): `x` is never used -- candidate for removal.
    x = SYSTEM_HANDLE_INFORMATION.from_buffer(buffer)
    return list(_GENERATED_SYSTEM_HANDLE_INFORMATION.from_buffer_copy(buffer[:size_needed.value]).Handles)
| 33.898551 | 150 | 0.665669 |
b1efcf80cebb01dff50a1e2a45ff4368cec1958a | 4,428 | py | Python | metrics.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | 1 | 2020-07-25T11:27:17.000Z | 2020-07-25T11:27:17.000Z | metrics.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | null | null | null | metrics.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | null | null | null | import traceback
import numpy as np
from matplotlib import pyplot, pyplot as plt
from sklearn.metrics import (
mean_squared_error,
median_absolute_error,
roc_curve,
auc,
f1_score,
precision_recall_curve,
r2_score,
)
from sklearn.metrics import confusion_matrix
import column_labeler as clabel
from math import sqrt
| 28.203822 | 80 | 0.613144 |
b1f0550c1843ad31adf65e89fa5211ad4acfccfc | 1,328 | py | Python | tests/iterators/pull_test.py | SSouik/pyutil | d2250fb585679e49eb9056a3051bf239a58c2e8b | [
"MIT"
] | null | null | null | tests/iterators/pull_test.py | SSouik/pyutil | d2250fb585679e49eb9056a3051bf239a58c2e8b | [
"MIT"
] | 21 | 2022-01-05T04:51:33.000Z | 2022-01-28T05:45:57.000Z | tests/iterators/pull_test.py | SSouik/pyutil | d2250fb585679e49eb9056a3051bf239a58c2e8b | [
"MIT"
] | null | null | null | import pytest
from pyutil import pull
| 20.75 | 54 | 0.582831 |
b1f203c60f7518be9918994e126f2868a0f76ed4 | 30,681 | py | Python | main.py | RohiBaner/Beijing-Air-Quality-Prediction | 4ec823ceacef1b61e1c1e5689a97a1335e4b5867 | [
"MIT"
] | 3 | 2019-09-23T10:04:05.000Z | 2021-03-10T12:12:28.000Z | main.py | RohiBaner/Beijing-Air-Quality-Prediction | 4ec823ceacef1b61e1c1e5689a97a1335e4b5867 | [
"MIT"
] | null | null | null | main.py | RohiBaner/Beijing-Air-Quality-Prediction | 4ec823ceacef1b61e1c1e5689a97a1335e4b5867 | [
"MIT"
] | null | null | null | ''' --------------------------------------------IMPORTING NECESSARY LIBRARIES------------------------------------------- '''
import numpy as np
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from itertools import cycle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn'
''' ---------------------------FUNCTIONS TO FIND NEAREST DISTANCE BETWEEN ALL NECESSARY STATIONS------------------------ '''
# Function to find nearest station between two points using Haversine Distance
# Find nearest AQ to AQ station
# Find nearest GW to GW station
# Find nearest OBW to OBW station
# Find nearest AQ to OBW station
# Find nearest AQ to GW station
# Function to calculate the model error via SMAPE
''' ------------------------------------------TRAIN: AIR QUALITY PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Air Quality Dataset!')
# Read all the air quality datasets
aq_2017 = pd.read_csv("airQuality_201701-201801.csv")
aq_2018 = pd.read_csv("airQuality_201802-201803.csv")
aq_2018a = pd.read_csv("aiqQuality_201804.csv")
# Renaming the header of April AQ dataset to match the other AQ datasets
aq_2018a.rename(columns={'station_id': 'stationId', 'time': 'utc_time', 'PM25_Concentration':'PM2.5'\
,'PM10_Concentration':'PM10','NO2_Concentration':'NO2'\
,'CO_Concentration':'CO', 'O3_Concentration':'O3'\
,'SO2_Concentration':'SO2'}, inplace=True)
aq_2018a= aq_2018a.drop(columns=['id'], axis=1)
# Merge all AQ datasets together into a single dataframe
aq_train = aq_2017.append(aq_2018, ignore_index=True)
aq_train = aq_train.append(aq_2018a, ignore_index=True)
# Convert the entire 'utc_time' column into the same format
aq_train["utc_time"] = pd.to_datetime(aq_train["utc_time"])
# Delete unnecessary dataframes to save space
del(aq_2017)
del(aq_2018)
del(aq_2018a)
# Set the time column as the index of the dataframe
aq_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the AQ dataframe
min_date=aq_train.index.min()
max_date=aq_train.index.max()
# Drop any duplicates present in the AQ dataframe
aq_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the AQ station location file and find nearest station for each AQ station
# This dataset was created by us
station_aq = pd.read_csv("Beijing_AirQuality_Stations.csv")
station_aq["nearest_station"] = station_aq.apply(lambda row: near_aq_to_aq(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
time_hours = pd.DataFrame({"date": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all AQ stations and the above dataframe
aq_all_time = pd.merge(time_hours.assign(key=0), station_aq.assign(key=0), on='key').drop('key', axis=1)
# Join the AQ dataset with the dataframe containing all the timestamps for each AQ station
aq_train1 = pd.merge(aq_train, aq_all_time, how='right', left_on=['stationId','utc_time'], right_on = ['station','date'])
aq_train1 = aq_train1.drop('stationId', axis=1)
aq_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest AQ station (same time interval)
aq_train_copy = aq_train1.copy()
aq_train_copy = aq_train_copy.drop(['nearest_station','longitude', 'latitude', 'type'], axis=1)
aq_train_copy.rename(columns={'PM2.5': 'n_PM2.5','PM10': 'n_PM10', "NO2":"n_NO2","CO":"n_CO","O3":"n_O3",
"SO2":"n_SO2", "date":"n_date", "station":"n_station" }, inplace=True)
# Merge original AQ data and the copy AQ data to get all attributes of a particular AQ station and its nearest AQ station
aq_train2 = pd.merge(aq_train1, aq_train_copy, how='left', left_on=['nearest_station','date'], right_on = ['n_station','n_date'])
# Sort the final dataframe based on AQ station and then time
aq_train2 = aq_train2.sort_values(by=['n_station', 'date'], ascending=[True,True])
aq_train2 = aq_train2.reset_index(drop=True)
# Drop all unncessary attributes
aq_train2.drop(['n_station', 'longitude', 'latitude', 'n_date'], axis=1, inplace=True)
# Create two attributes - month and hour
aq_train2['month'] = pd.DatetimeIndex(aq_train2['date']).month
aq_train2['hour'] = pd.DatetimeIndex(aq_train2['date']).hour
# Fill in missing values of attributes with their corresponding values in the nearest AQ station (within same time)
aq_train2['PM10'].fillna(aq_train2['n_PM10'], inplace=True)
aq_train2['PM2.5'].fillna(aq_train2['n_PM2.5'], inplace=True)
aq_train2['NO2'].fillna(aq_train2['n_NO2'], inplace=True)
aq_train2['CO'].fillna(aq_train2['n_CO'], inplace=True)
aq_train2['O3'].fillna(aq_train2['n_O3'], inplace=True)
aq_train2['SO2'].fillna(aq_train2['n_SO2'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
aq_train2[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']] = aq_train2.groupby(["station","month","hour"])[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']].transform(lambda x: x.fillna(x.mean()))
# Create final AQ dataset after dropping all unnecessary attributes
aq_train_final = aq_train2.drop(['type','nearest_station','n_PM2.5','n_PM10','n_NO2','n_CO','n_O3','n_SO2'],axis=1)
# Delete unnecessary dataframes to save space
del(aq_train1)
del(aq_train2)
del(aq_train_copy)
del(aq_all_time)
print('Done!')
print('-'*50)
''' ------------------------------------------TRAIN: GRID DATASET PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Grid Weather Dataset!')
# Read all the grid weather train datasets
gw_2017 = pd.read_csv("gridWeather_201701-201803.csv")
gw_2018 = pd.read_csv("gridWeather_201804.csv")
# Renaming the headers of the GW data to match each other
gw_2017.rename(columns={'stationName': 'station_id', 'wind_speed/kph': 'wind_speed'}, inplace=True)
gw_2018.rename(columns={'station_id':'station_id', 'time':'utc_time'}, inplace=True)
# Merge all GW train datasets into a single dataframe
gw_train = gw_2017.append(gw_2018, ignore_index=True)
gw_train = gw_train.drop(columns=['id','weather'], axis=1)
# Delete unnecessary dataframes to save space
del(gw_2017)
del(gw_2018)
# Set the time column as the index of the dataframe
gw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the GW dataframe
min_date = gw_train.index.min()
max_date = gw_train.index.max()
# Drop any duplicates present in the GW dataframe
gw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the GW station location file and find nearest station for each GW station
gw_station = pd.read_csv("Beijing_grid_weather_station.csv", header=None, names=['station_id','latitude','longitude'])
gw_station["nearest_station"] = gw_station.apply(lambda row: near_gw_to_gw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
gw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all GW stations and the above dataframe
gw_all_time = pd.merge(gw_time_hours.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
gw_all_time['time'] = gw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the GW dataset with the dataframe containing all the timestamps for each GW station
gw_train1 = pd.merge(gw_train, gw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
gw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest GW station (same time interval)
gw_train_copy = gw_train1.copy()
gw_train_copy.drop(['nearest_station','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
gw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original GW data and the copy GW data to get all attributes of a particular GW station and its nearest GW station
gw_train2 = pd.merge(gw_train1, gw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on GW station and then time
gw_train2 = gw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True])
gw_train2 = gw_train2.reset_index(drop=True)
# Drop all unncessary attributes
gw_train2.drop(['n_station_id', 'n_time','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
# Create two attributes - month and hour
gw_train2['month'] = pd.DatetimeIndex(gw_train2['time']).month
gw_train2['hour'] = pd.DatetimeIndex(gw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest GW station (within same time)
gw_train2['humidity'].fillna(gw_train2['n_humidity'], inplace=True)
gw_train2['pressure'].fillna(gw_train2['n_pressure'], inplace=True)
gw_train2['temperature'].fillna(gw_train2['n_temperature'], inplace=True)
gw_train2['wind_speed'].fillna(gw_train2['n_wind_speed'], inplace=True)
gw_train2['wind_direction'].fillna(gw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
gw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = gw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final GW dataset after dropping all unnecessary attributes
gw_train_final = gw_train2.drop(['nearest_station','n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(gw_train1)
del(gw_train2)
del(gw_train_copy)
del(gw_all_time)
print('Done!')
print('-'*50)
''' -----------------------------------TRAIN: OBSERVED WEATHER DATASET PREPROCESSING------------------------------------ '''
print('Preprocessing and cleaning the train Observed Weather Dataset!')
# Read all the observed weather train datasets
obw_2017 = pd.read_csv("observedWeather_201701-201801.csv")
obw_2018 = pd.read_csv("observedWeather_201802-201803.csv")
obw_2018a = pd.read_csv("observedWeather_201804.csv")
# The April file names its timestamp column 'time'; rename to match the others.
obw_2018a.rename(columns={'time': 'utc_time'}, inplace=True)
# Read the time stamp in the April observed weather data in the same format as the other datasets
#obw_2018a['utc_time'] = pd.to_datetime(obw_2018a['utc_time'], format='%d-%m-%Y %H:%M:%S')
obw_2018a['utc_time'] = obw_2018a['utc_time'].astype(str)
# Merge all OBW train datasets into a single dataframe
# NOTE(review): DataFrame.append was removed in pandas 2.0; this code assumes
# a pre-2.0 pandas -- confirm the pinned version.
obw_train = obw_2017.append(obw_2018, ignore_index=True)
obw_train = obw_train.append(obw_2018a, ignore_index=True)
obw_train.drop(['id','weather'],axis=1, inplace=True) # Drop unnecessary columns
# Delete unnecessary dataframes to save space
del(obw_2017)
del(obw_2018)
del(obw_2018a)
# Set the time column as the index of the dataframe
obw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the OBW dataframe
# (timestamps are strings; min/max here is lexicographic, which matches
# chronological order assuming 'YYYY-MM-DD HH:MM:SS' format -- see the
# literals used for filtering later in this script)
min_date = obw_train.index.min()
max_date = obw_train.index.max()
# Drop any duplicates present in the OBW dataframe
obw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Build the OBW station location table (one row per station)
obw_station = obw_train[["station_id","latitude","longitude"]]
obw_station = obw_station.drop_duplicates().dropna()
obw_station = obw_station.reset_index(drop=True)
# Find nearest station for each OBW station
# (near_obw_to_obw is a helper defined elsewhere in this file, not shown here)
obw_station["nearest_station"] = obw_station.apply(lambda row: near_obw_to_obw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
obw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all OBW stations and the above dataframe
# (the temporary constant 'key' column makes pd.merge act as a cross join)
obw_all_time = pd.merge(obw_time_hours.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
obw_all_time['time'] = obw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the OBW dataset with the dataframe containing all the timestamps for each OBW station
# (how='right' keeps every station/hour combination, leaving NaNs where no
# observation exists; those gaps are filled further below)
obw_train1 = pd.merge(obw_train, obw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
obw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest OBW station (same time interval)
obw_train_copy = obw_train1.copy()
obw_train_copy.drop(['nearest_station','longitude_x', 'latitude_x','longitude_y', 'latitude_y'], axis=1, inplace=True)
# Prefix the copy's weather columns with 'n_' so they can sit next to the
# station's own columns after the self-merge below.
obw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
                               "wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
                               "time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original OBW data and the copy OBW data to get all attributes of a particular OBW station and its nearest OBW station
obw_train2 = pd.merge(obw_train1, obw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on OBW station and then time
obw_train2 = obw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True] )
obw_train2.drop(['n_station_id', 'n_time'], axis=1, inplace=True)
obw_train2 = obw_train2.reset_index(drop=True)
# Create two attributes - month and hour
obw_train2['month'] = pd.DatetimeIndex(obw_train2['time']).month
obw_train2['hour'] = pd.DatetimeIndex(obw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest OBW station (within same time)
# NOTE(review): column-level fillna(..., inplace=True) is chained assignment;
# fine on classic pandas, unreliable under copy-on-write -- confirm version.
obw_train2['humidity'].fillna(obw_train2['n_humidity'], inplace=True)
obw_train2['pressure'].fillna(obw_train2['n_pressure'], inplace=True)
obw_train2['temperature'].fillna(obw_train2['n_temperature'], inplace=True)
obw_train2['wind_speed'].fillna(obw_train2['n_wind_speed'], inplace=True)
obw_train2['wind_direction'].fillna(obw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
obw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = obw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final OBW dataset after dropping all unnecessary attributes
obw_train_final = obw_train2.drop(['longitude_x', 'latitude_x','longitude_y', 'latitude_y','nearest_station',\
                                   'n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(obw_train1)
del(obw_train2)
del(obw_train_copy)
del(obw_all_time)
print('Done!')
print('-'*50)
''' --------------------------MERGING ALL TRAINING DATASETS AND GETTING READY FOR MODEL TRAINING------------------------- '''
# Timestamps are compared as strings below, so normalise the AQ dates first.
aq_train_final['date'] = aq_train_final['date'].astype(str)
print('Getting the training model ready!')
# Convert wind speed in grid weather data from kmph to m/s (observed weather data is already in m/s)
gw_train_final['wind_speed'] = (gw_train_final['wind_speed']*5)/18
# Make all start and end times equal for the training datasets
gw_train_final = gw_train_final[gw_train_final['time']>='2017-01-30 16:00:00']
aq_train_final = aq_train_final[aq_train_final['date']>='2017-01-30 16:00:00']
# Replace noise values with previous hours value in both Observed and Grid datasets
# (999999 readings are treated as bad and forward-filled from the prior hour)
obw_train_final.replace(999999,np.NaN,inplace=True)
obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = obw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
gw_train_final.replace(999999,np.NaN,inplace=True)
gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']] = gw_train_final[['humidity', 'pressure','temperature','wind_direction','wind_speed']].fillna(method='ffill')
# Replace wind direction with the noise value '999017' when wind speed is less than 0.5m/s
# This value will then be replaced with data from the nearest observed or grid station for the same timestamp
obw_train_final.loc[obw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
gw_train_final.loc[gw_train_final.wind_speed < 0.5, 'wind_direction'] = 999017
# Find nearest OBW and GW station for every AQ station for proper joining of attributes
# (drop the old intra-network nearest_station columns first so the new
# cross-network lookups can be added cleanly)
obw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_obw"] = station_aq.apply(lambda row: near_aq_to_obw(row['latitude'], row['longitude']), axis=1)
gw_station.drop(['nearest_station'],axis=1, inplace=True)
station_aq["near_gw"] = station_aq.apply(lambda row: near_aq_to_gw(row['latitude'], row['longitude']), axis=1)
# Merge the AQ training dataset with the nearest OBW and GW stations for every time stamp
aq_train1 = pd.merge(aq_train_final, station_aq, how='left', on='station')
aq_train1.drop(['type','nearest_station'], axis=1, inplace=True)
# Append all GW data attributes with the AQ training set based on nearest GW station and time stamp
aq_train2 = pd.merge(aq_train1, gw_train_final, how='left', left_on=['near_gw','date'], right_on=['station_id','time'])
# Remove unnecessary columns and rename columns to prepare for merging of OBW data
aq_train2.drop(['station_id','time','month_y','hour_y'],axis=1, inplace=True)
# Suffix the grid-weather columns with _gw so they can coexist with the
# observed-weather (_obw) columns merged in next.
aq_train2 = aq_train2.rename(columns={'month_x': 'month_aq', 'hour_x': 'hour_aq', 'longitude':'longitude_aq',\
                                      'latitude':'latitude_aq', 'humidity': 'humidity_gw','pressure': 'pressure_gw',\
                                      'wind_direction': 'wind_dir_gw', 'wind_speed':'wind_speed_gw',\
                                      'temperature': 'temperature_gw'})
# Append all OBW data attributes with the AQ training set based on nearest OBW station and time stamp
TRAIN = pd.merge(aq_train2, obw_train_final, how='left', left_on=['near_obw','date'], right_on=['station_id','time'])
TRAIN.drop(['station_id','time','month','hour'],axis=1, inplace=True)
TRAIN = TRAIN.rename(columns={'humidity': 'humidity_obw','pressure': 'pressure_obw',\
                              'wind_direction': 'wind_dir_obw', 'wind_speed':'wind_speed_obw',\
                              'temperature': 'temperature_obw'})
# Final clean of all 999017 noise from the OBW and GW for wind direction
# (each network borrows the other's wind direction where its own was flagged)
TRAIN.loc[TRAIN.wind_dir_gw == 999017, 'wind_dir_gw'] = TRAIN['wind_dir_obw']
TRAIN.loc[TRAIN.wind_dir_obw == 999017, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
# Some observed data points are very outliers (probably wrongly noted by humans)
# Replace physically implausible observed values with the grid value instead.
TRAIN.loc[TRAIN.humidity_obw > 100, 'humidity_obw'] = TRAIN['humidity_gw']
TRAIN.loc[TRAIN.pressure_obw > 1040, 'pressure_obw'] = TRAIN['pressure_gw']
TRAIN.loc[TRAIN.temperature_obw > 50, 'temperature_obw'] = TRAIN['temperature_gw']
TRAIN.loc[TRAIN.wind_dir_obw > 360, 'wind_dir_obw'] = TRAIN['wind_dir_gw']
TRAIN.loc[TRAIN.wind_speed_obw > 20, 'wind_speed_obw'] = TRAIN['wind_speed_gw']
# Sort the final train set based on station and then timestamp
# (the export step at the end of the script relies on this ordering)
TRAIN = TRAIN.sort_values(by=['station', 'date'], ascending=[True,True])
print('Ready to be trained by the model!')
print('-'*50)
''' ----------------------TEST DATA: CLEANING, PREPROCESSING AND GETTING READY FOR MODEL-------------------------------- '''
print('Getting the testing data ready for the model!')
# Read the AQ test dataset for test data - This dataset was found from the Beijing meteorological datasets
# This dataset helps in getting the values for the NO2, SO2 and CO attributes for the test data timestamps
test_aq = pd.read_csv('MAY_AQ.csv')
# Normalise the timestamp format to the 'YYYY-MM-DD HH:MM:SS' strings used
# everywhere else in this script.
test_aq['Time'] = pd.to_datetime(test_aq['Time'], format='%d-%m-%Y %H:%M')
test_aq['Time'] = test_aq['Time'].astype(str)
# Merge the dataset with nearest GW and OBW stations with the AQ test dataset
test1 = pd.merge(test_aq, station_aq, how='left', left_on='station_id', right_on='station').drop(['station','longitude','latitude','type','nearest_station','AQI'],axis=1)
# Find time stamp range for test data: from 1st May 00:00 to 2nd May 23:00
test1.set_index("Time", inplace = True)
min_date_test = test1.index.min()
max_date_test = test1.index.max()
test1.reset_index(inplace=True)
# Grid Test Data Preprocessing
test_gw = pd.read_csv('gridWeather_20180501-20180502.csv') # Read GW test data
test_gw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all GW stations
# (same constant-key cross-join trick as in the training sections)
test_gw1 = pd.DataFrame({"time": pd.date_range(min_date_test, max_date_test, freq='H')})
test_gw2 = pd.merge(test_gw1.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
test_gw2['time'] = test_gw2['time'].astype(str) # Convert time in correct format
gw_test_final = pd.merge(test_gw2, test_gw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Observed Test Data Preprocessing
test_obw = pd.read_csv('observedWeather_20180501-20180502.csv') # Read OBW test data
test_obw.drop(['id','weather'],axis=1, inplace=True)
# Create new dataframe with all timestamps for all OBW stations
# BUGFIX: this frame previously spanned min_date..max_date -- the whole
# *training* period -- instead of the two-day test window used by the GW
# test block above. The oversized frame only bloated obw_test_final (the
# later left-merge into TEST ignores unmatched rows), but the test window
# is what was clearly intended.
test_obw1 = pd.DataFrame({"time": pd.date_range(min_date_test, max_date_test, freq='H')})
test_obw2 = pd.merge(test_obw1.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
test_obw2['time'] = test_obw2['time'].astype(str) # Convert time in correct format
obw_test_final = pd.merge(test_obw2, test_obw, how='left', left_on=['station_id','time'], right_on = ['station_id','time'])
# Join AQ Test dataframe with test GW dataframe
test_aq1 = pd.merge(test1, gw_test_final, how='left', left_on=['near_gw','Time'], right_on=['station_id','time'])
test_aq1.drop(['station_id_y','latitude','longitude'],axis=1, inplace=True)
# Rename certain columns to prepare for joining the OBW test dataframe
# (suffix conventions mirror the TRAIN table: _gw = grid, _obw = observed)
test_aq1 = test_aq1.rename(columns={'station_id_x':'station_id_aq',\
                                    'humidity': 'humidity_gw',\
                                    'pressure': 'pressure_gw',\
                                    'wind_direction': 'wind_dir_gw',\
                                    'wind_speed':'wind_speed_gw',\
                                    'temperature': 'temperature_gw'})
# Join the updated AQ Test dataframe with test OBW dataframe
TEST = pd.merge(test_aq1, obw_test_final, how='left', left_on=['near_obw','time'], right_on=['station_id','time'])
TEST.drop(['station_id','latitude','longitude','time'],axis=1, inplace=True)
# Rename certain columns
TEST = TEST.rename(columns={'humidity': 'humidity_obw',\
                            'pressure': 'pressure_obw',\
                            'wind_direction': 'wind_dir_obw',\
                            'wind_speed':'wind_speed_obw',\
                            'temperature': 'temperature_obw'})
# Create attributes for month and hour - to be taken as input parameters
TEST['month'] = pd.DatetimeIndex(TEST['Time']).month
TEST['hour'] = pd.DatetimeIndex(TEST['Time']).hour
# Remove missing values based on nearest GW data (as very few values are missing in OBW data)
# The sort here also fixes the row order the submission file depends on.
TEST = TEST.sort_values(by=['station_id_aq', 'Time'], ascending=[True,True])
TEST['humidity_obw'] = TEST['humidity_obw'].fillna(TEST['humidity_gw'])
TEST['temperature_obw'] = TEST['temperature_obw'].fillna(TEST['temperature_gw'])
TEST['pressure_obw'] = TEST['pressure_obw'].fillna(TEST['pressure_gw'])
TEST['wind_speed_obw'] = TEST['wind_speed_obw'].fillna(TEST['wind_speed_gw'])
TEST['wind_dir_obw'] = TEST['wind_dir_obw'].fillna(TEST['wind_dir_gw'])
# Take care of noise 999017 when wind speed is less than 0.5m/s
TEST.loc[TEST.wind_dir_gw == 999017, 'wind_dir_gw'] = TEST['wind_dir_obw']
TEST.loc[TEST.wind_dir_obw == 999017, 'wind_dir_obw'] = TEST['wind_dir_gw']
print('Ready to be tested by the model!')
''' ---------------------------------TRAINING THE MODEL AND PREDICTING REQUIRED OUTPUT----------------------------------- '''
# Train the model with only April, May and June's data
# (the test window is in early May -- see the test section above)
TRAIN = TRAIN.loc[TRAIN['month_aq'].isin([4,5,6])]
# Extract output columns for training the model
Y = TRAIN[['PM2.5','PM10','O3']].values
# Input parameters for the model
X = TRAIN.drop(['PM2.5','PM10','O3','latitude_aq','longitude_aq'], axis=1)
# Create new features for the model
X['AQ'] = (X['SO2']*X['NO2']*X['CO'])
X['wind'] = X['wind_dir_gw']/X['wind_speed_gw']
# Final input parameters after feature engineering
X_train = X[['station','month_aq','hour_aq','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One Hot encode the station column and normalize the entire input data
# NOTE(review): OneHotEncoder(categorical_features=...) only exists in
# scikit-learn < 0.22 -- confirm the pinned version before upgrading.
le = LabelEncoder()
ohe = OneHotEncoder(categorical_features=[0])
scaler = MinMaxScaler()
X_train[:,0] = le.fit_transform(X_train[:,0])
X_train = ohe.fit_transform(X_train).toarray()
X_train_sc = scaler.fit_transform(X_train)
# Use Random Forest Regressor to predict the values
model_rf = RandomForestRegressor(random_state=42)
# Use K Fold Cross Validation to check the efficiency of the model
# (smape is a helper defined elsewhere in this file, not shown here)
print('-------Printing the Cross Validation SMAPE errors-------')
kf = KFold(n_splits=10, shuffle=True, random_state=42)
for train_index, test_index in kf.split(X_train_sc):
    x_train, x_val = X_train_sc[train_index], X_train_sc[test_index]
    y_train, y_val = Y[train_index], Y[test_index]
    model_rf.fit(x_train, y_train)
    pred_val = model_rf.predict(x_val)
    print(smape(y_val,pred_val))
# Get the Test data ready for the model by following the above steps
TEST['AQ'] = (TEST['CO']*TEST['SO2']*TEST['NO2'])
TEST['wind'] = TEST['wind_dir_gw']/TEST['wind_speed_gw']
# Final test data input features
X_test = TEST[['station_id_aq','month','hour','temperature_gw','AQ','humidity_gw','wind','pressure_gw']].values
# One hot encode and normalize similar to train data
# (reuse the encoders/scaler fitted on the training data -- transform only)
X_test[:,0] = le.transform(X_test[:,0])
X_test = ohe.transform(X_test).toarray()
X_test_sc = scaler.transform(X_test)
# Predict the results after training the model on the whole final train dataset
model_rf.fit(X_train_sc,Y)
pred = model_rf.predict(X_test_sc)
''' --------------------------EXPORTING THE PREDICTED RESULTS INTO THE SPECIFIED FORMAT---------------------------------- '''
# Take an explicit copy so the column assignments below do not trigger
# pandas' SettingWithCopyWarning (and keep working under copy-on-write).
index_test = TEST[['station_id_aq']]
index_test = index_test.copy()
# Hour offsets 0-47: one per forecast hour over the two-day test window.
# (Renamed from 'index'/'index1' to avoid shadowing the built-in name.)
hour_offsets = list(range(0,48)) # Create a list with all the values in the range (each for one hour over a period of two days)
# Turn the above numbers into a continuous cycle
hour_cycle = cycle(hour_offsets)
index_test['index'] = [next(hour_cycle) for i in range(len(index_test))]
# Create a column with all 35 AQ station names and all time indexes
index_test['test_id'] = index_test['station_id_aq']+'#'+index_test['index'].astype(str)
# Extract the required column and join it with the predicted output
# Both test and train data are sorted by station name and time - hence predicted output will be in arranged order
index_test.drop(['index','station_id_aq'],axis=1, inplace=True)
index_test1 = index_test.values
output = np.concatenate((index_test1, pred), axis=1)
np.savetxt('submission.csv', output, delimiter=',', header='test_id,PM2.5,PM10,O3', fmt='%s,%f,%f,%f', comments='')
print('The code is complete - please find your results in the "submission.csv" file!')
print("--- %s seconds ---" % (time.time() - start_time))
'''-------------------------------------------------------END-------------------------------------------------------------'''
| 57.671053 | 249 | 0.702031 |
b1f22c9adbe507763be9a3e8cffbcec89c6b45a4 | 234 | py | Python | examples/SortTimeDemo.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | null | null | null | examples/SortTimeDemo.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 4 | 2019-11-07T12:32:19.000Z | 2020-07-19T14:04:44.000Z | examples/SortTimeDemo.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 5 | 2019-12-04T15:56:55.000Z | 2022-01-14T06:19:18.000Z | import random
import time

# Parse the element count with int() instead of eval(): eval() executes
# arbitrary Python typed at the prompt (code-injection risk) and is never
# needed just to read an integer.
n = int(input("Enter the number of elements to sort: "))
lst = list(range(n))
random.shuffle(lst)
startTime = time.time()
lst.sort()
print("Sort time in Python is", int(time.time() - startTime), "seconds") | 23.4 | 72 | 0.705128 |
b1f274b6140c852afcbc6bb5b744a886df0fb5fe | 102 | py | Python | cloudflare/__init__.py | darylyu/cloudflare | be12ac9fa614a7078a89d7036f3a99e3165bd99d | [
"BSD-3-Clause"
] | 1 | 2015-05-03T12:51:44.000Z | 2015-05-03T12:51:44.000Z | cloudflare/__init__.py | darylyu/cloudflare | be12ac9fa614a7078a89d7036f3a99e3165bd99d | [
"BSD-3-Clause"
] | 2 | 2015-12-17T00:47:01.000Z | 2016-04-04T14:24:14.000Z | cloudflare/__init__.py | darylyu/cloudflare | be12ac9fa614a7078a89d7036f3a99e3165bd99d | [
"BSD-3-Clause"
] | 3 | 2015-09-13T22:43:54.000Z | 2016-04-02T19:44:21.000Z | # -*- coding: utf-8 -*-
__author__ = 'Daryl Yu'
__email__ = 'dyu@fastmail.com'
__version__ = '0.0.2'
| 17 | 30 | 0.627451 |
b1f5f177dec08c59abe32983e95271dfac01dbdf | 1,239 | py | Python | tests/conftest.py | andrewsayre/pysmartapp | 5c3be867584d7e82d00b5998295b20bd12eccf94 | [
"MIT"
] | 10 | 2019-02-07T20:07:10.000Z | 2020-12-30T20:29:32.000Z | tests/conftest.py | andrewsayre/pysmartapp | 5c3be867584d7e82d00b5998295b20bd12eccf94 | [
"MIT"
] | 1 | 2021-12-05T15:00:13.000Z | 2021-12-05T15:00:13.000Z | tests/conftest.py | andrewsayre/pysmartapp | 5c3be867584d7e82d00b5998295b20bd12eccf94 | [
"MIT"
] | 2 | 2020-10-17T20:20:45.000Z | 2021-09-28T12:58:50.000Z | """Define common test configuration."""
import pytest
from pysmartapp.dispatch import Dispatcher
from pysmartapp.smartapp import SmartApp, SmartAppManager
| 26.361702 | 66 | 0.67958 |
b1f8c5ac672b61358853182ee48a06e86cda8b9c | 294 | py | Python | to_do_list.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | to_do_list.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | to_do_list.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | todo_list = ["" for i in range(11)]
# Keep reading "priority-task" entries until the 'End' sentinel appears;
# the numeric priority doubles as the slot index in todo_list.
while True:
    command = input()
    if command == 'End':
        break
    fields = command.split('-')
    todo_list[int(fields[0])] = fields[1]
# Keep only the slots that were actually filled in.
final_list = [entry for entry in todo_list if entry != ""]
print(final_list) | 21 | 46 | 0.629252 |
b1fa447d2310139f7a8d64aba2e5e1395276502b | 6,035 | py | Python | run.py | Tracymbone/password_locker | 346a3c770174d20fe24720fd4875f5f4e222d582 | [
"MIT"
] | null | null | null | run.py | Tracymbone/password_locker | 346a3c770174d20fe24720fd4875f5f4e222d582 | [
"MIT"
] | null | null | null | run.py | Tracymbone/password_locker | 346a3c770174d20fe24720fd4875f5f4e222d582 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.8
from socket import create_server
from users import Users
from credentials import Credentials
if __name__ == '__main__':
main() | 28.875598 | 136 | 0.471914 |
b1faa38cc22b54eb622228d21323a509bcdbceb8 | 2,346 | py | Python | menu_info/menu_details.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 1 | 2019-06-21T17:13:35.000Z | 2019-06-21T17:13:35.000Z | menu_info/menu_details.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 18 | 2019-06-25T00:48:11.000Z | 2019-07-11T17:52:24.000Z | menu_info/menu_details.py | averytorres/WazHack-Clone | e53e9b1b64f3828b20e45d4eeaafcdedf9bc6fda | [
"Unlicense"
] | 1 | 2019-06-21T17:08:23.000Z | 2019-06-21T17:08:23.000Z | from game_states import GameStates
from action_consumer.available_actions_enum import Action
| 39.1 | 132 | 0.728048 |
b1fbf96a2060ff4635f4538c1011d07667a95b78 | 270 | py | Python | sciris/sc_version.py | optimamodel/sciris | fc0148fd9352e443a1c9b1a790275bc2904b30b1 | [
"MIT"
] | null | null | null | sciris/sc_version.py | optimamodel/sciris | fc0148fd9352e443a1c9b1a790275bc2904b30b1 | [
"MIT"
] | 4 | 2018-03-27T21:47:13.000Z | 2018-08-28T00:50:00.000Z | sciris/sc_version.py | optimamodel/sciris | fc0148fd9352e443a1c9b1a790275bc2904b30b1 | [
"MIT"
] | 1 | 2018-09-05T07:57:39.000Z | 2018-09-05T07:57:39.000Z | '''
Version and license information.
'''
__all__ = ['__version__', '__versiondate__', '__license__']
__version__ = '1.3.3'
__versiondate__ = '2022-01-16'
__license__ = f'Sciris {__version__} ({__versiondate__}) 2014-2022 by the Sciris Development Team'
| 27 | 107 | 0.692593 |
b1fc50952b7cf799deab08fe85f0849c2cbaf2f0 | 1,154 | py | Python | tests/unit/fileserver/test_hgfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/unit/fileserver/test_hgfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/fileserver/test_hgfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
# Import Salt libs
import salt.fileserver.hgfs as hgfs
| 28.85 | 72 | 0.618718 |
b1fca680c3a855f104f3ad48d1f63a988374a6e5 | 26 | py | Python | constants.py | harryrobertwright/plutus | 7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3 | [
"MIT"
] | null | null | null | constants.py | harryrobertwright/plutus | 7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3 | [
"MIT"
] | null | null | null | constants.py | harryrobertwright/plutus | 7a0d9f1474982d1bb66d7b018f2ce7e28aab7bc3 | [
"MIT"
] | null | null | null | INTERVALS = [
"1h",
]
| 6.5 | 13 | 0.423077 |
b1fd1af131dc102c96ef990fe42c7c22c4e492de | 1,273 | py | Python | networks/model_factory.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | 5 | 2021-09-07T13:33:45.000Z | 2022-02-12T18:56:45.000Z | networks/model_factory.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | null | null | null | networks/model_factory.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | 4 | 2021-09-25T06:56:38.000Z | 2022-03-24T18:06:08.000Z | import torch.nn as nn
from networks.resnet import resnet18
from networks.shufflenet import shufflenet_v2_x1_0
from networks.cifar_net import Net
from networks.mlp import MLP
| 31.04878 | 91 | 0.639434 |
b1ff61ec8eb947ca5da56f846d344d35e22df2db | 5,536 | py | Python | main.py | MarySueTeam/Video_Maker | a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671 | [
"MIT"
] | 1 | 2022-03-04T09:25:11.000Z | 2022-03-04T09:25:11.000Z | main.py | MarySueTeam/Video_Maker | a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671 | [
"MIT"
] | null | null | null | main.py | MarySueTeam/Video_Maker | a3bbdeb49b5f887d5f8dbc3b4e57b955d4ee3671 | [
"MIT"
] | 1 | 2022-01-25T16:19:25.000Z | 2022-01-25T16:19:25.000Z | from manim import *
from TTS.TTS import get_mp3_file
from utils import cut, get_duration, deal_text
import time
| 48.99115 | 129 | 0.62211 |
b1ff7639399b3c6d47d30f81feb9b3ec46b39e02 | 106 | py | Python | Discord Bots/Discord Bot/test_Bot.py | SeymoTheDev/skittles-stuff | f9eba3efd0577045085418391b7154f3fd121f70 | [
"MIT"
] | null | null | null | Discord Bots/Discord Bot/test_Bot.py | SeymoTheDev/skittles-stuff | f9eba3efd0577045085418391b7154f3fd121f70 | [
"MIT"
] | null | null | null | Discord Bots/Discord Bot/test_Bot.py | SeymoTheDev/skittles-stuff | f9eba3efd0577045085418391b7154f3fd121f70 | [
"MIT"
] | null | null | null | from unittest import TestCase
| 15.142857 | 30 | 0.641509 |
5901159e3f1532199cb8c881801333e8fca64f93 | 1,518 | py | Python | sevenbridges/models/compound/tasks/batch_by.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 46 | 2016-04-27T12:51:17.000Z | 2021-11-24T23:43:12.000Z | sevenbridges/models/compound/tasks/batch_by.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 111 | 2016-05-25T15:44:31.000Z | 2022-02-05T20:45:37.000Z | sevenbridges/models/compound/tasks/batch_by.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 37 | 2016-04-27T12:10:43.000Z | 2021-03-18T11:22:28.000Z | from sevenbridges.meta.resource import Resource
# noinspection PyUnresolvedReferences,PyProtectedMember
| 27.107143 | 77 | 0.554018 |
5904c0dfbd55d07ecdda6b598e8aefd81056a978 | 2,764 | py | Python | server/core/tests/test_models.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | server/core/tests/test_models.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | server/core/tests/test_models.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@email.com', password='testpass'):
    """Return a freshly created user with default test credentials."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
| 32.904762 | 76 | 0.646527 |
5906bff03b00e79f2660983fe9997b9cd354f2bc | 30 | py | Python | tests/database/__init__.py | dinosv/cobib | 15342de37336a51d87c8f04f8430d0621da69a5c | [
"MIT"
] | 9 | 2020-09-27T19:22:35.000Z | 2022-02-27T20:00:58.000Z | tests/database/__init__.py | dinosv/cobib | 15342de37336a51d87c8f04f8430d0621da69a5c | [
"MIT"
] | null | null | null | tests/database/__init__.py | dinosv/cobib | 15342de37336a51d87c8f04f8430d0621da69a5c | [
"MIT"
] | 2 | 2020-12-07T15:26:03.000Z | 2021-10-03T18:04:57.000Z | """coBib's database tests."""
| 15 | 29 | 0.633333 |
590721cca2145e8661012d52208da3bcc5dbe108 | 230 | py | Python | Semester-1/Lab8/src/lab_A.py | Vipul-Cariappa/Collage-CS-Lab | 0a0193df9575a4e69b60759d974423202ddf544b | [
"MIT"
] | null | null | null | Semester-1/Lab8/src/lab_A.py | Vipul-Cariappa/Collage-CS-Lab | 0a0193df9575a4e69b60759d974423202ddf544b | [
"MIT"
] | null | null | null | Semester-1/Lab8/src/lab_A.py | Vipul-Cariappa/Collage-CS-Lab | 0a0193df9575a4e69b60759d974423202ddf544b | [
"MIT"
] | 2 | 2022-03-04T14:06:15.000Z | 2022-03-16T17:32:10.000Z | # program to display first n lines in a text file
n = int(input("Enter number of lines: "))
# Echo the first n lines of the file; readline() returns "" at EOF, so
# running past the end just prints nothing (same as the original loop).
with open("note.txt") as file:
    for _ in range(n):
        print(file.readline(), end="")
| 19.166667 | 49 | 0.5 |
59079f538bc9e256df53c65451be92c382f11c5c | 23,420 | py | Python | eplusplus/view/mainWindow.py | labeee/EPlusPlus | da6cbd60575146a8f165fb72e165919cd83ddc24 | [
"MIT"
] | 1 | 2018-02-06T17:41:12.000Z | 2018-02-06T17:41:12.000Z | eplusplus/view/mainWindow.py | labeee/EPlusPlus | da6cbd60575146a8f165fb72e165919cd83ddc24 | [
"MIT"
] | null | null | null | eplusplus/view/mainWindow.py | labeee/EPlusPlus | da6cbd60575146a8f165fb72e165919cd83ddc24 | [
"MIT"
] | 1 | 2021-06-29T02:49:59.000Z | 2021-06-29T02:49:59.000Z | import os
import sys
import ctypes
import webbrowser
from .lineEdit import LineEdit
from .dialogWithCheckBox import DialogWithCheckBox
from eplusplus.controller import ActorUser
from eplusplus.exception import ColumnException, NoIdfException, InstallException, NoCsvException
from PyQt5.QtCore import QSize, Qt, QRect
from PyQt5.QtGui import QPixmap, QIcon, QIntValidator
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QRadioButton
from PyQt5.QtWidgets import QGridLayout, QFileDialog, QMessageBox, QApplication
from PyQt5.QtWidgets import QButtonGroup, QLineEdit, QAction, QMenuBar
##
## @brief This class implements the main window of the eplusplus
##        application. The UI uses PyQt to create and configure
## all the components. Also, besides the components like
## labels, radio buttons, buttons and line text, the main
##        window has an actorUser that represents the controller, to call
## all the methods implemented in the logic of the program.
## | 40.589255 | 101 | 0.608839 |
5907d7fbfcc198ea821785faf5ae482c8f858484 | 4,555 | py | Python | CHAPTER 11 (search trees)/red_black_trees_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | CHAPTER 11 (search trees)/red_black_trees_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | CHAPTER 11 (search trees)/red_black_trees_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | from tree_map_class import *
| 46.958763 | 88 | 0.528211 |
59083cdbd1613168bb0ded29e8cc254a35bff318 | 5,170 | py | Python | my_diary_data_structures.py | koechkevin/myDiary | c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb | [
"Apache-2.0"
] | null | null | null | my_diary_data_structures.py | koechkevin/myDiary | c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb | [
"Apache-2.0"
] | null | null | null | my_diary_data_structures.py | koechkevin/myDiary | c5f48fa04a5f8c2bce9f1580c0f92f3f0d5f9bcb | [
"Apache-2.0"
] | 1 | 2018-11-04T09:48:46.000Z | 2018-11-04T09:48:46.000Z | from functools import wraps
import datetime
from flask import jsonify, Flask, request, session
from my_class import ExternalFunctions
app = Flask(__name__)
app.config["SECRET_KEY"] = 'kkkoech'
user_details = dict()
diary_entries = dict()
def on_session(t):
return decorator
if __name__ == '__main__':
app.run(port=5555, debug=True)
| 38.014706 | 100 | 0.624178 |
5909eb773cf91122abfbd155ab1ef7779d77f23a | 26 | py | Python | micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
] | 121 | 2020-12-16T20:31:37.000Z | 2022-03-21T20:32:43.000Z | micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
] | 24 | 2021-03-13T00:04:00.000Z | 2022-03-21T17:28:11.000Z | micro-benchmark-key-errs/snippets/parameters/imported_assigned_call/to_import.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
] | 19 | 2021-03-23T10:58:47.000Z | 2022-03-24T19:46:50.000Z | const1 = "a"
const2 = "b"
| 8.666667 | 12 | 0.538462 |
5909f08bda2ad877f9982af2cd854a38d7dd516a | 13,029 | py | Python | intake_sdmx.py | dr-leo/intake_sdmx | dccd51e6ce4aa352fba0a0c25dfac82148acd1e3 | [
"Apache-2.0"
] | null | null | null | intake_sdmx.py | dr-leo/intake_sdmx | dccd51e6ce4aa352fba0a0c25dfac82148acd1e3 | [
"Apache-2.0"
] | 3 | 2021-05-29T19:46:36.000Z | 2022-01-15T14:15:22.000Z | intake_sdmx.py | dr-leo/intake_sdmx | dccd51e6ce4aa352fba0a0c25dfac82148acd1e3 | [
"Apache-2.0"
] | 1 | 2021-05-28T13:14:53.000Z | 2021-05-28T13:14:53.000Z | """intake plugin for SDMX data sources"""
import intake
from intake.catalog import Catalog
from intake.catalog.utils import reload_on_change
from intake.catalog.local import LocalCatalogEntry, UserParameter
import pandasdmx as sdmx
from collections.abc import MutableMapping
from datetime import date
from itertools import chain
__version__ = "0.1.0"
NOT_SPECIFIED = "n/a"
| 36.805085 | 90 | 0.54939 |
5910779f16295dd8d8929f180e23470f2321f629 | 1,388 | py | Python | apps/exp/afe/afe_bfcc.py | yt7589/mgs | 2faae1b69e6d4cde63afb9b2432b1bf49ebdd770 | [
"Apache-2.0"
] | null | null | null | apps/exp/afe/afe_bfcc.py | yt7589/mgs | 2faae1b69e6d4cde63afb9b2432b1bf49ebdd770 | [
"Apache-2.0"
] | null | null | null | apps/exp/afe/afe_bfcc.py | yt7589/mgs | 2faae1b69e6d4cde63afb9b2432b1bf49ebdd770 | [
"Apache-2.0"
] | null | null | null | #
#import scipy
#from scipy import io as sio
import scipy.io.wavfile
from ext.spafe.utils import vis
from ext.spafe.features.bfcc import bfcc | 30.844444 | 77 | 0.50072 |
591491ff550ba32d4e2ae2cbc52705d6ad0c7c72 | 4,673 | py | Python | notifier_bot.py | maticardenas/football_api_notif | 81f9e265d4effb7545e3d9ad80ee1109cd9b8edf | [
"MIT"
] | null | null | null | notifier_bot.py | maticardenas/football_api_notif | 81f9e265d4effb7545e3d9ad80ee1109cd9b8edf | [
"MIT"
] | null | null | null | notifier_bot.py | maticardenas/football_api_notif | 81f9e265d4effb7545e3d9ad80ee1109cd9b8edf | [
"MIT"
] | null | null | null | import logging
from datetime import date
from telegram import Update
from telegram.ext import ApplicationBuilder, CommandHandler
from config.notif_config import NotifConfig
from src.emojis import Emojis
from src.team_fixtures_manager import TeamFixturesManager
from src.telegram_bot.bot_commands_handler import NextAndLastMatchCommandHandler, NotifierBotCommandsHandler
# Configure root logging once so every module logger shares the same
# timestamped format.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
if __name__ == "__main__":
    # Build the Telegram application using the bot token from configuration.
    application = ApplicationBuilder().token(NotifConfig.TELEGRAM_TOKEN).build()
    # Map each slash-command to its callback.  The callbacks (start,
    # next_match, ...) are defined elsewhere in this file; `help` here
    # presumably refers to a command handler, not the builtin -- confirm.
    start_handler = CommandHandler("start", start)
    next_match_handler = CommandHandler("next_match", next_match)
    last_match_handler = CommandHandler("last_match", last_match)
    available_teams_handler = CommandHandler("available_teams", available_teams)
    help_handler = CommandHandler("help", help)
    # Register all handlers with the application.
    application.add_handler(start_handler)
    application.add_handler(next_match_handler)
    application.add_handler(last_match_handler)
    application.add_handler(help_handler)
    application.add_handler(available_teams_handler)
    # Start long-polling Telegram for updates; this call blocks.
    application.run_polling()
| 37.685484 | 116 | 0.684571 |
59166d910964300c77243a402c6b75bc3f352d74 | 1,517 | py | Python | homeassistant_cli/plugins/event.py | dotlambda/home-assistant-cli | e8c5a493ca902a739a357d3053a2f09d589e9be1 | [
"Apache-2.0"
] | null | null | null | homeassistant_cli/plugins/event.py | dotlambda/home-assistant-cli | e8c5a493ca902a739a357d3053a2f09d589e9be1 | [
"Apache-2.0"
] | null | null | null | homeassistant_cli/plugins/event.py | dotlambda/home-assistant-cli | e8c5a493ca902a739a357d3053a2f09d589e9be1 | [
"Apache-2.0"
] | null | null | null | """Edit plugin for Home Assistant CLI (hass-cli)."""
import json as json_
import logging
import click
import homeassistant_cli.autocompletion as autocompletion
from homeassistant_cli.cli import pass_context
from homeassistant_cli.config import Configuration
from homeassistant_cli.helper import raw_format_output, req_raw
import yaml
_LOGGING = logging.getLogger(__name__)
| 29.173077 | 75 | 0.661173 |
59177fedfb201ef7cf401094e43b1d49ac1b2c09 | 8,576 | py | Python | events/models.py | Strategy-Tap/Novizi-BackEnd | 536edde68dc79ad5467f2dbb0931a56930a4edea | [
"MIT"
] | null | null | null | events/models.py | Strategy-Tap/Novizi-BackEnd | 536edde68dc79ad5467f2dbb0931a56930a4edea | [
"MIT"
] | 4 | 2021-04-08T21:23:49.000Z | 2022-03-12T00:44:54.000Z | events/models.py | Strategy-Tap/Novizi-BackEnd | 536edde68dc79ad5467f2dbb0931a56930a4edea | [
"MIT"
] | 1 | 2020-06-12T16:08:46.000Z | 2020-06-12T16:08:46.000Z | """Collection of model."""
from typing import Any
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from djgeojson.fields import PointField
from .utils import get_read_time, unique_slug
def event_upload_to(instance: "Event", filename: str) -> str:
    """Build the storage path for an event's uploaded cover image.

    Args:
        instance: the ``Event`` model instance being saved.
        filename: the name of the uploaded file.

    Returns:
        The upload path, namespaced by the event's title.
    """
    # Include the uploaded file's name in the path.  The previous version
    # ended with a literal "(unknown)" (extraction artifact), so the
    # documented `filename` argument was never used and every upload for
    # an event collapsed onto the same path.
    return f"images/events/cover/{instance.title}/{filename}"
| 30.519573 | 87 | 0.653451 |
5917ad709fbc60f4121dfd8d315e221b94423156 | 1,938 | py | Python | src/nlp/classification/tf1/traditional_cls/bert_embedding.py | wu-uw/OpenCompetition | 9aa9d7a50ada1deb653d295dd8a7fe46321b9094 | [
"Apache-2.0"
] | 15 | 2019-12-22T14:26:47.000Z | 2020-11-02T10:57:37.000Z | src/nlp/classification/tf1/traditional_cls/bert_embedding.py | GT-JLU/OpenCompetition | 5262fc5fa7efd7b483c1dc09cb7747dd75e37175 | [
"Apache-2.0"
] | 2 | 2020-02-03T07:10:11.000Z | 2020-02-11T16:38:56.000Z | src/nlp/classification/tf1/traditional_cls/bert_embedding.py | GT-JLU/OpenCompetition | 5262fc5fa7efd7b483c1dc09cb7747dd75e37175 | [
"Apache-2.0"
] | 12 | 2020-01-06T14:16:52.000Z | 2020-05-23T14:12:30.000Z | # coding = utf-8
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from activation_function import gelu, swish, ACT2FN
import logging
logger = logging.getLogger(__name__)
from bert_layernorm import BertLayerNorm | 35.888889 | 114 | 0.750774 |
5918b94351e68baf0dc788cb62fb44c5a012741d | 2,276 | py | Python | raster_compare/base/raster_data_difference.py | jomey/raster_compare | 5199005d01f569e187e944d62af0ea70c383d16a | [
"MIT"
] | 1 | 2021-11-13T12:59:53.000Z | 2021-11-13T12:59:53.000Z | raster_compare/base/raster_data_difference.py | jomey/raster_compare | 5199005d01f569e187e944d62af0ea70c383d16a | [
"MIT"
] | null | null | null | raster_compare/base/raster_data_difference.py | jomey/raster_compare | 5199005d01f569e187e944d62af0ea70c383d16a | [
"MIT"
] | null | null | null | import numpy as np
from osgeo import gdal
from .median_absolute_deviation import MedianAbsoluteDeviation
from .raster_file import RasterFile
| 27.756098 | 76 | 0.629174 |
591c39e1d0a64ea2423fa974b75251f4ec29ed0a | 3,386 | py | Python | dbcontext/Module/construct.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null | dbcontext/Module/construct.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null | dbcontext/Module/construct.py | jimmg35/Sensor_Crawling_v2 | 5154885cad5173127539487a2fcf2140a4409b8b | [
"MIT"
] | null | null | null | import time
import hmac
import base64
import datetime
import schedule
import psycopg2
from time import mktime
from hashlib import sha1
from pprint import pprint
from requests import request
from datetime import datetime
from wsgiref.handlers import format_date_time
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
| 33.86 | 137 | 0.60632 |
591d8ee660b8804dd218cc4cd1c5374e204e9abe | 1,540 | py | Python | ppqq/src/qq/main.py | xiaomin0322/mypqq | 8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c | [
"Apache-2.0"
] | 1 | 2018-11-11T14:34:53.000Z | 2018-11-11T14:34:53.000Z | ppqq/src/qq/main.py | xiaomin0322/mypqq | 8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c | [
"Apache-2.0"
] | null | null | null | ppqq/src/qq/main.py | xiaomin0322/mypqq | 8bdd5d1dafb5fa53d65cb732b7372fbcfe9c7c2c | [
"Apache-2.0"
] | 1 | 2021-02-04T08:46:09.000Z | 2021-02-04T08:46:09.000Z | # -*- coding: utf8 -*-
# Light..
# QQ1311817771
import AndroidQQ,threading,time
from AndroidQQ import Android
if __name__ == "__main__":
    # Main is defined elsewhere in this file (not visible in this chunk).
    qq = Main()
    # Log in with a hard-coded QQ account; the password argument is blank
    # here -- presumably filled in locally.  TODO confirm.
    qq.login("634545399","")
| 35 | 75 | 0.546104 |
591f579f62bec7c986797fa9d6cc59de7656817e | 527 | py | Python | util/logger.py | code4hk/NewsdiffHK-Backend | 76ffd933fe9900a0bd2191597a210ddf86d2a8cd | [
"MIT"
] | 5 | 2015-03-29T19:19:16.000Z | 2015-06-20T09:37:39.000Z | util/logger.py | code4hk/NewsdiffHK-Backend | 76ffd933fe9900a0bd2191597a210ddf86d2a8cd | [
"MIT"
] | 28 | 2015-04-07T13:34:57.000Z | 2015-05-25T13:30:36.000Z | util/logger.py | code4hk/NewsdiffHK-Backend | 76ffd933fe9900a0bd2191597a210ddf86d2a8cd | [
"MIT"
] | null | null | null | from util.env import log_dir
import logging
| 29.277778 | 84 | 0.70019 |
59203bba648641256e52153b47303c1f888cf09a | 1,347 | py | Python | src/core/migrations/0096_auto_20191023_1441.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0096_auto_20191023_1441.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0096_auto_20191023_1441.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-10-23 14:41
from django.db import migrations, models
import tinymce.models
| 35.447368 | 323 | 0.590943 |
592176ee7d34af8c375b741cef8c2df674d9c4b5 | 2,243 | py | Python | piservicebusclient.py | nikkh/pi | 237c0c0effcf69c15c6fb2791c7fd49eb1e254aa | [
"Unlicense"
] | null | null | null | piservicebusclient.py | nikkh/pi | 237c0c0effcf69c15c6fb2791c7fd49eb1e254aa | [
"Unlicense"
] | null | null | null | piservicebusclient.py | nikkh/pi | 237c0c0effcf69c15c6fb2791c7fd49eb1e254aa | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
import colorsys
from azure.servicebus import ServiceBusService
from azure.servicebus import Message
from blinkt import set_pixel, set_brightness, show, clear
import time
import json
def snake(r, g, b):
    """Sweep a single lit pixel across the 8-pixel Blinkt! strip.

    Runs 19 passes, printing the pass number before each one; within a
    pass, pixels 0..7 are lit one at a time in the given (r, g, b)
    colour with a 50 ms dwell.  The strip is left cleared afterwards.
    """
    clear()
    for sweep in range(1, 20):
        print(sweep)
        for pixel in range(8):
            clear()
            set_pixel(pixel, r, g, b)
            show()
            time.sleep(0.05)
    clear()
# Dim the Blinkt! LEDs to 10% brightness and announce startup.
set_brightness(0.1)
print('Nicks Raspberry Pi Python Service Bus Client version 0.1')
# Azure Service Bus connection settings.
service_namespace='nixpitest'
key_name = 'RootManageSharedAccessKey' # SharedAccessKeyName from Azure portal
# The shared access key lives in a private file kept out of the repo;
# strip newlines so it is a single token.
with open('private/keys.txt', 'r') as myfile:
    keyval=myfile.read().replace('\n', '')
key_value = keyval # SharedAccessKey from Azure portal
sbs = ServiceBusService(service_namespace,
    shared_access_key_name=key_name,
    shared_access_key_value=key_value)
# Create the queue this client reads from (name is hard-coded).
sbs.create_queue('testpythonqueue1')
# Poll the queue forever, roughly once per second.
while True:
    newmsg = None
    # peek_lock=False removes the message from the queue on receipt.
    newmsg = sbs.receive_queue_message('testpythonqueue1', peek_lock=False)
    if newmsg.body is not None:
        print ("message: ", newmsg.body, "\n")
        # Payload wraps the message body (class not visible in this chunk).
        p = Payload(newmsg.body)
        if p.device: print(p.device)
        if p.effect: print(p.effect)
        if p.led: print(p.led)
        if p.colour: print(p.colour)
        if p.state: print(p.state)
        # Dispatch the requested LED effect.
        if p.effect == 'snake':
            if p.colour == 'red':
                snake(255,0,0)
            elif p.colour == 'green':
                snake(0,255,0)
            elif p.colour == 'blue':
                snake(0,0,255)
        if p.effect == 'rainbow':
            # rainbow() is defined elsewhere in this file (not in this chunk).
            rainbow()
        clear()
    time.sleep(1)
| 28.392405 | 78 | 0.602764 |
59245ad69bc6ea799437abf97159920fe65df34f | 149 | py | Python | exercicio-mundo-1/ex010.py | amosxrl/python | 8399a9c42fdb49184fcfe906f8bce82d1a671667 | [
"MIT"
] | 2 | 2020-05-06T23:49:20.000Z | 2020-05-06T23:49:25.000Z | exercicio-mundo-1/ex010.py | amosxrl/python | 8399a9c42fdb49184fcfe906f8bce82d1a671667 | [
"MIT"
] | null | null | null | exercicio-mundo-1/ex010.py | amosxrl/python | 8399a9c42fdb49184fcfe906f8bce82d1a671667 | [
"MIT"
] | null | null | null | print('Quanto dinheiro voce tem na carteiro')
print('-'*20)
# Amount in Brazilian reais, read from the user.
# NOTE(review): 're' shadows the stdlib module name (harmless in this script).
re = float(input('R$ '))
# Hard-coded exchange rate: 1 BRL = 0.1874 USD at the time of writing.
dol = re * 0.1874
print('-'*20)
print('US$ {:.2f}'.format(dol))
| 21.285714 | 45 | 0.610738 |
59253833cbcb18241d731edddc82a7004e814b3e | 3,163 | py | Python | catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | null | null | null | catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | null | null | null | catkin_ws/build/baxter_core_msgs/cmake/baxter_core_msgs-genmsg-context.py | roop-pal/robotic-folding | a0e062ac6d23cd07fe10e3f45abc4ba50e533141 | [
"RSA-MD"
] | null | null | null | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogIOState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogIOStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AnalogOutputCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AssemblyState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/AssemblyStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CameraControl.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CameraSettings.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CollisionAvoidanceState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/CollisionDetectionState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalIOState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalIOStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/DigitalOutputCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorProperties.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndEffectorState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndpointState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/EndpointStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/HeadPanCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/HeadState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/JointCommand.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/NavigatorState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/NavigatorStates.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/RobustControllerStatus.msg;/home/paral
lels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/SEAJointState.msg;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg/URDFConfiguration.msg"
services_str = "/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/CloseCamera.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/ListCameras.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/OpenCamera.srv;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/srv/SolvePositionIK.srv"
pkg_name = "baxter_core_msgs"
dependencies_str = "geometry_msgs;sensor_msgs;std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "baxter_core_msgs;/home/parallels/catkin_ws/src/baxter_common/baxter_core_msgs/msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 263.583333 | 2,159 | 0.863737 |
59269ff1d7149784a5bf3e067f0e6975db562830 | 14,031 | py | Python | apps/part_interpolation&replacement/part_replacement.py | GuillaumeDufau/3D-point-capsule-networks | 369206df643edb263d43cf2d05923cf0a26841e5 | [
"MIT"
] | 283 | 2019-04-14T12:58:54.000Z | 2022-03-30T11:49:38.000Z | apps/part_interpolation&replacement/part_replacement.py | LONG-9621/3D-Point-Capsule-Networks | 161ac9042ca9c048f4b531ae26fe94a29b13e777 | [
"MIT"
] | 20 | 2019-05-01T05:40:02.000Z | 2021-11-20T11:15:17.000Z | apps/part_interpolation&replacement/part_replacement.py | LONG-9621/3D-Point-Capsule-Networks | 161ac9042ca9c048f4b531ae26fe94a29b13e777 | [
"MIT"
] | 55 | 2019-04-22T12:14:42.000Z | 2022-03-25T06:26:36.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 17:45:51 2018
@author: zhao
"""
import argparse
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../models')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import matplotlib.pyplot as plt
from pointcapsnet_ae import PointCapsNet,PointCapsNetDecoder
from capsule_seg_net import CapsSegNet
import json
from open3d import *
if __name__ == "__main__":
    # Command-line configuration for the part-replacement experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
    parser.add_argument('--prim_caps_size', type=int, default=1024, help='number of primary point caps')
    parser.add_argument('--prim_vec_size', type=int, default=16, help='scale of primary point caps')
    parser.add_argument('--latent_caps_size', type=int, default=64, help='number of latent caps')
    parser.add_argument('--latent_vec_size', type=int, default=64, help='scale of latent caps')
    parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
    parser.add_argument('--part_model', type=str, default='../../checkpoints/part_seg_100percent.pth', help='model path for the pre-trained part segmentation network')
    parser.add_argument('--model', type=str, default='../../checkpoints/shapenet_part_dataset_ae_200.pth', help='model path')
    parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset: shapenet_part, shapenet_core13, shapenet_core55, modelent40')
    parser.add_argument('--n_classes', type=int, default=50, help='part classes in all the catagories')
    parser.add_argument('--class_choice', type=str, default='Airplane', help='choose the class to eva')
    opt = parser.parse_args()
    print(opt)
    # NOTE(review): set unconditionally; CUDA availability is not checked here.
    USE_CUDA = True
    # main() is defined elsewhere in this file (not visible in this chunk).
    main()
| 44.26183 | 170 | 0.662818 |
592749e0c27abaef8d986702717878c311749a54 | 6,839 | py | Python | src/Grid.py | RavinSG/aaivu-ride-hailing-simulation | eb7bc7cc6a5830d40509ce22fe4fa2eb013e6767 | [
"Apache-2.0"
] | 8 | 2021-02-18T19:02:59.000Z | 2022-02-19T13:38:48.000Z | src/Grid.py | Programmer-RD-AI/aaivu-ride-hailing-simulation | f315661c94c9e3f26bab1d8bb9c35d21b1a60479 | [
"Apache-2.0"
] | null | null | null | src/Grid.py | Programmer-RD-AI/aaivu-ride-hailing-simulation | f315661c94c9e3f26bab1d8bb9c35d21b1a60479 | [
"Apache-2.0"
] | 2 | 2021-02-14T03:28:51.000Z | 2022-02-19T13:38:51.000Z | import simpy
import itertools
import numpy as np
from RideSimulator.Driver import Driver
from RideSimulator.HexGrid import HexGrid
def get_spot_locations(width: int, height: int, interval: int) -> np.ndarray:
    """
    :param width: width of the grid
    :param height: height of the grid
    :param interval: distance between two spots
    :return: an array of all the spot locations
    """
    xs = np.arange(0, width, interval)
    ys = np.arange(0, height, interval)
    # A corner further than interval/sqrt(2) from its nearest spot gets an
    # extra row/column of spots added along that edge of the map.
    threshold = interval / np.sqrt(2)
    if width - xs[-1] > threshold:
        xs = np.append(xs, width)
    if height - ys[-1] > threshold:
        ys = np.append(ys, height)
    spots = np.array([[x, y] for x, y in itertools.product(xs, ys)])
    return np.array([spots, len(ys), len(xs)], dtype=object)
| 41.70122 | 120 | 0.640445 |
592751375bcd4d68f888638835a70c28a75cc554 | 427 | py | Python | template.py | imsofi/advent-of-code | 8ac1406cfee689d9da0302363eaee7c8fea5c722 | [
"0BSD"
] | null | null | null | template.py | imsofi/advent-of-code | 8ac1406cfee689d9da0302363eaee7c8fea5c722 | [
"0BSD"
] | null | null | null | template.py | imsofi/advent-of-code | 8ac1406cfee689d9da0302363eaee7c8fea5c722 | [
"0BSD"
] | null | null | null | """
Day $:
https://adventofcode.com/2021/day/$
"""
# Container type for the parsed puzzle input; presumably specialised in the
# per-day copies of this template -- confirm against those scripts.
Data = list
if __name__ == "__main__":
    # main() is defined elsewhere in this template (not visible in this chunk).
    main()
| 12.558824 | 36 | 0.559719 |
59276445280313e61b8b7bc4ae85576dc76c9f96 | 3,174 | py | Python | src/ReichardtDS8.py | smittal6/i3d | e347b5415f5665a6f25b644a3dda5dd32f01dbbb | [
"MIT"
] | null | null | null | src/ReichardtDS8.py | smittal6/i3d | e347b5415f5665a6f25b644a3dda5dd32f01dbbb | [
"MIT"
] | null | null | null | src/ReichardtDS8.py | smittal6/i3d | e347b5415f5665a6f25b644a3dda5dd32f01dbbb | [
"MIT"
] | null | null | null | import numpy as np
def Reichardt8(video, dirs=None):
    '''
    Compute Reichardt-Hassenstein correlators in up to 8 directions.
    args:
        video: Shape ~ [TimeSteps, H, W, 1]
        dirs: indices (0-7) of the direction channels to return;
              defaults to all eight.
    Returns a list (not a tuple) of the selected correlator arrays.
    '''
    # Use a None sentinel instead of a mutable list default argument.
    if dirs is None:
        dirs = [0, 1, 2, 3, 4, 5, 6, 7]
    vp1, vm1 = Reichardt_vertical_2channels_Vectorized(video) #Directions 1, -1
    vp3, vm3 = Reichardt_horizontal_2channels_Vectorized(video) #Directions 3, -3
    vp2, vm2 = Reichardt_diagonal1_2channels_Vectorized(video) #Directions 2, -2
    vp4, vm4 = Reichardt_diagonal2_2channels_Vectorized(video) #Directions 4, -4
    all_dirs = [vp1, vm1, vp2, vm2, vp3, vm3, vp4, vm4]
    return [all_dirs[i] for i in dirs]
#timeDelay is unused in the Vectorized method, but may be useful later
def Reichardt_vertical_2channels_Vectorized(video, timeDelay=1):
    '''
    Reichardt-Hassenstein correlator along the vertical axis (axis 1).

    Returns two non-negative arrays: the positive-motion channel and the
    magnitude of the negative-motion channel, for display as two colour
    channels (e.g. red and green).
    '''
    shifted = np.roll(video, 1, axis=1)
    advanced = np.roll(video, -1, axis=0)
    corr = shifted * advanced - np.roll(shifted, -1, axis=0) * video
    # Split the signed correlation into two non-negative channels.
    return np.clip(corr, 0, None), -np.clip(corr, None, 0)
def Reichardt_diagonal1_2channels_Vectorized(video, timeDelay=1):
    '''
    Reichardt-Hassenstein correlator along the first diagonal
    (a +1 shift on both axes 1 and 2).

    Returns two non-negative arrays: the positive-motion channel and the
    magnitude of the negative-motion channel, for display as two colour
    channels (e.g. red and green).
    '''
    shifted = np.roll(video, (1, 1), axis=(1, 2))
    advanced = np.roll(video, -1, axis=0)
    corr = shifted * advanced - np.roll(shifted, -1, axis=0) * video
    # Split the signed correlation into two non-negative channels.
    return np.clip(corr, 0, None), -np.clip(corr, None, 0)
def Reichardt_horizontal_2channels_Vectorized(video,timeDelay=1):
'''
Reichardt-Hassenstein inspired video processing
Put negative values into another tensor and then concat for the two (e.g. red and green) channels
'''
vc_shift_horz_by1back=np.roll(video,1,axis=2)
vc_shift_time_by1forw=np.roll(video,-1,axis=0)
vc_shift_horz_by1back_time_by1forw=np.roll(vc_shift_horz_by1back,-1,axis=0)
vc= vc_shift_horz_by1back*vc_shift_time_by1forw - vc_shift_horz_by1back_time_by1forw*video
vc_neg=vc.clip(max=0)
vc_neg=-1*vc_neg
vc=vc.clip(0)
return vc, vc_neg
def Reichardt_diagonal2_2channels_Vectorized(video, timeDelay=1):
    '''
    Reichardt-Hassenstein correlator along the second diagonal
    (a -1 shift on axis 1 combined with a +1 shift on axis 2).

    Returns two non-negative arrays: the positive-motion channel and the
    magnitude of the negative-motion channel, for display as two colour
    channels (e.g. red and green).
    '''
    shifted = np.roll(video, (-1, 1), axis=(1, 2))
    advanced = np.roll(video, -1, axis=0)
    corr = shifted * advanced - np.roll(shifted, -1, axis=0) * video
    # Split the signed correlation into two non-negative channels.
    return np.clip(corr, 0, None), -np.clip(corr, None, 0)
| 34.5 | 104 | 0.732199 |
592904d4f5f76e99f9c27babc8743707b85f9a4e | 1,662 | py | Python | shopdrawing.py | Maxim-Kovalenko/turtle-graphics-programms | 768866f9b6658dc0933b0391387a6bdec64ad6ec | [
"Apache-2.0"
] | 1 | 2020-04-14T08:31:24.000Z | 2020-04-14T08:31:24.000Z | shopdrawing.py | Maxim-Kovalenko/turtle-graphics-programms | 768866f9b6658dc0933b0391387a6bdec64ad6ec | [
"Apache-2.0"
] | null | null | null | shopdrawing.py | Maxim-Kovalenko/turtle-graphics-programms | 768866f9b6658dc0933b0391387a6bdec64ad6ec | [
"Apache-2.0"
] | 1 | 2021-01-05T15:47:59.000Z | 2021-01-05T15:47:59.000Z | from turtle import *
# Scale factor for the drawing; presumably consumed by the helper functions
# below (their definitions are not visible in this chunk) -- confirm.
coeficient = 0.5
# Turtle pen speed (1 slowest .. 10 fast, 0 fastest).
speed(5)
# Draw the shop scene piece by piece; helpers are defined elsewhere in this file.
base()
roof()
window()
door()
move()
tree()
| 17.494737 | 34 | 0.527677 |
592b099ed5239bc2e197e2c20d2d55bdd277f278 | 881 | py | Python | src/block_constants.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | 5 | 2015-09-11T04:13:01.000Z | 2021-11-17T14:35:28.000Z | src/block_constants.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | null | null | null | src/block_constants.py | cemulate/minecraft-hdl | a46da8d2a29aad9c2fc84037d677190c6db80dcd | [
"MIT"
] | 1 | 2021-03-15T17:31:27.000Z | 2021-03-15T17:31:27.000Z | REDSTONE = 55
# Numeric Minecraft block IDs.
REPEATER = 93
TORCH = 75
AIR = 0
GLASS = 20
SLAB = 44
DOUBLE_SLAB = 43
WOOL = 35
# Block data values naming a facing direction.
DIR_WEST_POS_Z = 0
DIR_NORTH_NEG_X = 1
DIR_EAST_NEG_Z = 2
DIR_SOUTH_POS_X = 3
# Block data values for torch placement/orientation.
TORCH_ON_GROUND = 5
TORCH_POINTING_POS_X = 1
TORCH_POINTING_NEG_X = 2
TORCH_POINTING_POS_Z = 3
TORCH_POINTING_NEG_Z = 4
# Slab and wool variant data values.
STONE_SLAB_TOP = 8
DOUBLE_SLAB_STONE = 0
WOOL_BLACK = 15
# Block data values for repeater orientation.
REPEATER_TOWARD_POS_X = 1
REPEATER_TOWARD_POS_Z = 2
REPEATER_TOWARD_NEG_X = 3
# Side selectors (presumably which end of a component is referenced -- confirm).
CLOSE_SIDE = 0
FAR_SIDE = 1
# Human-readable names for the 16 wool colour data values.
# (Stray dataset-extraction text after the closing brace removed; it made
# this line a syntax error.)
WOOL_NAMES = {0: "White",
              1: "Orange",
              2: "Magenta",
              3: "Light blue",
              4: "Yellow",
              5: "Lime",
              6: "Pink",
              7: "Grey",
              8: "Light grey",
              9: "Cyan",
              10: "Purple",
              11: "Blue",
              12: "Brown",
              13: "Green",
              14: "Red",
              15: "Black"}
592b8f8cacb2754ab7e4528631c3f40cfdc1b7e7 | 4,973 | py | Python | qfc/dirhandler.py | akhilkedia/qfc | 101861bd2fb818564245249fc93f278752684b51 | [
"MIT"
] | null | null | null | qfc/dirhandler.py | akhilkedia/qfc | 101861bd2fb818564245249fc93f278752684b51 | [
"MIT"
] | null | null | null | qfc/dirhandler.py | akhilkedia/qfc | 101861bd2fb818564245249fc93f278752684b51 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
def run_command(string):
    '''Run *string* in a shell and return its STDOUT as text.

    STDERR is discarded.  Raises subprocess.CalledProcessError if the
    command exits with a non-zero status.

    NOTE: the command runs with shell=True, so *string* must never be
    built from untrusted input.
    '''
    # Use a context manager so the devnull handle is always closed;
    # the previous version leaked one file descriptor per call.
    with open(os.devnull, 'wb') as devnull:
        out = subprocess.check_output(string, stderr=devnull, shell=True)
    # Keep Python 2/3 compatibility: decode bytes to text on Python 3 only.
    if sys.version_info >= (3, 0):
        return out.decode('utf-8')
    return out
# Module-level handler singletons: one per supported version-control system
# plus a plain-filesystem fallback (the classes are defined elsewhere in
# this file, outside this chunk).
git = CVSHandler(Git)
hg = CVSHandler(Mercurial)
default = DefaultDirHandler()
def get_source_files(directory):
    """Return the tracked source files for *directory*.

    Git is consulted first, then Mercurial; an empty result from a
    handler means the directory is not tracked by that CVS.  If neither
    tracks it, fall back to the default directory handler.
    """
    for handler in (git, hg):
        files = handler.get_source_files(directory)
        if files:
            return files
    return default.get_source_files(directory)
| 42.87069 | 246 | 0.648904 |
592c8f23fd0453baefac3223ac8d226123072b8f | 436 | py | Python | demo1/jsons.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
] | null | null | null | demo1/jsons.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
] | null | null | null | demo1/jsons.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
] | null | null | null | import json
# Target file for this JSON write/read demonstration.
filename = "data.json"
# Nested sample data to serialise.
mydata = {
    "title":"",
    "lesson":{
        "python":"",
        'vue':"",
        "golang":""
    },
    "games":{
        "GAT":""
    },
}
# Write the dictionary to disk as JSON.
with open(filename,'w',encoding="utf-8") as data:
    # indent=4 pretty-prints the output.
    json.dump(mydata,data,indent=4)
# Read the JSON back to show the round trip.
with open(filename,'r',encoding='utf-8') as data:
    # Parse the file back into Python objects and display them.
    rdata = json.load(data)
    print(rdata)
| 16.769231 | 49 | 0.538991 |
592ca011fcc9c84fa4da0a8bde9dd4daf4629fd5 | 280 | py | Python | Scripts/malware_scan/classess/progress.py | Team-Zed-cf/Team-Zed | 662eee2948502fca0bdc477955db17e2d32f92aa | [
"MIT"
] | null | null | null | Scripts/malware_scan/classess/progress.py | Team-Zed-cf/Team-Zed | 662eee2948502fca0bdc477955db17e2d32f92aa | [
"MIT"
] | null | null | null | Scripts/malware_scan/classess/progress.py | Team-Zed-cf/Team-Zed | 662eee2948502fca0bdc477955db17e2d32f92aa | [
"MIT"
] | null | null | null | import progressbar, time
from .colors import *
# progress bar | 28 | 61 | 0.660714 |
593150e1f3c9a373acbf0b4f5ce7f05a49bde1de | 4,406 | py | Python | single_subject_workflow.py | tknapen/reward_np_analysis | 29bcc02d5acd23689dee7059ecb1607d2814cdf0 | [
"MIT"
] | null | null | null | single_subject_workflow.py | tknapen/reward_np_analysis | 29bcc02d5acd23689dee7059ecb1607d2814cdf0 | [
"MIT"
] | null | null | null | single_subject_workflow.py | tknapen/reward_np_analysis | 29bcc02d5acd23689dee7059ecb1607d2814cdf0 | [
"MIT"
] | null | null | null | # from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import os.path as op
import glob
import json
import nipype
from nipype import config, logging
import matplotlib.pyplot as plt
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.utils.filemanip import copyfile
import nibabel as nib
from IPython.display import Image
from nipype.interfaces.utility import Function, Merge, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from IPython.display import Image
from IPython import embed as shell
from workflows.pupil_workflow import create_pupil_workflow
from workflows.bold_wholebrain_fir_workflow import create_bold_wholebrain_fir_workflow
# we will create a workflow from a BIDS formatted input, at first for the specific use case
# of a 7T PRF experiment's preprocessing.
# a project directory that we assume has already been created.
# Project directory layout (assumed to exist already).
raw_data_dir = '/home/raw_data/-2014/reward/human_reward/data/'
preprocessed_data_dir = '/home/shared/-2014/reward/new/'
FS_subject_dir = os.path.join(raw_data_dir, 'FS_SJID')
# booleans that determine whether given stages of the
# analysis are run
pupil = True
wb_fir = True
# Process subjects sub-001 .. sub-006.
for si in range(1,7): #
    sub_id, FS_ID = 'sub-00%i'%si, 'sub-00%i'%si
    sess_id = 'ses-*'
    # now we set up the folders and logging there.
    opd = op.join(preprocessed_data_dir, sub_id)
    # makedirs raises OSError when the directory already exists; ignore it.
    try:
        os.makedirs(op.join(opd, 'log'))
    except OSError:
        pass
    # Route nipype workflow/interface logs into this subject's log folder.
    config.update_config({ 'logging': {
        'log_directory': op.join(opd, 'log'),
        'log_to_file': True,
        'workflow_level': 'INFO',
        'interface_level': 'INFO'
    },
    'execution': {
        'stop_on_first_crash': False
    }
    })
    logging.update_logging(config)
    # load the sequence parameters from json file
    with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
        json_s = f.read()
    acquisition_parameters = json.loads(json_s)
    # load the analysis parameters from json file
    with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
        json_s = f.read()
    analysis_info = json.loads(json_s)
    # load the analysis/experimental parameters for this subject from json file
    with open(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json')) as f:
        json_s = f.read()
    experimental_parameters = json.loads(json_s)
    analysis_info.update(experimental_parameters)
    if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
        try:
            os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
        except OSError:
            pass
    # copy json files to preprocessed data folder
    # this allows these parameters to be updated and synced across subjects by changing only the raw data files.
    copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'), os.path.join(preprocessed_data_dir, 'acquisition_parameters.json'), copy = True)
    copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'), os.path.join(preprocessed_data_dir, 'analysis_parameters.json'), copy = True)
    copyfile(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json'), os.path.join(preprocessed_data_dir, sub_id ,'experimental_parameters.json'), copy = True)
    # Pupil preprocessing stage: build the workflow, save its graph, run it
    # locally on 6 processes.
    if pupil:
        pwf = create_pupil_workflow(analysis_info,'pupil')
        pwf.inputs.inputspec.sub_id = sub_id
        pwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
        pwf.write_graph(opd + '_pupil.svg', format='svg', graph2use='colored', simple_form=False)
        pwf.run('MultiProc', plugin_args={'n_procs': 6})
    # Whole-brain FIR stage: same pattern as above.
    if wb_fir:
        wbfwf = create_bold_wholebrain_fir_workflow(analysis_info,'wb_fir')
        wbfwf.inputs.inputspec.sub_id = sub_id
        wbfwf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
        wbfwf.write_graph(opd + '_wb_fir.svg', format='svg', graph2use='colored', simple_form=False)
        wbfwf.run('MultiProc', plugin_args={'n_procs': 6})
| 40.422018 | 170 | 0.682705 |
593382e994272402d7ed09a0a47388a40b5bfde8 | 3,692 | py | Python | thm.py | brenolf/k-flow | f2ab6e2e6aa09aad437acb2ef071257adc0464c1 | [
"Apache-2.0"
] | null | null | null | thm.py | brenolf/k-flow | f2ab6e2e6aa09aad437acb2ef071257adc0464c1 | [
"Apache-2.0"
] | null | null | null | thm.py | brenolf/k-flow | f2ab6e2e6aa09aad437acb2ef071257adc0464c1 | [
"Apache-2.0"
] | null | null | null | import sys
# Module-level state shared by the solver routines.  These are initialised
# by code outside this chunk; meanings are not visible here -- confirm
# against the rest of the file.
N = -1
G = None
H = None
vis = None
vis_aux = None
valence = None
flows = {}
answer = []
# Candidate edge-flow values per flow number k.  For each k the list
# appears to hold the values the search may assign -- TODO confirm.
allowed_flows = {
    3 : [-1, 1],
    4 : [-1, 1, 2],
    5 : [-1, 1, 2, -2]
}
| 16.93578 | 83 | 0.597237 |
5933bc9be206bb31b3b20546a2f728540ffb2f7a | 45,642 | py | Python | ms/MS/index.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 16 | 2015-01-02T15:39:04.000Z | 2016-03-17T06:38:46.000Z | ms/MS/index.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 37 | 2015-01-28T20:58:05.000Z | 2016-03-22T04:01:32.000Z | ms/MS/index.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 8 | 2015-04-08T02:26:03.000Z | 2016-03-04T05:56:24.000Z | #!/usr/bin/pyhon
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import storage.storagetypes as storagetypes
import storage.shardcounter as shardcounter
import protobufs.ms_pb2 as ms_pb2
import logging
import random
import os
import types
import errno
import time
import datetime
import collections
import pickle
import base64
from common.msconfig import *
| 40.534636 | 267 | 0.619101 |
59346b4914120bc35ba05709bdea9720cdc9dfbc | 3,009 | py | Python | tests/test_utils.py | caseygrun/plates | 156069487560d0c72f080f7e45a4dc2ae7a466ac | [
"MIT"
] | null | null | null | tests/test_utils.py | caseygrun/plates | 156069487560d0c72f080f7e45a4dc2ae7a466ac | [
"MIT"
] | null | null | null | tests/test_utils.py | caseygrun/plates | 156069487560d0c72f080f7e45a4dc2ae7a466ac | [
"MIT"
] | null | null | null | from microplates.utils import *
| 38.576923 | 123 | 0.580592 |
593527dd9bb90c5f584c18500adffe54b948dd13 | 1,066 | py | Python | example.py | train255/deep-speaker | d63b111b904faab34fe58637b3d0e7fd188e1b36 | [
"MIT"
] | 3 | 2021-08-20T16:40:09.000Z | 2022-02-08T23:17:52.000Z | example.py | train255/deep-speaker | d63b111b904faab34fe58637b3d0e7fd188e1b36 | [
"MIT"
] | 1 | 2022-03-22T04:16:15.000Z | 2022-03-22T04:26:03.000Z | example.py | train255/deep-speaker | d63b111b904faab34fe58637b3d0e7fd188e1b36 | [
"MIT"
] | 1 | 2020-11-06T08:07:27.000Z | 2020-11-06T08:07:27.000Z | import numpy as np
import random
from audio import read_mfcc
from batcher import sample_from_mfcc
from constants import SAMPLE_RATE, NUM_FRAMES
from conv_models import DeepSpeakerModel
from test import batch_cosine_similarity
np.random.seed(123)
random.seed(123)
model = DeepSpeakerModel()
model.m.load_weights('/Users/premy/deep-speaker/checkpoints/ResCNN_triplet_training_checkpoint_175.h5', by_name=True)
mfcc_001 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_001.wav', SAMPLE_RATE), NUM_FRAMES)
mfcc_002 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_002.wav', SAMPLE_RATE), NUM_FRAMES)
predict_001 = model.m.predict(np.expand_dims(mfcc_001, axis=0))
predict_002 = model.m.predict(np.expand_dims(mfcc_002, axis=0))
mfcc_003 = sample_from_mfcc(read_mfcc('samples/1255-90413-0001.flac', SAMPLE_RATE), NUM_FRAMES)
predict_003 = model.m.predict(np.expand_dims(mfcc_003, axis=0))
print('SAME SPEAKER', batch_cosine_similarity(predict_001, predict_002))
print('DIFF SPEAKER', batch_cosine_similarity(predict_001, predict_003))
| 41 | 117 | 0.827392 |
59373033d6759f87ac888baaf5e7fad69fe7d8fc | 135,252 | py | Python | pychunkedgraph/tests/test.py | perlman/PyChunkedGraph | 2c582f46a8292010e8f9f54c94c63af0b172bdad | [
"MIT"
] | null | null | null | pychunkedgraph/tests/test.py | perlman/PyChunkedGraph | 2c582f46a8292010e8f9f54c94c63af0b172bdad | [
"MIT"
] | null | null | null | pychunkedgraph/tests/test.py | perlman/PyChunkedGraph | 2c582f46a8292010e8f9f54c94c63af0b172bdad | [
"MIT"
] | null | null | null | import sys
import os
import subprocess
import pytest
import numpy as np
from functools import partial
import collections
from grpc._channel import _Rendezvous
from google.cloud import bigtable
from google.auth import credentials
from math import inf
from datetime import datetime, timedelta
from time import sleep
from signal import SIGTERM
from warnings import warn
sys.path.insert(0, os.path.join(sys.path[0], '..'))
from pychunkedgraph.backend import chunkedgraph # noqa
from pychunkedgraph.backend.utils import serializers, column_keys # noqa
from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions # noqa
from pychunkedgraph.creator import graph_tests # noqa
def create_chunk(cgraph, vertices=None, edges=None, timestamp=None):
"""
Helper function to add vertices and edges to the chunkedgraph - no safety checks!
"""
if not vertices:
vertices = []
if not edges:
edges = []
vertices = np.unique(np.array(vertices, dtype=np.uint64))
edges = [(np.uint64(v1), np.uint64(v2), np.float32(aff)) for v1, v2, aff in edges]
isolated_node_ids = [x for x in vertices if (x not in [edges[i][0] for i in range(len(edges))]) and
(x not in [edges[i][1] for i in range(len(edges))])]
edge_ids = {"in_connected": np.array([], dtype=np.uint64).reshape(0, 2),
"in_disconnected": np.array([], dtype=np.uint64).reshape(0, 2),
"cross": np.array([], dtype=np.uint64).reshape(0, 2),
"between_connected": np.array([], dtype=np.uint64).reshape(0, 2),
"between_disconnected": np.array([], dtype=np.uint64).reshape(0, 2)}
edge_affs = {"in_connected": np.array([], dtype=np.float32),
"in_disconnected": np.array([], dtype=np.float32),
"between_connected": np.array([], dtype=np.float32),
"between_disconnected": np.array([], dtype=np.float32)}
for e in edges:
if cgraph.test_if_nodes_are_in_same_chunk(e[0:2]):
this_edge = np.array([e[0], e[1]], dtype=np.uint64).reshape(-1, 2)
edge_ids["in_connected"] = \
np.concatenate([edge_ids["in_connected"], this_edge])
edge_affs["in_connected"] = \
np.concatenate([edge_affs["in_connected"], [e[2]]])
if len(edge_ids["in_connected"]) > 0:
chunk_id = cgraph.get_chunk_id(edge_ids["in_connected"][0][0])
elif len(vertices) > 0:
chunk_id = cgraph.get_chunk_id(vertices[0])
else:
chunk_id = None
for e in edges:
if not cgraph.test_if_nodes_are_in_same_chunk(e[0:2]):
# Ensure proper order
if chunk_id is not None:
if cgraph.get_chunk_id(e[0]) != chunk_id:
e = [e[1], e[0], e[2]]
this_edge = np.array([e[0], e[1]], dtype=np.uint64).reshape(-1, 2)
if np.isinf(e[2]):
edge_ids["cross"] = \
np.concatenate([edge_ids["cross"], this_edge])
else:
edge_ids["between_connected"] = \
np.concatenate([edge_ids["between_connected"],
this_edge])
edge_affs["between_connected"] = \
np.concatenate([edge_affs["between_connected"], [e[2]]])
isolated_node_ids = np.array(isolated_node_ids, dtype=np.uint64)
cgraph.logger.debug(edge_ids)
cgraph.logger.debug(edge_affs)
# Use affinities as areas
cgraph.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_affs,
isolated_node_ids,
time_stamp=timestamp)
def to_label(cgraph, l, x, y, z, segment_id):
return cgraph.get_node_id(np.uint64(segment_id), layer=l, x=x, y=y, z=z)
class TestGraphNodeConversion:
class TestGraphBuild:
class TestGraphSimpleQueries:
"""
L X Y Z S L X Y Z S L X Y Z S L X Y Z S
A B C 1: 1 0 0 0 0 2 0 0 0 1 3 0 0 0 1 4 0 0 0 1
1 324 2: 1 1 0 0 0 2 1 0 0 1 3 0 0 0 2 4 0 0 0 2
3: 1 1 0 0 1
4: 1 2 0 0 0 2 2 0 0 1 3 1 0 0 1
"""
| 45.863683 | 162 | 0.555526 |
593862c08f86b1ec3350fd994c6a0a23e0d407ad | 202 | py | Python | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/saleor | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | [
"BSD-3-Clause"
] | 7 | 2019-05-17T14:27:13.000Z | 2021-12-17T22:52:40.000Z | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/saleor | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | [
"BSD-3-Clause"
] | 9 | 2019-04-13T09:24:28.000Z | 2019-09-09T15:35:05.000Z | remote_works/graphql/delivery/resolvers.py | tetyanaloskutova/remote-works | b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89 | [
"BSD-3-Clause"
] | null | null | null | import graphene_django_optimizer as gql_optimizer
from ...delivery import models
| 22.444444 | 49 | 0.787129 |
593db3c128dcad16c4059d93406558fd51b30469 | 5,617 | py | Python | wark.py | rcorre/wark | fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa | [
"MIT"
] | 1 | 2017-05-24T00:25:39.000Z | 2017-05-24T00:25:39.000Z | wark.py | rcorre/wark | fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa | [
"MIT"
] | null | null | null | wark.py | rcorre/wark | fe4fe4789cb63bb2738265c3a008dc3dadb8ddaa | [
"MIT"
] | null | null | null | import os
import json
import uuid
import shlex
import weechat
import requests
from ciscosparkapi import CiscoSparkAPI
from ws4py.client.threadedclient import WebSocketClient
SCRIPT_NAME = "spark"
FULL_NAME = "plugins.var.python.{}".format(SCRIPT_NAME)
SPARK_SOCKET_URL = 'https://wdm-a.wbx2.com/wdm/api/v1/devices'
api = None
listener = None
rooms = None
buffers = []
def unixtime(msg):
"""Get the unix timestamp from a spark message object"""
t = time.strptime(msg.created, '%Y-%m-%dT%H:%M:%S.%fZ')
return int(time.mktime(t))
# Cisco Spark has a websocket interface to listen for message events
# It isn't documented, I found it here:
# https://github.com/marchfederico/ciscospark-websocket-events
def buffer_input_cb(data, buf, input_data):
weechat.prnt(buf, input_data)
return weechat.WEECHAT_RC_OK
def buffer_close_cb(data, buf):
"""Called on closing a buffer."""
return weechat.WEECHAT_RC_OK
def room_list(buf):
"""Print a list of visible rooms."""
weechat.prnt(buf, '--Rooms--')
weechat.prnt(buf, '\n'.join(rooms.keys()))
weechat.prnt(buf, '---------')
def room_open(buf, name):
"""Open a new buffer connected to a spark room."""
room = rooms[name]
newbuf = weechat.buffer_new("spark." + room.title, "buffer_input_cb", "",
"buffer_close_cb", "")
buffers[room.id] = Buffer(buf, room, api)
def rehistory(_buf):
#messages = api.messages.list(roomId=room.id)
#for msg in sorted(messages, key=unixtime):
# text = msg.text.encode('ascii', 'replace') if msg.text else ''
# weechat.prnt_date_tags(newbuf, unixtime(msg), "", text)
pass
COMMANDS = {
'rooms': room_list,
'open': room_open,
}
weechat.register(SCRIPT_NAME, "rcorre", "0.1", "MIT", "Spark Client", "", "")
api = CiscoSparkAPI()
rooms = {room.title: room for room in api.rooms.list()}
listener = EventListener()
listener.connect()
weechat.hook_command(
# Command name and description
'spark', '',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(['history']) +
'\nUse /spark help [command] to find out more\n',
# Completions
'|'.join(COMMANDS.keys()),
# Function name
'spark_command_cb', '')
| 28.226131 | 79 | 0.590707 |
593e946792eaa8675d0ba0dfd7b0ef6bf054d411 | 1,869 | py | Python | src/app.py | jqueguiner/text-to-speech-as-a-service | b66b1593a6c669c77edadb38939de30e82e46425 | [
"MIT"
] | 3 | 2020-03-19T09:49:49.000Z | 2020-03-30T14:18:00.000Z | src/app.py | jqueguiner/text-to-speech-as-a-service | b66b1593a6c669c77edadb38939de30e82e46425 | [
"MIT"
] | 2 | 2021-09-28T01:12:37.000Z | 2022-02-26T06:54:04.000Z | src/app.py | jqueguiner/text-to-speech-as-a-service | b66b1593a6c669c77edadb38939de30e82e46425 | [
"MIT"
] | null | null | null | import os
import sys
import subprocess
import requests
import ssl
import random
import string
import json
from flask import jsonify
from flask import Flask
from flask import request
from flask import send_file
import traceback
from uuid import uuid4
from notebook_utils.synthesize import *
try: # Python 3.5+
from http import HTTPStatus
except ImportError:
try: # Python 3
from http import client as HTTPStatus
except ImportError: # Python 2
import httplib as HTTPStatus
app = Flask(__name__)
if __name__ == '__main__':
global output_directory
global voc_model
global tts_model
output_directory = '/src/output/'
create_directory(output_directory)
init_hparams('notebook_utils/pretrained_hparams.py')
tts_model = get_forward_model('pretrained/forward_100K.pyt')
voc_model = get_wavernn_model('pretrained/wave_800K.pyt')
port = 5000
host = '0.0.0.0'
app.run(host=host, port=port, threaded=True)
| 21.238636 | 82 | 0.687533 |
593ea1a84d21ae7ff3a90ce0dfc4e0f0d6b66ac7 | 4,728 | py | Python | Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py | leander-dsouza/MRM-Tenure | 3f372ffeeb12b04f4c5c636235db61725d47c3c6 | [
"MIT"
] | 2 | 2020-08-26T04:01:03.000Z | 2020-09-11T05:21:32.000Z | Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py | leander-dsouza/MRM-Tenure | 3f372ffeeb12b04f4c5c636235db61725d47c3c6 | [
"MIT"
] | null | null | null | Leander_Stephen_D'Souza/Joystick/Joystick_Motor_Code_using_PWM_library.py | leander-dsouza/MRM-Tenure | 3f372ffeeb12b04f4c5c636235db61725d47c3c6 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
import pygame
from pygame import locals
import pygame.display
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
speedA = 0.000
speedB = 0.000
x = 512.00
y = 512.00
# frequency=100Hz
t_on = 0.00
t_off = 0.00
ledpin1 =35 # left_fwd
ledpin2 =36 # right_fwd
ledpin3 =37 # left_bck
ledpin4 =38 # right_bck
GPIO.setup(ledpin1, GPIO.OUT)
GPIO.setup(ledpin2, GPIO.OUT)
GPIO.setup(ledpin3, GPIO.OUT)
GPIO.setup(ledpin4, GPIO.OUT)
GPIO.output(ledpin1, False)
GPIO.output(ledpin2, False)
GPIO.output(ledpin3, False)
GPIO.output(ledpin4, False)
p=GPIO.PWM(ledpin1,100)
q=GPIO.PWM(ledpin2,100)
r=GPIO.PWM(ledpin3,100)
s=GPIO.PWM(ledpin4,100)
p.start(0.00)
q.start(0.00)
r.start(0.00)
s.start(0.00)
pygame.init()
pygame.display.init()
pygame.joystick.init() # main joystick device system
try:
j = pygame.joystick.Joystick(0) # create a joystick instance
j.init() # init instance
print("Enabled joystick:")
except pygame.error:
print("no joystick found.")
while 1:
for e in pygame.event.get(): # iterate over event stack
if e.type == pygame.locals.JOYAXISMOTION:
x, y = j.get_axis(0), j.get_axis(1)
x = round(arduino_map(x, -1, 1, 1023, 0))
y = round(arduino_map(y, 1, -1, 0, 1023))
print("X=", x)
print("Y=", y)
# QUAD 1
if (x <= 512) & ((y >= 512) & (y <= 1023)):
if (x + y) >= 1023: # OCT1
oct1(x, y)
if (x + y) < 1023: # OCT2
oct2(x, y)
# QUAD 2
if (x <= 512) & (y <= 512):
if (x - y) <= 0: # OCT3
oct3(x, y)
if (x - y) > 0: # OCT4
oct4(x, y)
# QUAD 3
if ((x >= 512) & (x <= 1023)) & (y <= 512):
if (x + y) <= 1023: # OCT5
oct5(x, y)
if (x + y) > 1023: # OCT6
oct6(x, y)
# QUAD 4
if ((x >= 512) & (x <= 1023)) & ((y >= 512) & (y <= 1023)):
if (y - x) <= 0: # OCT7
oct7(x, y)
if (y - x) > 0: # OCT8
oct8(x, y)
| 27.172414 | 78 | 0.556684 |
593f2fd2545bc28f967b04b9e6d7e99629ac3a94 | 8,548 | py | Python | rest_helpers/type_serializers.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | 1 | 2019-07-03T16:29:05.000Z | 2019-07-03T16:29:05.000Z | rest_helpers/type_serializers.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | null | null | null | rest_helpers/type_serializers.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | null | null | null | """
This module contains functions that are geared toward serializing objects,
in particular JSON API objects.
"""
import decimal
from collections import Iterable
from rest_helpers.jsonapi_objects import Resource, Response, Link, JsonApiObject, Relationship
def to_jsonable(obj, no_empty_field=False, is_private=None):
"""
This is a low level function to transform any object into a json
serializable (jsonable) object based on its __dict__.
Arguments:
obj {any type} -- the object to be transformed.
Keyword Arguments:
no_empty_field {bool} -- if set to true, the empty field (empty
string or None) will be removed from the resulting jsonable object
(default: {False})
is_private -- callback/function can be passed through to define what
does or does not surface in json payload.
Returns:
dict -- A dictionary that can be used by json.dumps
"""
if is_private is None:
is_private = lambda k: True if str(k)[0] != '_' else False
if isinstance(obj, list):
return [to_jsonable(r, no_empty_field, is_private) for r in obj]
dic = obj if isinstance(obj, dict) else \
obj.__dict__ if hasattr(obj, "__dict__") else \
None
if dic is None:
if isinstance(obj, decimal.Decimal):
str_rep = str(obj)
return int(obj) if '.' not in str_rep else str_rep
return obj
return {str(k): to_jsonable(v, no_empty_field, is_private)for k, v in dic.items() if is_private(k) and (not no_empty_field or v is not None and v != "")}
def response_to_jsonable(response, generate_self_links=True, id_only=False,is_private=None):
"""
Transform a response object into a json serializable (jsonable) object that
matches the jsonapi requirements.
Arguments:
resource {Response} -- The response to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Response object.
"""
assert isinstance(response, Response)
# Data is a resource object (or a list of resource object,
# hence it needs some special serialization logic)
dic = response.__dict__.copy()
dic.pop("data")
return_value = to_jsonable(dic, no_empty_field=True,is_private=is_private)
if response.data is not None:
jsonable_data = resource_to_jsonable(response.data, generate_self_links,is_private=is_private)
if id_only:
jsonable_data = jsonable_data["id"] if not isinstance(jsonable_data, Iterable) else [x["id"] for x in jsonable_data]
return_value["data"] = jsonable_data
return return_value
def resource_to_jsonable(resource, generate_self_links=True,is_private=None):
"""
Transform a resource object or a resource object list into
a json serializable (jsonable) object that matches the jsonapi
requirements.
Arguments:
resource {Resource|list<Resource>} -- The resource or list of resources
to be serialized
Keyword Arguments:
generate_self_links {bool} -- If set to true "self" links will be added appropriately
where they do not exist. (default: {True})
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Resource object.
"""
if isinstance(resource, list):
return [resource_to_jsonable(x,is_private) for x in resource]
assert isinstance(resource, Resource)
json_resource = resource.to_primitive() if (hasattr(resource, "to_primitive") and callable(resource,to_primitive)) else to_jsonable(resource, is_private=is_private)
special = ["id", "type", "relationships", "links", "meta"]
for key in special:
json_resource.pop(key, None)
relationships = relationships_to_jsonable(
resource.relationships, "{0}?json_path=/{1}".format(resource.id, "relationships"),
generate_self_links)
resource_links = resource.links
if generate_self_links and "self" not in resource_links:
resource_links = resource.links.copy()
resource_links["self"] = Link(resource.id)
links = links_to_jsonable(resource_links)
return_value = {
"id" : resource.id,
"type" : resource.type,
"relationships" : relationships,
"links" : links,
"meta" : resource.meta,
"attributes" :json_resource
}
_remove_empty_fields(return_value)
return return_value
def link_to_jsonable(link):
"""
Transforms a json api link object into a dictionary that can be used by json.dumps.
Arguments:
link {Link} -- the link to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the Link object.
"""
assert isinstance(link, Link)
if link.meta is None:
return link.url
else:
return {
"href": link.url,
"meta": to_jsonable(link.meta)
}
def links_to_jsonable(links):
"""
Transform a json api Link object dictionary into a dictionaty that can be used
by json dumps.
Arguments:
links {dict<Link>} -- the dictionary of Link objects to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the dictionary of link
objects.
"""
if links is None:
return None
assert isinstance(links, dict)
return {k: link_to_jsonable(v) for k, v in links.items()}
def jsonapiobject_to_jsonable(jsonapiobject):
"""
Transforms a jsonapi json api objects into a dictionary that can be used by json dumps
Arguments:
jsonapiobject {JsonApiObject} -- The jsonapiobject to be serialized.
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the JsonApiObject object.
"""
assert isinstance(jsonapiobject, JsonApiObject)
return to_jsonable(jsonapiobject, no_empty_field=True)
def relationship_to_jsonable(relationship, self_link=None):
"""
Tranform a json api relationship object into a json serializable object that matches
the json api specification.
Arguments:
relationship {Relationship} -- a relationship object to be serialized.
Keyword Arguments:
self_link {string} -- link to the relationship to be serialized. If not None, a link
json api object will be created based on this value and added to the links of the
relationship object to be serialized (default: {None}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship object.
"""
assert isinstance(relationship, Relationship)
return_value = dict()
links = relationship.links.copy() if relationship.links is not None else dict()
if self_link is not None:
links["self"] = Link(self_link)
if any(links):
return_value["links"] = links_to_jsonable(links)
if relationship.data is not None:
return_value["data"] = {"type": relationship.data.type, "id": relationship.data.id}
return return_value
def relationships_to_jsonable(relationships, self_link_prefix=None, generate_self_link=False):
"""
Tranform a dictionary of json api relationship object nto a json
serializable object that matches the json api specification.
Arguments:
relationships {dict<Relationships>} -- a dict of
relationship objects to be serialized.
Keyword Arguments:
self_link_prefix {string} -- prefix to be used as the link prefix when generate_self_link
is set to true. (default: {None})
generate_self_link {bool} -- when set to true, a self link will be autogenerated when
serializing the relationship object (default: {False}).
Returns:
dict -- a dictionary that can be used by json.dumps to serialize the relationship
dictionary.
"""
if relationships is None:
return None
assert isinstance(relationships, dict)
if generate_self_link:
return {k: relationship_to_jsonable(v, "{0}/{1}".format(self_link_prefix, k))
for k, v in relationships.items()}
else:
return {k: relationship_to_jsonable(v) for k, v in relationships.items()}
#region private
#endregion
| 33.786561 | 168 | 0.681797 |
59424eac730e7540dbabc57af91b0ddacf577089 | 458 | py | Python | hackerearth/Algorithms/Fredo and Sums/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Fredo and Sums/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Fredo and Sums/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n = int(input())
a = sorted(map(int, input().strip().split()))
print(sum(a[1::2]) - sum(a[0::2]), sum(a[n // 2:]) - sum(a[:n // 2]))
| 28.625 | 94 | 0.576419 |
5942ff8661f94ed3c33e9cd05d6389cd70d923f4 | 1,753 | py | Python | Wizard Battle App/wizardbattle.py | rayjustinhuang/PythonApps | ba5572fbff38de71f806558c5d0be5827962aebb | [
"MIT"
] | null | null | null | Wizard Battle App/wizardbattle.py | rayjustinhuang/PythonApps | ba5572fbff38de71f806558c5d0be5827962aebb | [
"MIT"
] | null | null | null | Wizard Battle App/wizardbattle.py | rayjustinhuang/PythonApps | ba5572fbff38de71f806558c5d0be5827962aebb | [
"MIT"
] | null | null | null | import random
import time
from characters import Wizard, Creature
if __name__ == '__main__':
main()
| 25.405797 | 84 | 0.50599 |
5943869d3d4d2e30ae0802900ea733c4c32ec043 | 2,581 | py | Python | xmastreegame/ThreadedTree.py | martinohanlon/GPIOXmasTreeGame | 0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a | [
"MIT"
] | 2 | 2015-01-21T22:13:53.000Z | 2017-12-13T17:57:37.000Z | xmastreegame/ThreadedTree.py | martinohanlon/GPIOXmasTreeGame | 0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a | [
"MIT"
] | null | null | null | xmastreegame/ThreadedTree.py | martinohanlon/GPIOXmasTreeGame | 0d32ff7ca4fe3c2b536f5fa4490d09c1caf54b3a | [
"MIT"
] | null | null | null |
import threading
from time import sleep
import RPi.GPIO as GPIO
illumination_time_default = 0.001
#test
if __name__ == "__main__":
L0 = 1
L1 = 2
L2 = 4
L3 = 8
L4 = 16
L5 = 32
L6 = 64
ALL = 1+2+4+8+16+32+64
GPIO.setmode(GPIO.BCM)
try:
tree = XmasTree()
tree.start()
tree.leds_on(ALL)
while(True):
sleep(0.1)
finally:
tree.stop()
GPIO.cleanup()
| 25.058252 | 62 | 0.533514 |
59445fc42f57f15739274fff9371a3ae622d87a7 | 1,962 | py | Python | cap7/ex5.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap7/ex5.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap7/ex5.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | jogo = [[], [], []], [[], [], []], [[], [], []]
cont = 0
contx = conto = contxc = contoc = 0
while True:
l = int(input('Informe a linha: '))
c = int(input('Informe a coluna: '))
if l < 4 and c < 4:
if cont % 2 == 0:
jogo[l-1][c-1] = 'X'
else:
jogo[l-1][c-1] = 'O'
cont += 1
for x in range(0, 3):
for j in jogo[x]:
if j == 'X':
contx += 1
if j == 'O':
conto +=1
for k in range(0, 3):
if jogo[k][x] == 'X':
contxc += 1
if jogo[k][x] == 'O':
contoc += 1
print(jogo[x])
if jogo[0][0] == 'X' and jogo[1][1] == 'X' and jogo[2][2] == 'X':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabns, X venceu!')
break
if jogo[0][0] == 'O' and jogo[1][1] == 'O' and jogo[2][2] == 'O':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabns, X venceu!')
break
if jogo[0][2] == 'X' and jogo[1][1] == 'X' and jogo[2][0] == 'X':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabns, X venceu!')
break
if jogo[0][2] == 'O' and jogo[1][1] == 'O' and jogo[2][0] == 'O':
print(jogo[x + 1])
print(jogo[x + 2])
print(f'Parabns, X venceu!')
break
if contx == 3 or contxc == 3:
print(jogo[x+1])
print(f'Parabns, X venceu!')
break
if conto == 3 or contoc == 3:
print(jogo[x+1])
print(f'Parabns, O venceu!')
break
contx = conto = contxc = contoc = 0
else:
print('Posio j preenchida')
| 35.035714 | 77 | 0.35474 |
5944d36b482e6230d5854a8d2998c95179d5d03e | 23,625 | py | Python | lib/intercom_test/framework.py | rtweeks/intercom_test | a682088af93d280297764b639f4727ec4716673f | [
"Apache-2.0"
] | null | null | null | lib/intercom_test/framework.py | rtweeks/intercom_test | a682088af93d280297764b639f4727ec4716673f | [
"Apache-2.0"
] | null | null | null | lib/intercom_test/framework.py | rtweeks/intercom_test | a682088af93d280297764b639f4727ec4716673f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 PayTrace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import functools
from io import StringIO
import json
import logging
import os.path
import shutil
import yaml
from .cases import (
IdentificationListReader as CaseIdListReader,
hash_from_fields as _hash_from_fields,
)
from .exceptions import MultipleAugmentationEntriesError, NoAugmentationError
from .augmentation.compact_file import (
augment_dict_from,
case_keys as case_keys_in_compact_file,
TestCaseAugmenter as CompactFileAugmenter,
Updater as CompactAugmentationUpdater,
)
from .augmentation import update_file
from .utils import (
FilteredDictView as _FilteredDictView,
open_temp_copy,
)
from .yaml_tools import (
YAML_EXT,
content_events as _yaml_content_events,
get_load_all_fn as _get_yaml_load_all,
)
logger = logging.getLogger(__name__)
def update_compact_augmentation_on_success(self, fn):
"""Decorator for activating compact data file updates
Using this decorator around the test functions tidies up the logic
around whether to propagate test case augmentation data from update
files to compact files. The compact files will be updated if all
interface tests succeed and not if any of them fail.
The test runner function can be automatically wrapped with this
functionality through :meth:`case_runners`.
"""
CFUpdate = self._UpdateState
return wrapper
def update_compact_files(self, ):
"""Calls the :class:`CaseAugmenter` to apply compact data file updates
:raises NoAugmentationError:
when no case augmentation data was specified during construction
of this object
"""
if self._case_augmenter is None:
raise NoAugmentationError("No augmentation data specified")
return self._case_augmenter.update_compact_files()
def merge_test_extensions(self, ):
"""Merge the extension files of the target group into the group's main file"""
ext_files = sorted(self.extension_files())
with open(self.main_group_test_file, 'ab') as fixed_version_specs:
for ext_file in ext_files:
ext_file_ref = os.path.relpath(ext_file, os.path.join(self.spec_dir, self.group_name))
print("---\n# From {}\n".format(ext_file_ref).encode('utf8'), file=fixed_version_specs)
with open(ext_file, 'rb') as ext_specs:
shutil.copyfileobj(ext_specs, fixed_version_specs)
for ext_file in ext_files:
os.remove(ext_file)
def _augmented_case(self, x):
"""This method is defined to be overwritten on the instance level when augmented data is used"""
return x
def extension_files(spec_dir, group_name):
"""Iterator of file paths for extensions of a test case group
:param spec_dir: Directory in which specifications live
:param group_name: Name of the group to iterate
"""
yield from data_files(os.path.join(spec_dir, group_name))
def data_files(dir_path):
"""Generate data file paths from the given directory"""
try:
dir_listing = os.listdir(dir_path)
except FileNotFoundError:
return
for entry in dir_listing:
entry = os.path.join(dir_path, entry)
if not os.path.isfile(entry):
continue
if not entry.endswith(YAML_EXT):
continue
yield entry
| 40.247019 | 132 | 0.634201 |
5945f3b8e933ce01f957d7f582aa80cb9b902687 | 1,283 | py | Python | 2020/03/day3.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | null | null | null | 2020/03/day3.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | null | null | null | 2020/03/day3.py | AlbertVeli/AdventOfCode | 3d3473695318a0686fac720a1a21dd3629f09e33 | [
"Unlicense"
] | 1 | 2021-12-04T10:37:09.000Z | 2021-12-04T10:37:09.000Z | #!/usr/bin/env python3
# Day 3, with some speed optimizations
# Not really necessary for day 3, but probably later
import sys
import typing
import array
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
width = 0
heigth = 0
# Use 1-d array of bytes to keep pixels
a = read_input(sys.argv[1])
# for faster x,y lookup in a
ytab = array.array('I')
for y in range(heigth):
ytab.append(y * width)
# part 1
print(slope(3, 1))
# part 2
slopes = [
(1,1),
(3,1),
(5,1),
(7,1),
(1,2)
]
f = 1
for s in slopes:
f *= slope(s[0], s[1])
print(f)
| 18.070423 | 52 | 0.533125 |
59470f4e50387be73fea566efd45c232849a6813 | 226 | py | Python | Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py | Dittz/Learning_Python | 4c0c97075ef5e1717f82e2cf24b0587f0c8504f5 | [
"MIT"
] | null | null | null | Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py | Dittz/Learning_Python | 4c0c97075ef5e1717f82e2cf24b0587f0c8504f5 | [
"MIT"
] | null | null | null | Introduction to Computer Science and Programing Using Python/Exercises/Week 2 - Function, Strings and Alogorithms/Bisection Search.py | Dittz/Learning_Python | 4c0c97075ef5e1717f82e2cf24b0587f0c8504f5 | [
"MIT"
] | null | null | null | x = 23
epsilon = 0.001
guess = x/2
tries = 0
while abs(guess**2- x) >= epsilon:
if guess**2 > x:
guess /=2
else:
guess *=1.5
tries +=1
print(f'Number of tries: {tries}')
print(f'Guess = {guess}')
| 15.066667 | 34 | 0.535398 |
5949927fb2326eb76fbf268aa983df1f7b22c9a8 | 6,223 | py | Python | SB_Admin_2/templates/dashboard.py | Softyy/sb-admin-2-dash | c57d46fd7f1703696fdd96a7b834beb32ab8a4aa | [
"MIT"
] | null | null | null | SB_Admin_2/templates/dashboard.py | Softyy/sb-admin-2-dash | c57d46fd7f1703696fdd96a7b834beb32ab8a4aa | [
"MIT"
] | null | null | null | SB_Admin_2/templates/dashboard.py | Softyy/sb-admin-2-dash | c57d46fd7f1703696fdd96a7b834beb32ab8a4aa | [
"MIT"
] | null | null | null | import dash_core_components as dcc
import dash_html_components as html
from .layouts.info_card import render as info_card
from .layouts.graph_wrapper import render as graph_wrapper
from .layouts.project_bar import render as project_bar
from .layouts.color_card import render as color_card
from ..data_retrievers.dummy import create_bar_figure, create_line_figure
from ..consts import COLOR, DUMMY_PROJECTS
| 44.134752 | 248 | 0.415877 |
594b9e391b71aa4e58f65f8b436f15f1fdaebd0a | 2,440 | py | Python | tests/unit/test_refresh_utils.py | anukaal/cloud-sql-python-connector | e8799c7de46dbe11a91a9a29173a5cfd279a561d | [
"Apache-2.0"
] | null | null | null | tests/unit/test_refresh_utils.py | anukaal/cloud-sql-python-connector | e8799c7de46dbe11a91a9a29173a5cfd279a561d | [
"Apache-2.0"
] | null | null | null | tests/unit/test_refresh_utils.py | anukaal/cloud-sql-python-connector | e8799c7de46dbe11a91a9a29173a5cfd279a561d | [
"Apache-2.0"
] | null | null | null | """"
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
import aiohttp
import google.auth
import pytest # noqa F401 Needed to run the tests
from google.cloud.sql.connector.refresh_utils import _get_ephemeral, _get_metadata
from google.cloud.sql.connector.utils import generate_keys
| 30.123457 | 84 | 0.690164 |
594bfb1a451c5278cb6eb0568922591b031e3438 | 105 | py | Python | office365/sharepoint/search/query/popularTenantQuery.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 544 | 2016-08-04T17:10:16.000Z | 2022-03-31T07:17:20.000Z | office365/sharepoint/search/query/popularTenantQuery.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 438 | 2016-10-11T12:24:22.000Z | 2022-03-31T19:30:35.000Z | office365/sharepoint/search/query/popularTenantQuery.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | 202 | 2016-08-22T19:29:40.000Z | 2022-03-30T20:26:15.000Z | from office365.runtime.client_value import ClientValue
| 17.5 | 54 | 0.828571 |
594c04608e796ac9b1ce2395563d5fd38205ff2d | 1,320 | py | Python | blog/urls.py | MaryamKia/blog | 5274fda9fa67d20f48b0554bd9659f54221ae423 | [
"MIT"
] | null | null | null | blog/urls.py | MaryamKia/blog | 5274fda9fa67d20f48b0554bd9659f54221ae423 | [
"MIT"
] | 10 | 2020-02-12T00:42:03.000Z | 2022-01-13T01:20:37.000Z | blog/urls.py | PilaPont/blog | 61eb5cf30fe9937b4d0c85eb319854946df69a27 | [
"MIT"
] | null | null | null | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path
from django.contrib import admin
from django.conf.urls import include
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
    path('admin/', admin.site.urls),
    # HTML views for posts.
    path('posts/', include(('posts.urls', 'posts'), namespace='posts')),
    # JWT login endpoint.
    path('api/auth/login/', obtain_jwt_token, name='api-login'),
    # REST API endpoints. include() takes (module, app_namespace) plus an
    # instance namespace used when reversing URLs.
    path('api/posts/', include(('posts.api.urls', 'posts'), namespace='api-posts')),
    path('api/comments/', include(('comments.api.urls', 'comments'), namespace='comments-api')),
    # BUG FIX: the accounts include previously reused app namespace
    # 'comments' (copy-paste from the line above); it should be 'accounts'.
    path('api/accounts/', include(('accounts.api.urls', 'accounts'), namespace='accounts-api')),
    path('api-auth/', include('rest_framework.urls')),
]
| 42.580645 | 96 | 0.696212 |
594cc653ec79a656a999da000662af797c265edc | 669 | py | Python | source/try_init_models.py | tuanle618/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | [
"MIT"
] | 8 | 2019-03-25T14:53:55.000Z | 2022-01-09T11:08:30.000Z | source/try_init_models.py | ptl93/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | [
"MIT"
] | 10 | 2020-01-28T21:56:49.000Z | 2022-02-10T00:10:30.000Z | source/try_init_models.py | ptl93/deepArt-generation | bfa11d9f2a825ed53420f85adf3ffe23966b42be | [
"MIT"
] | 5 | 2019-03-18T13:46:26.000Z | 2022-02-20T15:05:56.000Z | # -*- coding: utf-8 -*-
"""
@title: try_init_models.py
@author: Tuan Le
@email: tuanle@hotmail.de
"""
from dcgan import DCGAN
from vae import VAE
if __name__ == "__main__":
    # Announce, then construct, each model so startup progress is visible.
    def _build(factory, label, announcement):
        print(announcement)
        return factory(name=label)

    dcgan_1 = _build(DCGAN, 'DCGAN_1', "Init DCGAN_1 model...")
    dcgan_2 = _build(DCGAN, 'DCGAN_2', "Init DCGAN_2 model...")
    dcgan_3 = _build(DCGAN, 'DCGAN_3', "Init DCGAN_3 model...")
    vae_1 = _build(VAE, 'VAE_1', 'Init VAE_1 model...')
    vae_2 = _build(VAE, 'VAE_2', 'Init VAE_2 model...')
    vae_3 = _build(VAE, 'VAE_3', 'Init VAE_3 model...')
    vae_4 = _build(VAE, 'VAE_4', 'Init VAE_4 model...')
3ca0c10499ba17cd0bb023edc1433da2fe3b0c6e | 1,144 | py | Python | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null | 03. Programacion orientada a objetos/12. sobrecarga relacional/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null |
# Main block: compare two Persona instances via the overloaded == operator.
first = Persona('juan', 22)
second = Persona('ana', 20)
message = ("Las dos personas tienen la misma edad."
           if first == second
           else "No tienen la misma edad.")
print(message)
3ca23892448af2cabbc53d9df0bfd9fc4244b346 | 1,416 | py | Python | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | [
"MIT"
] | 20 | 2016-05-16T11:09:04.000Z | 2021-12-08T09:30:33.000Z | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | [
"MIT"
] | 1 | 2018-12-30T09:55:31.000Z | 2018-12-30T14:08:30.000Z | crack-data-structures-and-algorithms/leetcode/sort_list_q148.py | Watch-Later/Eureka | 3065e76d5bf8b37d5de4f9ee75b2714a42dd4c35 | [
"MIT"
] | 11 | 2016-05-02T09:17:12.000Z | 2021-12-08T09:30:35.000Z | # Definition for singly-linked list.
def merge_sort_list(head):
    """Sort a singly linked list in O(n log n) using recursive merge sort.

    Returns the head of the sorted list; nodes are relinked in place.
    """
    # An empty or single-node list is already sorted.
    if not head or not head.next:
        return head

    # Advance `mid` one step for every two `runner` steps, so `mid` stops
    # on the last node of the left half (floor-style midpoint). Starting
    # the right half at mid.next guarantees both halves shrink — even for
    # a two-node list — so the recursion terminates.
    mid = head
    runner = head
    while runner.next and runner.next.next:
        mid = mid.next
        runner = runner.next.next

    right_head = mid.next
    mid.next = None  # detach the left half

    left_sorted = merge_sort_list(head)
    right_sorted = merge_sort_list(right_head)
    return merge_lists(left_sorted, right_sorted)
def merge_lists(l1, l2):
    """Merge two sorted linked lists into one sorted list; return its head."""
    # A sentinel head removes any special-casing of the first appended node.
    sentinel = ListNode(0)
    tail = sentinel
    while l1 and l2:
        if l1.val < l2.val:
            tail.next = l1
            l1 = l1.next
        else:
            tail.next = l2
            l2 = l2.next
        tail = tail.next
    # After the loop at most one list still has nodes; splice it on whole.
    tail.next = l1 if l1 else l2
    return sentinel.next
| 22.47619 | 89 | 0.57274 |
3ca2ace31bf9ede1d629dd5fbae03c55bc75f2bf | 71 | py | Python | labs/py3code.py | turing4ever/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | [
"MIT"
] | 57 | 2018-04-25T21:57:07.000Z | 2021-12-21T19:09:00.000Z | labs/py3code.py | radovankavicky/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | [
"MIT"
] | 4 | 2018-04-30T05:32:46.000Z | 2021-12-06T17:55:36.000Z | labs/py3code.py | radovankavicky/illustrated-python-3-course | d1faff57590713fcd1c6a9215529d6f9c629b046 | [
"MIT"
] | 26 | 2018-04-27T06:11:35.000Z | 2021-04-11T12:07:37.000Z |
# place super_test.py code here
# place keyword_test.py code here
| 8.875 | 33 | 0.732394 |
3ca2e7b053503c5f1274ef05c3605bdeeddc592f | 71,712 | py | Python | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | [
"MIT"
] | 13 | 2019-05-13T22:45:32.000Z | 2022-02-27T07:19:16.000Z | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | [
"MIT"
] | 2 | 2019-09-03T03:57:06.000Z | 2021-11-21T14:01:31.000Z | Source Codes/CDBC_Source_Code.py | CDBCTool/CDBC | 70e64241e4fb7687832e3771f316cb036f6fc3c7 | [
"MIT"
] | 3 | 2019-11-04T17:05:02.000Z | 2021-12-29T18:14:51.000Z | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys,os,time
from scipy.stats import gamma, norm, beta
import matplotlib.pyplot as plt
from datetime import date, timedelta
import numpy as np
import tkinter
from os import listdir
from os.path import isfile, join
# Application bootstrap: build the Qt application and the main window.
# BiasCorrection is the tool's main widget, defined elsewhere in this file.
app = QApplication(sys.argv)
widget = BiasCorrection()
# Window icon; the .ico file is loaded from the working directory.
app_icon = QIcon()
app_icon.addFile('Interpolation-2.ico', QSize(40,40))
app.setWindowIcon(app_icon)
# Splash screen shown while the UI finishes initialising.
pixmap = QPixmap("Splash_CDBC.png")
splash = QSplashScreen(pixmap)
splash.show()
# Centre the main window on the primary screen.
screen_resolution = app.desktop().screenGeometry()
width, height = screen_resolution.width(), screen_resolution.height()
widget.move(width/2-widget.width()/2,height/2-widget.height()/2)
# Keep the splash visible briefly before handing over to the main window.
time.sleep(2)
##    widget.setFixedSize(750,354)
##widget.setFixedWidth(500)
##widget.setFixedHeight(400)
widget.show()
splash.finish(widget)
# Enter the Qt event loop (PyQt4 spelling: exec_ blocks until the app quits).
app.exec_()
| 45.76388 | 228 | 0.526146 |