hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a57fff444e34ab3085f258b8aa57323a8f86efde | 1,683 | py | Python | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | [
"MIT"
] | null | null | null | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | [
"MIT"
] | null | null | null | Exercicios/Exercicio070.py | RicardoMart922/estudo_Python | cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | [
"MIT"
] | null | null | null | # Crie um programa que leia a idade e o sexo de vrias pessoas. A cada pessoa cadastrada, o programa dever perguntar se o usurio quer ou no continuar. No final, mostre:
# A) Quantas pessoas tem mais de 18 anos.
# B) Quantos homens foram cadastrados.
# C) Quantas mulheres tem menos de 20 anos.
# Exercise 070: register the age and sex of several people; after each
# person ask whether to continue. At the end report:
#   A) how many people are over 18 years old;
#   B) how many men were registered;
#   C) how many women are under 20 years old.
maisdezoito = 0   # people older than 18 (name kept: referenced in f-strings)
qtdmulheres = 0   # women younger than 20
qtdhomens = 0     # registered men
print('-= Informe a idade e o sexo para o cadastro =-')
continuar = True
while continuar:
    idade = int(input('Idade: '))
    if idade > 18:
        maisdezoito += 1
    # Re-ask until a valid sex ('M' or 'F') is typed.
    while True:
        sexo = input('Sexo [M/F]: ').upper()
        if sexo in ('M', 'F'):
            break
    if sexo == 'M':
        qtdhomens += 1
    elif idade < 20:  # here sexo is necessarily 'F'
        qtdmulheres += 1
    # Re-ask until a valid answer ('S' or 'N') is typed.
    while True:
        opcao = input('Quer continuar [S/N]: ').upper()
        if opcao in ('S', 'N'):
            break
    continuar = opcao == 'S'
# Report A) people over 18.
if maisdezoito == 0:
    print('Nenhuma pessoa com mais de 18 anos foi cadastrada.')
elif maisdezoito == 1:
    print('Foi cadastrado uma pessoa com mais de 18 anos.')
else:
    print(f'Foi cadastrado {maisdezoito} pessoas com mais de 18 anos.')
# Report B) registered men.
if qtdhomens == 0:
    print('Nenhum homem foi cadastrado.')
elif qtdhomens == 1:
    print('Apenas um homem foi cadastrado.')
else:
    print(f'A quantidade de homens cadastrados foi {qtdhomens}.')
# Report C) women under 20.
if qtdmulheres == 0:
    print('Nenhuma mulher com menos de 20 anos foi cadastrada.')
elif qtdmulheres == 1:
    print('Apenas uma mulher com menos de 20 anos foi cadastrada.')
else:
    print(f'A quantidade de mulheres com menos de 20 anos que foram cadastradas foi {qtdmulheres}.')
| 35.0625 | 172 | 0.62448 |
a583106bd0bb53ab734f77ad352678e3fedf5e53 | 3,050 | py | Python | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | [
"MIT"
] | null | null | null | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | [
"MIT"
] | 2 | 2020-06-30T20:05:59.000Z | 2020-08-01T03:42:20.000Z | tests/test_entry.py | anaulin/tasks.py | aa05b4194ff6b01061e6842520752da515e625d6 | [
"MIT"
] | null | null | null | import filecmp
import shutil
import tempfile
import os
from .context import entry
TEST_ENTRY = os.path.join(os.path.dirname(__file__), "test_entry.md")
TEST_ENTRY_CONTENT = """
Some content.
## A section in the content
Content that looks like frontmatter:
```
+++
but this is
not really frontmatter
+++
```
More content.
"""
| 30.19802 | 92 | 0.635082 |
a583ce21b151702ce7c45ced989d01eb53545764 | 1,833 | py | Python | plotapp/controllers/window_controller.py | maldata/matplotlib_qtquick_playground | f7da94093315d8f540124d5037406d004574dede | [
"MIT"
] | null | null | null | plotapp/controllers/window_controller.py | maldata/matplotlib_qtquick_playground | f7da94093315d8f540124d5037406d004574dede | [
"MIT"
] | null | null | null | plotapp/controllers/window_controller.py | maldata/matplotlib_qtquick_playground | f7da94093315d8f540124d5037406d004574dede | [
"MIT"
] | null | null | null | import random
from PyQt5.QtCore import pyqtSignal, pyqtProperty, pyqtSlot, QObject
| 25.816901 | 99 | 0.569013 |
a5852febf93eb6f982e8fd189b72f16bda399d56 | 337 | py | Python | training/train.py | gert-janwille/Eleonora | a979dcd9b41231ea3abc9a57d842c680314ac9ca | [
"MIT"
] | 1 | 2017-11-19T10:57:38.000Z | 2017-11-19T10:57:38.000Z | training/train.py | gert-janwille/Eleonora | a979dcd9b41231ea3abc9a57d842c680314ac9ca | [
"MIT"
] | 6 | 2017-11-15T16:04:09.000Z | 2018-01-18T17:12:18.000Z | training/train.py | gert-janwille/Eleonora | a979dcd9b41231ea3abc9a57d842c680314ac9ca | [
"MIT"
] | null | null | null | from training.emotional_training import emotional_training
from training.facial_training import facial_training
| 22.466667 | 58 | 0.682493 |
a585ab12f199b6ce2a2bd25bb26ea5865e4f682d | 9,190 | py | Python | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 4 | 2020-09-24T12:55:58.000Z | 2021-05-19T14:46:10.000Z | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 4 | 2021-06-02T09:28:35.000Z | 2021-06-04T08:32:24.000Z | nnaps/mesa/compress_mesa.py | vosjo/nnaps | bc4aac715b511c5df897ef24fb953ad7265927ea | [
"MIT"
] | 3 | 2020-10-05T13:18:27.000Z | 2021-06-02T09:29:11.000Z | import os
from pathlib import Path
import numpy as np
# repack_fields is necessary since np 1.16 as selecting columns from a recarray returns an array with padding
# that is difficult to work with afterwards.
from numpy.lib import recfunctions as rf
from nnaps.mesa import fileio
from nnaps import __version__
def read_mesa_header(model):
    """
    Process the MESA history file header.

    Every value is kept as a string for now; this is acceptable because the
    header is ignored by the rest of nnaps.

    todo: implement converting of header values to the correct data types.

    :param model: list of lists (raw header tokens, possibly quoted)
    :return: transposed numpy array of strings with the header info.
    """
    # Strip the double quotes MESA puts around string-valued header entries.
    cleaned = [[token.replace('"', '') for token in row] for row in model]
    return np.array(cleaned, str).T
def read_mesa_output(filename=None, only_first=False):
    """
    Read star.log and .data files from MESA.

    This returns a record array with the global and local parameters (the
    latter can also be a summary of the evolutionary track instead of a
    profile if you've given a 'star.log' file).
    The stellar profiles are given from surface to center.
    Function written by Pieter DeGroote.

    :param filename: name of the log file
    :type filename: str
    :param only_first: read only the first model (or global parameters)
    :type only_first: bool
    :return: list of models in the data file (typically global parameters, local parameters)
    :rtype: list of rec arrays
    """
    models = []          # each entry: list of token lists, later converted to a recarray
    new_model = False    # True right after a column-number marker line was seen
    header = None        # column names of the block currently being read
    # -- open the file and read the data
    with open(filename, 'r') as ff:
        # -- skip first 5 lines when difference file
        if os.path.splitext(filename)[1] == '.diff':
            for i in range(5):
                line = ff.readline()
            models.append([])
            new_model = True
        while 1:
            line = ff.readline()
            if not line:
                break  # break at end-of-file
            line = line.strip().split()
            if not line:
                continue
            # -- begin a new model
            # A marker line consists of consecutive column numbers "1 2 3 ...",
            # so every token equals its (1-based) position.
            if all([iline == str(irange) for iline, irange in zip(line, range(1, len(line) + 1))]):
                # -- wrap up previous model
                if len(models):
                    try:
                        # Numeric block: convert to a float array (columns after .T).
                        model = np.array(models[-1], float).T
                    except:
                        # Non-numeric block: fall back to string header parsing.
                        model = read_mesa_header(models[-1])
                    models[-1] = np.rec.fromarrays(model, names=header)
                    if only_first: break
                models.append([])
                new_model = True
                continue
            # -- next line is the header of the data, remember it
            if new_model:
                header = line
                new_model = False
                continue
            models[-1].append(line)
    # Wrap up the last (still unconverted) block.
    # NOTE(review): the `> 1` comparison means a file containing a single
    # block is returned unconverted (list of token lists) — confirm intended.
    if len(models) > 1:
        try:
            model = np.array(models[-1], float).T
        except:
            # Ragged rows (e.g. a truncated final line) break the float
            # conversion: drop every row whose length differs from the first
            # row's, then retry.
            indices = []
            for i, l in enumerate(models[-1]):
                if len(l) != len(models[-1][0]):
                    indices.append(i)
            for i in reversed(indices):
                del models[-1][i]
            print("Found and fixed errors on following lines: ", indices)
            model = np.array(models[-1], float).T
        models[-1] = np.rec.fromarrays(model, names=header)
    return models
| 38.291667 | 120 | 0.586507 |
a5880384a51a2b5216de1db68e0632fb623a8bfc | 1,022 | py | Python | src/_deblaze.py | MenkeTechnologies/zsh-more-completions | c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | [
"MIT"
] | 25 | 2018-07-29T01:49:23.000Z | 2022-01-19T19:21:23.000Z | src/_deblaze.py | MenkeTechnologies/zsh-more-completions | c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | [
"MIT"
] | null | null | null | src/_deblaze.py | MenkeTechnologies/zsh-more-completions | c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | [
"MIT"
] | null | null | null | #compdef deblaze.py
local arguments
arguments=(
'--version[show programs version number and exit]'
'(- * :)'{-h,--help}'[show this help message and exit]'
{-u,--url}'[URL for AMF Gateway]'
{-s,--service}'[remote service to call]'
{-m,--method}'[method to call]'
{-p,--params}'[parameters to send pipe seperated]'
{-f,--fullauto}'[URL to SWF - Download SWF, find remoting services]'
'--fuzz[fuzz parameter values]'
{-c,--creds}'[username and password for service in u:p format]'
{-b,--cookie}'[send cookies with request]'
{-A,--user-agent}'[user-Agent string to send to the server]'
{-1,--bruteService}'[file to load services for brute forcing (mutually]'
{-2,--bruteMethod}'[file to load methods for brute forcing (mutually]'
{-d,--debug}'[enable pyamf/AMF debugging]'
{-v,--verbose}'[print http request/response]'
{-r,--report}'[generate HTML report]'
{-n,--nobanner}'[do not display banner]'
{-q,--quiet}'[do not display messages]'
'*:filename:_files'
)
_arguments -s $arguments
| 36.5 | 74 | 0.662427 |
a58a9d34b89b4bc4bc0e0b2929228a0dbbb74a83 | 1,379 | py | Python | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | [
"MIT"
] | null | null | null | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | [
"MIT"
] | 3 | 2020-09-25T18:40:52.000Z | 2021-08-25T14:44:30.000Z | jakso_ml/training_data/white_balancer.py | JaksoSoftware/jakso-ml | 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | [
"MIT"
] | null | null | null | import random, copy
import cv2 as cv
import numpy as np
from scipy import interpolate
from .augmenter import Augmenter
| 25.072727 | 81 | 0.658448 |
a58ab462ad7e52132f563d3dc36462f69902b7de | 824 | py | Python | app/set_game/deck.py | mmurch/set-game | 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | [
"MIT"
] | null | null | null | app/set_game/deck.py | mmurch/set-game | 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | [
"MIT"
] | 5 | 2021-03-10T04:32:22.000Z | 2022-02-26T22:25:52.000Z | app/set_game/deck.py | mmurch/set-game | 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | [
"MIT"
] | null | null | null | from .card import Card
from .features import Number, Color, Shape, Style
from math import floor
| 20.6 | 49 | 0.54733 |
a58be826db80a8cc6c893e8f64d3265192b6d0a2 | 27,777 | py | Python | tests/test_utils.py | grantsrb/langpractice | 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | [
"MIT"
] | null | null | null | tests/test_utils.py | grantsrb/langpractice | 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | [
"MIT"
] | null | null | null | tests/test_utils.py | grantsrb/langpractice | 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | [
"MIT"
] | null | null | null | from langpractice.utils.utils import *
import unittest
import torch.nn.functional as F
if __name__=="__main__":
unittest.main()
| 31.89093 | 86 | 0.464161 |
a58e0065829efa585d05c036b442a368f95ae6a9 | 1,626 | py | Python | src/entities/git_repo.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | null | null | null | src/entities/git_repo.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | 6 | 2021-04-08T20:46:56.000Z | 2022-01-13T01:52:06.000Z | src/entities/git_repo.py | wnjustdoit/devops-py | 54dd722a577c4b3ecda45aa85c067130fd292ab9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from .entity import Entity, EntitySchema, Base
from sqlalchemy import Column, Integer, String, Sequence
from marshmallow import Schema, fields, post_load
| 37.813953 | 134 | 0.710947 |
a590274916afd797594033b1e72a778f82d65211 | 4,415 | py | Python | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | [
"MIT"
] | 24 | 2021-09-04T08:51:55.000Z | 2022-03-30T16:45:54.000Z | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | [
"MIT"
] | 3 | 2021-10-12T02:34:34.000Z | 2022-03-18T10:37:35.000Z | src/algorithms/tcn_utils/tcn_model.py | pengkangzaia/mvts-ano-eval | 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | [
"MIT"
] | 15 | 2021-09-18T03:41:02.000Z | 2022-03-21T09:03:01.000Z | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
"""TCN adapted from https://github.com/locuslab/TCN"""
| 39.070796 | 110 | 0.59479 |
a591a1103146cfd95f29ba55d7e7f556a915a79a | 1,868 | py | Python | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | [
"MIT"
] | null | null | null | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | [
"MIT"
] | null | null | null | static/file/2021-04-10/index.py | yuguo97/nest-node | a3d6cb99005403691779c44a488e3b22f5479538 | [
"MIT"
] | null | null | null | '''
Author: your name
Date: 2021-04-08 17:14:41
LastEditTime: 2021-04-09 09:13:28
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \github\test\index.py
'''
#!user/bin/env python3
# -*- coding: utf-8 -*-
import psutil
cpu_info = {'user': 0, 'system': 0, 'idle': 0, 'percent': 0}
memory_info = {'total': 0, 'available': 0,
'percent': 0, 'used': 0, 'free': 0}
disk_id = []
disk_total = []
disk_used = []
disk_free = []
disk_percent = []
# get cpu information
# get memory information
if __name__ == '__main__':
get_cpu_info()
cpu_status = cpu_info['percent']
print('cpu usage is:%s%%' % cpu_status)
get_memory_info()
mem_status = memory_info['percent']
print('memory usage is:%s%%' % mem_status)
get_disk_info()
for i in range(len(disk_id)):
print('%sdisk usage is:%s%%' % (disk_id[i], 100 - disk_percent[i]))
| 26.685714 | 75 | 0.646681 |
a5924218bd91ec5cd3a910146334e0e5acd39d37 | 1,592 | py | Python | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | SS/p202.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | """
n
1 1
1
n true false
LeetCode
https://leetcode-cn.com/problems/happy-number
"""
from typing import List
# hash
# for test
if __name__ == "__main__":
ins = Solution()
n = 19
print(ins.isHappy(n))
| 21.808219 | 53 | 0.523241 |
a5964514746ca9cd43f5272151dd592b02ad5040 | 2,309 | py | Python | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | [
"MIT"
] | 12 | 2020-04-11T13:10:14.000Z | 2022-03-24T09:12:54.000Z | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | [
"MIT"
] | 14 | 2020-01-24T14:07:45.000Z | 2020-12-20T19:14:04.000Z | UI/UIObject.py | R2D2Hud/CharlieOSX | 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | [
"MIT"
] | 11 | 2020-06-19T20:12:43.000Z | 2021-04-25T05:02:20.000Z | from profileHelper import ProfileHelper
from pybricks.parameters import Button, Color
from pybricks.media.ev3dev import Image, ImageFile, Font, SoundFile
# from UI.tools import Box
| 37.241935 | 159 | 0.603725 |
a59648f6d46920ef327bbe7ce9659f9fe533785d | 9,558 | py | Python | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | [
"MIT"
] | 77 | 2021-04-03T06:44:19.000Z | 2021-07-07T07:05:01.000Z | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | [
"MIT"
] | 1 | 2021-04-08T06:59:41.000Z | 2021-04-08T11:20:32.000Z | factory.py | rosinality/vision-transformers-pytorch | b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | [
"MIT"
] | 6 | 2021-04-15T13:36:37.000Z | 2022-02-03T12:32:20.000Z | import os
from types import SimpleNamespace
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
from tensorfn import distributed as dist, nsml, get_logger
try:
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn, types, pipeline_def
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
except ImportError:
pass
from autoaugment import RandAugment
from dataset import LMDBDataset
from mix_dataset import MixDataset
from transforms import RandomErasing
# @pipeline_def
def dali_pipeline(source, image_size, training, cpu=False):
    """Build the DALI op graph: external source -> decode -> (train-only) augment.

    NOTE(review): the ``@pipeline_def`` decorator above is commented out, so
    this function only defines ops when invoked inside a DALI pipeline
    context — confirm how it is wired up by the caller.

    :param source: external-source callable/iterable yielding (images, labels)
    :param image_size: target size for the random-resized crop (training only)
    :param training: if True, apply random-resized crop + random horizontal flip
    :param cpu: decode on the CPU instead of the GPU ("mixed") decoder
    """
    images, labels = fn.external_source(source=source, num_outputs=2)

    if cpu:
        device = "cpu"
        images = fn.decoders.image(images, device=device)

    else:
        device = "gpu"
        # "mixed" decodes on the GPU; the paddings pre-allocate decoder
        # buffers to avoid reallocations for large images.
        images = fn.decoders.image(
            images,
            device="mixed",
            device_memory_padding=211025920,
            host_memory_padding=140544512,
        )

    if training:
        images = fn.random_resized_crop(
            images,
            device=device,
            size=image_size,
            interp_type=types.DALIInterpType.INTERP_CUBIC,
        )

        # Random horizontal flip with probability 0.5.
        coin = fn.random.coin_flip(0.5)
        images = fn.flip(images, horizontal=coin)

    else:
        # NOTE(review): no resize is applied on the eval path here —
        # confirm that validation images are resized elsewhere.
        pass

    return images, labels
| 29.319018 | 89 | 0.643022 |
a5965f266f95ad0e2605b8928b40d8635af8fdc1 | 2,990 | py | Python | scripts/binarize-phrase-table.py | grgau/GroundHog | 35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a | [
"BSD-3-Clause"
] | null | null | null | scripts/binarize-phrase-table.py | grgau/GroundHog | 35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a | [
"BSD-3-Clause"
] | null | null | null | scripts/binarize-phrase-table.py | grgau/GroundHog | 35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Converts moses phrase table file to HDF5 files
# Written by Bart van Merrienboer (University of Montreal)
import argparse
import cPickle
import gzip
import sys
import tables
import numpy
parser = argparse.ArgumentParser()
parser.add_argument("input",
type=argparse.FileType('r'),
help="The phrase table to be processed")
parser.add_argument("source_output",
type=argparse.FileType('w'),
help="The source output file")
parser.add_argument("target_output",
type=argparse.FileType('w'),
help="The target output file")
parser.add_argument("source_dictionary",
type=argparse.FileType('r'),
help="A pickled dictionary with words and IDs as keys and "
"values respectively")
parser.add_argument("target_dictionary",
type=argparse.FileType('r'),
help="A pickled dictionary with words and IDs as keys and "
"values respectively")
parser.add_argument("--labels",
type=int, default=15000,
help="Set the maximum word index")
args = parser.parse_args()
files = [args.source_output, args.target_output]
vlarrays = []
indices = []
for i, f in enumerate(files):
files[i] = tables.open_file(f.name, f.mode)
vlarrays.append(files[i].createEArray(files[i].root, 'phrases',
tables.Int32Atom(),shape=(0,)))
indices.append(files[i].createTable("/", 'indices', Index, "a table of indices and lengths"))
sfile = gzip.open(args.input.name, args.input.mode)
source_table = cPickle.load(args.source_dictionary)
target_table = cPickle.load(args.target_dictionary)
tables = [source_table, target_table]
count = 0
counts = numpy.zeros(2).astype('int32')
freqs_sum = 0
for line in sfile:
fields = line.strip().split('|||')
for field_index in [0, 1]:
words = fields[field_index].strip().split(' ')
word_indices = [tables[field_index].get(word, 1) for word in words]
if args.labels > 0:
word_indices = [word_index if word_index < args.labels else 1
for word_index in word_indices]
vlarrays[field_index].append(numpy.array(word_indices))
pos = counts[field_index]
length = len(word_indices)
ind = indices[field_index].row
ind['pos'] = pos
ind['length'] = length
ind.append()
counts[field_index] += len(word_indices)
count += 1
if count % 100000 == 0:
print count,
[i.flush() for i in indices]
sys.stdout.flush()
elif count % 10000 == 0:
print '.',
sys.stdout.flush()
for f in indices:
f.flush()
for f in files:
f.close()
sfile.close()
print 'processed', count, 'phrase pairs'
| 30.510204 | 97 | 0.614716 |
a596a50f47d0ab9d4cfb1eb2e63d7c4e56340474 | 1,137 | py | Python | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Easy/1207.UniqueNumberofOccurrences.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z | """
Given an array of integers arr, write a function that returns true if and
only if the number of occurrences of each value in the array is unique.
Example:
Input: arr = [1,2,2,1,1,3]
Output: true
Explanation: The value 1 has 3 occurrences, 2 has 2 and 3 has 1. No two
values have the same number of occurrences.
Example:
Input: arr = [1,2]
Output: false
Example:
Input: arr = [-3,0,1,-3,1,1,1,-3,10,0]
Output: true
Constraints:
- 1 <= arr.length <= 1000
- -1000 <= arr[i] <= 1000
"""
#Difficulty: Easy
#63 / 63 test cases passed.
#Runtime: 48 ms
#Memory Usage: 13.8 MB
#Runtime: 48 ms, faster than 39.33% of Python3 online submissions for Unique Number of Occurrences.
#Memory Usage: 13.8 MB, less than 92.46% of Python3 online submissions for Unique Number of Occurrences.
| 29.153846 | 104 | 0.60774 |
a598b26fe309d9bc4db6c62f8d0ba413c791f7b0 | 9,360 | py | Python | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | [
"MIT"
] | 3 | 2018-10-25T16:03:53.000Z | 2019-06-13T15:24:41.000Z | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | [
"MIT"
] | null | null | null | Playground3/src/playground/network/devices/pnms/PNMSDevice.py | kandarpck/networksecurity2018 | dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626 | [
"MIT"
] | null | null | null | from playground.common.os import isPidAlive
from playground.common import CustomConstant as Constant
from .NetworkManager import NetworkManager, ConnectionDeviceAPI, RoutesDeviceAPI
import os, signal, time
| 39.327731 | 119 | 0.583761 |
a5991177aa084d283fe154f4a7a56db6da664557 | 162 | py | Python | testing/tests/constants_enums/constants_enums.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | testing/tests/constants_enums/constants_enums.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | testing/tests/constants_enums/constants_enums.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | import enum
"""Declare all enumerations used in test."""
| 16.2 | 44 | 0.685185 |
a59a37e3de5885e67c006743f177528505c3b6da | 3,315 | py | Python | core/eval.py | lmkoch/subgroup-shift-detection | 31971704dc4a768db5e082e6e37a504f4e245224 | [
"MIT"
] | null | null | null | core/eval.py | lmkoch/subgroup-shift-detection | 31971704dc4a768db5e082e6e37a504f4e245224 | [
"MIT"
] | null | null | null | core/eval.py | lmkoch/subgroup-shift-detection | 31971704dc4a768db5e082e6e37a504f4e245224 | [
"MIT"
] | 1 | 2022-01-26T09:54:41.000Z | 2022-01-26T09:54:41.000Z | import os
import pandas as pd
import numpy as np
from core.dataset import dataset_fn
from core.model import model_fn, get_classification_model
from core.mmdd import trainer_object_fn
from core.muks import muks
def eval(exp_dir, exp_name, params, seed, split, sample_sizes=(10, 30, 50, 100, 500),
         num_reps=100, num_permutations=1000):
    """Analysis of test power vs sample size for both MMD-D and MUKS.

    Results (power / type-I error plus standard errors for both methods) are
    accumulated row by row and written to
    ``<exp_dir>/<exp_name>/<split>_consistency_analysis.csv``.

    Note: the function name shadows the builtin ``eval``; it is kept so
    existing callers keep working.

    Args:
        exp_dir: exp base directory
        exp_name: experiment name (hashed config)
        params (Dict): configuration with 'dataset', 'model' and 'trainer' sections
        seed (int): random seed
        split (str): fold to evaluate, e.g. 'validation' or 'test'
        sample_sizes (optional): batch sizes to sweep.
            Defaults to (10, 30, 50, 100, 500); a tuple is used instead of a
            list to avoid the shared-mutable-default-argument pitfall.
        num_reps (int, optional): for calculating rejection rates. Defaults to 100.
        num_permutations (int, optional): for the MMD-D permutation test.
            Defaults to 1000.
    """
    log_dir = os.path.join(exp_dir, exp_name)
    out_csv = os.path.join(log_dir, f'{split}_consistency_analysis.csv')
    df = pd.DataFrame(columns=['sample_size', 'power', 'power_stderr',
                               'type_1err', 'type_1err_stderr', 'method'])
    for batch_size in sample_sizes:
        # Rebuild the dataloaders so each p/q batch has the requested size.
        params['dataset']['dl']['batch_size'] = batch_size
        dataloader = dataset_fn(seed=seed, params_dict=params['dataset'])
        # MMD-D: deep-kernel two-sample test; measure rejection rate (power)
        # and type-I error over num_reps batches.
        model = model_fn(seed=seed, params=params['model'])
        trainer = trainer_object_fn(model=model, dataloaders=dataloader, seed=seed,
                                    log_dir=log_dir, **params['trainer'])
        res = trainer.performance_measures(dataloader[split]['p'], dataloader[split]['q'],
                                           num_batches=num_reps,
                                           num_permutations=num_permutations)
        res_mmd = {'exp_hash': exp_name,
                   'sample_size': batch_size,
                   'power': res['reject_rate'],
                   'power_stderr': stderr_proportion(res['reject_rate'], batch_size),
                   'type_1err': res['type_1_err'],
                   'type_1err_stderr': stderr_proportion(res['type_1_err'], batch_size),
                   'method': 'mmd'}
        # MUKS: univariate Kolmogorov-Smirnov tests on classifier outputs.
        model = get_classification_model(params['model'])
        reject_rate, type_1_err = muks(dataloader[split]['p'], dataloader[split]['q'],
                                       num_reps, model)
        res_rabanser = {'exp_hash': exp_name,
                        'sample_size': batch_size,
                        'power': reject_rate,
                        'power_stderr': stderr_proportion(reject_rate, batch_size),
                        'type_1err': type_1_err,
                        'type_1err_stderr': stderr_proportion(type_1_err, batch_size),
                        'method': 'rabanser'}
        print('---------------------------------')
        print(f'sample size: {batch_size}')
        print(f'mmd: {res_mmd}')
        print(f'rabanser: {res_rabanser}')
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported, behaviorally equivalent replacement.
        df = pd.concat([df, pd.DataFrame(res_mmd, index=[''])], ignore_index=True)
        df = pd.concat([df, pd.DataFrame(res_rabanser, index=[''])], ignore_index=True)
    df.to_csv(out_csv)
a59a527b87a6e3d50b3ac6e6acea7185a59af36b | 1,423 | py | Python | handlers/product_handlers.py | group-project-carbon-accounting/server | 93155868a0988c04fe79d30ef565c652d2c8f5de | [
"MIT"
] | null | null | null | handlers/product_handlers.py | group-project-carbon-accounting/server | 93155868a0988c04fe79d30ef565c652d2c8f5de | [
"MIT"
] | null | null | null | handlers/product_handlers.py | group-project-carbon-accounting/server | 93155868a0988c04fe79d30ef565c652d2c8f5de | [
"MIT"
] | null | null | null | import tornado.web
import json
from handlers.async_fetch import async_fetch, GET, POST
| 43.121212 | 102 | 0.645819 |
a59ac366b9f4a35b896bc07199abf2aebd42714c | 3,144 | py | Python | Python/lab8 [2, 5, 7, 12, 17]/tz17.py | da-foxbite/KSU121 | 133637abb4f465aeecb845e6735ba383a2fdd689 | [
"MIT"
] | 3 | 2019-09-23T06:06:30.000Z | 2020-02-24T10:22:26.000Z | Python/lab8 [2, 5, 7, 12, 17]/tz17.py | da-foxbite/KSU141 | 133637abb4f465aeecb845e6735ba383a2fdd689 | [
"MIT"
] | null | null | null | Python/lab8 [2, 5, 7, 12, 17]/tz17.py | da-foxbite/KSU141 | 133637abb4f465aeecb845e6735ba383a2fdd689 | [
"MIT"
] | 1 | 2020-10-26T11:00:22.000Z | 2020-10-26T11:00:22.000Z | # 141,
# :09.04.20
# 17. : , ', , , , ; ;
# : , , . ' .
# , .
import names
from faker import Faker
fake = Faker()
import string
import random
customers = []
for i in range(0, 5):
customers.append(Customer(
names.get_first_name(), names.get_first_name(), names.get_first_name(), fake.address(),
getRanNum(16), getRanNum(8)))
# print(" : ", customers[i])
customers.sort(key=lambda customer: customer.fullName)
fixPrintout(customers)
maxNum = getRanNum(16)
#print(maxNum)
print('\033[0;37;49m : ')
for i in range(0, 5):
if CardNumCheck(customers[i], maxNum) == False:
print('-')
pass
else:
print(customers[i])
| 33.094737 | 130 | 0.682252 |
a59c22cef1a85002b71aba681bd1b6e2ffee762e | 7,344 | py | Python | absolv/tests/test_models.py | SimonBoothroyd/absolv | dedb2b6eb567ec1b627dbe50f36f68e0c32931c4 | [
"MIT"
] | null | null | null | absolv/tests/test_models.py | SimonBoothroyd/absolv | dedb2b6eb567ec1b627dbe50f36f68e0c32931c4 | [
"MIT"
] | 30 | 2021-11-02T12:47:24.000Z | 2022-03-01T22:00:39.000Z | absolv/tests/test_models.py | SimonBoothroyd/absolv | dedb2b6eb567ec1b627dbe50f36f68e0c32931c4 | [
"MIT"
] | null | null | null | import numpy
import pytest
from openmm import unit
from pydantic import ValidationError
from absolv.models import (
DeltaG,
EquilibriumProtocol,
MinimizationProtocol,
SimulationProtocol,
State,
SwitchingProtocol,
System,
TransferFreeEnergyResult,
)
from absolv.tests import is_close
class TestState:
def test_unit_validation(self):
state = State(
temperature=298.0 * unit.kelvin, pressure=101.325 * unit.kilopascals
)
assert is_close(state.temperature, 298.0)
assert is_close(state.pressure, 1.0)
| 30.473029 | 90 | 0.631672 |
a59f046e4edcd4dce70590e6b4351f5262990e72 | 868 | py | Python | archiv/tables.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | 1 | 2020-03-15T16:14:02.000Z | 2020-03-15T16:14:02.000Z | archiv/tables.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | 14 | 2018-11-09T08:34:23.000Z | 2022-02-10T08:15:53.000Z | archiv/tables.py | acdh-oeaw/gtrans | 6f56b1d09de0cad503273bf8a01cd81e25220524 | [
"MIT"
] | null | null | null | import django_tables2 as tables
from django_tables2.utils import A
from entities.models import *
from archiv.models import *
| 31 | 63 | 0.687788 |
a5a01c24d79e75ecbeea7e8b127b09c3ad1d05e0 | 376 | py | Python | accounts/migrations/0005_auto_20200227_0418.py | inclusive-design/coop-map-directory-index | b215ea95677dc90fafe60eaa494a4fd6af0431fb | [
"BSD-3-Clause"
] | 1 | 2020-01-28T16:16:49.000Z | 2020-01-28T16:16:49.000Z | accounts/migrations/0005_auto_20200227_0418.py | inclusive-design/coop-map-directory-index | b215ea95677dc90fafe60eaa494a4fd6af0431fb | [
"BSD-3-Clause"
] | 114 | 2020-02-12T20:22:07.000Z | 2021-09-22T18:29:50.000Z | accounts/migrations/0005_auto_20200227_0418.py | inclusive-design/coop-map-directory-index | b215ea95677dc90fafe60eaa494a4fd6af0431fb | [
"BSD-3-Clause"
] | 4 | 2020-04-21T21:09:25.000Z | 2021-01-08T14:18:58.000Z | # Generated by Django 3.0.3 on 2020-02-27 04:18
from django.db import migrations
| 20.888889 | 62 | 0.619681 |
a5a08838db67fdc32c63308d4dd034cb11ff2a45 | 3,745 | py | Python | src/FSG/WordEmbedding.py | handsomebrothers/Callback2Vec | 370adbcfcc229d385ba9c8c581489b703a39ca85 | [
"MIT"
] | null | null | null | src/FSG/WordEmbedding.py | handsomebrothers/Callback2Vec | 370adbcfcc229d385ba9c8c581489b703a39ca85 | [
"MIT"
] | null | null | null | src/FSG/WordEmbedding.py | handsomebrothers/Callback2Vec | 370adbcfcc229d385ba9c8c581489b703a39ca85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import multiprocessing
from gensim.models import Word2Vec
import csv
def embedding_sentences(sentences, embedding_size = 64, window = 3, min_count = 0, file_to_load = None, file_to_save = None):
'''
embeding_size Word Embedding Dimension
window : Context window
min_count : Word frequency less than min_count will be deleted
'''
if file_to_load is not None:
w2vModel = Word2Vec.load(file_to_load) # load model
else:
w2vModel = Word2Vec(sentences, size = embedding_size, window = window, min_count = min_count, workers = multiprocessing.cpu_count(),seed=200)
if file_to_save is not None:
w2vModel.save(file_to_save) # Save Model
return w2vModel
# This function is used to represent a sentence as a vector (corresponding to representing a method as a vector)
# This function is used to represent a word as a vector (corresponding to a word in method)
# This function is used to get the vector of a text (corresponding to the word vector of class or apk)
# This function is used to obtain the similarity between two sentences,
# with the help of python's own function to calculate the similarity.
# Used to build corpus
# Used to get the model that has been created
# Used for acquiring corpus
if __name__ == "__main__":
bulid_word2vec_model()
| 40.706522 | 149 | 0.687316 |
a5a1b481c21e6820b7064b6612f4c7a3b1370fc4 | 10,914 | py | Python | hearthstone/player.py | dianarvp/stone_ground_hearth_battles | 450e70eaef21b543be579a6d696676fb148a99b0 | [
"Apache-2.0"
] | null | null | null | hearthstone/player.py | dianarvp/stone_ground_hearth_battles | 450e70eaef21b543be579a6d696676fb148a99b0 | [
"Apache-2.0"
] | null | null | null | hearthstone/player.py | dianarvp/stone_ground_hearth_battles | 450e70eaef21b543be579a6d696676fb148a99b0 | [
"Apache-2.0"
] | null | null | null | import itertools
import typing
from collections import defaultdict
from typing import Optional, List, Callable, Type
from hearthstone.cards import MonsterCard, CardEvent, Card
from hearthstone.events import BuyPhaseContext, EVENTS
from hearthstone.hero import EmptyHero
from hearthstone.monster_types import MONSTER_TYPES
from hearthstone.triple_reward_card import TripleRewardCard
if typing.TYPE_CHECKING:
from hearthstone.tavern import Tavern
from hearthstone.hero import Hero
from hearthstone.randomizer import Randomizer
StoreIndex = typing.NewType("StoreIndex", int)
HandIndex = typing.NewType("HandIndex", int)
BoardIndex = typing.NewType("BoardIndex", int)
| 38.702128 | 133 | 0.660711 |
a5a2a13b3d7e2462a415df9e5bf700f91ae466fd | 12,743 | py | Python | PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 6 | 2021-09-29T15:46:55.000Z | 2021-12-14T18:39:51.000Z | PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | null | null | null | PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 3 | 2021-09-27T10:35:20.000Z | 2021-10-02T17:53:07.000Z | # -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""A submodule implementing "zooming in" (Biological) optimization strategy.
This optimization strategy has a single hyperparameter :math:`s`, called the *shrinking factor*.
It consists of of the following steps:
1. The optimization space is a hypercuboid
.. math::
C = [a_1, b_1] \\times [a_2, b_2] \\times \\cdots \\times [a_n, b_n].
2. Find the optimum :math:`x=(x_1, x_2, \\dots, x_n)` among the already collected samples.
3. Construct a new hypercuboid :math:`D` centered at :math:`x`. If this is the :math:`N`th optimization step, the
volume of :math:`D` is given by
.. math::
\\mathrm{vol}\\, D = s^N \\cdot \\mathrm{vol}\\, C
Step :math:`N` is either provided in the configuration file or is estimated as ``n_samples/batch_size``.
4. If :math:`D` is not a subset of :math:`C`, we translate it by a vector.
5. To suggest a new batch we sample the hypercuboid :math:`D`. Many different sampling methods are available, see
:ref:`abex.sample_designs` for this. For example, we can construct a grid, sample in a random way or use Latin
or Sobol sampling.
"""
from pathlib import Path
from typing import List, Tuple
import abex.optimizers.optimizer_base as base
import numpy as np
import pandas as pd
from abex import space_designs as designs
from abex.dataset import Dataset
from abex.settings import OptimizationStrategy, ZoomOptSettings
from emukit.core import ContinuousParameter, ParameterSpace
Interval = Tuple[float, float] # Endpoints of an interval
Hypercuboid = List[Interval] # Optimization space is represented by a rectangular box
def evaluate_optimum(dataset: Dataset) -> pd.DataFrame:
    """Infer the optimum the way Zoom Opt. does: pick the observed sample with the
    highest objective value.

    Args:
        dataset (dataset.Dataset): the data observed so far (the pre-transform
            representation is used throughout).

    Returns:
        pd.DataFrame: a single-row DataFrame holding the inputs of the best-scoring sample.
    """
    observed = dataset.pretransform_df
    # Positional index of the row with the largest observed objective
    best_row = observed[dataset.pretransform_output_name].argmax()
    # Select only the input columns of that row, keeping DataFrame (not Series) shape
    return observed[dataset.pretransform_input_names].iloc[[best_row]]
def _suggest_samples(dataset: Dataset, settings: ZoomOptSettings) -> np.ndarray:
    """Propose a new batch of samples by shrinking the search box around the best observation.

    Currently this method doesn't allow categorical inputs.

    Returns:
        a batch of suggestions. Shape (batch_size, n_inputs).

    Raises:
        ValueError, if batch size is less than 1
        NotImplementedError, if any categorical inputs are present
    """
    if settings.batch < 1:
        raise ValueError(f"Use batch size at least 1. (Was {settings.batch}).")  # pragma: no cover

    continuous_params, categorical_params = dataset.parameter_space
    # Categorical variables would need e.g. a one-hot embedding before shrinking makes sense,
    # so their presence is an error for now.
    if categorical_params:
        raise NotImplementedError("This method doesn't work with categorical inputs right now.")  # pragma: no cover

    # The values may be pandas series rather than tuples, so normalise each to an (a, b) pair.
    current_space: Hypercuboid = [(low, high) for low, high in continuous_params.values()]

    # The box will be shrunk around the best point seen so far.
    best_point: np.ndarray = _get_optimum_location(dataset)

    # Use the configured step number, or estimate it from how much data has been collected.
    n_completed_steps: int = settings.n_step or _estimate_step_number(
        n_points=len(dataset.output_array), batch_size=settings.batch
    )

    # Convert to a per-batch shrinking factor if a per-iteration factor was supplied.
    if settings.shrink_per_iter:
        batch_shrinkage = settings.shrinking_factor ** settings.batch
    else:
        batch_shrinkage = settings.shrinking_factor

    # Factor by which each side of the hypercuboid should be multiplied.
    per_dim_factor: float = _calculate_shrinking_factor(
        initial_shrinking_factor=batch_shrinkage, step_number=n_completed_steps, n_dim=len(current_space)
    )

    # Shrink every bounding interval around the corresponding coordinate of the optimum.
    shrunk_space: Hypercuboid = [
        shrink_interval(shrinking_factor=per_dim_factor, interval=bounds, shrinking_anchor=anchor)
        for bounds, anchor in zip(current_space, best_point)
    ]

    # The shrunk box may stick out of the original bounds (optimum near a boundary) -- translate it back in.
    shrunk_space = _move_to_original_bounds(new_space=shrunk_space, original_space=current_space)

    # Sample the shrunk box to obtain the suggested batch.
    emukit_space = ParameterSpace(
        [ContinuousParameter(f"x{i}", low, high) for i, (low, high) in enumerate(shrunk_space)]
    )
    return designs.suggest_samples(
        parameter_space=emukit_space, design_type=settings.design, point_count=settings.batch
    )
def _estimate_step_number(n_points: int, batch_size: int) -> int:
"""Estimates which step this is (or rather how many steps were collected previously, basing on the ratio
of number of points collected and the batch size).
Note that this method is provisional and may be replaced with a parameter in the config.
Raises:
ValueError if ``n_points`` or ``batch_size`` is less than 1
"""
if min(n_points, batch_size) < 1:
raise ValueError(
f"Both n_points={n_points} and batch_size={batch_size} must be at least 1."
) # pragma: no cover
return n_points // batch_size
def _calculate_shrinking_factor(initial_shrinking_factor: float, step_number: int, n_dim: int) -> float:
"""The length of each in interval bounding the parameter space needs to be multiplied by this number.
Args:
initial_shrinking_factor: in each step the total volume is shrunk by this amount
step_number: optimization step -- if we collected only an initial batch, this step is 1
n_dim: number of dimensions
Example:
Assume that ``initial_shrinking_factor=0.5`` and ``step_number=1``. This means that the total volume should
be multiplied by :math:`1/2`. Hence, if there are :math:`N` dimensions (``n_dim``), the length of each
bounding interval should be multiplied by :math:`1/2^{1/N}`.
However, if ``step_number=3``, each dimension should be shrunk three times, i.e. we need to multiply it by
:math:`1/2^{3/N}`.
Returns:
the shrinking factor for each dimension
"""
assert 0 < initial_shrinking_factor < 1, (
f"Shrinking factor must be between 0 and 1. " f"(Was {initial_shrinking_factor})."
)
assert step_number >= 1 and n_dim >= 1, (
f"Step number and number of dimensions must be greater than 0. "
f"(Where step_number={step_number}, n_dim={n_dim})."
)
return initial_shrinking_factor ** (step_number / n_dim)
def _get_optimum_location(dataset: Dataset) -> np.ndarray:
"""Returns the position (in the transformed space) of the maximum. Shape (n_inputs,)."""
# Retrieve the observations
X, Y = dataset.inputs_array, dataset.output_array
# Return the location of the maximum
best_index = int(np.argmax(Y))
return X[best_index, :]
def shrink_interval(shrinking_factor: float, interval: Interval, shrinking_anchor: float) -> Interval:
    """Shrink a one-dimensional interval, re-centering it on ``shrinking_anchor``.

    Note:
        the result may stick out of the original interval (e.g. when the anchor
        sits near a boundary); callers are expected to translate it back if needed.

    Args:
        shrinking_factor: multiplier for the interval's length; expected in (0, 1)
        interval: (origin, end) of the interval to shrink
        shrinking_anchor: center of the shrunk interval

    Returns:
        endpoints of the shrunk interval
    """
    half_width = shrinking_factor * (interval[1] - interval[0]) / 2
    return shrinking_anchor - half_width, shrinking_anchor + half_width
def _validate_interval(interval: Interval) -> None:
"""Validates whether an interval is non-empty.
Note:
one-point interval :math:`[a, a]` is allowed
Raises:
ValueError: if the end of the interval is less than its origin
"""
origin, end = interval
if end < origin:
raise ValueError(f"Interval [{origin}, {end}] is not a proper one.") # pragma: no cover
def interval_length(interval: Interval) -> float:
    """Return the length (end minus origin) of a validated interval."""
    _validate_interval(interval)
    origin, end = interval
    return end - origin
def shift_to_within_parameter_bounds(new_interval: Interval, old_interval: Interval) -> Interval:
    """Translate ``new_interval`` so that it fits inside ``old_interval``, keeping its length.

    Raises:
        ValueError: if ``new_interval`` is longer than ``old_interval`` (no translation can fit it).
    """
    if interval_length(new_interval) > interval_length(old_interval):
        raise ValueError(  # pragma: no cover
            f"Translation is not possible. New interval {new_interval} is longer "
            f"than the original one {old_interval}."
        )
    new_lo, new_hi = new_interval
    old_lo, old_hi = old_interval
    # Already inside the bounds -- nothing to do.
    if new_lo >= old_lo and new_hi <= old_hi:
        return new_interval
    # Shift right if we stick out on the left, otherwise shift left.
    shift = old_lo - new_lo if new_lo < old_lo else old_hi - new_hi
    return new_lo + shift, new_hi + shift
def _move_to_original_bounds(new_space: Hypercuboid, original_space: Hypercuboid) -> Hypercuboid:
    """Translate every interval of ``new_space`` into ``original_space`` without changing its volume."""
    return [
        shift_to_within_parameter_bounds(new_interval=shrunk, old_interval=bounds)
        for shrunk, bounds in zip(new_space, original_space)
    ]
| 41.106452 | 120 | 0.697167 |
a5a44f9a6a387924ac0536e279f50da03dd8ba3f | 1,146 | py | Python | Labs/lab4/l4e3.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab4/l4e3.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab4/l4e3.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
###############################################################################
# File Name : l4e3.py
# Created By : Flix Chiasson (7138723)
# Creation Date : [2015-10-06 11:43]
# Last Modified : [2015-10-06 11:56]
# Description : Asks user to guess randomly generated number
###############################################################################
from random import randint
r = randint(1, 10)
devine(r)
| 35.8125 | 79 | 0.447644 |
a5a4a070bcfd5efb385e2904922ea624312e4682 | 2,984 | py | Python | python/datamongo/text/dmo/text_query_windower.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/datamongo/text/dmo/text_query_windower.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/datamongo/text/dmo/text_query_windower.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import string
import pandas as pd
from pandas import DataFrame
from base import BaseObject
| 25.947826 | 103 | 0.499665 |
a5a5088a8ab15596ca84187c9c0e0627828850f9 | 683 | py | Python | CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | from L1TriggerConfig.CSCTFConfigProducers.CSCTFObjectKeysOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1DTTFTSCObjectKeysOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCObjectKeysOnline_cfi import *
from L1TriggerConfig.GMTConfigProducers.L1MuGMTParametersKeysOnlineProd_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1MuTriggerScaleKeysOnlineProd_cfi import *
L1MuTriggerScaleKeysOnlineProd.subsystemLabel = 'GMTScales'
from L1TriggerConfig.RCTConfigProducers.L1RCTObjectKeysOnline_cfi import *
from L1TriggerConfig.GctConfigProducers.L1GctTSCObjectKeysOnline_cfi import *
from L1TriggerConfig.L1GtConfigProducers.l1GtTscObjectKeysOnline_cfi import *
| 68.3 | 84 | 0.90776 |
a5a553d43dc2a036ccb015ad21d1dcf2af2ae50c | 640 | py | Python | hackerrank/interview_prep/making_anagrams.py | luojxxx/CodingPractice | bac357aaddbda8e6e73a49c36f2eefd4304b336d | [
"MIT"
] | null | null | null | hackerrank/interview_prep/making_anagrams.py | luojxxx/CodingPractice | bac357aaddbda8e6e73a49c36f2eefd4304b336d | [
"MIT"
] | null | null | null | hackerrank/interview_prep/making_anagrams.py | luojxxx/CodingPractice | bac357aaddbda8e6e73a49c36f2eefd4304b336d | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/ctci-making-anagrams
from collections import Counter | 29.090909 | 83 | 0.678125 |
a5a5adab4d37dc9f239bb54f261403d5485bdb40 | 803 | py | Python | DongbinNa/19/pt4.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | DongbinNa/19/pt4.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | DongbinNa/19/pt4.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | n = int(input())
numbers = list(map(int, input().split()))
add, sub, mul, div = map(int, input().split())
min_num = 1e9
max_num = -1e9
dfs(numbers[0], 1)
print(max_num)
print(min_num)
| 22.305556 | 48 | 0.414695 |
a5a7f71a8d3d53892df66d8802c0d53865e70be7 | 497 | py | Python | app/store/migrations/0003_auto_20201127_1957.py | Yuehan-Wang/Marvas | d868a152865b9e8308db8d98642016a67b78f31d | [
"MIT"
] | null | null | null | app/store/migrations/0003_auto_20201127_1957.py | Yuehan-Wang/Marvas | d868a152865b9e8308db8d98642016a67b78f31d | [
"MIT"
] | null | null | null | app/store/migrations/0003_auto_20201127_1957.py | Yuehan-Wang/Marvas | d868a152865b9e8308db8d98642016a67b78f31d | [
"MIT"
] | 3 | 2022-01-22T16:14:13.000Z | 2022-01-23T18:25:06.000Z | # Generated by Django 2.2 on 2020-11-27 13:57
from django.db import migrations
| 21.608696 | 58 | 0.573441 |
a5a81b703f6ebb1da895acb3224ef4edc9e40b99 | 19,141 | py | Python | Graded/G3/slam/EKFSLAM.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | Graded/G3/slam/EKFSLAM.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | Graded/G3/slam/EKFSLAM.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | from typing import Tuple
import numpy as np
from numpy import ndarray
from dataclasses import dataclass, field
from scipy.linalg import block_diag
import scipy.linalg as la
from utils import rotmat2d
from JCBB import JCBB
import utils
import solution
| 35.77757 | 141 | 0.539575 |
a5a924ddb3332cd660e8de578d9b220740f27184 | 3,185 | py | Python | pykob/audio.py | Greg-R/PyKOB | fd3c7ca352f900bd14bb10dc71d567221a8af8cf | [
"MIT"
] | 3 | 2020-06-29T19:59:39.000Z | 2021-02-08T19:56:32.000Z | pykob/audio.py | Greg-R/PyKOB | fd3c7ca352f900bd14bb10dc71d567221a8af8cf | [
"MIT"
] | 197 | 2020-04-30T08:08:52.000Z | 2021-03-22T19:10:20.000Z | pykob/audio.py | MorseKOB/pykob-4 | bf86917e4e06ce9590f414ace0eacbde08416137 | [
"MIT"
] | 2 | 2021-04-17T01:05:24.000Z | 2021-11-03T16:43:53.000Z | """
MIT License
Copyright (c) 2020 PyKOB - MorseKOB in Python
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
audio module
Provides audio for simulated sounder.
"""
import wave
from pathlib import Path
from pykob import log
try:
import pyaudio
ok = True
except:
log.log('PyAudio not installed.')
ok = False
BUFFERSIZE = 16
nFrames = [0, 0]
frames = [None, None]
nullFrames = None
iFrame = [0, 0]
sound = 0
if ok:
pa = pyaudio.PyAudio()
# Resource folder
root_folder = Path(__file__).parent
resource_folder = root_folder / "resources"
# Audio files
audio_files = ['clack48.wav', 'click48.wav']
for i in range(len(audio_files)):
fn = resource_folder / audio_files[i]
# print("Load audio file:", fn)
f = wave.open(str(fn), mode='rb')
nChannels = f.getnchannels()
sampleWidth = f.getsampwidth()
sampleFormat = pa.get_format_from_width(sampleWidth)
frameWidth = nChannels * sampleWidth
frameRate = f.getframerate()
nFrames[i] = f.getnframes()
frames[i] = f.readframes(nFrames[i])
iFrame[i] = nFrames[i]
f.close()
nullFrames = bytes(frameWidth*BUFFERSIZE)
if ok:
apiInfo = pa.get_default_host_api_info()
apiName = apiInfo['name']
devIdx = apiInfo['defaultOutputDevice']
devInfo = pa.get_device_info_by_index(devIdx)
devName = devInfo['name']
strm = pa.open(rate=frameRate, channels=nChannels, format=sampleFormat,
output=True, output_device_index=devIdx, frames_per_buffer=BUFFERSIZE,
stream_callback=callback)
| 32.5 | 82 | 0.706122 |
a5a96f07f26b02ec492974bd34c7406e72ba2e22 | 3,333 | py | Python | main.py | DaKidReturns/WikipediaScrapper | 288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0 | [
"MIT"
] | null | null | null | main.py | DaKidReturns/WikipediaScrapper | 288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0 | [
"MIT"
] | null | null | null | main.py | DaKidReturns/WikipediaScrapper | 288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup as bs4
from docx import Document as doc
from docx.shared import Cm
import sys
# CLI entry: expects exactly two arguments -- the article URL and the output file name.
if len(sys.argv) != 3:
    print("The format should be \n./main.py <url> <output_file_name>")
else:
    url = sys.argv[1]
    doc_name = sys.argv[2]
    document = doc()
    # Fetch the page; the body below only parses it when the request returned OK.
    page = requests.get(url)
    if(page.status_code == requests.codes.ok):
        soup = bs4(page.content,'html.parser')
        headings = soup.find_all("h1",class_="firstHeading")
        document.add_heading(headings[0].text)
        details = soup.find("div",id="bodyContent")
        main_soup = bs4(details.prettify(),'html.parser')
        #Extract the table elements to be implemented in the future
        table = main_soup.find('table').extract()
        #isEmpty is the lambda function that checks if a list is empty
        isEmpty = lambda x: True if(x == []) else False
        #tableElem = ('table','td','tr')
        # Walk the article body, skipping whitespace-only nodes, and copy each
        # text-bearing element (minus tables, lists, and reference blocks) into the document.
        for x in details.children:
            if x != '\n' and x !='' and x != ' ':
                if(not isEmpty(list(x.children))):
                    for i in list(x.children):
                        # print(i.string)
                        if i.string == None:
                            #print(len(list(i.children)))
                            for j in i.children:
                                #print(j.name)
                                if j.string == None:
                                    #print(j.attrs)
                                    # Tables and list markup are not exported (see TODO above).
                                    if(j.name == 'table' or j.name == 'ol' or j.name == 'ul'):
                                        #print(j.attrs)
                                        continue
                                    #j = j.next_sibling.next_sibling
                                    #search and purge references
                                    if list(j.descendants) != []:
                                        #print(list(j.descendants))
                                        for a in j.descendants:
                                            if a.string == None:
                                                attr = a.attrs.keys()
                                                #print(a.attrs)
                                                if 'class' in attr:
                                                    if 'mw-references-wrap' in a.attrs['class']:
                                                        #print(a.text)
                                                        a.decompose()
                                                        break
                                                #if 'href' in attr:
                                                    #if '#References' in a.attrs['href']:
                                                        #a.decompose()
                                    #print the elements
                                    document.add_paragraph(j.text)
                                    #print(j.prettify())
                                    #print('\n')
    # NOTE(review): this save runs even when the HTTP request failed, producing an
    # empty document -- confirm whether that is intended.
    if doc_name.endswith('.doc') or doc_name.endswith('.docx'):
        document.save(doc_name)
    else:
        document.save(doc_name+'.doc')
| 42.189873 | 96 | 0.370237 |
a5a9f77ca2671875a0d1fe9de7b77aefb68618a3 | 583 | py | Python | math/count_digits.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/count_digits.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/count_digits.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | """
https://www.codewars.com/kata/566fc12495810954b1000030/train/python
Given an pos int n,
and a digit that is < 10, d.
Square all ints from 0 - n, and return the number times d is used in the squared results.
"""
def nb_dig(n, d):
    """Count how many times digit ``d`` appears among the squares 0**2 .. n**2.

    Args:
        n: inclusive upper bound of the integers to square; expected >= 0.
        d: the digit to count (an int < 10).

    Returns:
        Total number of occurrences of ``d`` across all the squared values.
    """
    # Concatenate every square as text, then count the digit's occurrences.
    # (The previous version kept an old implementation as a discarded
    # triple-quoted string expression inside the body; that dead code is removed.)
    return ''.join(str(i * i) for i in range(n + 1)).count(str(d))
print(nb_dig(10, 1)) # 4
print(nb_dig(5750, 0)) # 4700
print(nb_dig(11011, 2)) # 9481
print(nb_dig(12224, 8)) # 7733
print(nb_dig(11549, 1)) # 11905
| 23.32 | 89 | 0.61578 |
a5ac9cd651f965f113812d5a35b9a777736d390b | 3,492 | py | Python | {{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py | EMMC-ASBL/oteapi-plugin-template | 31a772a4fb9be6eafabfa206fe6e7a23516bf188 | [
"MIT"
] | null | null | null | {{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py | EMMC-ASBL/oteapi-plugin-template | 31a772a4fb9be6eafabfa206fe6e7a23516bf188 | [
"MIT"
] | 35 | 2022-01-17T10:23:01.000Z | 2022-03-11T19:41:36.000Z | {{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py | EMMC-ASBL/oteapi-plugin-template | 31a772a4fb9be6eafabfa206fe6e7a23516bf188 | [
"MIT"
] | 2 | 2022-01-20T06:45:27.000Z | 2022-02-09T15:59:21.000Z | """Demo resource strategy class."""
# pylint: disable=no-self-use,unused-argument
from typing import TYPE_CHECKING, Optional
from oteapi.models import AttrDict, DataCacheConfig, ResourceConfig, SessionUpdate
from oteapi.plugins import create_strategy
from pydantic import Field
from pydantic.dataclasses import dataclass
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict
| 29.846154 | 86 | 0.665521 |
a5ad0bf99db5282a28fe82ac56a8026546459cf4 | 1,480 | py | Python | unittests/TestSets.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | unittests/TestSets.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | unittests/TestSets.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | # This workaround makes sure that we can import from the parent dir
import sys
sys.path.append('..')
from scrython.sets import Code
import unittest
import time
promo_khans = Code('PKTK')
khans = Code('KTK')
if __name__ == '__main__':
unittest.main() | 25.084746 | 67 | 0.691892 |
a5ad538fb112ec421c158be3cf3243f38640e710 | 194 | py | Python | GUI/check_email.py | BrendanCheong/BT2102-OSHES-Group16 | 2b62772e6c654b8d4e76f09df6473ac88912df28 | [
"MIT"
] | 5 | 2021-09-11T15:07:34.000Z | 2021-09-11T15:16:04.000Z | GUI/check_email.py | BrendanCheong/Online-Smart-Home-Ecommerce-System | 2b62772e6c654b8d4e76f09df6473ac88912df28 | [
"MIT"
] | 1 | 2021-09-18T10:33:00.000Z | 2021-09-18T10:34:01.000Z | GUI/check_email.py | BrendanCheong/BT2102-OSHES-Group16 | 2b62772e6c654b8d4e76f09df6473ac88912df28 | [
"MIT"
] | null | null | null | import re
| 19.4 | 67 | 0.489691 |
a5aea13c60563cdbc4bc77d66b48baaf6efb6ec5 | 1,587 | py | Python | SimpleEmailer.py | dschoonwinkel/InverterMQTT | 75f13900f584d9905a02488eff7bd1dd3e53e73a | [
"Apache-2.0"
] | null | null | null | SimpleEmailer.py | dschoonwinkel/InverterMQTT | 75f13900f584d9905a02488eff7bd1dd3e53e73a | [
"Apache-2.0"
] | null | null | null | SimpleEmailer.py | dschoonwinkel/InverterMQTT | 75f13900f584d9905a02488eff7bd1dd3e53e73a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import smtplib
import time
import configparser
config = configparser.ConfigParser()
config.read('/home/pi/Development/Python/InverterMQTT/emailcredentials.conf')
email = config['credentials']['email']
password = config['credentials']['password']
to_email = config['credentials']['to_email']
#
# Based on tutorial: https://www.bc-robotics.com/tutorials/sending-email-using-python-raspberry-pi/
#Email Variables
SMTP_SERVER = 'smtp.gmail.com' #Email Server (don't change!)
SMTP_PORT = 587 #Server Port (don't change!)
GMAIL_USERNAME = email #change this to match your gmail account
GMAIL_PASSWORD = password #change this to match your gmail password
if __name__ == '__main__':
main()
| 29.943396 | 99 | 0.674228 |
a5b066bc7defe004716762bdcddd92dae0d3fd15 | 876 | py | Python | BaseKnowledge/file/file.py | Kose-i/python_test | d7b031aa33d699aeb9fe196fe0a6d216aa006f0d | [
"Unlicense"
] | null | null | null | BaseKnowledge/file/file.py | Kose-i/python_test | d7b031aa33d699aeb9fe196fe0a6d216aa006f0d | [
"Unlicense"
] | null | null | null | BaseKnowledge/file/file.py | Kose-i/python_test | d7b031aa33d699aeb9fe196fe0a6d216aa006f0d | [
"Unlicense"
] | null | null | null | #! /usr/bin/env python3
import codecs
import os.path
import shutil
import glob
import tempfile
if __name__=='__main__':
print("\nfunc1()")
func1()
print("\nfunc2()")
func2()
print("\nfunc3()")
func3()
print("\nfunc4()")
func4()
print("\nfunc5()")
func5()
print("\nfunc6()")
func6()
print("\nfunc7()")
func7()
| 16.528302 | 53 | 0.592466 |
a5b2bd395585d35f2949dc453f6442697664d6bf | 202 | py | Python | types/msg.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | 4 | 2021-08-02T21:34:46.000Z | 2021-09-24T03:26:35.000Z | types/msg.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | null | null | null | types/msg.py | UltiRequiem/professional-phython-platzi | 0bf8f97b172d0799d6906193090ef69beb1c8b4b | [
"MIT"
] | 4 | 2021-08-02T21:34:47.000Z | 2021-08-11T03:21:37.000Z | def run(msg: str) -> None:
"""
Print the message received parameters.
"""
print(msg)
if __name__ == "__main__":
message: str = "Zero commands Python to be typed!"
run(message)
| 18.363636 | 54 | 0.60396 |
a5b4efb9c597491e24e7c42cb5dac380b74e6e91 | 702 | py | Python | apps/billing/tasks.py | banyanbbt/banyan_data | 4ce87dc1c49920d587a472b70842fcf5b3d9a3d2 | [
"MIT"
] | 2 | 2018-09-08T05:16:39.000Z | 2018-09-10T02:50:31.000Z | apps/billing/tasks.py | banyanbbt/banyan_data | 4ce87dc1c49920d587a472b70842fcf5b3d9a3d2 | [
"MIT"
] | null | null | null | apps/billing/tasks.py | banyanbbt/banyan_data | 4ce87dc1c49920d587a472b70842fcf5b3d9a3d2 | [
"MIT"
] | null | null | null | import logging
from config.celery_configs import app
from lib.sms import client as sms_client
from lib.blockchain.pandora import Pandora
from apps.user.models import UserProfile
logger = logging.getLogger(__name__)
| 23.4 | 45 | 0.763533 |
a5b6d5ce0ce97c7ff9249912738d183eb9ca560c | 449 | py | Python | LBP51.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | LBP51.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | LBP51.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | '''Given a valid IP address, return a defanged version of that IP address. A defanged IP address replaces every period '.' with "[.]".
Input Format
A string
Constraints
non-empty String
Output Format
replacement String
Sample Input 0
1.1.1.1
Sample Output 0
1[.]1[.]1[.]1
Sample Input 1
255.100.50.0
Sample Output 1
255[.]100[.]50[.]0
Sample Input 2
1.2.3.4
Sample Output 2
1[.]2[.]3[.]4'''
# Solution: read an IP address from stdin and print the defanged form,
# replacing every '.' with '[.]'.
print(input().replace('.','[.]'))
a5b824b421e3455471988b500baaf9d0bcd0357a | 4,981 | py | Python | website/urls.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | null | null | null | website/urls.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | 6 | 2021-10-01T14:18:34.000Z | 2021-10-01T14:19:17.000Z | website/urls.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.urls import path, re_path
from django.views.static import serve
from .views import *
urlpatterns = [
re_path('^$', MainView.as_view(), name='main_view'),
path('page/<int:page>/', MainView.as_view(), name='main_view_with_page'),
re_path('^signup/$', UserRegistrationView.as_view(), name='signup'),
re_path('^signin/$', UserLoginView.as_view(), name='signin'),
re_path('^logout/$', logout_user, name='logout'),
path('user/<str:username>/', UserInformationView.as_view(), name='user_info'),
re_path('^settings/general/$', SettingsGeneralView.as_view(), name='settings_general_view'),
re_path('^settings/social/$', SettingsSocialView.as_view(), name='settings_social_view'),
re_path('^friends/$', FriendsView.as_view(), name='friends_view'),
path('friends/page/<int:page>/', FriendsView.as_view(), name='friends_view_with_page'),
re_path('^search_users/$', search_users, name='user_search'),
path('user/<str:username>/blog/', UserBlogView.as_view(), name='user_blog_view'),
path('user/<str:username>/blog/page/<int:page>/', UserBlogView.as_view(), name='user_blog_view_with_page'),
path('user/<str:username>/tasks/', UserTasksView.as_view(), name='user_tasks_view'),
path('user/<str:username>/tasks/page/<int:page>/', UserTasksView.as_view(), name='user_tasks_view_with_page'),
path('user/<str:username>/contests/', UserContestListView.as_view(), name='user_contests_view'),
path('user/<str:username>/contests/page/<int:page>/', UserContestListView.as_view(),
name='user_contests_view_with_page'),
path('user/<str:username>/solved_tasks/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view'),
path('user/<str:username>/solved_tasks/page/<int:page>/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view_with_page'),
path('top_users/', UserTopView.as_view(), name='users_top_view'),
path('top_users/page/<int:page>/', UserTopView.as_view(), name='users_top_view_with_page'),
path('top_rating_users/', UserRatingTopView.as_view(), name='users_rating_top_view'),
path('top_rating_users/page/<int:page>/', UserRatingTopView.as_view(), name='users_rating_top_view_with_page'),
path('top_rating_users_by_group/', UserByGroupRatingTopView.as_view(), name='users_by_group_rating_top_view'),
path('top_rating_users_by_group/page/<int:page>/', UserByGroupRatingTopView.as_view(),
name='users_by_group_rating_top_view_with_page'),
re_path('^add_post/$', PostCreationView.as_view(), name='post_creation_view'),
path('post/<int:post_id>/', PostView.as_view(), name='post_view'),
re_path('^leave_comment/$', leave_comment, name='leave_comment'),
re_path('^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
path('task/<int:task_id>/', TaskView.as_view(), name='task_view'),
path('task/<int:task_id>/edit/', TaskEditView.as_view(), name='task_edit_view'),
path('task/<int:task_id>/submit/', submit_task, name='task_submit'),
path('task/<int:task_id>/solved/', TaskSolvedView.as_view(), name='task_solved_view'),
path('task/<int:task_id>/solved/page/<int:page>/', TaskSolvedView.as_view(), name='task_solved_view_with_page'),
re_path('^create_task/$', TaskCreationView.as_view(), name='task_creation_view'),
re_path('^tasks/$', TasksArchiveView.as_view(), name='task_archive_view'),
path('tasks/page/<int:page>/', TasksArchiveView.as_view(), name='task_archive_view_with_page'),
re_path('^confirm_email/$', account_confirmation, name='confirm_account'),
re_path('^resend_email/$', EmailResendView.as_view(), name='resend_email_view'),
re_path('^password_reset_email/$', PasswordResetEmailView.as_view(), name='password_reset_email'),
re_path('^reset_password/$', PasswordResetPasswordView.as_view(), name='password_reset_password'),
re_path('^search_tags/$', search_tags, name='search_tags'),
re_path('^get_task/$', get_task, name='get_task_by_id'),
re_path('^create_contest/$', ContestCreationView.as_view(), name='create_contest'),
path('contests/', ContestsMainListView.as_view(), name='contests_main_list_view'),
path('contests/page/<int:page>/', ContestsMainListView.as_view(), name='contests_main_list_view_with_page'),
path('contest/<int:contest_id>/', ContestMainView.as_view(), name='contest_view'),
path('contest/<int:contest_id>/register/', register_for_contest, name='register_for_contest'),
path('contest/<int:contest_id>/scoreboard/', ContestScoreboardView.as_view(), name='contest_scoreboard_view'),
path('contest/<int:contest_id>/task/<int:task_id>/', ContestTaskView.as_view(), name='contest_task_view'),
path('contest/<int:contest_id>/task/<int:task_id>/submit/', submit_contest_flag, name='contest_task_submit'),
re_path('^test', test_view, name='test_view'),
re_path('^debug', debug_view, name='debug_view'),
]
| 54.736264 | 116 | 0.718932 |
a5b8284d0679076f983319f40b4e3ceca65a28c5 | 1,372 | py | Python | part2.py | Tiziana-I/project-covid-mask-classifier | e1619172656f8de92e8faae5dcb7437686f7ca5e | [
"MIT"
] | null | null | null | part2.py | Tiziana-I/project-covid-mask-classifier | e1619172656f8de92e8faae5dcb7437686f7ca5e | [
"MIT"
] | null | null | null | part2.py | Tiziana-I/project-covid-mask-classifier | e1619172656f8de92e8faae5dcb7437686f7ca5e | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import os
cap = cv2.VideoCapture(0)
#model=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_frontalface_default.xml"))
smile=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_smile.xml"))
#eye=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_eye.xml"))
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Face detector
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
#roi = frame[y:y+h,x:x+w]
#faces = model.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
faces = smile.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
#faces = eye.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
print(faces)
for x,y,w,h in faces:
print(x,y,w,h)
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2) # blue BGR
frame = cv2.putText(frame,"Ciao", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0) , 2, cv2.LINE_AA)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | 38.111111 | 133 | 0.707726 |
a5b83e7cc19ace3ba764ad74920296c856b01e5f | 375 | py | Python | spikes/function_signatures.py | insequor/webapp | 73990bd74afd6d0f794c447e1bcc5d557ee2ed31 | [
"MIT"
] | 1 | 2020-08-07T12:16:49.000Z | 2020-08-07T12:16:49.000Z | spikes/function_signatures.py | insequor/webapp | 73990bd74afd6d0f794c447e1bcc5d557ee2ed31 | [
"MIT"
] | 1 | 2021-10-30T10:21:34.000Z | 2021-10-30T10:21:34.000Z | spikes/function_signatures.py | insequor/webapp | 73990bd74afd6d0f794c447e1bcc5d557ee2ed31 | [
"MIT"
] | null | null | null |
from inspect import signature
if __name__ == '__main__':
#sig = signature(testFunction)
sig = signature(TestClass.testMethod)
for key in sig.parameters:
param = sig.parameters[key]
print(key, param, dir(param))
print(' ', param.kind)
| 20.833333 | 41 | 0.624 |
a5b8565cb66fcfd69f346054d3bf2453f6824c71 | 1,371 | py | Python | docs/commands.py | immersionroom/vee | 2c6f781dc96e9028f2446777b906ca37dc2f4299 | [
"BSD-3-Clause"
] | 6 | 2017-11-05T02:44:10.000Z | 2021-07-14T19:10:56.000Z | docs/commands.py | immersionroom/vee | 2c6f781dc96e9028f2446777b906ca37dc2f4299 | [
"BSD-3-Clause"
] | null | null | null | docs/commands.py | immersionroom/vee | 2c6f781dc96e9028f2446777b906ca37dc2f4299 | [
"BSD-3-Clause"
] | 1 | 2017-01-31T23:10:09.000Z | 2017-01-31T23:10:09.000Z |
import os
import sys
from argparse import _SubParsersAction
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
from vee.commands.main import get_parser
parser = get_parser()
usage = parser.format_usage().replace('usage:', '')
print('''
top-level
---------
.. _cli_vee:
``vee``
~~~~~~~
::
''')
for line in parser.format_help().splitlines():
print(' ' + line)
subaction = get_sub_action(parser)
for group_name, funcs in parser._func_groups:
did_header = False
visible = set(ca.dest for ca in subaction._choices_actions)
for name, func in funcs:
if not name in visible:
continue
if not did_header:
print('.. _cli_%s:' % group_name.replace(' ', '_'))
print()
print(group_name)
print('-' * len(group_name))
print()
did_header = True
subparser = subaction._name_parser_map[name]
print('.. _cli_vee_%s:' % name)
print()
print('``vee %s``' % name)
print('~' * (8 + len(name)))
print()
print('::')
print()
for line in subparser.format_help().splitlines():
print(' ' + line)
print()
| 18.527027 | 68 | 0.56674 |
a5b88dea17e5a8c345a0188b0209c92393ef06ec | 551 | py | Python | main.py | SciFiTy10/talkLikeSnoop | 1a3408dfa244669a0d723737c62da93feb7d9ba8 | [
"MIT"
] | 1 | 2022-01-07T10:27:14.000Z | 2022-01-07T10:27:14.000Z | main.py | SciFiTy10/talkLikeSnoop | 1a3408dfa244669a0d723737c62da93feb7d9ba8 | [
"MIT"
] | null | null | null | main.py | SciFiTy10/talkLikeSnoop | 1a3408dfa244669a0d723737c62da93feb7d9ba8 | [
"MIT"
] | null | null | null | #imports
from routing_methods import on_launch, intent_router
##############################
# Program Entry
##############################
#lambda_handler (this is like main())
| 34.4375 | 108 | 0.638838 |
a5bc2b0b89e7e05fdfc86ac8ee4661e2d1a71f8f | 13,303 | py | Python | thrift/clients.py | fabiobatalha/processing | f3ad99e161de2befc7908168bfd7843f988c379d | [
"BSD-2-Clause"
] | null | null | null | thrift/clients.py | fabiobatalha/processing | f3ad99e161de2befc7908168bfd7843f988c379d | [
"BSD-2-Clause"
] | null | null | null | thrift/clients.py | fabiobatalha/processing | f3ad99e161de2befc7908168bfd7843f988c379d | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
import os
import thriftpy
import json
import logging
from thriftpy.rpc import make_client
from xylose.scielodocument import Article, Journal
LIMIT = 1000
logger = logging.getLogger(__name__)
ratchet_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/ratchet.thrift')
articlemeta_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/articlemeta.thrift')
citedby_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/citedby.thrift')
accessstats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/access_stats.thrift')
publication_stats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/publication_stats.thrift')
| 29.496674 | 123 | 0.42622 |
a5be28a44a12bd589d156a3a7d0bbad6c6678d9a | 6,705 | py | Python | src/pypsr.py | wagglefoot/TVAE | 74f8c5413d3c0d8607af50ddb0d96c4c2d477261 | [
"MIT"
] | 22 | 2015-03-14T04:23:00.000Z | 2022-03-24T03:29:22.000Z | src/pypsr.py | wagglefoot/TVAE | 74f8c5413d3c0d8607af50ddb0d96c4c2d477261 | [
"MIT"
] | null | null | null | src/pypsr.py | wagglefoot/TVAE | 74f8c5413d3c0d8607af50ddb0d96c4c2d477261 | [
"MIT"
] | 15 | 2015-02-04T13:09:27.000Z | 2022-03-24T03:29:24.000Z | from operator import sub
import numpy as np
from sklearn import metrics
from sklearn.neighbors import NearestNeighbors
from toolz import curry
def global_false_nearest_neighbors(x, lag, min_dims=1, max_dims=10, **cutoffs):
"""
Across a range of embedding dimensions $d$, embeds $x(t)$ with lag $\tau$, finds all nearest neighbors,
and computes the percentage of neighbors that that remain neighbors when an additional dimension is unfolded.
See [1] for more information.
Parameters
----------
x : array-like
Original signal $x(t).
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
min_dims : int, optional
The smallest embedding dimension $d$ to test.
max_dims : int, optional
The largest embedding dimension $d$ to test.
relative_distance_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the original distance between neighboring points.
The default, 15, is suggested in [1] (p. 41).
relative_radius_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the radius of the attractor.
The default, 2, is suggested in [1] (p. 42).
Returns
-------
dims : ndarray
The tested dimensions $d$.
gfnn : ndarray
The percentage of nearest neighbors that are false neighbors at each dimension.
See Also
--------
reconstruct
References
----------
[1] Arbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (pp. 40-43). New York: Springer.
"""
x = _vector(x)
dimensions = np.arange(min_dims, max_dims + 1)
false_neighbor_pcts = np.array([_gfnn(x, lag, n_dims, **cutoffs) for n_dims in dimensions])
return dimensions, false_neighbor_pcts
def reconstruct(x, lag, n_dims):
"""Phase-space reconstruction.
Given a signal $x(t)$, dimensionality $d$, and lag $\tau$, return the reconstructed signal
\[
\mathbf{y}(t) = [x(t), x(t + \tau), \ldots, x(t + (d - 1)\tau)].
\]
Parameters
----------
x : array-like
Original signal $x(t)$.
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
n_dims : int
Embedding dimension $d$.
Returns
-------
ndarray
$\mathbf{y}(t)$ as an array with $d$ columns.
"""
x = _vector(x)
if lag * (n_dims - 1) >= x.shape[0] // 2:
raise ValueError('longest lag cannot be longer than half the length of x(t)')
lags = lag * np.arange(n_dims)
return np.vstack(x[lag:lag - lags[-1] or None] for lag in lags).transpose()
def ami(x, y=None, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $y(t)$.
Parameters
----------
x : array-like
y : array-like, optional
$x(t)$ and $y(t)$.
If only `x` is passed, it must have two columns;
the first column defines $x(t)$ and the second $y(t)$.
n_bins : int
The number of bins to use when computing the joint histogram.
Returns
-------
scalar
Average mutual information between $x(t)$ and $y(t)$, in nats (natural log equivalent of bits).
See Also
--------
lagged_ami
References
----------
Arbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (p. 28). New York: Springer.
"""
x, y = _vector_pair(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('timeseries must have the same length')
return metrics.mutual_info_score(None, None, contingency=np.histogram2d(x, y, bins=n_bins)[0])
def lagged_ami(x, min_lag=0, max_lag=None, lag_step=1, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $x(t + \tau)$, at multiple values of $\tau$.
Parameters
----------
x : array-like
$x(t)$.
min_lag : int, optional
The shortest lag to evaluate, in units of the sampling period $h$ of $x(t)$.
max_lag : int, optional
The longest lag to evaluate, in units of $h$.
lag_step : int, optional
The step between lags to evaluate, in units of $h$.
n_bins : int
The number of bins to use when computing the joint histogram in order to calculate mutual information.
See |ami|.
Returns
-------
lags : ndarray
The evaluated lags $\tau_i$, in units of $h$.
amis : ndarray
The average mutual information between $x(t)$ and $x(t + \tau_i)$.
See Also
--------
ami
"""
if max_lag is None:
max_lag = x.shape[0]//2
lags = np.arange(min_lag, max_lag, lag_step)
amis = [ami(reconstruct(x, lag, 2), n_bins=n_bins) for lag in lags]
return lags, np.array(amis)
def _vector_pair(a, b):
a = np.squeeze(a)
if b is None:
if a.ndim != 2 or a.shape[1] != 2:
raise ValueError('with one input, array must have be 2D with two columns')
a, b = a[:, 0], a[:, 1]
return a, np.squeeze(b)
def _vector(x):
x = np.squeeze(x)
if x.ndim != 1:
raise ValueError('x(t) must be a 1-dimensional signal')
return x
| 31.186047 | 113 | 0.631022 |
a5bef664ecd325ec7c754416c8cb289908db04d1 | 2,026 | py | Python | tests/test_fetching_info_from_websites.py | antoniodimariano/websites_metrics_collector | 5113a680612b126005ac7f9f52ed35d26b806ea0 | [
"Apache-2.0"
] | null | null | null | tests/test_fetching_info_from_websites.py | antoniodimariano/websites_metrics_collector | 5113a680612b126005ac7f9f52ed35d26b806ea0 | [
"Apache-2.0"
] | null | null | null | tests/test_fetching_info_from_websites.py | antoniodimariano/websites_metrics_collector | 5113a680612b126005ac7f9f52ed35d26b806ea0 | [
"Apache-2.0"
] | null | null | null | import unittest
from unittest import IsolatedAsyncioTestCase
from websites_metrics_collector.communication import webpages_fetcher
| 44.043478 | 117 | 0.695953 |
a5bef6fa512a2ff46684cc9ce0bb82ae7685d3ba | 773 | py | Python | planegeometry/structures/tests/random_segments.py | ufkapano/planegeometry | fa9309a4e867acedd635665f32d7f59a8eeaf2e3 | [
"BSD-3-Clause"
] | null | null | null | planegeometry/structures/tests/random_segments.py | ufkapano/planegeometry | fa9309a4e867acedd635665f32d7f59a8eeaf2e3 | [
"BSD-3-Clause"
] | null | null | null | planegeometry/structures/tests/random_segments.py | ufkapano/planegeometry | fa9309a4e867acedd635665f32d7f59a8eeaf2e3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import random
import Gnuplot # Python 2 only
from planegeometry.structures.points import Point
from planegeometry.structures.segments import Segment
gnu = Gnuplot.Gnuplot (persist = 1)
visible = True
for i in range(10):
segment = Segment(random.random(), random.random(),
random.random(), random.random())
gnu(segment.gnu(visible))
# Wyswietlenie grafu.
gnu('set terminal pdf enhanced')
gnu('set output "random_segments.pdf"')
gnu('set grid')
gnu('unset key')
gnu('set size square')
#gnu('unset border')
#gnu('unset tics')
gnu('set xlabel "x"')
gnu('set ylabel "y"')
gnu('set title "Random segments"')
gnu('set xrange [{}:{}]'.format(0, 1))
gnu('set yrange [{}:{}]'.format(0, 1))
gnu.plot('NaN title ""')
gnu('unset output')
# EOF
| 23.424242 | 55 | 0.684347 |
3c0172a4b6c39d5c3838a7e6ee2dd86d14d618b0 | 77 | py | Python | proxy/admin.py | jokajak/infinity_tracker | 21f83925d9899dc25bc58b198426f329a549b0e0 | [
"Apache-2.0"
] | 1 | 2021-01-21T08:44:21.000Z | 2021-01-21T08:44:21.000Z | proxy/admin.py | jokajak/infinity_tracker | 21f83925d9899dc25bc58b198426f329a549b0e0 | [
"Apache-2.0"
] | 126 | 2020-08-03T22:07:38.000Z | 2022-03-28T22:25:59.000Z | proxy/admin.py | jokajak/infinity_tracker | 21f83925d9899dc25bc58b198426f329a549b0e0 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin # NOQA: F401
# Register your models here.
| 19.25 | 46 | 0.753247 |
3c01c3ac689a157ca3b1ed4911d58fd47e935434 | 1,050 | py | Python | local/make_fbank.py | coolEphemeroptera/AESRC2020 | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 35 | 2020-09-26T13:40:16.000Z | 2022-03-22T19:42:20.000Z | local/make_fbank.py | coolEphemeroptera/ARNet | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 4 | 2021-04-10T13:05:52.000Z | 2022-03-14T03:22:32.000Z | local/make_fbank.py | coolEphemeroptera/ARNet | b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de | [
"Apache-2.0"
] | 7 | 2020-09-26T15:52:45.000Z | 2021-06-11T05:05:23.000Z | import python_speech_features as psf
import soundfile as sf
# import scipy.io.wavfile as wav
import pickle as pkl
import sys
import os
import re
# linux to windows
#
if __name__ == "__main__":
audio_file = sys.argv[1]
# audio_file = r"E:/LIBRISPEECH/LibriSpeech/dev/dev-clean/1272/128104/1272-128104-0000.flac"
out_file = sys.argv[2]
dir = os.path.dirname(out_file)
if not os.path.isdir(dir):os.mkdir(out_file)
mel = fbank(audio_file)
save(mel,out_file)
print(path2utt(out_file),mel.shape[0])
exit()
| 23.863636 | 97 | 0.631429 |
3c02f34d8d7c7f266cdc6308a85575de226c48f6 | 2,703 | py | Python | src/tests/test_pyning/test_combinationdict.py | essennell/pyning | c28d8fae99ab6cb4394960b72565a4915aee7adc | [
"MIT"
] | null | null | null | src/tests/test_pyning/test_combinationdict.py | essennell/pyning | c28d8fae99ab6cb4394960b72565a4915aee7adc | [
"MIT"
] | 3 | 2020-03-24T16:25:58.000Z | 2021-06-01T22:57:53.000Z | src/tests/test_pyning/test_combinationdict.py | essennell/pyning | c28d8fae99ab6cb4394960b72565a4915aee7adc | [
"MIT"
] | null | null | null | from pyning.combinationdict import CombinationDict
import pytest
if __name__ == '__main__':
pytest.main()
| 27.865979 | 70 | 0.574547 |
3c045b5de4e55fe90b3f8563b224a0193ac2dff7 | 6,917 | py | Python | stockBOT/Discord/fc_info.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 17 | 2020-11-25T07:40:18.000Z | 2022-03-07T03:29:18.000Z | stockBOT/Discord/fc_info.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 8 | 2020-12-18T13:23:59.000Z | 2021-10-03T21:41:50.000Z | stockBOT/Discord/fc_info.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 43 | 2020-12-02T09:03:57.000Z | 2021-12-23T03:30:25.000Z |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests
from requests import post
from requests import codes
| 32.474178 | 134 | 0.675293 |
3c062192bd225720274ca7e3b61333f806b3a7b1 | 6,781 | py | Python | tests/constants.py | phihos/Python-OpenVPN-LDAP-Auth | 87dd986f49555d0fb50ad8d991cf02092a9d55dc | [
"MIT"
] | 1 | 2021-12-17T14:54:36.000Z | 2021-12-17T14:54:36.000Z | tests/constants.py | phihos/python-openvpn-ldap-auth | 87dd986f49555d0fb50ad8d991cf02092a9d55dc | [
"MIT"
] | null | null | null | tests/constants.py | phihos/python-openvpn-ldap-auth | 87dd986f49555d0fb50ad8d991cf02092a9d55dc | [
"MIT"
] | null | null | null | import os
import shutil
from datetime import datetime
# INPUT PARAMS
LDAP_URL = os.environ['TEST_LDAP_URL']
LDAP_BASE_DN = os.environ['TEST_LDAP_BASE_DN']
LDAP_ADMIN_DN = os.environ['TEST_LDAP_ADMIN_DN']
LDAP_ADMIN_PASSWORD = os.environ['TEST_LDAP_ADMIN_PASSWORD']
LDAP_BIND_TIMEOUT = os.environ.get('TEST_LDAP_BIND_TIMEOUT', 5)
OPENVPN_SERVER_START_TIMEOUT = os.environ.get('TEST_OPENVPN_SERVER_START_TIMEOUT', 5)
OPENVPN_CLIENT_CONNECT_TIMEOUT = os.environ.get('TEST_OPENVPN_CLIENT_CONNECT_TIMEOUT', 2)
TEST_TIMEOUT = os.environ.get('TEST_TIMEOUT', 10)
TEST_PROMPT_DEFAULT_TIMEOUT = os.environ.get('TEST_PROMPT_DEFAULT_TIMEOUT', 3)
OPENVPN_BINARY = os.environ.get('TEST_OPENVPN_BINARY', shutil.which('openvpn'))
PYTHON_VERSION = os.environ.get('python_version', 'please set "python_version" in the env vars')
OPENVPN_VERSION = os.environ.get('openvpn_version', 'please set "openvpn_version" in the env vars')
# PATHS
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
AUTH_SCRIPT_PATH = shutil.which('openvpn-ldap-auth')
AUTH_SCRIPT_PATH_PYINSTALLER = shutil.which('openvpn-ldap-auth-pyinstaller')
BENCHMARK_DIR = os.path.join(
SCRIPT_DIR, os.pardir, 'benchmark',
f"python{PYTHON_VERSION}-openvpn{OPENVPN_VERSION}-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
)
# CONSTANTS: SERVER SETUP
OPENVPN_SERVER_PORT = 1194
OPENVPN_SERVER_DH_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'dh2048.pem'))
OPENVPN_SERVER_CA_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'ca.crt'))
OPENVPN_SERVER_CERT_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'server.crt'))
OPENVPN_SERVER_KEY_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'server.key'))
OPENVPN_SERVER_CHALLENGE_RESPONSE_PROMPT = 'Enter challenge response'
OPENVPN_SERVER_LDAP_CONFIG_PATH = '/etc/openvpn/ldap.yaml'
OPENVPN_SERVER_LDAP_C_CONFIG_PATH = '/etc/openvpn/ldap.conf'
# CONSTANTS: CMD ARGS
OPENVPN_SERVER_ARGS = ['--mode', 'server', '--server', '10.5.99.0', '255.255.255.0', '--dev', 'tun', '--port',
str(OPENVPN_SERVER_PORT), '--verb', '4', '--keepalive', '10', '120',
'--verify-client-cert', 'none', '--tls-server', '--dh',
OPENVPN_SERVER_DH_FILE, '--ca', OPENVPN_SERVER_CA_FILE, '--cert',
OPENVPN_SERVER_CERT_FILE, '--key', OPENVPN_SERVER_KEY_FILE, '--script-security', '3', '--user',
'root', '--group', 'root', '--duplicate-cn', '--max-clients', '1000', '--status',
'openvpn-status.log', '--topology', 'subnet']
OPENVPN_SERVER_ARGS_VIA_FILE = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify', AUTH_SCRIPT_PATH,
'via-file']
OPENVPN_SERVER_ARGS_VIA_ENV = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify', AUTH_SCRIPT_PATH,
'via-env']
OPENVPN_SERVER_ARGS_VIA_FILE_PYINSTALLER = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify',
AUTH_SCRIPT_PATH_PYINSTALLER,
'via-file']
OPENVPN_SERVER_ARGS_VIA_ENV_PYINSTALLER = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify',
AUTH_SCRIPT_PATH_PYINSTALLER,
'via-env']
OPENVPN_SERVER_ARGS_C_PLUGIN = OPENVPN_SERVER_ARGS + ['--plugin', '/usr/lib/openvpn/openvpn-auth-ldap.so',
OPENVPN_SERVER_LDAP_C_CONFIG_PATH, 'login',
'--username-as-common-name']
OPENVPN_CLIENT_ARGS = (
'--client', '--dev', 'tun', '--verb', '5', '--proto', 'udp', '--remote', '127.0.0.1',
str(OPENVPN_SERVER_PORT),
'--nobind', '--ifconfig-noexec', '--route-noexec', '--route-nopull', '--ca', OPENVPN_SERVER_CA_FILE,
'--auth-user-pass', '--explicit-exit-notify', '1', '--keepalive', '10', '120',
)
OPENVPN_CLIENT_ARGS_WITH_CHALLENGE = OPENVPN_CLIENT_ARGS + ('--static-challenge',
OPENVPN_SERVER_CHALLENGE_RESPONSE_PROMPT, '1')
OPENVPN_CLIENT_ARGS_WITHOUT_CHALLENGE = OPENVPN_CLIENT_ARGS
# CONSTANTS: ldap.yaml CONFIGS
CONFIG_BASE = {
'ldap': {
'url': LDAP_URL,
'bind_dn': LDAP_ADMIN_DN,
'password': LDAP_ADMIN_PASSWORD,
},
'authorization': {
'base_dn': LDAP_BASE_DN,
'search_filter': '(uid={})'
}
}
CONFIG_CHALLENGE_RESPONSE_APPEND = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'append',
}
}}
CONFIG_CHALLENGE_RESPONSE_PREPEND = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'prepend',
}
}}
CONFIG_CHALLENGE_RESPONSE_IGNORE = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'ignore',
}
}}
CONFIG_C = f"""<LDAP>
URL "{LDAP_URL}"
BindDN {LDAP_ADMIN_DN}
Password {LDAP_ADMIN_PASSWORD}
Timeout 15
TLSEnable no
FollowReferrals yes
</LDAP>
<Authorization>
BaseDN "{LDAP_BASE_DN}"
SearchFilter "(uid=%u)"
RequireGroup false
<Group>
BaseDN "{LDAP_BASE_DN}"
SearchFilter "(|(cn=developers)(cn=artists))"
MemberAttribute member
</Group>
</Authorization>
"""
# CONSTANTS: TEST CREDENTIALS
TEST_USERNAME = 'testuser'
TEST_USER_DN_TEMPLATE = "uid={},{}"
TEST_USER_DN = TEST_USER_DN_TEMPLATE.format(TEST_USERNAME, LDAP_BASE_DN)
TEST_USER_PASSWORD = 'testpass'
TEST_USER_WRONG_PASSWORD = 'wrong_password'
# CONSTANTS: EXPECTED OPENVPN LOG FRAGMENTS
OPENVPN_LOG_SERVER_INIT_COMPLETE = 'Initialization Sequence Completed'
OPENVPN_LOG_CLIENT_INIT_COMPLETE = 'Initialization Sequence Completed'
OPENVPN_LOG_AUTH_SUCCEEDED_SERVER = 'authentication succeeded for username'
OPENVPN_LOG_AUTH_SUCCEEDED_CLIENT = 'Initialization Sequence Completed'
OPENVPN_LOG_AUTH_FAILED_SERVER = 'verification failed for peer'
OPENVPN_LOG_AUTH_FAILED_CLIENT = 'AUTH_FAILED'
# CONSTANTS: BENCHMARK CSV
BENCHMARK_CSV_HEADER_LABEL = 'label'
BENCHMARK_CSV_HEADER_PYTHON = 'python_version'
BENCHMARK_CSV_HEADER_OPENVPN = 'openvpn_version'
BENCHMARK_CSV_HEADER_LOGINS = 'concurrent_logins'
BENCHMARK_CSV_HEADER_MIN = 'min'
BENCHMARK_CSV_HEADER_MAX = 'max'
BENCHMARK_CSV_HEADER_AVG = 'avg'
BENCHMARK_CSV_HEADERS = (BENCHMARK_CSV_HEADER_LABEL, BENCHMARK_CSV_HEADER_PYTHON, BENCHMARK_CSV_HEADER_OPENVPN,
BENCHMARK_CSV_HEADER_LOGINS, BENCHMARK_CSV_HEADER_MIN, BENCHMARK_CSV_HEADER_MAX,
BENCHMARK_CSV_HEADER_AVG)
| 46.765517 | 118 | 0.668191 |
3c06dc2f7a1273c76e68bacba57d4a3e26a88d66 | 1,377 | py | Python | http_utils/recs/top_popular_recommendation_handler.py | drayvs/grouple-recsys-production | 5141bacd5dc64e023059292faff5bfdefefd9f23 | [
"MIT"
] | null | null | null | http_utils/recs/top_popular_recommendation_handler.py | drayvs/grouple-recsys-production | 5141bacd5dc64e023059292faff5bfdefefd9f23 | [
"MIT"
] | null | null | null | http_utils/recs/top_popular_recommendation_handler.py | drayvs/grouple-recsys-production | 5141bacd5dc64e023059292faff5bfdefefd9f23 | [
"MIT"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from webargs import fields
from webargs.tornadoparser import use_args
from loguru import logger
from http_utils.base import BaseHandler, MAX_THREADS
| 37.216216 | 102 | 0.658678 |
3c07a5241ac429798f7ed558bc1d6c02e0ff5253 | 662 | py | Python | NucleicAcids/dssrBlock3.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | NucleicAcids/dssrBlock3.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | NucleicAcids/dssrBlock3.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | # Description: DSSR block representation for a multi-state example after loading the dssr_block.py script by Thomas Holder. The x3dna-dssr executable needs to be in the PATH. Edit the path to Thomas Holder's block script.
# Source: Generated while helping Miranda Adams at U of Saint Louis.
"""
cmd.do('reinitialize;')
cmd.do('run ${1:"/Users/blaine/.pymol/startup/dssr_block.py"};')
cmd.do('fetch ${2:2n2d}, async=0;')
cmd.do('dssr_block ${2:2n2d}, 0;')
cmd.do('set all_states;')
"""
cmd.do('reinitialize;')
cmd.do('run "/Users/blaine/.pymol/startup/dssr_block.py";')
cmd.do('fetch 2n2d, async=0;')
cmd.do('dssr_block 2n2d, 0;')
cmd.do('set all_states;')
| 38.941176 | 222 | 0.712991 |
3c091171ce7d459ab7bdf55ac4292ac21cd0a68c | 12,007 | py | Python | custom_components/climate/gree.py | ardeus-ua/gree-python-api | ecfbdef34ff99fc0822f70be17cdeb6c625fd276 | [
"MIT"
] | 1 | 2018-12-10T17:32:48.000Z | 2018-12-10T17:32:48.000Z | custom_components/climate/gree.py | ardeus-ua/gree-python-api | ecfbdef34ff99fc0822f70be17cdeb6c625fd276 | [
"MIT"
] | null | null | null | custom_components/climate/gree.py | ardeus-ua/gree-python-api | ecfbdef34ff99fc0822f70be17cdeb6c625fd276 | [
"MIT"
] | 1 | 2020-08-11T14:51:04.000Z | 2020-08-11T14:51:04.000Z | import asyncio
import logging
import binascii
import socket
import os.path
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (DOMAIN, ClimateDevice, PLATFORM_SCHEMA, STATE_IDLE, STATE_HEAT, STATE_COOL, STATE_AUTO, STATE_DRY,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE, SUPPORT_SWING_MODE)
from homeassistant.const import (ATTR_UNIT_OF_MEASUREMENT, ATTR_TEMPERATURE, CONF_NAME, CONF_HOST, CONF_MAC, CONF_TIMEOUT, CONF_CUSTOMIZE)
from homeassistant.helpers.event import (async_track_state_change)
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from configparser import ConfigParser
from base64 import b64encode, b64decode
REQUIREMENTS = ['gree==0.3.2']
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE
CONF_UNIQUE_KEY = 'unique_key'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
CONF_TARGET_TEMP = 'target_temp'
CONF_TEMP_SENSOR = 'temp_sensor'
CONF_OPERATIONS = 'operations'
CONF_FAN_MODES = 'fan_modes'
CONF_SWING_LIST = 'swing_list'
CONF_DEFAULT_OPERATION = 'default_operation'
CONF_DEFAULT_FAN_MODE = 'default_fan_mode'
CONF_DEFAULT_SWING_MODE = 'default_swing_mode'
CONF_DEFAULT_OPERATION_FROM_IDLE = 'default_operation_from_idle'
STATE_FAN = 'fan'
STATE_OFF = 'off'
DEFAULT_NAME = 'GREE AC Climate'
DEFAULT_TIMEOUT = 10
DEFAULT_RETRY = 3
DEFAULT_MIN_TEMP = 16
DEFAULT_MAX_TEMP = 30
DEFAULT_TARGET_TEMP = 20
DEFAULT_OPERATION_LIST = [STATE_OFF, STATE_AUTO, STATE_COOL, STATE_DRY, STATE_FAN, STATE_HEAT]
OPERATION_LIST_MAP = {
STATE_AUTO: 0,
STATE_COOL: 1,
STATE_DRY: 2,
STATE_FAN: 3,
STATE_HEAT: 4,
}
DEFAULT_FAN_MODE_LIST = ['auto', 'low', 'medium-low', 'medium', 'medium-high', 'high']
FAN_MODE_MAP = {
'auto': 0,
'low': 1,
'medium-low': 2,
'medium': 3,
'medium-high': 4,
'high': 5
}
DEFAULT_SWING_LIST = ['default', 'swing-full-range', 'fixed-up', 'fixed-middle', 'fixed-down', 'swing-up', 'swing-middle', 'swing-down']
SWING_MAP = {
'default': 0,
'swing-full-range': 1,
'fixed-up': 2,
'fixed-middle': 4,
'fixed-down': 6,
'swing-up': 11,
'swing-middle': 9,
'swing-down': 7
}
DEFAULT_OPERATION = 'idle'
DEFAULT_FAN_MODE = 'auto'
DEFAULT_SWING_MODE = 'default'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Required(CONF_UNIQUE_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int,
vol.Optional(CONF_TARGET_TEMP, default=DEFAULT_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_TEMP_SENSOR): cv.entity_id,
vol.Optional(CONF_DEFAULT_OPERATION, default=DEFAULT_OPERATION): cv.string,
vol.Optional(CONF_DEFAULT_FAN_MODE, default=DEFAULT_FAN_MODE): cv.string,
vol.Optional(CONF_DEFAULT_SWING_MODE, default=DEFAULT_SWING_MODE): cv.string,
vol.Optional(CONF_DEFAULT_OPERATION_FROM_IDLE): cv.string
})
def set_temperature(self, **kwargs):
    """Update the target temperature and push it to the device.

    The new value is only transmitted while the unit is running; when it
    is 'off'/'idle' and a default wake-up mode is configured, that mode
    is activated instead (which sends the command as a side effect).
    """
    target = kwargs.get(ATTR_TEMPERATURE)
    if target is not None:
        self._target_temperature = target
        mode = self._current_operation.lower()
        if mode != 'off' and mode != 'idle':
            # Unit is running: send the updated temperature directly.
            self.send_command()
        elif self._default_operation_from_idle is not None:
            # Unit is idle: wake it into the configured default mode.
            self.set_operation_mode(self._default_operation_from_idle)
        self.schedule_update_ha_state()
def set_fan_mode(self, fan):
    """Set a new target fan mode and push it to the device.

    The command is only sent while the unit is actively running; in
    'off'/'idle' the new mode is cached and transmitted with the next
    operation change.  (Docstring fixed: it previously said
    "Set new target temperature.", a copy-paste error.)
    """
    self._current_fan_mode = fan
    # Only talk to the device when it is actually running.
    if self._current_operation.lower() not in ('off', 'idle'):
        self.send_command()
    self.schedule_update_ha_state()
def set_operation_mode(self, operation_mode):
    """Set a new HVAC operation mode and push it to the device.

    Unlike the fan/temperature setters, the command is always sent,
    since changing mode is what wakes the unit up.  (Docstring fixed:
    it previously said "Set new target temperature.", a copy-paste
    error.)
    """
    self._current_operation = operation_mode
    self.send_command()
    self.schedule_update_ha_state()
def set_swing_mode(self, swing_mode):
    """Select the louver swing position and sync it to the unit."""
    # Remember the requested position, transmit it, then ask Home
    # Assistant to refresh the entity's state.
    self._current_swing_mode = swing_mode
    self.send_command()
    self.schedule_update_ha_state()
| 34.404011 | 228 | 0.68077 |
3c09d1eafa4175a7dae038754ad5b4a09e871bc9 | 6,492 | py | Python | overhang/dnastorage_utils/system/header.py | dna-storage/DINOS | 65f4142e80d646d7eefa3fc16d747d21ec43fbbe | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | overhang/dnastorage_utils/system/header.py | dna-storage/DINOS | 65f4142e80d646d7eefa3fc16d747d21ec43fbbe | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | overhang/dnastorage_utils/system/header.py | dna-storage/DINOS | 65f4142e80d646d7eefa3fc16d747d21ec43fbbe | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from dnastorage.codec.base_conversion import convertIntToBytes,convertBytesToInt
from dnastorage.arch.builder import *
import editdistance as ed
#from dnastorage.primer.primer_util import edit_distance
from io import BytesIO
from dnastorage.util.packetizedfile import *
import math
import struct
from dnastorage.system.formats import *
### Designed to fit on a single strand for most use cases
###
### Every header strand begins with special sequence that can't be used at the beginning of indices: ATCGATGC
###
### 1. 'ATCGATGC' [1]
### 2. short index - usually 0, 0-255, at most 16 strands [1]
### 3. major version (0-255) [1]
### 4. minor version (0-255) [1]
### 5. num bytes for size [1]
### 6. size [x]
### 7 num bytes for original filename [2]
### 8. null terminated string
### 9. encoding style [2]
### 10. length of remaining record (2 bytes) [2]
### 11. remaining record byte encoded [?]
### Pad to final width using arbitrary sequence
system_version = { 'major': 0, 'minor':1 }
magic_header = 'ATCGATGC' #'CCATCCAT'
if __name__ == "__main__":
strands = encode_file_header("",0xA,2,[1,2,3,4],"A"*19+"G","T"*19+"G")
for s in strands:
print "{}: strand={}".format(len(s), s)
print decode_file_header(strands,"A"*19+"G","T"*19+"G")
| 31.211538 | 109 | 0.608595 |
3c0c8d1fb6b9a95e3b3506596eae5b34be7226ac | 2,386 | py | Python | numba/containers/typedtuple.py | liuzhenhai/numba | 855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf | [
"BSD-2-Clause"
] | 1 | 2015-01-29T06:52:36.000Z | 2015-01-29T06:52:36.000Z | numba/containers/typedtuple.py | shiquanwang/numba | a41c85fdd7d6abf8ea1ebe9116939ddc2217193b | [
"BSD-2-Clause"
] | null | null | null | numba/containers/typedtuple.py | shiquanwang/numba | a41c85fdd7d6abf8ea1ebe9116939ddc2217193b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from functools import partial
import numba as nb
from numba.containers import orderedcontainer
import numpy as np
INITIAL_BUFSIZE = 5
_tuple_cache = {}
#-----------------------------------------------------------------------
# Runtime Constructor
#-----------------------------------------------------------------------
def typedtuple(item_type, iterable=None, _tuple_cache=_tuple_cache):
"""
>>> typedtuple(nb.int_)
()
>>> ttuple = typedtuple(nb.int_, range(10))
>>> ttuple
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
>>> ttuple[5]
5L
>>> typedtuple(nb.float_, range(10))
(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
"""
typedtuple_ctor = compile_typedtuple(item_type)
return typedtuple_ctor(iterable)
#-----------------------------------------------------------------------
# Typedlist implementation
#-----------------------------------------------------------------------
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26.808989 | 75 | 0.544007 |
3c0cdb9dded53f14973b9af474148c0b7d6c7d6f | 1,353 | py | Python | pythondata_cpu_minerva/__init__.py | litex-hub/litex-data-cpu-minerva | 3896ce15f5d6420f7797b1f95249f948533bf542 | [
"BSD-2-Clause"
] | null | null | null | pythondata_cpu_minerva/__init__.py | litex-hub/litex-data-cpu-minerva | 3896ce15f5d6420f7797b1f95249f948533bf542 | [
"BSD-2-Clause"
] | null | null | null | pythondata_cpu_minerva/__init__.py | litex-hub/litex-data-cpu-minerva | 3896ce15f5d6420f7797b1f95249f948533bf542 | [
"BSD-2-Clause"
] | null | null | null | import os.path
__dir__ = os.path.split(os.path.abspath(os.path.realpath(__file__)))[0]
data_location = os.path.join(__dir__, "sources")
src = "https://github.com/lambdaconcept/minerva"
# Module version
version_str = "0.0.post260"
version_tuple = (0, 0, 260)
try:
from packaging.version import Version as V
pversion = V("0.0.post260")
except ImportError:
pass
# Data version info
data_version_str = "0.0.post120"
data_version_tuple = (0, 0, 120)
try:
from packaging.version import Version as V
pdata_version = V("0.0.post120")
except ImportError:
pass
data_git_hash = "08251daae42ec8cfc54fb82865a5942727186192"
data_git_describe = "v0.0-120-g08251da"
data_git_msg = """\
commit 08251daae42ec8cfc54fb82865a5942727186192
Author: Jean-Franois Nguyen <jf@jfng.fr>
Date: Tue Apr 5 15:33:21 2022 +0200
stage: fix commit 6c3294b9.
"""
# Tool version info
tool_version_str = "0.0.post140"
tool_version_tuple = (0, 0, 140)
try:
from packaging.version import Version as V
ptool_version = V("0.0.post140")
except ImportError:
pass
def data_file(f):
    """Get absolute path for file inside pythondata_cpu_minerva.

    Raises IOError when the resolved path does not exist.
    """
    fn = os.path.join(data_location, f)
    fn = os.path.abspath(fn)
    if not os.path.exists(fn):
        # Bug fix: the original used "...".format(f), passing f
        # positionally while the template uses the named field {f},
        # which raised KeyError('f') instead of the intended IOError.
        raise IOError("File {f} doesn't exist in pythondata_cpu_minerva".format(f=f))
    return fn
3c0d77712915106228bf8f6e63542f7a42d1d3f1 | 1,602 | py | Python | config.py | jasonyanglu/fedavgpy | cefbe5854f02d3df1197d849872286439c86e949 | [
"MIT"
] | 1 | 2022-03-18T15:27:29.000Z | 2022-03-18T15:27:29.000Z | config.py | jasonyanglu/fedavgpy | cefbe5854f02d3df1197d849872286439c86e949 | [
"MIT"
] | null | null | null | config.py | jasonyanglu/fedavgpy | cefbe5854f02d3df1197d849872286439c86e949 | [
"MIT"
] | null | null | null | # GLOBAL PARAMETERS
DATASETS = ['sent140', 'nist', 'shakespeare',
'mnist', 'synthetic', 'cifar10']
TRAINERS = {'fedavg': 'FedAvgTrainer',
'fedavg4': 'FedAvg4Trainer',
'fedavg5': 'FedAvg5Trainer',
'fedavg9': 'FedAvg9Trainer',
'fedavg_imba': 'FedAvgTrainerImba',}
OPTIMIZERS = TRAINERS.keys()
MODEL_PARAMS = ModelConfig()
| 38.142857 | 103 | 0.529963 |
3c0dac01937088c28952c4c1e01fa4a3c19fcaa9 | 3,266 | py | Python | Gan/gan.py | caiyueliang/CarClassification | a8d8051085c4e66ed3ed67e56360a515c9762cd5 | [
"Apache-2.0"
] | null | null | null | Gan/gan.py | caiyueliang/CarClassification | a8d8051085c4e66ed3ed67e56360a515c9762cd5 | [
"Apache-2.0"
] | null | null | null | Gan/gan.py | caiyueliang/CarClassification | a8d8051085c4e66ed3ed67e56360a515c9762cd5 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from argparse import ArgumentParser
import os
import model_train
from torchvision import models
if __name__ == '__main__':
args = parse_argvs()
# train_path = args.train_path
# test_path = args.test_path
# output_model_path = args.output_model_path
# num_classes = args.classes_num
# batch_size = args.batch_size
# img_size = args.img_size
# lr = args.lr
# model = models.resnet18(num_classes=num_classes)
# model = models.squeezenet1_1(num_classes=num_classes)
model_train = model_train.ModuleTrain(opt=args)
model_train.train()
# model_train.test(show_img=True)
| 48.029412 | 111 | 0.709124 |
3c1079153ceb5f7b4146c5df6cbab9e874e7d7f4 | 854 | py | Python | Modulo 2/ex068.py | Werberty/Curso-em-Video-Python3 | 24c0299edd635fb9c2db2ecbaf8532d292f92d49 | [
"MIT"
] | 1 | 2022-03-06T11:37:47.000Z | 2022-03-06T11:37:47.000Z | Modulo 2/ex068.py | Werberty/Curso-em-Video-Python3 | 24c0299edd635fb9c2db2ecbaf8532d292f92d49 | [
"MIT"
] | null | null | null | Modulo 2/ex068.py | Werberty/Curso-em-Video-Python3 | 24c0299edd635fb9c2db2ecbaf8532d292f92d49 | [
"MIT"
] | null | null | null | from random import randint
print('-=-'*10)
print('JOGO DO PAR OU IMPAR')
cont = 0
while True:
print('-=-' * 10)
n = int(input('Digite um valor: '))
op = str(input('Par ou impar? [P/I] ')).upper().strip()[0]
ia = randint(0, 10)
res = n + ia
print('-'*30)
print(f'Voc jogou {n} e o computador {ia}. Total de {res} ', end='')
if res % 2 == 0:
print('DEU PAR')
print('-' * 30)
if op == 'P':
print('Voc VENCEU!\nVamos jogar novamente...')
cont += 1
else:
break
elif res % 2 != 0:
print('DEU IMPAR')
print('-' * 30)
if op == 'I':
print('Voc VENCEU!\nVamos jogar novamente...')
cont += 1
else:
break
print('Voc PERDEU!')
print('-=-' * 10)
print(f'GAME OVER! Voc venceu {cont} vez.')
| 25.878788 | 73 | 0.480094 |
3c10cbd008220b779ffa61252edc4ab7bdc901a1 | 5,506 | py | Python | server/inbox/views.py | amy-xiang/CMPUT404_PROJECT | cbcea0cd164d6377ede397e934f960505e8f347a | [
"W3C-20150513"
] | 1 | 2021-04-06T22:35:53.000Z | 2021-04-06T22:35:53.000Z | server/inbox/views.py | amy-xiang/CMPUT404_PROJECT | cbcea0cd164d6377ede397e934f960505e8f347a | [
"W3C-20150513"
] | null | null | null | server/inbox/views.py | amy-xiang/CMPUT404_PROJECT | cbcea0cd164d6377ede397e934f960505e8f347a | [
"W3C-20150513"
] | null | null | null | from django.core.exceptions import ValidationError
from django.shortcuts import render, get_object_or_404
from django.db import IntegrityError
from rest_framework import authentication, generics, permissions, status
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from posts.serializers import PostSerializer
from author.serializers import AuthorProfileSerializer
from main.models import Author
from nodes.models import Node
from main import utils
from posts.models import Post
from likes.models import Like
from .models import Inbox
from .serializers import InboxSerializer
from urllib.parse import urlparse
import requests
import json
# api/author/{AUTHOR_ID}/inbox/
| 44.403226 | 116 | 0.606793 |
3c119513513dbce82555731b084d2de00dc48dc8 | 1,873 | py | Python | black_list_all.py | philipempl/mail_watch | 802df3146c462aeb670a4a973e428976d90abf06 | [
"Apache-2.0"
] | null | null | null | black_list_all.py | philipempl/mail_watch | 802df3146c462aeb670a4a973e428976d90abf06 | [
"Apache-2.0"
] | 1 | 2019-12-11T08:49:51.000Z | 2019-12-11T08:49:51.000Z | black_list_all.py | philipempl/mail_watch | 802df3146c462aeb670a4a973e428976d90abf06 | [
"Apache-2.0"
] | null | null | null | import imaplib, base64, os, email, re, configparser
import tkinter as tk
from tkinter import messagebox
from datetime import datetime
from email import generator
from dateutil.parser import parse
init() | 28.378788 | 99 | 0.620929 |
3c11fb38e2dcb32d635011cf74ded4f173fac7e7 | 539 | py | Python | chpt6/Pentagonal_numbers.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | null | null | null | chpt6/Pentagonal_numbers.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-21T09:39:00.000Z | 2018-05-27T15:59:15.000Z | chpt6/Pentagonal_numbers.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-19T14:59:56.000Z | 2018-05-19T15:25:48.000Z | #
# This program is a function that displays the first 100 pentagonal numbers with 10 numbers on each line.
# A pentagonal number is defined as n(3n - 1)/2 for n = 1, 2, c , and so on.
# So, the first few numbers are 1, 5, 12, 22, ....
main()
| 23.434783 | 105 | 0.595547 |
3c129d467e7a619b95bbc8aa752a9a6e384e5ae6 | 4,075 | py | Python | iraclis/_1databases.py | nespinoza/Iraclis | 3b5dd8d6bc073f6d2c24ad14341020694255bf65 | [
"CC-BY-4.0"
] | null | null | null | iraclis/_1databases.py | nespinoza/Iraclis | 3b5dd8d6bc073f6d2c24ad14341020694255bf65 | [
"CC-BY-4.0"
] | null | null | null | iraclis/_1databases.py | nespinoza/Iraclis | 3b5dd8d6bc073f6d2c24ad14341020694255bf65 | [
"CC-BY-4.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ._0errors import *
from ._0imports import *
databases = Databases()
| 38.084112 | 114 | 0.553374 |
3c134e04d61928fa6fcc6871ade77a7efb97baf0 | 1,029 | py | Python | Level2/Ex_5.py | zac11/Python_Excerices | 775739e2639be1f82cc3690c854b9ea0ece05042 | [
"Apache-2.0"
] | 2 | 2019-03-09T20:31:06.000Z | 2020-06-19T12:15:13.000Z | Level2/Ex_5.py | zac11/Python_Excerices | 775739e2639be1f82cc3690c854b9ea0ece05042 | [
"Apache-2.0"
] | null | null | null | Level2/Ex_5.py | zac11/Python_Excerices | 775739e2639be1f82cc3690c854b9ea0ece05042 | [
"Apache-2.0"
] | 1 | 2018-08-11T18:36:49.000Z | 2018-08-11T18:36:49.000Z | """
Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all
duplicate words and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world
"""
string_input = input()
words =[word for word in string_input.split(" ")]
print(" ".join(sorted(list(set(words)))))
"""
Let's break it down now
print(set(words))
This will print a set of the words, with all the unique values
print(list(set(words)))
Create a list out of the values of words
print(sorted(list(set(words))))
This will sort the list
print(" ".join(sorted(list(set(words)))))
This is join the sorted list items with a whitespace
For this input :
I like to yawn and I also like to make a music and a car
Now output will be :
I a also and car like make music to yawn
Notice that the uppercase I is sorted at first position
"""
| 19.415094 | 118 | 0.74344 |
3c1675a2a9274be019b322c8830f740dbd48fb14 | 6,063 | py | Python | alfworld/agents/utils/traj_process.py | roy860328/VSGM | 3ec19f9cf1401cecf45527687936b8fe4167f672 | [
"MIT"
] | 6 | 2021-05-22T15:33:42.000Z | 2022-01-12T03:34:39.000Z | alfworld/agents/utils/traj_process.py | roy860328/VSGM | 3ec19f9cf1401cecf45527687936b8fe4167f672 | [
"MIT"
] | 1 | 2021-06-19T10:04:13.000Z | 2021-06-20T03:37:23.000Z | alfworld/agents/utils/traj_process.py | roy860328/VSGM | 3ec19f9cf1401cecf45527687936b8fe4167f672 | [
"MIT"
] | null | null | null | import os
import cv2
import json
import numpy as np
import h5py
from PIL import Image
TASK_TYPES = {1: "pick_and_place_simple",
2: "look_at_obj_in_light",
3: "pick_clean_then_place_in_recep",
4: "pick_heat_then_place_in_recep",
5: "pick_cool_then_place_in_recep",
6: "pick_two_obj_and_place"}
| 41.527397 | 102 | 0.628072 |
3c17265b394405d74fda0b7ba580609c53a824f6 | 846 | py | Python | log.py | bsha3l173/NetDiagBot | c76d00a34ae4587942010b2370dd0ac35a83bcdd | [
"Unlicense"
] | null | null | null | log.py | bsha3l173/NetDiagBot | c76d00a34ae4587942010b2370dd0ac35a83bcdd | [
"Unlicense"
] | null | null | null | log.py | bsha3l173/NetDiagBot | c76d00a34ae4587942010b2370dd0ac35a83bcdd | [
"Unlicense"
] | null | null | null | __author__ = 'bsha3l173'
import logging
import datetime
from conf import LOG_FILENAME
| 33.84 | 108 | 0.611111 |
3c1927e4c80951e764d207f99cb77de8d5e6eb00 | 1,850 | py | Python | selenium-browser.py | steflayanto/international-google-search | 05cc773b158fe11202fdf39fb515b398a08b7e3c | [
"MIT"
] | null | null | null | selenium-browser.py | steflayanto/international-google-search | 05cc773b158fe11202fdf39fb515b398a08b7e3c | [
"MIT"
] | null | null | null | selenium-browser.py | steflayanto/international-google-search | 05cc773b158fe11202fdf39fb515b398a08b7e3c | [
"MIT"
] | null | null | null | import os, time, pyautogui
import selenium
from selenium import webdriver
from location_reference import country_map
# STATIC SETTINGS
DPI = 125 # Scaling factor of texts and apps in display settings
screen_dims = [x / (DPI/100) for x in pyautogui.size()]
code_map = country_map()
print("International Google Search")
print("Supported Countries: USA, UK, Japan, Canada, Germany, Italy, France, Australia, Brasil, India, Korea, Pakistan")
query = input("Please input Search Query: ")
text = " "
codes = []
while text is not "" and len(codes) != 3:
text = input("Input Country. Input nothing to start search: ").lower()
if text not in code_map.keys():
print("\tERROR: Country not recognized")
continue
codes.append(code_map[text])
print("Starting Search")
# Using Chrome Incognito to access web
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
drivers = []
for i in range(3):
drivers.append(webdriver.Chrome(chrome_options=chrome_options))
drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
assert len(codes) == len(drivers)
for i, driver in enumerate(drivers):
# Open the website
code = codes[i]
driver.get('https://www.google.com/ncr')
time.sleep(0.5)
driver.get('https://www.google.com/?gl=' + code)
# print(screen_dims)
# print(driver.get_window_size())
driver.set_window_size(screen_dims[0] / 3, screen_dims[1])
# print(driver.get_window_size())
element = driver.find_element_by_name("q")
element.send_keys(query)
element.submit()
# for i in range(3):
# drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
# driver.manage().window().setPosition(0,0)
# Get Search Box
# element = driver.find_element_by_name("q")
# element.send_keys("Hotels")
# element.submit()
input("Press enter to exit") | 28.90625 | 120 | 0.702162 |
3c1ce045f39d2d470a259001626bc914b8162303 | 29 | py | Python | homeassistant/components/thomson/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/thomson/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/thomson/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The thomson component."""
| 14.5 | 28 | 0.655172 |
3c1d0a50a97a1bf750da3e79140c45303971c672 | 2,027 | py | Python | registration/admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | null | null | null | registration/admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | 6 | 2020-02-11T23:05:13.000Z | 2021-06-10T20:43:51.000Z | registration/admin.py | allenallen/interedregistration | d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9 | [
"MIT"
] | null | null | null | import csv
from django.contrib import admin
from django.http import HttpResponse
from .models import Student, SchoolList, Event, ShsTrack, SchoolOfficial
admin.site.register(SchoolList)
admin.site.register(ShsTrack)
| 30.712121 | 117 | 0.665022 |
3c1e8f234365a8d2c0de799db1420fb70afb127b | 1,251 | py | Python | python/src/aoc/year2016/day5.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | 1 | 2021-02-16T21:30:04.000Z | 2021-02-16T21:30:04.000Z | python/src/aoc/year2016/day5.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | null | null | null | python/src/aoc/year2016/day5.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | null | null | null | import hashlib
from itertools import islice
from aoc.util import load_input
def part1(lines):
"""
>>> part1(['abc'])
'18f47a30'
"""
door_id = lines[0].strip()
return "".join(islice(search(door_id, is_part1=True), 8))
def part2(lines, be_extra_proud=True):
"""
>>> part2(['abc'], False)
'05ace8e3'
"""
result = 8 * [" "]
count = 0
for position, character in search(lines[0].strip(), is_part2=True):
if result[position] == " ":
result[position] = character
count += 1
if count == 8:
return "".join(result)
if be_extra_proud:
print("".join(result))
if __name__ == "__main__":
data = load_input(__file__, 2016, "5")
print(part1(data))
print(part2(data))
| 24.529412 | 71 | 0.529976 |
3c1f8c82eeba6453a646f8492c4afe649539ab25 | 2,324 | py | Python | arraycircles.py | BastiHz/arraycircles | cf2e8ac48b099570d6b351ae84dc060263ee4e3d | [
"MIT"
] | null | null | null | arraycircles.py | BastiHz/arraycircles | cf2e8ac48b099570d6b351ae84dc060263ee4e3d | [
"MIT"
] | null | null | null | arraycircles.py | BastiHz/arraycircles | cf2e8ac48b099570d6b351ae84dc060263ee4e3d | [
"MIT"
] | null | null | null | import math
import random
import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
import numpy as np
import pygame as pg
WINDOW_SIZE = (800, 600)
FPS = 60
pg.init()
window = pg.display.set_mode(WINDOW_SIZE)
clock = pg.time.Clock()
font = pg.font.SysFont("monospace", 20)
hues = (0, 120, 240)
angles = [math.radians(i) for i in (0, 120, 240)]
window_center_x = WINDOW_SIZE[0] // 2
window_center_y = WINDOW_SIZE[1] // 2
distance_from_center = 75
circle_surfs = [None, None, None]
circle_rects = [None, None, None]
for i in range(3):
circle = make_circle_array(200, hues[i])
circle_surf = pg.surfarray.make_surface(circle)
circle_surfs[i] = circle_surf
circle_rect = circle_surf.get_rect()
circle_rect.center = [
window_center_x + math.sin(angles[i]) * distance_from_center,
window_center_y - math.cos(angles[i]) * distance_from_center
]
circle_rects[i] = circle_rect
running = True
while running:
clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
elif event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
window.fill(pg.Color("black"))
fps_text = font.render(f"{clock.get_fps():.0f}", False, pg.Color("white"))
window.blit(fps_text, (0, 0))
for i in range(3):
window.blit(
circle_surfs[i],
circle_rects[i],
special_flags=pg.BLEND_RGB_ADD
)
pg.display.flip()
| 27.023256 | 84 | 0.623924 |
3c1fbd1f77839d16929ae16aa95f7765710bb079 | 1,268 | py | Python | choosy/star.py | creiht/choosy | 08c18f1480e542ee122b86a0b47a30c8e5b4017e | [
"BSD-3-Clause"
] | null | null | null | choosy/star.py | creiht/choosy | 08c18f1480e542ee122b86a0b47a30c8e5b4017e | [
"BSD-3-Clause"
] | null | null | null | choosy/star.py | creiht/choosy | 08c18f1480e542ee122b86a0b47a30c8e5b4017e | [
"BSD-3-Clause"
] | null | null | null | from flask import (
abort, Blueprint, current_app, flash, g, redirect, render_template, request,
url_for
)
import giphy_client
from werkzeug.exceptions import abort
from choosy.auth import login_required
from choosy import db
bp = Blueprint("star", __name__)
| 26.978723 | 80 | 0.605678 |
3c1ff1fa706a7ee54f33c5565b4c5b7b1c4bf065 | 7,700 | py | Python | src/1-3_autocorrect.py | BernhardSchiffer/1-dynamic-programming | 81d89e6d579a329058a40b0e6c85b45c97db083a | [
"MIT"
] | null | null | null | src/1-3_autocorrect.py | BernhardSchiffer/1-dynamic-programming | 81d89e6d579a329058a40b0e6c85b45c97db083a | [
"MIT"
] | null | null | null | src/1-3_autocorrect.py | BernhardSchiffer/1-dynamic-programming | 81d89e6d579a329058a40b0e6c85b45c97db083a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# %%
# Assignment Pt. 1: Edit Distances
import numpy as np
from bs4 import BeautifulSoup
import math
vocabulary_file = open('../res/count_1w.txt', 'r')
lines = vocabulary_file.readlines()
vocabulary = dict()
word_count = 0
# Strips the newline character
for line in lines:
line = line.strip()
w = line.split('\t')
word = {'word': w[0], 'count': w[1]}
word_count = word_count + int(w[1])
vocabulary[word['word']] = word
print(len(vocabulary))
print(list(vocabulary.values())[0:5])
gem_doppel = [
("GCGTATGAGGCTAACGC", "GCTATGCGGCTATACGC"),
("khler schrank", "schler krank"),
("the longest", "longest day"),
("nicht ausgeloggt", "licht ausgenockt"),
("gurken schaben", "schurkengaben")
]
# %%
assert hamming('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == 10
assert hamming('khler schrank', 'schler krank') == 13
assert hamming('the longest', 'longest day') == 11
assert hamming('nicht ausgeloggt', 'licht ausgenockt') == 4
assert hamming('gurken schaben', 'schurkengaben') == 14
# %%
assert levenshtein('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == (3, 'mmdmmmmsmmmmmimmmm')
assert levenshtein('khler schrank', 'schler krank') == (6, 'ssmimmmmsddmmmm')
assert levenshtein('the longest', 'longest day') == (8, 'ddddmmmmmmmiiii')
assert levenshtein('nicht ausgeloggt', 'licht ausgenockt') == (4, 'smmmmmmmmmmsmssm')
assert levenshtein('gurken schaben', 'schurkengaben') == (7, 'siimmmmmsdddmmmm')
# %%
# Assignment Pt. 2: Auto-Correct
def suggest(w: str, dist, max_cand=5) -> list:
    """Propose spelling corrections for *w*.

    w: word in question
    dist: edit distance function, returning (distance, operations)
    max_cand: maximum number of suggestions
    Returns a list of (word, distance, score) tuples; score combines the
    inverse distance with the word's log-probability in the corpus.
    """
    # Known word: return it alone with distance 0 and its log-probability.
    if w in vocabulary:
        log_prior = math.log(int(vocabulary[w]['count']) / word_count)
        return [(w, 0, log_prior)]
    candidates = []
    for entry in list(vocabulary.values())[:]:
        d, _ = dist(w, entry['word'])
        log_prior = math.log(int(entry['count']) / word_count)
        candidates.append((entry['word'], d, 0.5 * math.log(1 / d) + log_prior))
    # Rank by edit distance and keep the best few.
    candidates.sort(key=lambda cand: cand[1])
    return candidates[:max_cand]
examples = [
"pirates", # in-voc
"pirutes", # pirates?
"continoisly", # continuosly?
]
for w in examples[:]:
print(w, suggest(w, levenshtein, max_cand=3))
# sample result; your scores may vary!
# pirates [('pirates', 0, -11.408058827802126)]
# pirutes [('pirates', 1, -11.408058827802126), ('minutes', 2, -8.717825438953103), ('viruses', 2, -11.111468702571859)]
# continoisly [('continously', 1, -15.735337826575178), ('continuously', 2, -11.560071979871001), ('continuosly', 2, -17.009283000138204)]
# %%
# Assignment Pt. 3: Needleman-Wunsch
# reading content
file = open("../res/de.xml", "r")
contents = file.read()
# parsing
soup = BeautifulSoup(contents, 'xml')
# get characters
keys = soup.find_all('char')
keyboard = {}
# display content
for key in keys:
k = {'value': key.string}
# get key of character
parent = key.parent
k['left'] = parent['left']
k['top'] = parent['top']
k['width'] = parent['width']
k['height'] = parent['height']
k['fingerIndex'] = parent['fingerIndex']
keyboard[k['value']] = k
# get special keys
specialKeys = soup.find_all('specialKey')
for key in specialKeys:
if key['type'] == 'space':
keyboard[' '] = {
'value': ' ',
'left': key['left'],
'top': key['top'],
'width': key['width'],
'height': key['height']
}
assert nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC', sim=lambda x,y: 1) == (12, '++-++++-+++++-++++')
assert nw('khler schrank', 'schler krank', sim=lambda x,y: 1) == (3, '--+-++++---++++')
assert nw('the longest', 'longest day', sim=lambda x,y: 1) == (-1, '----+++++++----')
assert nw('nicht ausgeloggt', 'licht ausgenockt', sim=lambda x,y: 1) == (8, '-++++++++++-+--+')
assert nw('gurken schaben', 'schurkengaben', sim=lambda x,y: 1) == (2, '---+++++----++++')
# How does your suggest function behave with nw and a keyboard-aware similarity?
print(nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC'))
print(nw('khler schrank', 'schler krank'))
print(nw('the longest', 'longest day'))
print(nw('nicht ausgeloggt', 'licht ausgenockt'))
print(nw('gurken schaben', 'schurkengaben'))
# %%
| 32.352941 | 138 | 0.587662 |
3c21c614e14a12fda17173ca64af48d998a556ab | 2,451 | py | Python | recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | def isacn(obj):
"""isacn(string or int) -> True|False
Validate an ACN (Australian Company Number).
http://www.asic.gov.au/asic/asic.nsf/byheadline/Australian+Company+Number+(ACN)+Check+Digit
Accepts an int, or a string of digits including any leading zeroes.
Digits may be optionally separated with spaces. Any other input raises
TypeError or ValueError.
Return True if the argument is a valid ACN, otherwise False.
>>> isacn('004 085 616')
True
>>> isacn('005 085 616')
False
"""
if isinstance(obj, int):
if not 0 <= obj < 10**9:
raise ValueError('int out of range for an ACN')
obj = '%09d' % obj
assert len(obj) == 9
if not isinstance(obj, str):
raise TypeError('expected a str or int but got %s' % type(obj))
obj = obj.replace(' ', '')
if len(obj) != 9:
raise ValueError('ACN must have exactly 9 digits')
if not obj.isdigit():
raise ValueError('non-digit found in ACN')
digits = [int(c) for c in obj]
weights = [8, 7, 6, 5, 4, 3, 2, 1]
assert len(digits) == 9 and len(weights) == 8
chksum = 10 - sum(d*w for d,w in zip(digits, weights)) % 10
if chksum == 10:
chksum = 0
return chksum == digits[-1]
if __name__ == '__main__':
    # Check the list of valid ACNs from the ASIC website.
    ACNs = '''
    000 000 019 * 000 250 000 * 000 500 005 * 000 750 005
    001 000 004 * 001 250 004 * 001 500 009 * 001 749 999
    001 999 999 * 002 249 998 * 002 499 998 * 002 749 993
    002 999 993 * 003 249 992 * 003 499 992 * 003 749 988
    003 999 988 * 004 249 987 * 004 499 987 * 004 749 982
    004 999 982 * 005 249 981 * 005 499 981 * 005 749 986
    005 999 977 * 006 249 976 * 006 499 976 * 006 749 980
    006 999 980 * 007 249 989 * 007 499 989 * 007 749 975
    007 999 975 * 008 249 974 * 008 499 974 * 008 749 979
    008 999 979 * 009 249 969 * 009 499 969 * 009 749 964
    009 999 964 * 010 249 966 * 010 499 966 * 010 749 961
    '''.replace('*', '\n').split('\n')
    # Drop empty / whitespace-only entries produced by the split.
    ACNs = [s for s in ACNs if s and not s.isspace()]
    for s in ACNs:
        n = int(s.replace(' ', ''))
        # Each entry must validate both as string and int, and the next
        # integer (wrong check digit) must fail.
        if not (isacn(s) and isacn(n) and not isacn(n+1)):
            print('test failed for ACN: %s' % s.strip())
            break
    else:
        # for/else: only reached when no entry failed.
        print('all ACNs tested okay')
3c2312e967df908333d00837244d79e34fe4f564 | 2,845 | py | Python | scripts/code_standards/code_standards.py | dolphingarlic/sketch-frontend | e646b7d51405e8a693f45472aa3cc6991a6f38af | [
"X11"
] | 1 | 2020-12-06T03:40:53.000Z | 2020-12-06T03:40:53.000Z | scripts/code_standards/code_standards.py | dolphingarlic/sketch-frontend | e646b7d51405e8a693f45472aa3cc6991a6f38af | [
"X11"
] | null | null | null | scripts/code_standards/code_standards.py | dolphingarlic/sketch-frontend | e646b7d51405e8a693f45472aa3cc6991a6f38af | [
"X11"
] | null | null | null | #!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
from __future__ import print_function
import optparse
import path_resolv
from path_resolv import Path
if __name__ == "__main__":
cmdopts = optparse.OptionParser(usage="%prog [options]")
cmdopts.add_option("--srcdir", default=Path("."),
help="source directory to look through")
cmdopts.add_option("--file_extensions", default="java,scala,py,sh",
help="comma-sepated list of file extensions")
cmdopts.add_option("--show_info", action="store_true",
help="show info for command")
cmdopts.add_option("--override_ignores", action="store_true",
help="ignore \"@code standards ignore [file]\"")
options, args = cmdopts.parse_args()
options.file_extensions = options.file_extensions.split(",")
if not options.show_info:
print("use --show_info to show more notices")
main(**options.__dict__)
| 34.695122 | 86 | 0.59754 |
3c25269f1d545577e247a812c7d95d25ce72bbfe | 2,368 | py | Python | grease/scanner.py | JorgeRubio96/grease-lang | 94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9 | [
"MIT"
] | null | null | null | grease/scanner.py | JorgeRubio96/grease-lang | 94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9 | [
"MIT"
] | null | null | null | grease/scanner.py | JorgeRubio96/grease-lang | 94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9 | [
"MIT"
] | 1 | 2018-10-09T22:57:34.000Z | 2018-10-09T22:57:34.000Z | import ply.lex as lex
from grease.core.indents import Indents
reserved = {
'var': 'VAR',
'if': 'IF',
'else': 'ELSE',
'scan': 'SCAN',
'print': 'PRINT',
'and': 'AND',
'or': 'OR',
'Bool': 'BOOL',
'Int': 'INT',
'Float': 'FLOAT',
'Char': 'CHAR',
'fn': 'FN',
'interface': 'INTERFACE',
'import': 'IMPORT',
'struct':'STRUCT',
'while':'WHILE',
'alias':'ALIAS',
'as':'AS',
'gt': 'GT',
'ge': 'GE',
'lt': 'LT',
'le': 'LE',
'eq': 'EQ',
'not':'NOT',
'from': 'FROM',
'return': 'RETURN',
'true': 'TRUE',
'false': 'FALSE'
}
tokens = [
'ID', 'CONST_INT', 'CONST_REAL', 'CONST_STR', 'CONST_CHAR',
'ARROW', 'SEMICOLON', 'COLON', 'COMMA', 'DOT', 'EQUALS', 'NEW_LINE',
'OPEN_BRACK','CLOSE_BRACK', 'OPEN_PAREN', 'CLOSE_PAREN', 'PLUS', 'MINUS',
'TIMES', 'DIVIDE', 'AMP', 'INDENT', 'DEDENT'
] + list(reserved.values())
t_DOT = r'\.'
t_SEMICOLON = r'\;'
t_COLON = r'\:'
t_COMMA = r'\,'
t_OPEN_BRACK = r'\['
t_CLOSE_BRACK = r'\]'
t_EQUALS = r'\='
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_AMP = r'\&'
t_ARROW = r'\-\>'
t_ignore = ' '
def t_ignore_SINGLE_COMMENT(t):
    r'\#.*\n'
    # The raw-string "docstring" above is the PLY match pattern, not
    # documentation. Tokens matched by t_ignore_* rules are discarded; we only
    # advance the line counter past the comment's trailing newline.
    t.lexer.lineno += 1
def t_ignore_MULTI_COMMENT(t):
    r'\/\*[\s\S]*\*\/\s*'
    # Discard a C-style /* ... */ block comment (plus trailing whitespace) and
    # bump the line counter by however many newlines the comment spanned.
    t.lexer.lineno += t.value.count('\n')
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # The raw-string "docstring" above is the PLY match pattern.
    # Re-tag reserved words with their keyword token type; any other match
    # stays a plain identifier ('ID').
    #
    # Removed dead code: the original checked `t.type == 'CONST_BOOL'` to turn
    # 'true'/'false' into Python bools, but `reserved` maps those words to
    # 'TRUE'/'FALSE' and no rule ever yields 'CONST_BOOL' (it is not even in
    # the `tokens` list), so that branch could never execute.
    t.type = reserved.get(t.value, 'ID')
    return t
def t_CONST_REAL(token):
    r'[0-9]+\.[0-9]+'
    # The raw string above is the PLY match pattern: digits, a dot, digits.
    # Replace the matched lexeme text with its numeric (float) value.
    token.value = float(token.value)
    return token
def t_CONST_INT(token):
    r'[0-9]+'
    # The raw string above is the PLY match pattern: one or more digits.
    # Convert the lexeme to its base-10 integer value.
    token.value = int(token.value, 10)
    return token
def t_CONST_STR(token):
    r'\".+\"'
    # The raw string above is the PLY match pattern: a double-quoted literal.
    # Strip exactly one quote from each end, keeping the inner text verbatim.
    token.value = token.value[1:len(token.value) - 1]
    return token
def t_CONST_CHAR(token):
    r'\'.+\''
    # The raw string above is the PLY match pattern: a single-quoted literal.
    # Strip exactly one quote from each end, keeping the inner text verbatim.
    token.value = token.value[1:len(token.value) - 1]
    return token
def t_NEW_LINE(token):
    r'\n\s*[\t ]*'
    # The raw string above is the PLY match pattern: a newline followed by any
    # whitespace run. Keep line numbers accurate by counting every newline
    # consumed.
    token.lexer.lineno += token.value.count('\n')
    # Replace the lexeme with the width of the indentation after the last
    # newline; presumably the Indents wrapper turns these widths into
    # INDENT/DEDENT tokens -- confirm in grease.core.indents.
    token.value = len(token.value.rsplit('\n', 1)[1])
    return token
# Build the raw PLY lexer from the rules above and wrap it in Indents, which
# presumably converts the NEW_LINE indentation widths into INDENT/DEDENT
# tokens -- confirm in grease.core.indents.
grease_lexer = Indents(lex.lex())
| 19.89916 | 85 | 0.505912 |
3c2804fa00492d199e8c3aefe6c666e804514568 | 768 | py | Python | patan/utils.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | null | null | null | patan/utils.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | null | null | null | patan/utils.py | tttlh/patan | d3e5cfec085e21f963204b5c07a85cf1f029560c | [
"MIT"
] | 1 | 2021-03-01T08:35:34.000Z | 2021-03-01T08:35:34.000Z | # _*_ coding: utf-8 _*_
from importlib import import_module
| 22.588235 | 75 | 0.653646 |
3c2968143388eec54e35192431494447d2c82d24 | 3,673 | py | Python | tests/test_assert_immediate.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_assert_immediate.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_assert_immediate.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | null | null | null | import tempfile
import pytest
import fault as f
import magma as m
from fault.verilator_utils import verilator_version
| 34.980952 | 78 | 0.54288 |
3c2af43cd6a571a35fff3b7b22af4c58d6015098 | 3,098 | py | Python | cs673backend/api/authentication.py | MicobyteMichael/CS673ProjectBackend | 87b28c62f29630059e1906c8bf7383d814880bd0 | [
"Apache-2.0"
] | null | null | null | cs673backend/api/authentication.py | MicobyteMichael/CS673ProjectBackend | 87b28c62f29630059e1906c8bf7383d814880bd0 | [
"Apache-2.0"
] | null | null | null | cs673backend/api/authentication.py | MicobyteMichael/CS673ProjectBackend | 87b28c62f29630059e1906c8bf7383d814880bd0 | [
"Apache-2.0"
] | null | null | null | from flask import session
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from bcrypt import gensalt, hashpw
from hashlib import sha256
from hmac import new as hash_mac
from os import environ
PEPPER = environ["PEPPER"].encode("utf-8")
| 34.422222 | 117 | 0.65042 |
3c2d0e8fef55c7fd0b954db4e7dcf85c4711c86c | 4,606 | py | Python | sunpy/sun/tests/test_sun.py | PritishC/sunpy | 76a7b5994566674d85eada7dcec54bf0f120269a | [
"BSD-2-Clause"
] | null | null | null | sunpy/sun/tests/test_sun.py | PritishC/sunpy | 76a7b5994566674d85eada7dcec54bf0f120269a | [
"BSD-2-Clause"
] | null | null | null | sunpy/sun/tests/test_sun.py | PritishC/sunpy | 76a7b5994566674d85eada7dcec54bf0f120269a | [
"BSD-2-Clause"
] | null | null | null | from astropy.coordinates import Angle
from astropy.time import Time
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from sunpy.sun import sun
| 41.495495 | 124 | 0.721884 |
3c2db6513413d924898e189ce93d55aaff3a377a | 1,031 | py | Python | components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py | Gamer1120/quality-time | f3a0d6f75cd6055d78995d37feae72bc3e837e4b | [
"Apache-2.0"
] | 1 | 2021-02-22T07:53:36.000Z | 2021-02-22T07:53:36.000Z | components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py | Gamer1120/quality-time | f3a0d6f75cd6055d78995d37feae72bc3e837e4b | [
"Apache-2.0"
] | 338 | 2020-10-29T04:28:09.000Z | 2022-02-22T04:09:33.000Z | components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py | dicksnel/quality-time | 4c04f8852aa97175f2bca2b5c5391b3e09b657af | [
"Apache-2.0"
] | 1 | 2022-01-06T04:07:03.000Z | 2022-01-06T04:07:03.000Z | """Pyup.io Safety metrics collector."""
from typing import Final
from base_collectors import JSONFileSourceCollector
from source_model import Entity, SourceMeasurement, SourceResponses
| 36.821429 | 108 | 0.682832 |
3c312cb7c5567e3a8e860f6d1634192c56119a38 | 2,580 | py | Python | jaf/main.py | milano-slesarik/jaf | 97c0a579f4ece70dbfb583d72aa35380f7a82f8d | [
"MIT"
] | null | null | null | jaf/main.py | milano-slesarik/jaf | 97c0a579f4ece70dbfb583d72aa35380f7a82f8d | [
"MIT"
] | null | null | null | jaf/main.py | milano-slesarik/jaf | 97c0a579f4ece70dbfb583d72aa35380f7a82f8d | [
"MIT"
] | null | null | null | import json
import os
import typing
from io import IOBase
from jaf.encoders import JAFJSONEncoder
# Demo / smoke test: append one million copies of a small dict to output.json
# via the writer's context manager (JsonArrayFileWriter is defined elsewhere
# in this module, not visible in this chunk).
# NOTE(review): this runs at import time; consider guarding it with
# `if __name__ == '__main__':`.
with JsonArrayFileWriter('output.json', mode=JsonArrayFileWriter.MODE__APPEND_OR_CREATE, indent=4) as j:
    d = {1: 2, 2: 3, 3: 4, 4: 6}
    for i in range(1000000):
        j.write(d)
| 31.084337 | 170 | 0.601163 |
3c3406ddfc224f8162dd8e58c6d1818f19d5fb3c | 812 | py | Python | BluePlug/fork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | 1 | 2019-01-27T04:08:05.000Z | 2019-01-27T04:08:05.000Z | BluePlug/fork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | 5 | 2021-03-18T21:35:20.000Z | 2022-01-13T00:58:18.000Z | BluePlug/fork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | null | null | null | from aip import AipOcr
BAIDU_APP_ID='14490756'
BAIDU_API_KEY = 'Z7ZhXtleolXMRYYGZ59CGvRl'
BAIDU_SECRET_KEY = 'zbHgDUGmRnBfn6XOBmpS5fnr9yKer8C6'
client= AipOcr(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)
options = {}
options["recognize_granularity"] = "big"
options["language_type"] = "CHN_ENG"
options["detect_direction"] = "true"
options["detect_language"] = "true"
options["vertexes_location"] = "true"
options["probability"] = "true"
if __name__ == '__main__':
r = getcharactor('5.png')
print(r) | 24.606061 | 62 | 0.69335 |
3c34f86c770e6ffff7025e5fd4715854fbee0f6d | 1,233 | py | Python | test/test_model.py | karlsimsBBC/feed-me | e2bc87aef4740c2899b332f1b4036c169b108b79 | [
"MIT"
] | null | null | null | test/test_model.py | karlsimsBBC/feed-me | e2bc87aef4740c2899b332f1b4036c169b108b79 | [
"MIT"
] | 2 | 2020-02-28T16:52:05.000Z | 2020-02-28T16:52:11.000Z | test/test_model.py | karlsimsBBC/feed-me | e2bc87aef4740c2899b332f1b4036c169b108b79 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import Mock
from unittest.mock import mock_open
from contextlib import contextmanager
MOCK_DATA_A = ''
MOCK_DATA_B = '{"article_idx": 0}\n{"article_idx": 1}\n" | 27.4 | 56 | 0.596918 |
3c36a55c48b2843a0df149d905928f2eb9279e29 | 4,596 | py | Python | GuessGame.py | VedantKhairnar/Guess-Game | a959d03cbfea539a63e451e5c65f7cd9790d1b7f | [
"MIT"
] | null | null | null | GuessGame.py | VedantKhairnar/Guess-Game | a959d03cbfea539a63e451e5c65f7cd9790d1b7f | [
"MIT"
] | null | null | null | GuessGame.py | VedantKhairnar/Guess-Game | a959d03cbfea539a63e451e5c65f7cd9790d1b7f | [
"MIT"
] | 1 | 2020-06-05T12:42:39.000Z | 2020-06-05T12:42:39.000Z | from tkinter import *
import random
from tkinter import messagebox
s = GuessGame()
| 42.555556 | 210 | 0.570061 |
3c39dc3a117517ba44438eb56f648a0feefd8459 | 2,051 | py | Python | kanban.py | vtashlikovich/jira-task-analysis | 34690406243fe0b4c5f1400c5bca872923856571 | [
"MIT"
] | null | null | null | kanban.py | vtashlikovich/jira-task-analysis | 34690406243fe0b4c5f1400c5bca872923856571 | [
"MIT"
] | null | null | null | kanban.py | vtashlikovich/jira-task-analysis | 34690406243fe0b4c5f1400c5bca872923856571 | [
"MIT"
] | null | null | null | import configparser
import sys
from jiraparser import JiraJSONParser, TokenAuth
import requests
from requests.auth import AuthBase
""" Getting a list of issues connected to a board id (defined by configuration) and printing analysis information """
# read config
config = configparser.ConfigParser()
config.read("config.ini")
# prepare parameters
jSQLString = JiraJSONParser.formJQLQuery(
projectId=config["default"]["issueKey"],
filter=int(config["default"]["filterId"]),
taskTypes=["Story"],
)
authToken = config["default"]["authentication-token"]
jiraBaseAPIURL = config["default"]["jiraURL"] + "/rest/api/2/issue/"
boardAPIURL = config["default"]["jiraURL"] + "/rest/api/2/search?jql=" + jSQLString
# fetch board issues
resp = requests.get(
boardAPIURL, auth=TokenAuth(authToken), params={"Content-Type": "application/json"}
)
if resp.status_code != 200:
raise Exception("Board information has not been fetched")
result = resp.json()
print("max {:d} out of {:d}".format(result["maxResults"], result["total"]))
# TODO: replace with full list when needed
narrowedList = result["issues"][:5]
for task in narrowedList:
# fetch issue info
issueParser = JiraJSONParser(authToken, jiraBaseAPIURL)
issueParser.parseIssueJson(task)
print(
"Issue: "
+ task["key"]
+ ", type: "
+ issueParser.issueTypeName
+ ", status: "
+ issueParser.issueStatus
)
# if there are subtasks - fetch them one by one
if issueParser.issueHasSubtasks:
issueParser.getAndParseSubtasks(False)
if len(issueParser.subtasksWOEstimation) > 0:
print("Sub-tasks not estimated: " + ",".join(issueParser.subtasksWOEstimation))
# print progress in 1 line
progressInfoLine = issueParser.getCompactProgressInfo()
if len(progressInfoLine) > 0:
print(issueParser.getCompactProgressInfo())
# warn if there is no estimation for task/bug
elif issueParser.issueTypeName.lower() != "story":
print("No estimation")
print("")
| 31.075758 | 117 | 0.694783 |
3c3a5c531bfcc3cf9b1021a5ea94cb71ba7d11b0 | 1,268 | py | Python | duckling/test/test_api.py | handsomezebra/zoo | db9ef7f9daffd34ca859d5a4d76d947e00a768b8 | [
"MIT"
] | 1 | 2020-03-08T07:46:14.000Z | 2020-03-08T07:46:14.000Z | duckling/test/test_api.py | handsomezebra/zoo | db9ef7f9daffd34ca859d5a4d76d947e00a768b8 | [
"MIT"
] | null | null | null | duckling/test/test_api.py | handsomezebra/zoo | db9ef7f9daffd34ca859d5a4d76d947e00a768b8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import requests
import logging
import csv
url = "http://localhost:10000/parse"
| 23.924528 | 96 | 0.621451 |
3c3b9d3f39b8361cf623581c59d5c7de855eb076 | 943 | py | Python | btrfslime/defrag/btrfs.py | tsangwpx/btrfslime | 49c141721c532706f146fea31d2eb171c6dd698b | [
"MIT"
] | 3 | 2020-10-30T12:18:42.000Z | 2022-02-06T20:17:55.000Z | btrfslime/defrag/btrfs.py | tsangwpx/btrfslime | 49c141721c532706f146fea31d2eb171c6dd698b | [
"MIT"
] | null | null | null | btrfslime/defrag/btrfs.py | tsangwpx/btrfslime | 49c141721c532706f146fea31d2eb171c6dd698b | [
"MIT"
] | null | null | null | from __future__ import annotations
import os
import subprocess
from typing import AnyStr
from ..util import check_nonnegative
BTRFS_BIN = '/bin/btrfs'
| 21.930233 | 53 | 0.652174 |
3c3ddb0feb36d17a1b33c822d86fc630d77ff009 | 14,771 | py | Python | fooltrader/api/quote.py | lcczz/fooltrader | fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e | [
"MIT"
] | 1 | 2018-04-03T06:25:24.000Z | 2018-04-03T06:25:24.000Z | fooltrader/api/quote.py | lcczz/fooltrader | fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e | [
"MIT"
] | null | null | null | fooltrader/api/quote.py | lcczz/fooltrader | fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.files_contract import get_kdata_dir, get_kdata_path
from fooltrader.settings import US_STOCK_CODES
from fooltrader.utils.utils import get_file_name, to_time_str
logger = logging.getLogger(__name__)
# meta
def get_security_list(security_type='stock', exchanges=['sh', 'sz'], start=None, end=None,
                      mode='simple', start_date=None, codes=None):
    """
    get security list.

    Parameters
    ----------
    security_type : str
        {'stock', 'index'}, default: 'stock'
    exchanges : list
        ['sh', 'sz', 'nasdaq', 'nyse', 'amex'], default: ['sh', 'sz']
    start : str
        the start (smallest) code to include, default: None
        only works when exchanges is ['sh', 'sz']
    end : str
        the end (largest) code to include, default: None
        only works when exchanges is ['sh', 'sz']
    mode : str
        whether to parse extra security info, {'simple', 'es'}, default: 'simple'
    start_date : Timestamp str or Timestamp
        keep only securities listed on or after this date, default: None
    codes : list
        the exact (US) codes to query, default: None

    Returns
    -------
    DataFrame
        the security list
    """
    # Start from empty frames so an unknown security_type returns an empty
    # DataFrame instead of raising UnboundLocalError (and the 'index' branch
    # no longer needs to pre-initialise df_usa itself).
    df = pd.DataFrame()
    df_usa = pd.DataFrame()

    if security_type == 'stock':
        for exchange in exchanges:
            the_path = files_contract.get_security_list_path(security_type, exchange)
            if os.path.exists(the_path):
                if exchange == 'sh' or exchange == 'sz':
                    if mode == 'simple':
                        df1 = pd.read_csv(the_path,
                                          converters={'code': str})
                    else:
                        # Non-simple mode parses the stringified list columns
                        # back into Python lists.
                        df1 = pd.read_csv(the_path,
                                          converters={'code': str,
                                                      'sinaIndustry': convert_to_list_if_need,
                                                      'sinaConcept': convert_to_list_if_need,
                                                      'sinaArea': convert_to_list_if_need})
                    # NOTE(review): DataFrame.append was removed in pandas 2.x;
                    # migrate to pd.concat([...], ignore_index=True) when the
                    # pandas pin is raised.
                    df = df.append(df1, ignore_index=True)
                elif exchange == 'nasdaq':
                    df_usa = pd.read_csv(the_path, dtype=str)
    elif security_type == 'index':
        df = pd.DataFrame(CHINA_STOCK_INDEX)
        if 'nasdaq' in exchanges:
            df_usa = pd.DataFrame(USA_STOCK_INDEX)

    if df.size > 0:
        # BUG FIX: the bounds were swapped in the original code -- `if start:`
        # filtered `code <= end` and `if end:` filtered `code >= start`, which
        # applies the wrong bound and breaks when only one of them is given.
        if start:
            df = df[df["code"] >= start]
        if end:
            df = df[df["code"] <= end]
        if start_date:
            df['listDate'] = pd.to_datetime(df['listDate'])
            df = df[df['listDate'] >= pd.Timestamp(start_date)]
        df = df.set_index(df['code'], drop=False)

    if df_usa.size > 0:
        df_usa = df_usa.set_index(df_usa['code'], drop=False)
        if codes:
            df_usa = df_usa.loc[codes]
        df = df.append(df_usa, ignore_index=True)

    return df
def _get_security_item(code=None, id=None, the_type='stock'):
    """
    Look up a single security row from the security list.

    Parameters
    ----------
    code : str
        the security code, default: None
    id : str
        the security id, default: None
    the_type : str
        the security type

    Returns
    -------
    Series
        the matching security row; `id` takes precedence over `code`
        (implicitly None when neither is given)
    """
    listing = get_security_list(security_type=the_type)
    if id:
        return listing.set_index(listing['id']).loc[id]
    if code:
        return listing.set_index(listing['code']).loc[code]
# tick
# kdata
def get_kdata(security_item, the_date=None, start_date=None, end_date=None, fuquan='bfq', dtype=None, source='163',
              level='day'):
    """
    get kdata.

    Parameters
    ----------
    security_item : SecurityItem or str
        the security item, id or code
    the_date : TimeStamp str or TimeStamp
        get the kdata for the exact date
    start_date : TimeStamp str or TimeStamp
        start date
    end_date : TimeStamp str or TimeStamp
        end date
    fuquan : str
        price adjustment mode, {"qfq", "hfq", "bfq"}, default: "bfq"
    dtype : type
        the data type for the csv column, default: None
    source : str
        the data source, {'163', 'sina'}, default: '163'
    level : str or int
        the kdata level, {1, 5, 15, 30, 60, 'day', 'week', 'month'}, default: 'day'

    Returns
    -------
    DataFrame
    """
    security_item = to_security_item(security_item)

    # Source '163' only stores the raw ('bfq') csv; adjusted prices are
    # derived below from its 'factor' column, so always open the bfq file.
    # (Original comment here was garbled/non-English.)
    if source == '163':
        the_path = files_contract.get_kdata_path(security_item, source=source, fuquan='bfq')
    else:
        the_path = files_contract.get_kdata_path(security_item, source=source, fuquan=fuquan)

    if os.path.isfile(the_path):
        if not dtype:
            dtype = {"code": str, 'timestamp': str}
        df = pd.read_csv(the_path, dtype=dtype)
        df.timestamp = df.timestamp.apply(lambda x: to_time_str(x))
        df = df.set_index(df['timestamp'], drop=False)
        df.index = pd.to_datetime(df.index)
        df = df.sort_index()

        # Exact-date lookup: return the single row, or an empty frame if the
        # date is not present.
        if the_date:
            if the_date in df.index:
                return df.loc[the_date]
            else:
                return pd.DataFrame()

        # Default the window: stocks start at their listing date (falling back
        # to 2002-01-01 when listDate is NaN), everything else at 30 days ago;
        # the window always ends today unless end_date is given.
        if not start_date:
            if security_item['type'] == 'stock':
                if type(security_item['listDate']) != str and np.isnan(security_item['listDate']):
                    start_date = '2002-01-01'
                else:
                    start_date = security_item['listDate']
            else:
                start_date = datetime.datetime.today() - datetime.timedelta(days=30)
        if not end_date:
            end_date = datetime.datetime.today()
        if start_date and end_date:
            df = df.loc[start_date:end_date]

        # Derive adjusted prices for '163' stock data from the cumulative
        # 'factor' column; bfq returns the raw prices untouched.
        if source == '163' and security_item['type'] == 'stock':
            if fuquan == 'bfq':
                return df
            if 'factor' in df.columns:
                current_factor = df.tail(1).factor.iat[0]
                # Multiplying every bar by its cumulative factor gives the
                # values returned for 'hfq'.
                df.close *= df.factor
                df.open *= df.factor
                df.high *= df.factor
                df.low *= df.factor
                if fuquan == 'qfq':
                    # 'qfq': additionally normalise by the latest factor so
                    # the most recent bar matches the raw price.
                    df.close /= current_factor
                    df.open /= current_factor
                    df.high /= current_factor
                    df.low /= current_factor
        return df
    return pd.DataFrame()
# TODO:use join
if __name__ == '__main__':
    # Ad-hoc smoke test: print the US (nasdaq) security list restricted to the
    # configured codes.
    print(get_security_list(security_type='stock', exchanges=['nasdaq'], codes=US_STOCK_CODES))

    # The block below is a manual cross-check of local kdata against tdx data,
    # kept commented out for reference.
    # item = {"code": "000001", "type": "stock", "exchange": "sz"}
    # assert kdata_exist(item, 1991, 2) == True
    # assert kdata_exist(item, 1991, 3) == True
    # assert kdata_exist(item, 1991, 4) == True
    # assert kdata_exist(item, 1991, 2) == True
    # assert kdata_exist(item, 1990, 1) == False
    # assert kdata_exist(item, 2017, 1) == False
    #
    # df1 = get_kdata(item,
    #                 datetime.datetime.strptime('1991-04-01', settings.TIME_FORMAT_DAY),
    #                 datetime.datetime.strptime('1991-12-31', settings.TIME_FORMAT_DAY))
    # df1 = df1.set_index(df1['timestamp'])
    # df1 = df1.sort_index()
    # print(df1)
    #
    # df2 = tdx.get_tdx_kdata(item, '1991-04-01', '1991-12-31')
    # df2 = df2.set_index(df2['timestamp'], drop=False)
    # df2 = df2.sort_index()
    # print(df2)
    #
    # for _, data in df1.iterrows():
    #     if data['timestamp'] in df2.index:
    #         data2 = df2.loc[data['timestamp']]
    #         assert data2["low"] == data["low"]
    #         assert data2["open"] == data["open"]
    #         assert data2["high"] == data["high"]
    #         assert data2["close"] == data["close"]
    #         assert data2["volume"] == data["volume"]
    #         try:
    #             assert data2["turnover"] == data["turnover"]
    #         except Exception as e:
    #             print(data2["turnover"])
    #             print(data["turnover"])
3c3f46d21ba0b951765c196ff37b42684f836343 | 432 | py | Python | backend/jobPortal/api/urls.py | KshitijDarekar/hackViolet22 | c54636d3044e1d9a7d8fa92a4d781e79f38af3ca | [
"MIT"
] | 2 | 2022-02-06T04:58:24.000Z | 2022-02-06T05:31:18.000Z | backend/jobPortal/api/urls.py | KshitijDarekar/hackViolet22 | c54636d3044e1d9a7d8fa92a4d781e79f38af3ca | [
"MIT"
] | 5 | 2022-02-06T05:08:04.000Z | 2022-02-06T16:29:51.000Z | backend/jobPortal/api/urls.py | KshitijDarekar/hackViolet22 | c54636d3044e1d9a7d8fa92a4d781e79f38af3ca | [
"MIT"
] | 2 | 2022-02-06T04:58:43.000Z | 2022-02-06T17:56:23.000Z | from django.urls import path
from . import views
# Refer to the corresponding view function for more detials of the url routes
urlpatterns = [
path('', views.getRoutes, name="index"),
path('add/', views.addJob, name="addJob" ),
path('delete/<int:id>', views.removeJob, name="removeJob" ),
path('get-jobs/', views.getJobs, name='getJobs'),
path('company/jobs/', views.getCompanyJobs, name='getCompanyJobs'),
]
| 33.230769 | 77 | 0.685185 |