hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e625df758f3e2fdaeb576f8377536aeeebd5b8b3 | 635 | py | Python | extractors/folha_news_extractor.py | LorhanSohaky/POOA | c604f03f9b7bbfccecb75a982cc76fe428c36433 | [
"MIT"
] | 1 | 2020-12-05T21:01:10.000Z | 2020-12-05T21:01:10.000Z | extractors/folha_news_extractor.py | LorhanSohaky/POOA | c604f03f9b7bbfccecb75a982cc76fe428c36433 | [
"MIT"
] | null | null | null | extractors/folha_news_extractor.py | LorhanSohaky/POOA | c604f03f9b7bbfccecb75a982cc76fe428c36433 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from news import News
from .abstract_news_extractor import AbstractNewsExtractor
| 27.608696 | 62 | 0.661417 |
e626176d8836eb758155d498bd351936493ec76d | 3,871 | py | Python | pyrbi/__init__.py | hack-a-team/pyrbi | 09296788d9b9a29ae7aaeff960992d2893468372 | [
"MIT"
] | null | null | null | pyrbi/__init__.py | hack-a-team/pyrbi | 09296788d9b9a29ae7aaeff960992d2893468372 | [
"MIT"
] | null | null | null | pyrbi/__init__.py | hack-a-team/pyrbi | 09296788d9b9a29ae7aaeff960992d2893468372 | [
"MIT"
] | null | null | null | """
Provides a simple Python client for RBI REST API
"""
from . import exceptions
import requests
__version__ = "0.1.0"
| 28.674074 | 88 | 0.57427 |
e6266840cb7ce270f6afeec9709e2ac1a2d1d286 | 1,426 | py | Python | scripts/sequence/replace_selenocystein.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/sequence/replace_selenocystein.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/sequence/replace_selenocystein.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
import os
from copy import deepcopy
from Bio import SeqIO
from Bio.Seq import Seq
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input_file",
help="Input file with sequences")
parser.add_argument("-c", "--symbol_to_use", action="store", dest="char_to_use",
default="X",
help="Symbol to use to replace selenocystein. Default - 'X'")
parser.add_argument("-o", "--output", action="store", dest="output",
help="File to write output")
parser.add_argument("-f", "--format", action="store", dest="format", default="fasta",
help="Format of input and output files. Allowed formats genbank, fasta(default)")
args = parser.parse_args()
tmp_index_file = "temp.idx"
print("Parsing %s..." % args.input_file)
sequence_dict = SeqIO.index_db(tmp_index_file, args.input_file, format=args.format)
SeqIO.write(record_with_replacenment_generator(sequence_dict), args.output, args.format)
os.remove(tmp_index_file)
| 33.952381 | 125 | 0.691445 |
e626e6e7f40b567d4b7615f9b578110b40aa795b | 438 | py | Python | Aulas Gustavo Guanabara/Aula018.1.py | RobertoRanulfo/Phyton | d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01 | [
"MIT"
] | null | null | null | Aulas Gustavo Guanabara/Aula018.1.py | RobertoRanulfo/Phyton | d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01 | [
"MIT"
] | null | null | null | Aulas Gustavo Guanabara/Aula018.1.py | RobertoRanulfo/Phyton | d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01 | [
"MIT"
] | null | null | null | teste = list()
teste.append('Gustavo')
teste.append(40)
galera = []
galera.append(teste) #neste caso estamos criando uma ligao entre as duas listas
teste[0] = 'Maria'
teste[1] = 22
galera.append(teste)
print(teste)
print(galera) # No caso os elementos no se acumularam porque no foi feita uma cpia dos elementos da lista
# e sim um elo que espelha a lista... dessa forma ela foi copiada mais uma vez do jeito que estava | 39.818182 | 112 | 0.730594 |
e62704d640c5f34b51dc4894e557ad6bcb2ec7d5 | 1,637 | py | Python | ad_hoc/name_mapper_backfill.py | Connor-R/NSBL | 16615990d058d171fab4790f937846fd1f0b2ee9 | [
"MIT"
] | 1 | 2020-11-19T23:20:19.000Z | 2020-11-19T23:20:19.000Z | ad_hoc/name_mapper_backfill.py | Connor-R/NSBL | 16615990d058d171fab4790f937846fd1f0b2ee9 | [
"MIT"
] | null | null | null | ad_hoc/name_mapper_backfill.py | Connor-R/NSBL | 16615990d058d171fab4790f937846fd1f0b2ee9 | [
"MIT"
] | null | null | null | from py_db import db
import NSBL_helpers as helper
db = db("NSBL")
table_dict = {"register_batting_analytical": "a.player_name"
, "register_batting_primary": "a.player_name"
, "register_batting_secondary": "a.player_name"
, "register_batting_splits": "a.player_name"
, "register_pitching_analytical": "a.player_name"
, "register_pitching_primary": "a.player_name"
, "register_pitching_rates_relief": "a.player_name"
, "register_pitching_rates_start": "a.player_name"
, "register_pitching_secondary": "a.player_name"
, "zips_defense": "a.player_name"
, "zips_fangraphs_batters_counting": "a.Player"
, "zips_fangraphs_batters_rate": "a.Player"
, "zips_fangraphs_pitchers_counting": "a.Player"
, "zips_fangraphs_pitchers_rate": "a.Player"
, "zips_offense": "a.player_name"
, "zips_offense_splits": "a.player_name"
, "zips_pitching": "a.player_name"
, "zips_pitching_splits": "a.player_name"
, "mlb_prospects.fg_raw": "a.playerName"
, "mlb_prospects.minorleagueball_professional": "a.full_name"
, "mlb_prospects.mlb_prospects_draft": "CONCAT(a.fname, ' ', a.lname)"
, "mlb_prospects.mlb_prospects_international": "CONCAT(a.fname, ' ', a.lname)"
, "mlb_prospects.mlb_prospects_professional": "CONCAT(a.fname, ' ', a.lname)"
}
for k,v in table_dict.items():
print k
qry = """
SELECT DISTINCT %s
FROM %s a
LEFT JOIN name_mapper nm ON (%s = nm.wrong_name)
WHERE 1
AND nm.wrong_name IS NULL
""" % (v, k, v)
# raw_input(qry)
names = db.query(qry)
for name in names:
helper.input_name(name[0])
| 32.098039 | 82 | 0.681735 |
e628182131b1688593a8c2682f0d77aa16ecd697 | 1,287 | py | Python | camcommander/watcher.py | tparker-usgs/camcommander | 0e508a1b24cc99496745652e52118000470d7e32 | [
"CC0-1.0"
] | null | null | null | camcommander/watcher.py | tparker-usgs/camcommander | 0e508a1b24cc99496745652e52118000470d7e32 | [
"CC0-1.0"
] | null | null | null | camcommander/watcher.py | tparker-usgs/camcommander | 0e508a1b24cc99496745652e52118000470d7e32 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
#
# I waive copyright and related rights in the this work worldwide
# through the CC0 1.0 Universal public domain dedication.
# https://creativecommons.org/publicdomain/zero/1.0/legalcode
#
# Author(s):
# Tom Parker <tparker@usgs.gov>
""" watch for new webcam images."""
import zmq
import tomputils.util as tutil
| 26.8125 | 79 | 0.655012 |
e629f1eb273463da4f3c8be6f4e44ca1b639ae9f | 1,866 | py | Python | Filter/kalman_filter.py | KNakane/filter | 43ece9771003b63b477499dab2eb8d69e5bfdabe | [
"MIT"
] | null | null | null | Filter/kalman_filter.py | KNakane/filter | 43ece9771003b63b477499dab2eb8d69e5bfdabe | [
"MIT"
] | null | null | null | Filter/kalman_filter.py | KNakane/filter | 43ece9771003b63b477499dab2eb8d69e5bfdabe | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
| 33.927273 | 113 | 0.559486 |
e62b0481e9ee04d621f3915eddb5dfd2397e270a | 4,394 | py | Python | mwarp1d/ui/figures/artists/draggable_points.py | 0todd0000/mwarp1d | 7b40a47e6c112a8da5a1b67aff890fc77fe83d71 | [
"MIT"
] | null | null | null | mwarp1d/ui/figures/artists/draggable_points.py | 0todd0000/mwarp1d | 7b40a47e6c112a8da5a1b67aff890fc77fe83d71 | [
"MIT"
] | 6 | 2019-11-25T08:15:05.000Z | 2020-02-07T13:05:59.000Z | mwarp1d/ui/figures/artists/draggable_points.py | 0todd0000/mwarp1d | 7b40a47e6c112a8da5a1b67aff890fc77fe83d71 | [
"MIT"
] | 2 | 2019-11-28T02:58:14.000Z | 2019-12-18T11:45:33.000Z |
from PyQt5 import QtWidgets, QtCore
from math import floor
import numpy as np
from . _base import _SelectableArtist2D
class SourceLandmarks(_DraggablePoints):
color_active = 0.98, 0.7, 0.3
zorder = 1
class TemplateLandmarks(_DraggablePoints):
color_active = 0.3, 0.3, 0.98
zorder = 3
| 24.824859 | 134 | 0.649067 |
e62bee983944925691e81c42d718cf0680c6b087 | 7,370 | py | Python | convert/tartan_air_to_benchmark.py | AaltoML/vio_benchmark | cb2277026f824f88f3bc131057ebc687cb19d648 | [
"Apache-2.0"
] | 32 | 2021-04-23T15:07:04.000Z | 2022-03-30T08:04:28.000Z | convert/tartan_air_to_benchmark.py | AaltoML/vio_benchmark | cb2277026f824f88f3bc131057ebc687cb19d648 | [
"Apache-2.0"
] | 3 | 2021-02-10T18:54:06.000Z | 2022-03-12T16:58:19.000Z | convert/tartan_air_to_benchmark.py | AaltoML/vio_benchmark | cb2277026f824f88f3bc131057ebc687cb19d648 | [
"Apache-2.0"
] | 4 | 2021-02-08T11:11:09.000Z | 2022-03-15T12:45:05.000Z | #!/usr/bin/env python
#
# Download and convert TartanAir data <https://theairlab.org/tartanair-dataset/>.
#
# NOTE The whole dataset is several terabytes, so be sure to tune the `LEVELS` and
# `DATASETS` variables before running.
#
# It is recommended to install "AzCopy", an official tool for Azure, to get tolerable
# download speeds (pass `--azcopy` flag to enable).
#
# NOTE At the time of writing the data does not include simulated IMU samples.
import argparse
import csv
import json
import os
from pathlib import Path
import subprocess
from tartan_air_transformations import fixTartan
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--azcopy', action='store_true', default=False, help='download the data with AzCopy')
args = parser.parse_args()
# Since the downloads can be slow, an option to leave the downloaded zip files in the RAW directory.
BACKUP_ZIPS = False
RAW = "data/raw/tartan-air"
OUT = "data/benchmark/tartan-air"
# <https://github.com/castacks/tartanair_tools/blob/master/download_training_zipfiles.txt>
RELEASE = "https://tartanair.blob.core.windows.net/tartanair-release1"
LEVELS = ["Easy", "Hard"]
DATASETS = [
"abandonedfactory",
"abandonedfactory_night",
"amusement",
"carwelding",
"endofworld",
"gascola",
"hospital",
"japanesealley",
"neighborhood",
"ocean",
"office",
"office2",
"oldtown",
"seasidetown",
"seasonsforest",
"seasonsforest_winter",
"soulcity",
"westerndesert",
]
DOWNLOAD_CMD = "wget -O"
UNZIP_CMD = "unzip -o -d"
# The data doesn't have time information of any sort,
# so pick something that makes the videos run at a pleasant speed.
FPS = 10
if __name__ == "__main__":
main()
| 31.767241 | 142 | 0.53867 |
e62c2f0e0a2aa9f2cc633c9f3f0f670db80af86f | 24,534 | py | Python | test/test_tsdb.py | eneelo/qats | 9280e2487bde97874cc8857b2780ac830323f363 | [
"MIT"
] | null | null | null | test/test_tsdb.py | eneelo/qats | 9280e2487bde97874cc8857b2780ac830323f363 | [
"MIT"
] | null | null | null | test/test_tsdb.py | eneelo/qats | 9280e2487bde97874cc8857b2780ac830323f363 | [
"MIT"
] | 1 | 2020-10-29T13:40:47.000Z | 2020-10-29T13:40:47.000Z | # -*- coding: utf-8 -*-
"""
Module for testing TsDB class
"""
from qats import TimeSeries, TsDB
import unittest
import os
import numpy as np
import sys
# todo: add tests for listing subset(s) based on specifying parameter `names` (with and wo param. `keys`)
# todo: add test for getm() with fullkey=False (similar to test_get_many_correct_key, but with shorter key)
if __name__ == '__main__':
unittest.main()
| 44.36528 | 121 | 0.59815 |
e62dd8453d35731f8df986056643c1efd1e8ea57 | 1,257 | py | Python | py/py_0105_special_subset_sums_testing.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0105_special_subset_sums_testing.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0105_special_subset_sums_testing.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 105: Special subset sums: testing
# https://projecteuler.net/problem=105
#
# Let S(A) represent the sum of elements in set A of size n.
# We shall call it a special sum set if for any two non-empty disjoint
# subsets, B and C, the following properties are true:
#
# S(B) S(C); that is, sums of subsets cannot be equal.
# If B contains more elements than C then S(B) > S(C).
# For example, {81, 88, 75, 42, 87, 84, 86, 65} is not a special sum set
# because 65 + 87 + 88 = 75 + 81 + 84, whereas
# {157, 150, 164, 119, 79, 159, 161, 139, 158} satisfies both rules
# for all possible subset pair combinations and S(A) = 1286.
#
# Using sets.txt (right click and "Save Link/Target As..."), a 4K text file
# with one-hundred sets containing seven to twelve elements
# (the two examples given above are the first two sets in the file),
# identify all the special sum sets, A1, A2, ..., Ak,
# and find the value of S(A1) + S(A2) + ... + S(Ak).
# NOTE: This problem is related to Problem 103 and Problem 106.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 102
timed.caller(dummy, n, i, prob_id)
| 34.916667 | 76 | 0.67144 |
e62e5420e9280590cadcb41f39f8b617ff3cad05 | 3,673 | py | Python | pyserver/item/attc/annotation.py | lbouma/Cyclopath | d09d927a1e6f9e07924007fd39e8e807cd9c0f8c | [
"Apache-2.0"
] | 15 | 2015-05-06T05:11:48.000Z | 2021-12-03T14:56:58.000Z | pyserver/item/attc/annotation.py | landonb/Cyclopath | d09d927a1e6f9e07924007fd39e8e807cd9c0f8c | [
"Apache-2.0"
] | null | null | null | pyserver/item/attc/annotation.py | landonb/Cyclopath | d09d927a1e6f9e07924007fd39e8e807cd9c0f8c | [
"Apache-2.0"
] | 8 | 2015-05-06T05:11:36.000Z | 2020-11-04T05:11:22.000Z | # Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import conf
import g
from item import attachment
from item import item_base
from item import item_versioned
from item.util.item_type import Item_Type
from util_.streetaddress import ccp_stop_words
log = g.log.getLogger('annotation')
# ***
| 30.355372 | 79 | 0.632181 |
e62e88ca85209412b46b34f3a3135f7a89043c82 | 183 | py | Python | examples/sandbox/sandbox/__main__.py | salt-die/nurses_2 | 29b76c34b9a28bf7c115998f4e81979966c82df0 | [
"MIT"
] | 171 | 2021-06-23T15:29:15.000Z | 2022-03-25T18:53:10.000Z | examples/sandbox/sandbox/__main__.py | salt-die/nurses_2 | 29b76c34b9a28bf7c115998f4e81979966c82df0 | [
"MIT"
] | 1 | 2022-01-07T05:08:35.000Z | 2022-01-10T04:53:57.000Z | examples/sandbox/sandbox/__main__.py | salt-die/nurses_2 | 29b76c34b9a28bf7c115998f4e81979966c82df0 | [
"MIT"
] | 3 | 2021-10-01T09:12:15.000Z | 2022-01-14T21:31:11.000Z | from nurses_2.app import App
from .sandbox import Sandbox
SandboxApp().run()
| 16.636364 | 48 | 0.704918 |
e630f7f1230425fb80852a1c185d9c2e86b9dabb | 4,985 | py | Python | midas2/common/bowtie2.py | czbiohub/microbiome-igg | fd4bc62bee15e53587a947ca32bf3c5b9e8022e6 | [
"MIT"
] | null | null | null | midas2/common/bowtie2.py | czbiohub/microbiome-igg | fd4bc62bee15e53587a947ca32bf3c5b9e8022e6 | [
"MIT"
] | 6 | 2022-03-14T19:37:52.000Z | 2022-03-14T19:51:47.000Z | midas2/common/bowtie2.py | czbiohub/microbiome-igg | fd4bc62bee15e53587a947ca32bf3c5b9e8022e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import numpy as np
from midas2.common.utils import tsprint, command, split, OutputStream
def build_bowtie2_db(bt2_db_dir, bt2_db_name, downloaded_files, num_cores):
""" Build Bowtie2 database for the collections of fasta files """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if not bowtie2_index_exists(bt2_db_dir, bt2_db_name):
# Primarily for build_bowtie2db.py
if not os.path.exists(bt2_db_dir):
tsprint(f"Create bt2_db_dir: {bt2_db_dir}")
command(f"mkdir -p {bt2_db_dir}")
# Write the species_id to file, that used to build the bowtie2 indexes
with OutputStream(f"{bt2_db_prefix}.species") as stream:
stream.write("\n".join(map(str, downloaded_files.keys())))
command(f"rm -f {bt2_db_dir}/{bt2_db_name}.fa", quiet=False)
command(f"touch {bt2_db_dir}/{bt2_db_name}.fa")
for files in split(downloaded_files.values(), 20): # keep "cat" commands short
command("cat " + " ".join(files) + f" >> {bt2_db_dir}/{bt2_db_name}.fa")
try:
command(f"bowtie2-build --threads {num_cores} {bt2_db_prefix}.fa {bt2_db_prefix} > {bt2_db_dir}/bt2-db-build-{bt2_db_name}.log", quiet=False)
except:
tsprint(f"Bowtie2 index {bt2_db_prefix} run into error")
command(f"rm -f {bt2_db_prefix}.1.bt2")
raise
return bt2_db_prefix
def bowtie2_align(bt2_db_dir, bt2_db_name, bamfile_path, args):
""" Use Bowtie2 to map reads to prebuilt bowtie2 database """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if os.path.exists(bamfile_path):
tsprint(f"Use existing bamfile {bamfile_path}")
return
# Construct bowtie2 align input arguments
max_reads = f"-u {args.max_reads}" if args.max_reads else ""
aln_mode = "local" if args.aln_mode == "local" else "end-to-end"
aln_speed = args.aln_speed if aln_mode == "end-to-end" else args.aln_speed + "-local"
r2 = ""
max_fraglen = f"-X {args.fragment_length}" if args.r2 else ""
if args.r2:
r1 = f"-1 {args.r1}"
r2 = f"-2 {args.r2}"
elif args.aln_interleaved:
r1 = f"--interleaved {args.r1}"
else:
r1 = f"-U {args.r1}"
try:
bt2_command = f"bowtie2 --no-unal -x {bt2_db_prefix} {max_fraglen} {max_reads} --{aln_mode} --{aln_speed} --threads {args.num_cores} -q {r1} {r2}"
command(f"set -o pipefail; {bt2_command} | \
samtools view --threads {args.num_cores} -b - | \
samtools sort --threads {args.num_cores} -o {bamfile_path}", quiet=False)
except:
tsprint(f"Bowtie2 align to {bamfile_path} run into error")
command(f"rm -f {bamfile_path}")
raise
def _keep_read(aln, aln_mapid, aln_readq, aln_mapq, aln_cov):
""" Check the quality of one alignnment from BAM file """
if aln.is_secondary:
return False
align_len = len(aln.query_alignment_sequence)
query_len = aln.query_length
# min pid
if 100 * (align_len - dict(aln.tags)['NM']) / float(align_len) < aln_mapid:
return False
# min read quality
if np.mean(aln.query_qualities) < aln_readq:
return False
# min map quality
if aln.mapping_quality < aln_mapq:
return False
# min aln cov
if align_len / float(query_len) < aln_cov:
return False
return True
| 38.643411 | 154 | 0.649549 |
e6314fc5be266fa2fd430fad718dac793df709ff | 3,541 | py | Python | src/race/src/my_lane_detection/findpoint.py | young43/ISCC_2020 | 2a7187410bceca901bd87b753a91fd35b73ca036 | [
"MIT"
] | 3 | 2020-11-13T04:59:27.000Z | 2021-04-02T06:36:03.000Z | src/race/src/my_lane_detection/findpoint.py | yongbeomkwak/ISCC_2021 | 7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015 | [
"MIT"
] | null | null | null | src/race/src/my_lane_detection/findpoint.py | yongbeomkwak/ISCC_2021 | 7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015 | [
"MIT"
] | 5 | 2020-09-13T09:06:16.000Z | 2021-06-19T02:31:23.000Z | import numpy as np
import cv2
| 40.238636 | 119 | 0.475572 |
e6315a99e2517f5c7110b8dd1b8d7574b184b340 | 6,198 | py | Python | backend/ibutsu_server/tasks/db.py | john-dupuy/ibutsu-server | ae380fc7a72a4898075291bac8fdb86952bfd06a | [
"MIT"
] | null | null | null | backend/ibutsu_server/tasks/db.py | john-dupuy/ibutsu-server | ae380fc7a72a4898075291bac8fdb86952bfd06a | [
"MIT"
] | null | null | null | backend/ibutsu_server/tasks/db.py | john-dupuy/ibutsu-server | ae380fc7a72a4898075291bac8fdb86952bfd06a | [
"MIT"
] | null | null | null | import time
from datetime import datetime
from datetime import timedelta
from bson import ObjectId
from bson.errors import InvalidId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from ibutsu_server.tasks.results import add_result_start_time
from ibutsu_server.tasks.runs import update_run as update_run_task
from ibutsu_server.util import serialize
from kombu.exceptions import OperationalError
from pymongo import DESCENDING
from redis import Redis
from redis.exceptions import LockError
""" Tasks for DB related things"""
LOCK_EXPIRE = 1
| 35.016949 | 99 | 0.62262 |
e631a24a11407592b87e8e8c899720b7b1343b18 | 1,457 | py | Python | vectorc2/blocks/migrations/0002_initialize_data.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | [
"Apache-2.0"
] | 11 | 2019-02-27T01:38:47.000Z | 2020-11-13T02:14:58.000Z | vectorc2/blocks/migrations/0002_initialize_data.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | [
"Apache-2.0"
] | 20 | 2019-02-27T21:22:59.000Z | 2022-01-13T01:22:16.000Z | vectorc2/blocks/migrations/0002_initialize_data.py | sebastiankruk/vectorc2 | 13232cd63ebed32346fb4a669511b102b8ed24c0 | [
"Apache-2.0"
] | 1 | 2020-01-14T09:14:28.000Z | 2020-01-14T09:14:28.000Z | # Generated by Django 2.1.7 on 2019-05-14 20:36
from django.db import migrations
from blocks.models import AnimationName, AnimationTrigger
import anki_vector
def generate_names(apps, schema_editor):
"""
Helper function to populate names of animations and triggers and update their status
"""
def __update_or_create(source, name):
"""
Helper function to create/update a single name index
"""
source.objects.update_or_create(
name=name,
defaults={
'name': name,
'active': True
})
AnimationName = apps.get_model('blocks', 'AnimationName')
AnimationName.objects.filter(active=True).update(active=False)
AnimationTrigger = apps.get_model('blocks', 'AnimationTrigger')
AnimationTrigger.objects.filter(active=True).update(active=False)
with anki_vector.AsyncRobot() as robot:
anim_request = robot.anim.load_animation_list()
anim_request.result()
for anim_name in robot.anim.anim_list:
__update_or_create(AnimationName, anim_name)
anim_trigger_request = robot.anim.load_animation_trigger_list()
anim_trigger_request.result()
for anim_trigger_name in robot.anim.anim_trigger_list:
__update_or_create(AnimationTrigger, anim_trigger_name)
# ----------------------
| 28.019231 | 86 | 0.705559 |
e6329473dcae584a805afb6f3b7a11b77f6eec4b | 130 | py | Python | src/app/groups/apps.py | serious-notreally/cappa | 993a8df35ca6c3b22f3ca811937fd29c07fc71aa | [
"MIT"
] | 9 | 2020-04-05T07:35:55.000Z | 2021-08-03T05:50:05.000Z | src/app/groups/apps.py | serious-notreally/cappa | 993a8df35ca6c3b22f3ca811937fd29c07fc71aa | [
"MIT"
] | 89 | 2020-01-26T11:50:06.000Z | 2022-03-31T07:14:18.000Z | src/app/groups/apps.py | serious-notreally/cappa | 993a8df35ca6c3b22f3ca811937fd29c07fc71aa | [
"MIT"
] | 13 | 2020-03-10T14:45:07.000Z | 2021-07-31T02:43:40.000Z | from django.apps import AppConfig
| 18.571429 | 35 | 0.738462 |
e6338656305747e7dd588f6558bdad231c542786 | 830 | py | Python | Estudos/namedtuple.py | Gbrvi/Python | 02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab | [
"MIT"
] | null | null | null | Estudos/namedtuple.py | Gbrvi/Python | 02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab | [
"MIT"
] | null | null | null | Estudos/namedtuple.py | Gbrvi/Python | 02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab | [
"MIT"
] | null | null | null | from collections import namedtuple
# tipo um dicionario, mais lento, mas imutvel!
#Jogador a classe | #Atributos da classe
J = namedtuple('Jogador', ['nome', 'time', 'camisa', 'numero'])
j = J('Abel Hernadez', 'Flu', 99, 100) #Adicionando valores
j2 = J('Fred', 'Fluminense', 9, 157)
print(j2.nome)
#-------------------------------------------------------
# Nomes repetidos ou destinado ao python (def, class) so subtituidos se colocar o rename
P = namedtuple('Pessoa', ['nome', 'idade', 'def'], rename=True)
p = P('Carlos', 15, 'viano')
#output: Pessoa(nome='Carlos', idade=15, _2='viano')
#Default define um valor padro, mas nececssario que o primeiro valor "x" seja informado
L = namedtuple('valores', ['x', 'y', 'z'], defaults=(None, None))
l = L(2)
print(l) | 31.923077 | 91 | 0.591566 |
e6342f9f6fc2f8be229cda6971a2b29ca77c7c7c | 1,330 | py | Python | src/decker/format/command.py | douglasfarinelli/pydev | 9d43d485b102e5b44ee28894278ae496c3cec024 | [
"MIT"
] | 21 | 2020-12-11T17:59:50.000Z | 2022-03-12T02:22:09.000Z | src/decker/format/command.py | douglasfarinelli/decker | 9d43d485b102e5b44ee28894278ae496c3cec024 | [
"MIT"
] | null | null | null | src/decker/format/command.py | douglasfarinelli/decker | 9d43d485b102e5b44ee28894278ae496c3cec024 | [
"MIT"
] | 2 | 2021-07-31T00:05:25.000Z | 2021-11-04T12:09:26.000Z | import sys
from typing import List
import click
from decker.conf import Config
from decker.utils import print_done
from .pool import FormatterBackendPool
from .services import run_format
| 19 | 80 | 0.635338 |
e63506be46724ae2661303db422a81cac16e9cfd | 709 | py | Python | seeds.py | hazzillrodriguez/Flaskdesk | 16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5 | [
"MIT"
] | null | null | null | seeds.py | hazzillrodriguez/Flaskdesk | 16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5 | [
"MIT"
] | null | null | null | seeds.py | hazzillrodriguez/Flaskdesk | 16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5 | [
"MIT"
] | null | null | null | from app import app, db
from app.models import Category, Priority, Status
from sqlalchemy.exc import SQLAlchemyError
category = 'Uncategorized'
priorities = ['Low', 'Medium', 'High', 'Urgent']
statuses = ['Open', 'Resolved', 'Pending', 'Closed']
with app.app_context():
if db_commit():
for priority, status in zip(priorities, statuses):
db.session.add(Priority(priority=priority))
db.session.add(Status(status=status))
db.session.add(Category(category=category))
db.session.commit() | 28.36 | 63 | 0.734838 |
e637df68b541d6e5860c6604066ab8cbf8d7df24 | 164 | py | Python | xs/nn/__init__.py | eLeVeNnN/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 290 | 2020-07-06T02:13:12.000Z | 2021-01-04T14:23:39.000Z | xs/nn/__init__.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 1 | 2020-12-03T11:11:48.000Z | 2020-12-03T11:11:48.000Z | xs/nn/__init__.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 49 | 2020-07-16T00:27:47.000Z | 2020-11-26T03:03:14.000Z | from .objectives import MSELoss, CrossEntropyLoss
from .models import Sequential, Model, Module
from .grad_fn import Parameter, Tensor
from .td_functional import *
| 32.8 | 49 | 0.823171 |
e63871f321b5d3bb45b965cb63b221c456ac757e | 2,527 | py | Python | eval/plot.py | yhlleo/TriangleGAN | 5bab76561e75145c2645a93e23d22abd3f66f329 | [
"BSD-3-Clause"
] | 32 | 2019-07-15T11:11:57.000Z | 2022-01-09T11:03:00.000Z | eval/plot.py | yhlleo/TriangleGAN | 5bab76561e75145c2645a93e23d22abd3f66f329 | [
"BSD-3-Clause"
] | null | null | null | eval/plot.py | yhlleo/TriangleGAN | 5bab76561e75145c2645a93e23d22abd3f66f329 | [
"BSD-3-Clause"
] | 4 | 2019-07-17T09:00:14.000Z | 2021-11-16T21:20:25.000Z | # plot prd scores
import os
import json
from matplotlib import pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("json_files", nargs="*")
parser.add_argument("--output_fig", type=str, default='prd.png')
args = parser.parse_args()
def plot(precision_recall_pairs, labels=None, out_path=None,
legend_loc='lower left', dpi=300):
"""Plots precision recall curves for distributions.
Creates the PRD plot for the given data and stores the plot in a given path.
Args:
precision_recall_pairs: List of prd_data to plot. Each item in this list is
a 2D array of precision and recall values for the
same number of ratios.
labels: Optional list of labels of same length as list_of_prd_data. The
default value is None.
out_path: Output path for the resulting plot. If None, the plot will be
opened via plt.show(). The default value is None.
legend_loc: Location of the legend. The default value is 'lower left'.
dpi: Dots per inch (DPI) for the figure. The default value is 150.
Raises:
ValueError: If labels is a list of different length than list_of_prd_data.
"""
if labels is not None and len(labels) != len(precision_recall_pairs):
raise ValueError(
'Length of labels %d must be identical to length of '
'precision_recall_pairs %d.'
% (len(labels), len(precision_recall_pairs)))
fig = plt.figure(figsize=(3.5, 3.5), dpi=dpi)
plot_handle = fig.add_subplot(111)
plot_handle.tick_params(axis='both', which='major', labelsize=12)
for i in range(len(precision_recall_pairs)):
precision, recall = precision_recall_pairs[i]
label = labels[i] if labels is not None else None
plt.plot(recall, precision, label=label, alpha=0.5, linewidth=3)
if labels is not None:
plt.legend(loc=legend_loc)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall', fontsize=12)
plt.ylabel('Precision', fontsize=12)
plt.tight_layout()
plt.savefig(out_path, bbox_inches='tight', dpi=dpi)
plt.close()
if __name__ == '__main__':
precision_recall_pairs, labels = load_jsons(args.json_files)
plot(precision_recall_pairs, labels, args.output_fig) | 37.716418 | 79 | 0.693708 |
e63ab07fc8212736ff3ef91cca7ad9e31b8c2243 | 2,218 | py | Python | data_output.py | adebraine/Time-Series-RNN | 2e5ef0a222d84e15ed09141724fa437492c1466e | [
"MIT"
] | null | null | null | data_output.py | adebraine/Time-Series-RNN | 2e5ef0a222d84e15ed09141724fa437492c1466e | [
"MIT"
] | null | null | null | data_output.py | adebraine/Time-Series-RNN | 2e5ef0a222d84e15ed09141724fa437492c1466e | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import keras
| 35.206349 | 92 | 0.595131 |
e63b0d4192a6f56afdb4ff053aeafe21f3a6cf89 | 1,837 | py | Python | vector_auto_regression.py | hotpxl/nebuchadnezzar | b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409 | [
"MIT"
] | 2 | 2015-05-20T18:02:40.000Z | 2016-08-07T18:57:27.000Z | vector_auto_regression.py | hotpxl/nebuchadnezzar | b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409 | [
"MIT"
] | null | null | null | vector_auto_regression.py | hotpxl/nebuchadnezzar | b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.4
import stats.data
import stats.plot
import stats.preprocess
import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
import datetime
from statsmodels.tsa.api import VAR, DynamicVAR
sse_indices = stats.data.sse_indices()
for i in sse_indices:
d = stats.data.get_merged(i, 'date', 'volume', 'readCount')
# strip first few data points
d = d[2:]
for window_size in range(3, 10):
# window_size = 7
raw_volume = d[:, 1].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size).astype(float)))
read_count = d[:, 2].astype(float)
data = pandas.DataFrame({'volume': volume, 'readCount': read_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = VAR(data)
lag = model.select_order()['hqic']
length = data.values.shape[0]
print('using lag {}'.format(lag))
results = model.fit(lag)
# import IPython; IPython.embed()
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
pred = np.asarray(prediction).reshape((length, 1))
fig, ax = plt.subplots()
dates = list(map(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), d[:, 0]))
ax.plot(dates, pred, 'r', label='forecast')
ax.plot(dates, volume, 'b', label='real')
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
ax.set_ylabel('Volume')
ax.legend()
plt.show()
# plt.savefig('{}_{}.png'.format(i, window_size))
# stats.plot.twin_x(np.concatenate((d[:, 1].reshape((length, 1)), pred), axis=1))
# import IPython; IPython.embed()
| 37.489796 | 132 | 0.625476 |
e63cd901a3e8b73ecbb160ecf9c349073434a2bf | 2,086 | py | Python | ArticleClassifierTF/src/data_models/weights/theme_weights.py | joduss/ArticleClassifier | 38c0e168cdd74214b7f591c7cfc7b93fc496e46b | [
"Unlicense"
] | null | null | null | ArticleClassifierTF/src/data_models/weights/theme_weights.py | joduss/ArticleClassifier | 38c0e168cdd74214b7f591c7cfc7b93fc496e46b | [
"Unlicense"
] | null | null | null | ArticleClassifierTF/src/data_models/weights/theme_weights.py | joduss/ArticleClassifier | 38c0e168cdd74214b7f591c7cfc7b93fc496e46b | [
"Unlicense"
] | null | null | null | from typing import Dict, List
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.ThemeStat import ThemeStat
| 32.59375 | 93 | 0.650527 |
e63cf8d084bbaa33179f664b68770d2a61c1830b | 2,688 | py | Python | installation_text.py | bryanrtboy/videoselector | 6867c14ebb3f9ac563a2aa5533806ec4872a53e9 | [
"MIT"
] | 1 | 2017-12-10T12:42:09.000Z | 2017-12-10T12:42:09.000Z | installation_text.py | bryanrtboy/videoselector | 6867c14ebb3f9ac563a2aa5533806ec4872a53e9 | [
"MIT"
] | null | null | null | installation_text.py | bryanrtboy/videoselector | 6867c14ebb3f9ac563a2aa5533806ec4872a53e9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from pssh import SSHClient, ParallelSSHClient, utils
import datetime
import time
import random
import sys
output = []
hosts = ['client0', 'client1', 'client2','client3', 'client4']
client = ParallelSSHClient(hosts)
values = ["bear","cake","fork","pipe","gun"]
#if you need to get a response back from the client, use this functio
#instead of open_movies().
#Note with --loop argument in cmds, the process will never quit
#requires CTRL-C to end the process
if __name__ == "__main__":
open_movies(values, 15)
| 29.217391 | 111 | 0.679315 |
e63d83d29b28004d4dc6e59ec720b1e34cdc3bc7 | 3,744 | py | Python | poi/cache.py | jchluo/poi | 6892d3e219ee2b841053a41d308887a5e6b60017 | [
"Apache-2.0"
] | 10 | 2016-01-11T09:24:38.000Z | 2021-07-20T06:40:15.000Z | poi/cache.py | jchluo/poi | 6892d3e219ee2b841053a41d308887a5e6b60017 | [
"Apache-2.0"
] | 1 | 2018-04-10T04:48:18.000Z | 2018-04-10T04:48:18.000Z | poi/cache.py | jchluo/poi | 6892d3e219ee2b841053a41d308887a5e6b60017 | [
"Apache-2.0"
] | 8 | 2016-01-11T09:24:56.000Z | 2020-04-23T08:25:53.000Z | # -*- coding: utf-8 -*-
"""Cache Recommender.
dump : run topN predict item for each user, and
dump them to file like object(disk file or memory).
load : recover from file like object, return CacheRecommender.
Note that this recommender just a tiny version of the original one,
which can only predict topN (stored in file) items.
usage:
>>> class M(object):
... def __init__(self):
... self.num_users = 1
... self.num_items = 3
... self.checkins = {0: {0:1}}
... self.name = "Test"
... def predict(self, u, i):
... return 1.0 * i
usage dump:
>>> from StringIO import StringIO
>>> f = StringIO()
>>> md = M()
>>> dump(md, f, attrs=["name"], num_pool=0)
usage load
>>> f.seek(0)
>>> cr = load(f)
>>> print cr.predict(0, 2)
2.0
>>> print cr.name
Test
"""
import time
import json
import logging
import numpy as np
from .utils import threads
from .models import Recommender
log = logging.getLogger(__name__)
__all__ = ["Recommender", "Evaluation"]
def dump(model, fp, num=1000, attrs=None, num_pool=4):
    """Dump the top-N predictions for every user of *model* to a file.

    The first line written is a JSON "meta" object holding selected model
    attributes plus the model's repr; each following line is one
    JSON-encoded per-user record produced by _proxy_predict.

    model: recommender exposing num_users / num_items and a predict method.
    fp: writable file-like object.
    num: only the top `num` items (and their scores) per user are kept;
        the rest are abandoned.
    attrs: iterable of attribute names to store in the meta line;
        "num_users" and "num_items" are always included automatically.
    num_pool: number of worker threads; 0 turns multithreading off.

    Raises ValueError when model is None, and AttributeError when a
    requested attribute is missing on the model.
    """
    if model is None:
        raise ValueError("model is None.")
    t0 = time.time()
    # One task per user; each task computes that user's top `num` items.
    args = [(model, i, num) for i in xrange(model.num_users)]
    if num_pool > 0:
        results = threads(_proxy_predict, args, num_pool)
    else:
        # Sequential fallback when threading is disabled.
        results = [_proxy_predict(arg) for arg in args]
    meta = {}
    # Collect the model attributes that make up the meta (first) line.
    if attrs is None:
        attrs = ["num_users", "num_items"]
    else:
        attrs = list(attrs)
        attrs.extend(["num_users", "num_items"])
    attrs = set(attrs)  # de-duplicate in case the caller listed them too
    for attr in attrs:
        if not hasattr(model, attr):
            raise AttributeError("attribute: %s Not Found." % attr)
        meta[attr] = getattr(model, attr)
    # Store the repr so the loaded CacheRecommender can still describe itself.
    meta["__repr__"] = str(model)
    print >> fp, json.dumps(meta)
    # One JSON line per user record (Python 2 print-chevron syntax).
    for one in results:
        print >> fp, json.dumps(one)
    t1 = time.time()
    log.debug("dump ok, time: %.2fs" % (t1 - t0))
def load(fp):
    """Return a CacheRecommender, the tiny version of the original model.

    Reads the file layout produced by dump(): the first line is the JSON
    meta object, every subsequent line a [user, [[item, score], ...]]
    record.  Only the dumped top-N items can be predicted afterwards.

    fp: readable file-like object positioned at the start of a dump.
    """
    cr = CacheRecommender()
    # First line: model meta attributes (num_users, num_items, __repr__, ...).
    cr._meta = json.loads(fp.readline())
    # Remaining lines: per-user score records.
    for line in fp:
        rd = json.loads(line.strip())
        user = int(rd[0])
        scores = rd[1]
        cr._data[user] = {}
        # JSON keys/values come back as generic types; coerce explicitly.
        for l, s in scores:
            cr._data[user][int(l)] = float(s)
    return cr
| 27.328467 | 75 | 0.576656 |
e63da7efdb0e189e1a9e15a53af922678e7b6e0e | 2,335 | py | Python | p2p/protocol.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | p2p/protocol.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | p2p/protocol.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | null | null | null | import logging
from typing import (
Any,
Sequence,
Tuple,
Type,
)
from eth_utils.toolz import accumulate
from p2p.abc import (
CommandAPI,
ProtocolAPI,
TransportAPI,
)
from p2p.constants import P2P_PROTOCOL_COMMAND_LENGTH
from p2p.typing import Capability
def get_cmd_offsets(protocol_types: Sequence[Type[ProtocolAPI]]) -> Tuple[int, ...]:
    """
    Compute the ``command_id_offsets`` for each protocol.

    The first protocol always begins right after the base ``p2p`` protocol,
    so its offset is ``P2P_PROTOCOL_COMMAND_LENGTH``.  Each subsequent
    protocol starts where the previous one ended, i.e. at the running sum
    of all preceding protocols' command lengths.
    """
    offsets = []
    next_offset = P2P_PROTOCOL_COMMAND_LENGTH
    for protocol_class in protocol_types:
        offsets.append(next_offset)
        next_offset += protocol_class.command_length
    return tuple(offsets)
| 31.986301 | 93 | 0.677088 |
e63f1e8cde7eb9bc19101fd61c76b84d56a931e5 | 6,314 | py | Python | soocii_services_lib/tokens.py | jonascheng/services-lib | 5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc | [
"Apache-2.0"
] | null | null | null | soocii_services_lib/tokens.py | jonascheng/services-lib | 5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc | [
"Apache-2.0"
] | 5 | 2017-11-23T08:24:09.000Z | 2018-12-25T04:42:48.000Z | soocii_services_lib/tokens.py | jonascheng/services-lib | 5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc | [
"Apache-2.0"
] | 3 | 2017-06-28T07:54:40.000Z | 2018-12-25T04:44:42.000Z | import binascii
import json
import time
import jsonschema
from .crypter import AESCipher
from .exceptions import AccessTokenValidationError, RefreshTokenValidationError, TokenExpiredError, TokenSchemaError
| 28.062222 | 116 | 0.464365 |
e648ade42231ae7382e8ffb8232ee7fd02bab1ce | 6,060 | py | Python | software/camera-imu/tools/imu_driver_alt.py | MomsFriendlyRobotCompany/mjolnir | 76f53e8e650ba1051b5f14e94ff2a9a283158da4 | [
"MIT"
] | 1 | 2020-08-17T04:36:14.000Z | 2020-08-17T04:36:14.000Z | software/camera-imu/tools/imu_driver_alt.py | MomsFriendlyRobotCompany/mjolnir | 76f53e8e650ba1051b5f14e94ff2a9a283158da4 | [
"MIT"
] | null | null | null | software/camera-imu/tools/imu_driver_alt.py | MomsFriendlyRobotCompany/mjolnir | 76f53e8e650ba1051b5f14e94ff2a9a283158da4 | [
"MIT"
] | 1 | 2021-04-06T08:26:03.000Z | 2021-04-06T08:26:03.000Z |
from serial import Serial
import struct
from math import log10, sin, cos, acos, atan2, asin, pi, sqrt
import time
from collections import namedtuple
from colorama import Fore
# agmpt_t = namedtuple("agmpt_t", "accel gyro mag pressure temperature timestamp")
# ImageIMU = namedtuple("ImageIMU","image accel gyro temperature timestamp")
AccelGyroMag = namedtuple("AccelGyroMag", "ax ay az gx gy gz mx my mz")
TempPress = namedtuple("TempPress", "temperature pressure")
Light = namedtuple("Light", "lux")
c2f = lambda t: t*9/5+32
Key = {
cAccelGyroMag.header: cAccelGyroMag(),
cAccelGyro.header: cAccelGyro(),
cMag.header: cMag(),
cTempPress.header: cTempPress(),
cLight.header: cLight(),
cIRCamera.header: cIRCamera(),
}
| 25.897436 | 87 | 0.518317 |
e64b61756e2c5141a88d05ce00a52ea06f0af2cf | 1,718 | py | Python | main.py | hwangseonu/pokeka | 39e56c59dfc85a0c73232ac9105766ef060aa90e | [
"MIT"
] | 1 | 2021-06-01T05:26:48.000Z | 2021-06-01T05:26:48.000Z | main.py | hwangseonu/pokeka | 39e56c59dfc85a0c73232ac9105766ef060aa90e | [
"MIT"
] | null | null | null | main.py | hwangseonu/pokeka | 39e56c59dfc85a0c73232ac9105766ef060aa90e | [
"MIT"
] | null | null | null | import base64
import svgwrite
import svgwrite.container
import svgwrite.shapes
import svgwrite.image
import bs4
import os
from urllib.request import urlopen
from selenium import webdriver
# Scrape a Pokemon Card deck-recipe page and lay the card images out on
# A4-sized SVG sheets: a 3x3 grid of 63mm x 88mm cards per page.
code = input(' .> ')  # deck recipe code entered by the user
os.mkdir(code)  # one output directory per deck code (raises if it already exists)
url = 'https://pokemoncard.co.kr/recipe/search?code=' + code

# The page is rendered with JavaScript, so fetch it through headless PhantomJS.
driver = webdriver.PhantomJS('phantomjs.exe')
driver.implicitly_wait(5)
driver.get(url)

soup = bs4.BeautifulSoup(driver.page_source, 'lxml')
card_items = soup.select(f'#show-card-detail-{code} .card-item')

# Expand each card entry into `count` copies of its image URL.
card_list = []
for item in card_items:
    cnt = int(item.select_one('.count').text)
    img = item.select_one('img')  # hoisted: identical for every copy
    for _ in range(cnt):
        card_list.append(img['src'])

# FIX: ceil-divide the card count by 9.  The old expression
# `(n // 9) + 1 if n % 9 != 0 else 0` produced 0 pages whenever the
# count was an exact multiple of 9 (e.g. 18 cards -> no output at all).
pages = -(-len(card_list) // 9)

start_x, start_y = 10.5, 16.5  # top-left margin of the grid, in mm
for p in range(pages):
    x, y = 0, 0  # current column / row within the 3x3 grid
    path = os.path.join(code, f'card{p + 1}.svg')
    dwg = svgwrite.Drawing(path, size=('210mm', '297mm'))  # A4 portrait
    background = svgwrite.container.Group()
    background.add(svgwrite.shapes.Rect(size=('210mm', '297mm'), fill='#ffe659'))
    dwg.add(background)
    cards_group = svgwrite.container.Group()
    for i in range(9):
        index = p * 9 + i
        if index >= len(card_list):
            break
        # Embed the downloaded image as a base64 data URI so the SVG is
        # fully self-contained.
        image = urlopen(card_list[index]).read()
        cards_group.add(svgwrite.image.Image(
            href='data:image/png;base64,' + base64.b64encode(image).decode(),
            width='63mm', height='88mm',
            x=str(start_x + (63 * x)) + 'mm', y=str(start_y + (88 * y)) + 'mm'))
        # Advance through the grid: left-to-right, then top-to-bottom.
        x += 1
        if x >= 3:
            x = 0
            y += 1
    dwg.add(cards_group)
    dwg.save()
e64c6e151cf650530049e08cc621f9b0d7bf3833 | 967 | py | Python | save.py | jonoxia/pencilbox | 1ebcbefd0110a2d23ad0da27427df2e32eadfbfe | [
"Condor-1.1"
] | 3 | 2015-04-01T07:20:09.000Z | 2020-12-26T02:37:56.000Z | save.py | jonoxia/pencilbox | 1ebcbefd0110a2d23ad0da27427df2e32eadfbfe | [
"Condor-1.1"
] | null | null | null | save.py | jonoxia/pencilbox | 1ebcbefd0110a2d23ad0da27427df2e32eadfbfe | [
"Condor-1.1"
] | null | null | null | #!/usr/bin/python
from database_tables import DrawingHistory
from webserver_utils import verify_id
import cgi
import cgitb
import datetime
cgitb.enable()
q = cgi.FieldStorage()
history = q.getfirst("history", "")
layers = q.getfirst("layers", "")
title = q.getfirst("title", "")
artist = verify_id()
matches = DrawingHistory.selectBy(title = title, creator=artist)
if matches.count() > 0:
updateOld(matches[0], history, layers)
else:
createNew(title, artist, history, layers)
print "Content-type: text/html"
print
print "OK, saved"
| 23.585366 | 64 | 0.677353 |
e64d3c1360f948a0e4e91a1e5bc77802db0ff7e0 | 2,148 | py | Python | synthesis/paramGen/testcase2.py | hyunynim/DIST-Renderer | 4717ee8cea77f4f413b61f380a893c6800d0bde5 | [
"MIT"
] | 176 | 2020-06-11T19:16:33.000Z | 2022-03-29T01:38:28.000Z | synthesis/paramGen/testcase2.py | hyunynim/DIST-Renderer | 4717ee8cea77f4f413b61f380a893c6800d0bde5 | [
"MIT"
] | 6 | 2020-06-26T05:26:56.000Z | 2021-11-10T07:31:21.000Z | synthesis/paramGen/testcase2.py | hyunynim/DIST-Renderer | 4717ee8cea77f4f413b61f380a893c6800d0bde5 | [
"MIT"
] | 23 | 2020-06-11T21:43:03.000Z | 2022-02-18T00:16:16.000Z | '''
2019-08-07 00:01
Method:
20 x 5 grid over (camera x lighting)
'''
VIEW_NUM, LIGHTING_NUM = 20, 5
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.param_decomposer import AllParams
| 39.777778 | 103 | 0.640596 |
e64e4471df6551917b2b1289cce293fbc929f162 | 12,219 | py | Python | daisy-world/dashdir/plotting.py | frossmann/addon_containers | de6dde175947b24bbfa35d94d44c9d9633a73226 | [
"BSD-3-Clause"
] | null | null | null | daisy-world/dashdir/plotting.py | frossmann/addon_containers | de6dde175947b24bbfa35d94d44c9d9633a73226 | [
"BSD-3-Clause"
] | null | null | null | daisy-world/dashdir/plotting.py | frossmann/addon_containers | de6dde175947b24bbfa35d94d44c9d9633a73226 | [
"BSD-3-Clause"
] | 2 | 2021-11-16T06:05:02.000Z | 2021-11-18T17:16:35.000Z | import plotly.graph_objects as go
import plotly.figure_factory as ff
import numpy as np
import calculations as calc
from plotly.subplots import make_subplots
| 30.022113 | 87 | 0.554301 |
e64e9265798874238fe8da1d312e841fe0ab8876 | 36 | py | Python | opendart/config/__init__.py | JehunYoo/opendart | c88105baf85af57d006cc2404d192aaf9baf73cc | [
"MIT"
] | null | null | null | opendart/config/__init__.py | JehunYoo/opendart | c88105baf85af57d006cc2404d192aaf9baf73cc | [
"MIT"
] | 2 | 2021-07-12T10:59:20.000Z | 2021-07-13T02:06:27.000Z | opendart/config/__init__.py | JehunYoo/opendart | c88105baf85af57d006cc2404d192aaf9baf73cc | [
"MIT"
] | null | null | null | from opendart.config.config import * | 36 | 36 | 0.833333 |
e64ebb3be618b728917060906d4f5af8a1cfc287 | 10,720 | py | Python | NetworkScanners/libs/Pyssh/pyssh.py | isislovecruft/torflow | 666689ad18d358d764a35d041a7b16adb8d3287c | [
"BSD-3-Clause"
] | null | null | null | NetworkScanners/libs/Pyssh/pyssh.py | isislovecruft/torflow | 666689ad18d358d764a35d041a7b16adb8d3287c | [
"BSD-3-Clause"
] | 1 | 2018-12-18T15:58:40.000Z | 2018-12-26T16:52:51.000Z | NetworkScanners/libs/Pyssh/pyssh.py | isislovecruft/torflow | 666689ad18d358d764a35d041a7b16adb8d3287c | [
"BSD-3-Clause"
] | null | null | null | """A SSH Interface class.
An interface to ssh on posix systems, and plink (part of the Putty
suite) on Win32 systems.
By Rasjid Wilcox.
Copyright (c) 2002.
Version: 0.2
Last modified 4 September 2002.
Drawing on ideas from work by Julian Schaefer-Jasinski, Guido's telnetlib and
version 0.1 of pyssh (http://pyssh.sourceforge.net) by Chuck Esterbrook.
Licenced under a Python 2.2 style license. See License.txt.
"""
DEBUG_LEVEL = 0
import os, getpass
import signal # should cause all KeyboardInterrupts to go to the main thread
# try for Linux, does not seem to be try under Cygwin
import nbpipe
import time
# Constants
SSH_PORT=22
SSH_PATH=''
CTRL_C=chr(3)
READ_LAZY=0
READ_SOME=1
READ_ALL=2
# set the path to ssh / plink, and chose the popen2 funciton to use
if os.name=='posix':
import fssa # we can look for ssh-agent on posix
# XXX Can we on Win32/others?
import ptyext # if my patch gets accepted, change this to check for a
# sufficiently high version of python, and assign ptyext=pty
# if sufficient.
sshpopen2=ptyext.popen2
CLOSE_STR='~.'
tp=os.popen('/usr/bin/which ssh')
SSH_PATH=tp.read().strip()
try:
tp.close()
except IOError:
# probably no child process
pass
if SSH_PATH == '':
tp=os.popen('command -v ssh') # works in bash, ash etc, not csh etc.
SSH_PATH=tp.read().strip()
tp.close()
if SSH_PATH == '':
check = ['/usr/bin/ssh', '/usr/local/bin/ssh', '/bin/ssh']
for item in check:
if os.path.isfile(item):
SSH_PATH=item
break
PORT_STR='-p '
else:
sshpopen2=os.popen2
CLOSE_STR=CTRL_C # FIX-ME: This does not work.
# I think I need to implement a 'kill' component
# to the close function using win32api.
SSH_PATH=''
PORT_STR='-P '
# Helper functions
def _prompt(prompt):
    """Print *prompt*, read one line of user input, and return it.

    Returns a ``(response, abort)`` pair: *response* is the entered text
    ('' on interrupt) and *abort* is 1 if the user pressed Ctrl-C, else 0.
    Prompts mentioning "password:" or "passphrase:" are read with echo
    suppressed via getpass.
    """
    # Suppress terminal echo for secret-looking prompts.
    noecho = (prompt.lower().find('password:') >= 0) or \
             (prompt.lower().find('passphrase:') >=0)
    print """User input required for ssh connection.
(Type Ctrl-C to abort connection.)"""
    abort = 0
    try:
        if noecho:
            response = getpass.getpass(prompt)
        else:
            response = raw_input(prompt)
    except KeyboardInterrupt:
        # Ctrl-C means "abort the connection"; report it via the flag.
        response = ''
        abort = 1
    return response, abort
def test():
    """Interactive test routine for pyssh.

    Usage: python myssh.py [-d] [-sshp path-to-ssh] [username@host | host] [port]
    Default host is localhost, default port is 22.

    Parses flags and positionals out of sys.argv, opens an Ssh session
    (NOTE(review): the Ssh class is defined elsewhere in this file), and
    runs a read-eval loop sending typed commands until 'exit' is entered
    or the connection closes.
    """
    import sys
    debug = 0
    # Optional -d flag: enable debug output.
    if sys.argv[1:] and sys.argv[1] == '-d':
        debug = 1
        del sys.argv[1]
    testsshpath = SSH_PATH
    # Optional -sshp <path> flag: override the detected ssh binary.
    if sys.argv[1:] and sys.argv[1] == '-sshp':
        testsshpath = sys.argv[2]
        del sys.argv[1]
        del sys.argv[1]
    testusername = None
    testhost = 'localhost'
    testport = '22'
    # Remaining positionals: [user@]host, then an optional port.
    if sys.argv[1:]:
        testhost = sys.argv[1]
        if testhost.find('@') != -1:
            testusername, testhost = testhost.split('@')
        if sys.argv[2:]:
            testport = sys.argv[2]
    testcon = Ssh(testusername, testhost, testport)
    testcon.set_debuglevel(debug)
    testcon.set_sshpath(testsshpath)
    testcon.login()
    cmd = None
    # Interactive loop: send each typed command until 'exit' or disconnect.
    while (cmd != 'exit') and testcon.isopen:
        cmd = raw_input("Enter command to send: ")
        print testcon.sendcmd(cmd)
    testcon.close()
if __name__ == '__main__':
test()
| 33.395639 | 82 | 0.547201 |
e64ec15e4f7b983862625b28f909feef4c9e7bb4 | 3,894 | py | Python | pygacal/camera/__init__.py | ereide/pyga-camcal | fd25748ddb11c5b05ef24a2deca2689e0d899875 | [
"MIT"
] | 5 | 2018-05-22T09:11:31.000Z | 2022-03-11T02:32:01.000Z | pygacal/camera/__init__.py | ereide/pyga-camcal | fd25748ddb11c5b05ef24a2deca2689e0d899875 | [
"MIT"
] | null | null | null | pygacal/camera/__init__.py | ereide/pyga-camcal | fd25748ddb11c5b05ef24a2deca2689e0d899875 | [
"MIT"
] | null | null | null |
from clifford import g3c
import numpy as np
import scipy.optimize as opt
from pygacal.rotation.costfunction import restrictedImageCostFunction, restrictedMultiViewImageCostFunction
from pygacal.rotation import minimizeError
from pygacal.rotation.mapping import BivectorLineImageMapping, BivectorLineMapping, LinePropertyBivectorMapping, BivectorLineEstimationMapping
from pygacal.common.cgatools import Sandwich, Dilator, Translator, Reflector, inversion, Rotor, Transversor, I3, I5, VectorEquality, anticommuter, ga_exp, Meet
#Defining variables
layout = g3c.layout
locals().update(g3c.blades)
ep, en, up, down, homo, E0, ninf, no = (g3c.stuff["ep"], g3c.stuff["en"],
g3c.stuff["up"], g3c.stuff["down"], g3c.stuff["homo"],
g3c.stuff["E0"], g3c.stuff["einf"], -g3c.stuff["eo"])
| 34.460177 | 159 | 0.612994 |
e651cc7d2f10c7d86c7ce7b411ef45695942e02f | 1,320 | py | Python | tests/test_utils.py | SalemHarrache/dbcut | 0fd60e15f3b5532c5c531923d2e9ef08ce17c935 | [
"MIT"
] | 16 | 2019-11-22T16:36:56.000Z | 2022-03-12T01:49:03.000Z | tests/test_utils.py | SalemHarrache/dbcut | 0fd60e15f3b5532c5c531923d2e9ef08ce17c935 | [
"MIT"
] | 3 | 2019-11-23T06:11:30.000Z | 2020-06-23T13:34:04.000Z | tests/test_utils.py | SalemHarrache/dbcut | 0fd60e15f3b5532c5c531923d2e9ef08ce17c935 | [
"MIT"
] | 4 | 2019-11-22T20:42:57.000Z | 2022-02-23T09:10:00.000Z | import unittest
from collections import OrderedDict
from dbcut.utils import sorted_nested_dict
| 22.372881 | 82 | 0.524242 |
e6543ff7671521504ac838b1689dbe9bfbccaca2 | 4,704 | py | Python | sprout/runner.py | tjduigna/sprout | d8762ce7e6f04bb082b8ca1e65f73d8900338d9d | [
"Apache-2.0"
] | null | null | null | sprout/runner.py | tjduigna/sprout | d8762ce7e6f04bb082b8ca1e65f73d8900338d9d | [
"Apache-2.0"
] | null | null | null | sprout/runner.py | tjduigna/sprout | d8762ce7e6f04bb082b8ca1e65f73d8900338d9d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019, Sprout Development Team
# Distributed under the terms of the Apache License 2.0
import os
import asyncio
import asyncpg
from tortoise import Tortoise
import sprout
# NOTE(review): the defs below look like methods of a runner class whose
# ``class`` header, constructor and async helpers (_create_schemas,
# _init_schemas, _init_db_pool, create_database) lie outside this chunk --
# self.app, self.schemas and self._loop are assumed to be set up there.
def create_schemas(self, app=None, schemas=None):
    """Create the database schemas synchronously.

    Overrides self.app / self.schemas when truthy arguments are given,
    then drives the async self._create_schemas() to completion on the
    stored event loop.
    """
    self.app = app or self.app
    self.schemas = schemas or self.schemas
    self._loop.run_until_complete(self._create_schemas())
def init_schemas(self, app=None, schemas=None):
    """Initialize the database tables (same override pattern as
    create_schemas())."""
    self.app = app or self.app
    self.schemas = schemas or self.schemas
    self._loop.run_until_complete(self._init_schemas())
def init_db_pool(self, app=None):
    """Create and return the database connection pool."""
    self.app = app or self.app
    pool = self._loop.run_until_complete(self._init_db_pool())
    return pool
def easy_up(self, app):
    """One-shot setup: create the database, its schemas and tables,
    then return a ready connection pool."""
    self.create_database(app=app)
    # An empty list is falsy, so the `schemas or self.schemas` checks in
    # the methods above fall back to the instance's configured schemas.
    schemas = []
    self.create_schemas(app=app, schemas=schemas)
    self.init_schemas(app=app, schemas=schemas)
    return self.init_db_pool(app=app)
| 32.895105 | 67 | 0.577594 |
e6546737a433ea44c0aabf656ba019b30d17d227 | 1,033 | py | Python | tests/test_service.py | beepscore/pi_gpio_service | 47aa9c6e4e378a168320d1f42b6d4c18c998e4db | [
"MIT"
] | 2 | 2018-10-16T18:22:04.000Z | 2021-05-04T21:09:53.000Z | tests/test_service.py | beepscore/pi_gpio_service | 47aa9c6e4e378a168320d1f42b6d4c18c998e4db | [
"MIT"
] | null | null | null | tests/test_service.py | beepscore/pi_gpio_service | 47aa9c6e4e378a168320d1f42b6d4c18c998e4db | [
"MIT"
] | null | null | null | #!/usr/bin/env/python3
import unittest
from pi_gpio_service import service
| 30.382353 | 77 | 0.535334 |
e654e957c98bffeffb8209db916fbae89bbb1792 | 2,726 | py | Python | sangam_poem_csv.py | naturalstupid/sangam_tamil_bot | 2b8117504f10ce4b4bdc2fa8160951374c9d1516 | [
"MIT"
] | null | null | null | sangam_poem_csv.py | naturalstupid/sangam_tamil_bot | 2b8117504f10ce4b4bdc2fa8160951374c9d1516 | [
"MIT"
] | null | null | null | sangam_poem_csv.py | naturalstupid/sangam_tamil_bot | 2b8117504f10ce4b4bdc2fa8160951374c9d1516 | [
"MIT"
] | null | null | null | import string
import regex
import pandas as pd
from pandas.tests.io.parser import index_col
sangam_text_folder = "./sangam_tamil_text/"
sangam_poem_folder = "./sangam_tamil_poems/"
sangam_csv_folder = "./sangam_tamil_csv/"
data_files = ['agananuru','purananuru','ainkurunuru','kalithokai', 'kurunthokai', 'natrinai', 'pathitrupathu', 'pattinapaalai',
'mullaipaattu', 'nedunalvaadai', 'kurinjipaattu','malaipadukadaam','maduraikaanji','porunaraatrupadai',
'perumpaanaatrupadai', 'sirupaanaatrupadai', 'thirumurugaatrupadai', 'ainthinaiezhupathu', 'ainthinaiaimpathu',
'kaarnaarpathu','thinaimozhiaimpathu','kainnilai','thinaimaalainootraimbathu']#, 'thirukkural' ]
POEM_TYPES = ['', '', '', '', '', '', '', '',
'', '','','', '','',
'', '','',' ',' ',' ',
' ','',' ']#,'']
EN_POEM_TYPES = ['Akannru','Purannru','Ainkurunru','Kalithokai','Kurunthokai','Natrinai','Pathitruppathu','Pattinapaalai',
'Mullaipaattu','Nedunalvaadai','Kurinjippttu','Malaipadukadaam','Maduraikaanji','Porunaratrupadai',
'Perumpaanatrupadai','Sirupaanaatrupadai','Thirumurugaatrupadai','Ainthinai Ezhupathu','Aithinai Aimbathu',
'Kaar Naarpathu','Thinaimozhi Aimpathu','Kainnilai','Thinaimaalai Nootraimbathu'
]
sangam_poem_csv_file = sangam_csv_folder+"sangam_poems.csv"
sangam_poems_combined = []
csv_separator = ","
for i, sangam_poem in enumerate(data_files):
csv_file = sangam_csv_folder+sangam_poem+".csv" # agananuru
print("reading poems from",csv_file)
df = pd.read_csv(csv_file,encoding='utf-8',sep=csv_separator,header=0,usecols=['poem'],index_col=None)
df['poem_type'] = POEM_TYPES[i]
df['poem'] = df['poem'].str.translate(str.maketrans('', '', string.punctuation))
df['poem'] = df['poem'].str.replace("", '')
df['poem'] = df['poem'].str.replace("", '')
df['poem'] = df['poem'].str.replace("", '')
df['poem'] = df['poem'].str.replace("", '')
df['poem'] = df['poem'].replace("\d+","",regex=True)
sangam_poems_combined.append(df)
print("Combining all sangam poems into a single database")
sangam_df = pd.concat(sangam_poems_combined,axis=0,ignore_index=True)
print("Writing sangam poems into",sangam_poem_csv_file)
sangam_df.to_csv(sangam_poem_csv_file,encoding='utf-8',sep=csv_separator, index=False, columns=["poem_type", "poem"]) | 69.897436 | 128 | 0.655906 |
e65ac754450ace4ed72f64034c033bc64885d270 | 4,026 | py | Python | backend/src/chat/api/views.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | 4 | 2019-09-13T14:50:22.000Z | 2019-11-27T03:19:44.000Z | backend/src/chat/api/views.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | 8 | 2019-09-15T23:02:21.000Z | 2022-02-10T09:26:10.000Z | backend/src/chat/api/views.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from rest_framework import viewsets
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
DestroyAPIView,
UpdateAPIView
)
from patients.models import Chat, Contact, Schedule, Scheduler, Report, Uploader
from chat.views import get_user_contact, get_user_scheduler, get_user_uploader
from .serializers import ChatSerializer, ScheduleSerializer, ReportSerializer
User = get_user_model()
# SCHEDULE
# UPLOAD
| 29.822222 | 80 | 0.748634 |
e65bcafb9495c37c2cdeefdfa42cd99132b78632 | 6,256 | py | Python | flask_opa.py | hirosh7/flask-opa | a090083ce62944d1085a6923572ed9c68f0dbfa3 | [
"MIT"
] | 34 | 2018-10-16T03:12:44.000Z | 2022-02-21T09:53:13.000Z | flask_opa.py | hirosh7/flask-opa | a090083ce62944d1085a6923572ed9c68f0dbfa3 | [
"MIT"
] | 12 | 2018-10-17T00:41:27.000Z | 2021-03-16T12:58:33.000Z | flask_opa.py | hirosh7/flask-opa | a090083ce62944d1085a6923572ed9c68f0dbfa3 | [
"MIT"
] | 8 | 2019-05-28T19:54:41.000Z | 2022-02-23T13:19:33.000Z | """
Flask Extension for OPA
"""
import requests
from flask.app import Flask
__version__ = "1.0.0"
| 30.076923 | 78 | 0.605499 |
e65e9051029543698ac667d8972b05b6ac01763f | 8,920 | py | Python | model.py | Schrodinger1926/Project-3 | 88f8a1411a712a8ba62036e400ebce9e6df8e40f | [
"MIT"
] | null | null | null | model.py | Schrodinger1926/Project-3 | 88f8a1411a712a8ba62036e400ebce9e6df8e40f | [
"MIT"
] | null | null | null | model.py | Schrodinger1926/Project-3 | 88f8a1411a712a8ba62036e400ebce9e6df8e40f | [
"MIT"
] | null | null | null | import sys
import os
import csv
from random import shuffle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten,\
Dense,\
Lambda,\
Conv2D,\
MaxPooling2D,\
Dropout, \
Cropping2D
DATA_DIR = 'data'
IMG_DIR = os.path.join(DATA_DIR, 'IMG')
samples = []
with open(os.path.join(DATA_DIR, 'driving_log.csv')) as csvfile:
reader = csv.reader(csvfile)
next(reader)
for line in reader:
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def fetch_view_angle(batch_sample, offset=0.1):
    """
    Build the augmented (image, steering-angle) pairs for one CSV row.

    For each camera view (center, left, right) the frame and its horizontal
    flip are loaded; the steering angle is corrected per view (+offset for
    the left camera, -offset for the right, none for the center) and the
    flipped frame gets the negated angle.

    Arguments
    ---------
    batch_sample: array_like
        CSV row: [center_path, left_path, right_path, steering_angle, ...].
    offset: float, optional
        Steering correction for the side cameras (default 0.1, matching
        the previous hard-coded value).

    Returns
    ---------
    res_images: array_like
        Original and flipped image of each camera view as numpy ndarrays.
    res_angles: array_like
        Steering angles parallel to res_images, as floats.
    """
    res_images, res_angles = [], []
    center_angle = float(batch_sample[3])
    # CSV column order is center, left, right; each entry is that camera's
    # steering correction.  Replaces the three duplicated if-branches.
    view_offsets = [0.0, offset, -offset]
    for idx, view_offset in enumerate(view_offsets):
        filename = os.path.join(IMG_DIR, batch_sample[idx].split('/')[-1])
        image = cv2.imread(filename)
        angle = center_angle + view_offset
        # Original frame ...
        res_images.append(image)
        res_angles.append(angle)
        # ... and its horizontal mirror with the negated steering angle.
        res_images.append(cv2.flip(image, 1))
        res_angles.append(-angle)
    return res_images, res_angles
def sanity_check_model():
    """
    Bare-bones model with no hidden layer: the flattened input pixels are
    connected directly to the single output node.

    Intended for checking that the data pipeline works end to end, with
    minimum focus on model performance.

    Returns
    ---------
    keras model (compiled with MSE loss and the Adam optimizer)
    """
    # Initialize model
    model = Sequential()
    # Flatten the 160x320x3 camera frame into a single feature vector.
    model.add(Flatten(input_shape = (160, 320, 3)))
    # Normalization, roughly centering pixel values around zero.
    # NOTE(review): divides by 127 here while the other models divide by
    # 255 -- confirm whether that difference is intentional.
    model.add(Lambda(lambda x: (x - 127)/127))
    # Single output: the predicted steering angle.
    model.add(Dense(1))
    # Compile model
    model.compile(loss='mse', optimizer='adam')
    return model
def LeNet():
    """
    Conventional LeNet-style model: two convolution/pooling stages followed
    by two fully-connected layers and a single regression output.

    Intended for building insight about model performance beyond the
    sanity-check model.

    Returns
    ---------
    keras model (compiled with MSE loss and the Adam optimizer)
    """
    # Initialize model
    model = Sequential()
    # Preprocess incoming data, centered around zero with small standard deviation
    model.add(Lambda(lambda x: (x - 127)/255, input_shape = (160, 320, 3)))
    # Crop image, removing the car hood (bottom 25 rows) and the sky
    # beyond the horizon (top 70 rows).
    model.add(Cropping2D(cropping = ((70, 25), (0, 0))))
    # First: Convolutional layer (6 filters, 5x5) + max-pool + dropout
    model.add(Conv2D(6, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Second: Convolutional layer (also 6 filters, 5x5)
    model.add(Conv2D(6, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Third: Fully Connected layer
    model.add(Flatten())
    model.add(Dense(120))
    model.add(Dropout(0.5))
    # Fourth: Fully Connected layer
    model.add(Dense(84))
    model.add(Dropout(0.5))
    # Fifth: Output layer -- the predicted steering angle
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
def nvidia():
    """
    Model architecture used by Nvidia for end-to-end behavioral cloning.
    Reference: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

    A deeper network than LeNet: 5 convolutional layers followed by 3
    fully-connected layers and a single regression output, with dropout
    after every learned layer.

    Returns
    ---------
    keras model (compiled with MSE loss and the Adam optimizer)
    """
    # Initialize model
    model = Sequential()
    # Preprocess incoming data, centered around zero with small standard deviation
    model.add(Lambda(lambda x: (x - 127)/255, input_shape = (160, 320, 3)))
    # Crop image, removing the car hood (bottom 25 rows) and the sky
    # beyond the horizon (top 70 rows).
    model.add(Cropping2D(cropping = ((70, 25), (0, 0))))
    # First: Convolutional layer, 24 filters, 5x5, stride 2
    model.add(Conv2D(24, (5, 5), strides = (2, 2), activation='relu'))
    model.add(Dropout(0.25))
    #model.add(BatchNormalization(axis = 1))
    # Second: Convolutional layer, 36 filters, 5x5, stride 2
    model.add(Conv2D(36, (5, 5), strides = (2, 2), activation='relu'))
    model.add(Dropout(0.25))
    #model.add(BatchNormalization(axis = 1))
    # Third: Convolutional layer, 48 filters, 5x5, stride 2
    model.add(Conv2D(48, (5, 5), strides = (2, 2), activation='relu'))
    model.add(Dropout(0.25))
    #model.add(BatchNormalization(axis = 1))
    # Fourth: Convolutional layer, 64 filters, 3x3, stride 1
    model.add(Conv2D(64, (3, 3), strides = (1, 1), activation='relu'))
    model.add(Dropout(0.25))
    #model.add(BatchNormalization(axis = 1))
    # Fifth: Convolutional layer, 64 filters, 3x3, stride 1
    model.add(Conv2D(64, (3, 3), strides = (1, 1), activation='relu'))
    model.add(Dropout(0.25))
    #model.add(BatchNormalization(axis = 1))
    model.add(Flatten())
    # Sixth: Fully Connected layer
    model.add(Dense(100))
    model.add(Dropout(0.5))
    # Seventh: Fully Connected layer
    model.add(Dense(50))
    model.add(Dropout(0.5))
    # Eighth: Fully Connected layer
    model.add(Dense(10))
    model.add(Dropout(0.5))
    # Ninth: Output layer -- the predicted steering angle
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
def get_model(name = 'sanity_check'):
    """
    Build and return the requested Keras model.

    Arguments
    ---------
    name: string
        One of 'sanity_check', 'LeNet' or 'nvidia'.

    Returns
    ---------
    A compiled Keras model.

    Raises
    ------
    ValueError
        If `name` does not match a known architecture.  (Previously an
        unknown name silently returned None, which only failed later
        with an obscure error at fit time.)
    """
    if name == 'sanity_check':
        return sanity_check_model()
    if name == 'LeNet':
        return LeNet()
    if name == 'nvidia':
        return nvidia()
    raise ValueError("Unknown model name: {}".format(name))
batch_size = 64
# NOTE(review): `generator` is not defined in this chunk -- presumably a
# batching generator defined elsewhere in the file that yields batches of
# (images, angles) built via fetch_view_angle; confirm against the full file.
train_generator = generator(train_samples, batch_size = batch_size)
validation_generator = generator(validation_samples, batch_size = batch_size)
# Final Model Architecture to be used
model_name = 'nvidia'
# Each CSV row yields 3 camera views x 2 (original + flipped) samples,
# hence the 3*2 multipliers below.
print("Traning samples : {} | Validation samples : {}"\
        .format(3*2*len(train_samples), 3*2*len(validation_samples)))
print(model_name)
model = get_model(name = model_name)
history_object = model.fit_generator(train_generator, steps_per_epoch= \
                    2*3*len(train_samples)//batch_size, validation_data=validation_generator, \
                    validation_steps=3*2*len(validation_samples)//batch_size, epochs=5)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
# Save the loss curves to disk (headless-friendly, no plt.show()).
plt.savefig('post_training_analysis.png')
model.save('model_{}.h5'.format(model_name))
| 27.875 | 118 | 0.638453 |
e65f8dcc762ad6c2b71e1c9a7964a20b18c50603 | 3,807 | py | Python | enlarge_form/enlarge_form.py | lester-lees/extra_addons_sz | cddaf972cf4ea64c553bcff0006eb006a115d5ee | [
"Apache-2.0"
] | null | null | null | enlarge_form/enlarge_form.py | lester-lees/extra_addons_sz | cddaf972cf4ea64c553bcff0006eb006a115d5ee | [
"Apache-2.0"
] | null | null | null | enlarge_form/enlarge_form.py | lester-lees/extra_addons_sz | cddaf972cf4ea64c553bcff0006eb006a115d5ee | [
"Apache-2.0"
] | null | null | null | #! -*- encoding: utf-8 -*-
from openerp import addons
from openerp.osv import fields, osv, orm
from openerp import tools
from openerp.tools.translate import _
| 34.297297 | 186 | 0.504334 |
e65fe6b2cc9e13aae056f4e22435bebdff299fe1 | 1,148 | py | Python | tests/test_app.py | betasewer/machaon | 63ccb4405ac693f14f9d25f6a706466a917dddbf | [
"MIT"
] | 2 | 2020-07-05T08:39:12.000Z | 2022-01-19T22:08:21.000Z | tests/test_app.py | betasewer/machaon | 63ccb4405ac693f14f9d25f6a706466a917dddbf | [
"MIT"
] | 23 | 2020-06-23T16:18:17.000Z | 2021-12-29T09:56:48.000Z | tests/test_app.py | betasewer/machaon | 63ccb4405ac693f14f9d25f6a706466a917dddbf | [
"MIT"
] | null | null | null | from machaon.app import AppRoot, deploy_directory, transfer_deployed_directory
from machaon.process import Spirit, TempSpirit
from machaon.types.shell import Path
| 39.586207 | 87 | 0.715157 |
e66034257961b772951c35834b998bf6cf78de31 | 74 | py | Python | 02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py | Jay-Jay-D/Documentation | c4894e5ac20355ec82ee0db19618ad7f17bf8592 | [
"Apache-2.0"
] | null | null | null | 02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py | Jay-Jay-D/Documentation | c4894e5ac20355ec82ee0db19618ad7f17bf8592 | [
"Apache-2.0"
] | null | null | null | 02 Algorithm Reference/06 Securities and Portfolio/01 Securities and Portfolio Classes/03 code.py | Jay-Jay-D/Documentation | c4894e5ac20355ec82ee0db19618ad7f17bf8592 | [
"Apache-2.0"
] | null | null | null | #Securities array access to Security Objects:
self.Securities["IBM"].Price | 37 | 45 | 0.810811 |
e66228a86c2396ec8a63b8d48e9ca8a5edd9c594 | 502 | py | Python | migrations/versions/1d09e9261d5_.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | null | null | null | migrations/versions/1d09e9261d5_.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | null | null | null | migrations/versions/1d09e9261d5_.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | 1 | 2020-01-05T10:47:21.000Z | 2020-01-05T10:47:21.000Z | """empty message
Revision ID: 1d09e9261d5
Revises: 40d93619b7d
Create Date: 2016-12-16 11:38:41.336859
"""
# revision identifiers, used by Alembic.
revision = '1d09e9261d5'
down_revision = '40d93619b7d'
from alembic import op
import sqlalchemy as sa
| 18.592593 | 63 | 0.687251 |
e6622feade344255592fd9a7d47d6b9f1bd055ff | 5,732 | py | Python | hops/xattr.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | 24 | 2018-09-20T17:56:43.000Z | 2021-11-11T23:34:43.000Z | hops/xattr.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | 39 | 2018-10-04T15:19:07.000Z | 2021-12-23T10:50:33.000Z | hops/xattr.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | 23 | 2018-09-18T07:51:56.000Z | 2021-08-10T12:10:27.000Z | """
API for attaching, detaching, and reading extended metadata to HopsFS files/directories.
It uses the Hopsworks /xattrs REST API
"""
from hops import constants, util, hdfs
from hops.exceptions import RestAPIError
import urllib
def set_xattr(hdfs_path, xattr_name, value):
    """
    Attach an extended attribute to an hdfs_path

    Args:
        :hdfs_path: path of a file or directory
        :xattr_name: name of the extended attribute
        :value: value of the extended attribute (converted to str before upload)

    Returns:
        None

    Raises:
        RestAPIError: if the REST call fails (HTTP status >= 400)
    """
    value = str(value)
    hdfs_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
    headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
    method = constants.HTTP_CONFIG.HTTP_PUT
    resource_url = constants.DELIMITERS.SLASH_DELIMITER + \
                   constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
                   constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
                   hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER + \
                   constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER + \
                   hdfs_path + constants.DELIMITERS.QUESTION_MARK_DELIMITER + constants.XATTRS.XATTRS_PARAM_NAME + \
                   constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER + xattr_name
    response = util.send_request(method, resource_url, data=value, headers=headers)
    if response.status_code >= 400:
        # Parse the body only on failure: a successful PUT may come back
        # with an empty body, which response.json() would raise on even
        # though the call succeeded. This also matches how remove_xattr
        # (below) handles its response.
        response_object = response.json()
        error_code, error_msg, user_msg = util._parse_rest_error(response_object)
        raise RestAPIError("Could not attach extened attributes from a path (url: {}), server response: \n " \
                           "HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
            resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))
def get_xattr(hdfs_path, xattr_name=None):
    """
    Read extended attribute(s) attached to an hdfs_path.

    Args:
        :hdfs_path: path of a file or directory
        :xattr_name: name of the extended attribute; when None, every
            attribute attached to the path is returned

    Returns:
        dict mapping attribute name -> attribute value

    Raises:
        RestAPIError: if the REST call fails (HTTP status >= 400)
    """
    quoted_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
    request_headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
    # Assemble the resource URL from its components; the optional query
    # string selects a single named attribute.
    url_parts = [
        constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        hdfs.project_id(), constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        quoted_path,
    ]
    if xattr_name is not None:
        url_parts += [
            constants.DELIMITERS.QUESTION_MARK_DELIMITER,
            constants.XATTRS.XATTRS_PARAM_NAME,
            constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER,
            xattr_name,
        ]
    resource_url = "".join(url_parts)
    response = util.send_request(constants.HTTP_CONFIG.HTTP_GET, resource_url, headers=request_headers)
    response_object = response.json()
    if response.status_code >= 400:
        error_code, error_msg, user_msg = util._parse_rest_error(response_object)
        raise RestAPIError("Could not get extened attributes attached to a path (url: {}), server response: \n " \
                           "HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
            resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))

    # Flatten the item list into a simple name -> value mapping.
    return {item["name"]: item["value"] for item in response_object["items"]}
def remove_xattr(hdfs_path, xattr_name):
    """
    Detach an extended attribute from an hdfs_path.

    Args:
        :hdfs_path: path of a file or directory
        :xattr_name: name of the extended attribute to remove

    Returns:
        None

    Raises:
        RestAPIError: if the REST call fails (HTTP status >= 400)
    """
    quoted_path = urllib.parse.quote(hdfs._expand_path(hdfs_path))
    request_headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
    # Assemble the resource URL; the query string names the attribute
    # to delete.
    resource_url = "".join([
        constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        hdfs.project_id(), constants.DELIMITERS.SLASH_DELIMITER,
        constants.REST_CONFIG.HOPSWORKS_XATTR_RESOURCE, constants.DELIMITERS.SLASH_DELIMITER,
        quoted_path, constants.DELIMITERS.QUESTION_MARK_DELIMITER,
        constants.XATTRS.XATTRS_PARAM_NAME,
        constants.DELIMITERS.JDBC_CONNECTION_STRING_VALUE_DELIMITER,
        xattr_name,
    ])
    response = util.send_request(constants.HTTP_CONFIG.HTTP_DELETE, resource_url, headers=request_headers)
    if response.status_code >= 400:
        # The body is parsed only on failure; a successful DELETE may
        # return an empty body.
        response_object = response.json()
        error_code, error_msg, user_msg = util._parse_rest_error(response_object)
        raise RestAPIError("Could not remove extened attributes from a path (url: {}), server response: \n " \
                           "HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
            resource_url, response.status_code, response.reason, error_code, error_msg, user_msg))
e66279995933e8ecb67f6f34946455046a6bef43 | 96 | py | Python | protonn/vis/__init__.py | protoNN-ai/protoNN | 812fd524a8c2de49612bbb1fc991c503fe3f1202 | [
"Apache-2.0"
] | 3 | 2018-06-20T08:37:13.000Z | 2019-02-21T00:14:47.000Z | protonn/vis/__init__.py | protoNN-ai/protoNN | 812fd524a8c2de49612bbb1fc991c503fe3f1202 | [
"Apache-2.0"
] | null | null | null | protonn/vis/__init__.py | protoNN-ai/protoNN | 812fd524a8c2de49612bbb1fc991c503fe3f1202 | [
"Apache-2.0"
] | null | null | null | from .vis import df_from_file, df_from_dir, filter_by, PivotTable
from .lines import plot_lines
| 32 | 65 | 0.833333 |
e663a08bf8bd9abb5a531e95d22eb32be3364bee | 2,173 | py | Python | slidingWindow/smallestSubarrayWithGivenSum.py | YasinEhsan/interview-prep | ed9f95af5a37b05304e45b41511068b6f72533e7 | [
"Apache-2.0"
] | 11 | 2019-05-02T22:27:01.000Z | 2020-10-30T08:43:02.000Z | slidingWindow/smallestSubarrayWithGivenSum.py | YasinEhsan/interview-prep | ed9f95af5a37b05304e45b41511068b6f72533e7 | [
"Apache-2.0"
] | null | null | null | slidingWindow/smallestSubarrayWithGivenSum.py | YasinEhsan/interview-prep | ed9f95af5a37b05304e45b41511068b6f72533e7 | [
"Apache-2.0"
] | 3 | 2019-11-01T01:35:01.000Z | 2020-01-11T18:00:39.000Z | # 5 27 20
# 5 25 20
# added import
import math
# 3 tries
# 5.18.20
# forgot that sliding window is in while loop shinking bc last val might ewaql 3 first vals
def smallest_subarray_with_given_sum(s, arr):
    """Length of the smallest contiguous subarray of *arr* with sum >= *s*.

    Classic sliding-window technique: grow the window rightwards until its
    sum reaches *s*, then shrink it from the left while it still qualifies,
    recording the smallest qualifying length seen.

    :param s: target sum (positive number)
    :param arr: sequence of positive numbers
    :return: length of the smallest qualifying subarray, or 0 when no
        contiguous subarray reaches *s*. (The original returned len(arr)
        in that case, which is indistinguishable from a genuine answer;
        it also printed a debug trace on every step.)
    """
    min_len = math.inf
    window_sum = 0
    start = 0
    for end in range(len(arr)):
        window_sum += arr[end]
        # Shrink from the left while the window still satisfies the target:
        # the newest element may cover what several leading elements did.
        while window_sum >= s:
            min_len = min(min_len, end - start + 1)
            window_sum -= arr[start]
            start += 1
    return 0 if min_len == math.inf else min_len
smallest_subarray_with_given_sum(8, [3, 4, 1, 1, 6])
# Given an array of positive numbers and a positive number S, find the length of the smallest contiguous subarray whose sum is greater than or equal to S. Return 0, if no such subarray exists.
# https://www.educative.io/courses/grokking-the-coding-interview/7XMlMEQPnnQ
| 23.879121 | 196 | 0.658997 |
e666c5e9e0189a92959abe01ef942dcddf54c96d | 16,028 | py | Python | build/build.py | lukas-ke/faint-graphics-editor | 33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf | [
"Apache-2.0"
] | 10 | 2016-12-28T22:06:31.000Z | 2021-05-24T13:42:30.000Z | build/build.py | lukas-ke/faint-graphics-editor | 33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf | [
"Apache-2.0"
] | 4 | 2015-10-09T23:55:10.000Z | 2020-04-04T08:09:22.000Z | build/build.py | lukas-ke/faint-graphics-editor | 33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import configparser
import os
import subprocess
import sys
import faint_info
join_path = os.path.join
build_dir = os.path.split(os.path.realpath(__file__))[0]
os.chdir(build_dir) # Fixme: Don't change dir, use absolute paths.
root_dir = os.path.split(build_dir)[0]
sys.path.append(join_path(root_dir, "build-sys/"))
sys.path.append(join_path(root_dir, "test-sys/"))
import build_sys as bs # noqa: E402
from build_sys.util import list_cpp, strip_ext # noqa: E402
from build_sys.util.scoped import working_dir, no_output # noqa: E402
from test_sys import gen_runner # noqa: E402
import gencpp # noqa: E402
if __name__ == '__main__':
platform = ("linux" if sys.platform.startswith('linux') else "msw")
cmdline = bs.parse_command_line()
opts, args = cmdline
exit_on_error(build_faint, (platform, cmdline), blank_line=False)
if platform == 'msw': # Py-extension build not implemented for Linux yet.
exit_on_error(build_python_extension, (platform, cmdline))
if opts.debug:
print("Fixme: Not building tests in debug.")
else:
exit_on_error(build_unit_tests, (platform, cmdline))
exit_on_error(build_image_tests, (platform, cmdline))
exit_on_error(build_benchmarks, (platform, cmdline))
exit_on_error(build_gui_tests, (platform, cmdline))
exit_on_error(run_unit_tests, (platform, cmdline))
if platform == 'msw':
exit_on_error(run_py_tests, (platform, cmdline))
if opts.version != bs.unknown_version_str and platform == 'msw':
bo = read_build_options(platform)
bs.build_installer(opts.version, bo.makensis_exe)
exit(0)
| 31.12233 | 109 | 0.630334 |
e6671dd4f2c0b71c8a3b385713a43ac751148356 | 2,119 | py | Python | printAlternatively.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null | printAlternatively.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | 1 | 2020-07-19T15:40:25.000Z | 2020-07-19T15:40:25.000Z | printAlternatively.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null |
obj = Solution(A=[3,2,1], B=[3,2,1])
obj.printAlternativelySameSize()
"""
Given two arrays, print each element alternatively
For example)
arr1 = [a,b,c,d]
arr2 = [e,f,g,h,i,j,k]
=> a e b f c g d h i j k
"""
obj = Solution(['a', 'b', 'c', 'd'], ['e','f','g','h','i','j','k'])
obj.print_lists()
| 21.40404 | 77 | 0.547428 |
e667758e13389c3d1155786a731f2598edf57be3 | 981 | py | Python | test/test_theaigame_bot.py | gnmerritt/poker | 5e7241efac1b0757f39c28f6d485f4d79960095b | [
"MIT"
] | 5 | 2015-04-09T02:45:12.000Z | 2018-06-27T05:34:41.000Z | test/test_theaigame_bot.py | gnmerritt/poker | 5e7241efac1b0757f39c28f6d485f4d79960095b | [
"MIT"
] | null | null | null | test/test_theaigame_bot.py | gnmerritt/poker | 5e7241efac1b0757f39c28f6d485f4d79960095b | [
"MIT"
] | 2 | 2017-09-19T04:49:07.000Z | 2018-12-09T19:58:18.000Z | import unittest
from pokeher.theaigame_bot import TheAiGameBot
| 31.645161 | 79 | 0.670744 |
e66883315cccecf4d95a549214dcc1704e5e4e46 | 429 | py | Python | tests/test_exp.py | SiddeshSambasivam/MatterIx | e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139 | [
"MIT"
] | 9 | 2020-07-25T12:00:30.000Z | 2021-07-07T09:30:57.000Z | tests/test_exp.py | SiddeshSambasivam/MatterIx | e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139 | [
"MIT"
] | null | null | null | tests/test_exp.py | SiddeshSambasivam/MatterIx | e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139 | [
"MIT"
] | null | null | null | import unittest
from matterix import Tensor
import numpy as np
| 25.235294 | 58 | 0.638695 |
e6691884e90ad61e89f19cad4d887cbd1d5007c5 | 2,161 | py | Python | tests/scripts/run_lookups.py | abelard2008/overlog | 8df2bb95d2e39e41dd8e30249da6bb8a1615f39f | [
"BSD-3-Clause"
] | 3 | 2016-01-26T22:19:12.000Z | 2019-07-10T02:12:38.000Z | tests/scripts/run_lookups.py | abelard2008/overlog | 8df2bb95d2e39e41dd8e30249da6bb8a1615f39f | [
"BSD-3-Clause"
] | null | null | null | tests/scripts/run_lookups.py | abelard2008/overlog | 8df2bb95d2e39e41dd8e30249da6bb8a1615f39f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
# -*- Mode: python -*-
#
# DESCRIPTION: Setup and run n chord nodes.
#
#
import getopt
import os
import sys
import time
import random
import signal
import threading
if __name__ == "__main__":
    # NOTE(review): leading whitespace was lost in this copy of the file;
    # the block structure below is reconstructed and should be verified
    # against the original (Python 2) script. parse_cmdline, print_usage,
    # run_lookup and log are defined elsewhere in the file.
    try:
        flags, args = parse_cmdline(sys.argv)
    except:
        print_usage()
        sys.exit(3)

    # Require a binary, a log directory and at least one IP, plus a
    # non-zero port.
    if len(args) < 3 or not int(flags["port"]):
        print_usage()
        sys.exit(3)

    seed = int(flags["seed"])
    port = int(flags["port"])
    vantages = int(flags["vantages"])
    sleep_time = int(flags["sleep_time"])
    ips = args[2:]
    print "IPS: ", ips

    # Issue one lookup per vantage point, forever, pausing between rounds.
    while 1:
        try:
            for v in range(vantages):
                # Single-IP deployments appear to stack vantage points on
                # consecutive ports; otherwise one vantage point per IP --
                # TODO confirm against the deployment setup.
                if len(ips) == 1: run_lookup(args[0], seed, "%s:%s" % (ips[0], int(port)+int(v)), args[1]+"/lookups.log")
                else: run_lookup(args[0], seed, "%s:%s" % (ips[v], port), args[1]+"/lookups.log")
                seed += 1
            time.sleep(sleep_time)
        except:
            # Log and keep looping; a single failed round must not stop the
            # long-running measurement.
            print >> log, "EXCEPTION WHILE LOOP: %s\n" % str(sys.exc_info()[:2])
e6692d7fe75e939ec528720c041175b24637e974 | 1,722 | py | Python | src/tests/test_task_2_4.py | Python-course/Python-course | 59de0ef9928aeaa5dd185ceaafa334eb8e719217 | [
"MIT"
] | null | null | null | src/tests/test_task_2_4.py | Python-course/Python-course | 59de0ef9928aeaa5dd185ceaafa334eb8e719217 | [
"MIT"
] | null | null | null | src/tests/test_task_2_4.py | Python-course/Python-course | 59de0ef9928aeaa5dd185ceaafa334eb8e719217 | [
"MIT"
] | null | null | null | """
2.4.
"""
from unittest import TestCase, main
from fractions import Fraction
from tasks import task_2_4
if __name__ == "__main__":
main(verbosity=2)
| 28.7 | 103 | 0.577236 |
e6693d31028174fac6a03f7991d1cc9f5830e4f5 | 1,007 | py | Python | aioweb_auth/helpers.py | kreopt/aioweb_auth | e6a982296b52fc2068dd09afb0827dab527ef9b7 | [
"MIT"
] | null | null | null | aioweb_auth/helpers.py | kreopt/aioweb_auth | e6a982296b52fc2068dd09afb0827dab527ef9b7 | [
"MIT"
] | null | null | null | aioweb_auth/helpers.py | kreopt/aioweb_auth | e6a982296b52fc2068dd09afb0827dab527ef9b7 | [
"MIT"
] | null | null | null | from aiohttp import web
from aiohttp_security import authorized_userid
from aioweb.conf import settings
| 34.724138 | 70 | 0.725919 |
e669828a1fd8d946f628655596de52579956c2b4 | 442 | py | Python | Leetcode/560-Subarray_Sum.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/560-Subarray_Sum.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/560-Subarray_Sum.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null |
a = Solution()
print(a.subarraySum([1, 2, 1, 2], 3))
| 22.1 | 39 | 0.466063 |
c6fd9ed01bdcac2a90cc2cff054eefd30d07deb0 | 3,901 | py | Python | functions/aou/tests/upload_test_files.py | broadinstitute/wfl | 1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2 | [
"BSD-3-Clause"
] | 15 | 2020-03-04T17:30:25.000Z | 2022-03-09T14:57:26.000Z | functions/aou/tests/upload_test_files.py | broadinstitute/wfl | 1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2 | [
"BSD-3-Clause"
] | 184 | 2020-03-06T20:55:15.000Z | 2022-03-15T18:24:57.000Z | functions/aou/tests/upload_test_files.py | broadinstitute/wfl | 1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2 | [
"BSD-3-Clause"
] | 2 | 2020-07-08T19:16:26.000Z | 2020-07-10T18:47:30.000Z | """ Helper script that copies all of the files for an arrays sample into the dev aou input bucket. This will trigger
the submit_aou_workload cloud function for each file. When all files have been uploaded, it will launch an arrays
workflow via the workflow launcher (but only if a workflow with that chipwell barcode & analysis version has not
been run before).
Usage: python upload_test_files.py -b <bucket>
"""
import argparse
import json
import random
import sys
import subprocess
import tempfile
arrays_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/HumanExome-12v1-1_A/"
arrays_metadata_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/metadata/HumanExome-12v1-1_A/"
if __name__ == '__main__':
    # Command-line interface for the uploader.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-b",
        "--bucket",
        dest="bucket",
        default="dev-aou-arrays-input",
        help="The upload destination bucket."
    )
    parser.add_argument(
        "-p",
        "--prod",
        action="store_true",
        help="Use infrastructure in broad-aou rather than broad-gotc-dev."
    )
    args = parser.parse_args()
    # main() is defined elsewhere in this file (not visible in this fragment).
    main(args.bucket, args.prod)
| 47.573171 | 141 | 0.681364 |
c6fe87b224a7fdc40686930d3055375689c20f4c | 2,019 | py | Python | warp_gui.py | maciejczechowski/CarND-Advanced-Lane-Lines | 058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230 | [
"MIT"
] | null | null | null | warp_gui.py | maciejczechowski/CarND-Advanced-Lane-Lines | 058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230 | [
"MIT"
] | null | null | null | warp_gui.py | maciejczechowski/CarND-Advanced-Lane-Lines | 058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from src import lane_finder as lf
from src import parameters
import argparse
# Command-line interface: the only argument is the image to visualize.
parser = argparse.ArgumentParser(description='Visualizes the warp transform.')
parser.add_argument('filename')
args = parser.parse_args()

# Load the image and the tunable lane-finder parameters.
# NOTE(review): cv2.imread returns None on a failed read; no check here.
image = cv2.imread(args.filename)
params = parameters.LaneFinderParams()

# WarpFinder is defined elsewhere in this file (not visible in this fragment).
thresh = WarpFinder(image, params.warp_horizon, params.warp_x1)
| 25.884615 | 103 | 0.602278 |
05007036c73f4b4b153318ac832ce22662ff0e07 | 2,041 | py | Python | election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py | dkupsh/stvote | dbe906681a171c5654341b93dc0fb5b0208cfd33 | [
"MIT"
] | null | null | null | election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py | dkupsh/stvote | dbe906681a171c5654341b93dc0fb5b0208cfd33 | [
"MIT"
] | null | null | null | election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py | dkupsh/stvote | dbe906681a171c5654341b93dc0fb5b0208cfd33 | [
"MIT"
] | null | null | null | ###############
# Ballot Parser for UC Berkeley Results
#
# This ballot parser has been tailored to the ballot
# system used by UCB. If you use another software
# to define ballots, ensure the data returned by the
# ballot parser returns data in the following fashion:
#
# [
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# ...
# ]
#
# The race_id value should correspond to the value
# specified in the configuration file.
#
# Each list identified by the race_id should be in
# voting-choice order, where the first candidate
# within the list corresponds to the ballot's first
# choice vote.
#
# The candidate_id should correspond to the value
# returned by the election candidate parser.
#
# Last Modified: April 12, 2016
###############
import json
import uuid
| 26.166667 | 82 | 0.526213 |
05018611063b1ec5bb0bc5adba5e6965095d97d4 | 5,971 | py | Python | deco/__init__.py | patdex/deco | 83141719b3b68fb1e99b43384a25288aea5c3e8c | [
"MIT"
] | null | null | null | deco/__init__.py | patdex/deco | 83141719b3b68fb1e99b43384a25288aea5c3e8c | [
"MIT"
] | null | null | null | deco/__init__.py | patdex/deco | 83141719b3b68fb1e99b43384a25288aea5c3e8c | [
"MIT"
] | null | null | null | import collections
import inspect
import time
import re
# Module-level configuration flags.
disable_tracing = False
indent = True

# Bookkeeping for log-output indentation.
_log_indent = dict()


def indent_str(cnt, end=False):
    """Build the tree-style indentation prefix for one log line.

    :param cnt: nesting depth (number of '| ' segments)
    :param end: True when the current indentation level is being closed
    :return: the prefix string, or '' when indentation is disabled
    """
    if not indent:
        return ''
    marker = '\\ ' if end else '/ '
    return '| ' * cnt + marker
def _get_wrapped_method(func):
"""
get inner method if multiple decorators are used
:param func:
:return:
"""
while hasattr(func, '__wrapped__'):
func = getattr(func, '__wrapped__')
return func
def _wrap(wrapper, func):
"""
save wrapped function if multiple decorators are used
:param func:
:return:
"""
setattr(wrapper, '__wrapped__', func)
def argument_types(func):
    """Return a mapping of parameter name -> annotated type for *func*.

    Parameters without an annotation are omitted. Type names are
    reported as strings (e.g. {'x': 'int'}).

    The previous implementation regex-parsed str(signature): it crashed
    with AttributeError on zero-parameter functions (the regex found no
    match), mis-split annotations containing commas (e.g. Dict[str, int]),
    and returned names with stray surrounding whitespace. Reading the
    annotations from inspect.signature directly avoids all of that.

    :param func: any callable accepted by inspect.signature
    :return: dict of parameter name to type name string
    """
    types = dict()
    for name, param in inspect.signature(func).parameters.items():
        annotation = param.annotation
        if annotation is inspect.Parameter.empty:
            continue
        if isinstance(annotation, str):
            # String annotations (e.g. under `from __future__ import
            # annotations`) are kept as-is.
            types[name] = annotation
        else:
            types[name] = getattr(annotation, '__name__', str(annotation))
    return types
def collect_all_arguments_to_dict(func, args, kwargs):
    """Merge positional args, keyword args and defaults into one mapping.

    Produces a kwargs-like view of a call: positional values are paired
    with parameter names first, then overridden by explicit keyword
    arguments, and any parameter still missing is filled from its
    declared default.

    :param func: the callable whose signature drives the mapping
    :param args: positional arguments of the call
    :param kwargs: keyword arguments of the call
    :return: _MyOrderedDict of parameter name -> value
    """
    signature = inspect.signature(func)
    names = list(signature.parameters)
    merged = _MyOrderedDict()
    # Positional values, in declaration order.
    for name, value in zip(names, args):
        merged[name] = value
    # Explicit keyword arguments take precedence over positionals.
    for name in names:
        if name in kwargs:
            merged[name] = kwargs[name]
    # Anything still unset falls back to the declared default (this is
    # Parameter.empty for parameters without one, matching the original).
    for name, param in signature.parameters.items():
        if name not in merged:
            merged[name] = param.default
    return merged
def cast_std_arguments(func):
    """
    cast arguments with standard and defined type
    :param func:
    :return:
    """
    # NOTE(review): `wrapper` is not defined anywhere in this fragment --
    # the inner wrapper function appears to have been lost from this copy;
    # as written, calling cast_std_arguments raises NameError. Restore the
    # decorator body before using this.
    _wrap(wrapper, func)
    return wrapper
| 26.420354 | 117 | 0.601574 |
0501d436e365fc40c731e765ab901eb50645cb02 | 1,489 | py | Python | main.py | ytyaru/Hatena.WebSite.Login.201703040757 | 11ffc5549398478146a9966189e06cf535b34092 | [
"CC0-1.0"
] | null | null | null | main.py | ytyaru/Hatena.WebSite.Login.201703040757 | 11ffc5549398478146a9966189e06cf535b34092 | [
"CC0-1.0"
] | null | null | null | main.py | ytyaru/Hatena.WebSite.Login.201703040757 | 11ffc5549398478146a9966189e06cf535b34092 | [
"CC0-1.0"
] | null | null | null | #!python3
#encoding:utf-8
from urllib.request import build_opener, HTTPCookieProcessor
from urllib.parse import urlencode
from http.cookiejar import CookieJar
import pprint
import dataset
if __name__ == '__main__':
    # HatenaSite is defined elsewhere in this file (not visible in this
    # fragment); it is constructed with the path of a local accounts
    # SQLite database and then asked to log in the given user.
    hatena_id = 'ytyaru'
    client = HatenaSite(
        path_hatena_accounts_sqlite3 = "meta_Hatena.Accounts.sqlite3"
    )
    client.login(hatena_id)
| 33.088889 | 110 | 0.646071 |
05031a4fb3f43f4e15927e78ef77f8dcad229be0 | 767 | py | Python | csf_tz/fleet_management/doctype/vehicle/vehicle.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | 4 | 2021-09-24T12:30:32.000Z | 2022-03-19T14:55:34.000Z | csf_tz/fleet_management/doctype/vehicle/vehicle.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | null | null | null | csf_tz/fleet_management/doctype/vehicle/vehicle.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | 7 | 2021-09-24T12:30:33.000Z | 2022-03-21T11:34:02.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Bravo Logistics and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
| 25.566667 | 62 | 0.741851 |
05067ca48cd1bf1cfe7a6e17e6b2e4d00c579d5b | 3,780 | py | Python | app/mysql2json.py | ToHanwei/CORD | 09f75b136431222ec945b2ddd6798ae805ec332e | [
"MIT"
] | null | null | null | app/mysql2json.py | ToHanwei/CORD | 09f75b136431222ec945b2ddd6798ae805ec332e | [
"MIT"
] | null | null | null | app/mysql2json.py | ToHanwei/CORD | 09f75b136431222ec945b2ddd6798ae805ec332e | [
"MIT"
] | null | null | null | #!coding:utf-8
import json
import pymysql
import pandas as pd
| 30.983607 | 100 | 0.552381 |
0506e61a9ace0c2d5bc6f23b2cc7e615718656a8 | 3,583 | py | Python | dict2xml.py | lucasicf/dict2xml | 7421414c71e1d95a4d60e84f942379edb4df2df5 | [
"BSD-3-Clause"
] | 12 | 2015-07-12T20:07:10.000Z | 2022-02-10T05:16:14.000Z | dict2xml.py | lucasicf/dict2xml | 7421414c71e1d95a4d60e84f942379edb4df2df5 | [
"BSD-3-Clause"
] | null | null | null | dict2xml.py | lucasicf/dict2xml | 7421414c71e1d95a4d60e84f942379edb4df2df5 | [
"BSD-3-Clause"
] | 7 | 2015-05-21T09:39:52.000Z | 2021-02-28T22:01:15.000Z | # -*- coding: utf-8 -*-
from xml.dom import minidom
import re
# Thrown on any dictionary error
# Sort a dict's (key, value) pairs using _dict_sort_key, which is defined
# elsewhere in this module. NOTE(review): dict.iteritems() is Python 2
# only; this module will not run unchanged on Python 3.
_iter_dict_sorted = lambda dic: sorted(
    dic.iteritems(), key=(lambda key_value: _dict_sort_key(key_value))
)

# Tag constants -- presumably distinguishing node roles during the
# dict-to-XML conversion; confirm against the stripped core of this module.
DATATYPE_ROOT_DICT = 0
DATATYPE_KEY = 1
DATATYPE_ATTR = 2
DATATYPE_ATTRS = 3
# Recursive core function
| 34.786408 | 88 | 0.6542 |
05071a1ee7761ffc57199c77291dcea3601a853d | 1,247 | py | Python | 06_rotation_transformation.py | Mathanraj-Sharma/OpenCV_Sample_Codes | a20710fa05d7817b9c4c78acc64b852b0cde7583 | [
"Apache-2.0"
] | 1 | 2019-11-23T06:52:58.000Z | 2019-11-23T06:52:58.000Z | 06_rotation_transformation.py | Mathanraj-Sharma/OpenCV_Sample_Codes | a20710fa05d7817b9c4c78acc64b852b0cde7583 | [
"Apache-2.0"
] | null | null | null | 06_rotation_transformation.py | Mathanraj-Sharma/OpenCV_Sample_Codes | a20710fa05d7817b9c4c78acc64b852b0cde7583 | [
"Apache-2.0"
] | 1 | 2019-11-23T11:18:37.000Z | 2019-11-23T11:18:37.000Z | import cv2
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument('-i', required = True, help = 'Enter the path of Image')
args = vars(ap.parse_args())
image = cv2.imread(args['i'])
def rotate(image, point, angle, scale):
    """Rotate *image* around *point* by *angle* degrees, scaled by *scale*.

    A scale of 1.0 keeps the original size, 2.0 doubles it, and so on.
    The output canvas keeps the input's width and height.

    :param image: source image (numpy array as returned by cv2.imread)
    :param point: (x, y) pivot of the rotation
    :param angle: rotation angle in degrees
    :param scale: isotropic scale factor
    :return: the rotated image
    """
    rotation_matrix = cv2.getRotationMatrix2D(point, angle, scale)
    output_size = (image.shape[1], image.shape[0])
    return cv2.warpAffine(image, rotation_matrix, output_size)
#tranforming image with respect to its center and through -45*
center = (image.shape[1]//2, image.shape[0]//2)
angel = -45
cv2.imshow('Original Image', image)
cv2.waitKey(0)
rotated_image = rotate(image, center, angel, 1.0)
cv2.imshow('Rotated Image', rotated_image)
cv2.waitKey(0)
wheel(image, center) | 22.267857 | 75 | 0.690457 |
0507429bfe72a62ce8131002bc3538a2af143672 | 3,972 | py | Python | feichangzun/directGetFlightData.py | Octoberr/weizhuangIP | d37e82df35d0b8b84bfa38f3a487fd81ab969070 | [
"Apache-2.0"
] | null | null | null | feichangzun/directGetFlightData.py | Octoberr/weizhuangIP | d37e82df35d0b8b84bfa38f3a487fd81ab969070 | [
"Apache-2.0"
] | null | null | null | feichangzun/directGetFlightData.py | Octoberr/weizhuangIP | d37e82df35d0b8b84bfa38f3a487fd81ab969070 | [
"Apache-2.0"
] | null | null | null | import getflightdata
import requests
from bs4 import BeautifulSoup
import random
import json
import pymongo
import datetime
from Utils.config import config
# import config
mongoConf = config['mongo']
feichangzun = 'http://www.variflight.com/flight/fnum/'
feichangzunhouzui = '.html?AE71649A58c77&fdate='
#
# flight = '3U3048'
# flightdate ='2017-08-02'
#
# jsodater = getDirectFlight(flight, flightdate)
# print(jsodater)
| 34.842105 | 108 | 0.629909 |
0507ce8c6b29b5cd6c3e947a8e5f6cea05343e0b | 2,402 | py | Python | face/face-30sec.py | eric-erki/ai-smarthome | ca7316ebe72b0ad26f0b59e3186426633807cac8 | [
"BSD-2-Clause"
] | 28 | 2018-08-09T13:10:34.000Z | 2022-01-07T13:39:31.000Z | face/face-30sec.py | eric-erki/ai-smarthome | ca7316ebe72b0ad26f0b59e3186426633807cac8 | [
"BSD-2-Clause"
] | 4 | 2018-08-09T13:18:12.000Z | 2021-04-06T19:04:54.000Z | face/face-30sec.py | eric-erki/ai-smarthome | ca7316ebe72b0ad26f0b59e3186426633807cac8 | [
"BSD-2-Clause"
] | 15 | 2018-12-17T09:17:28.000Z | 2021-03-02T11:25:05.000Z | import numpy as np
import cv2
import face_recognition
import time
# Load a sample picture and learn how to recognize it.
me_image = face_recognition.load_image_file("known/joakim.png")
me_face_encoding = face_recognition.face_encodings(me_image)[0]
known_face_encodings = [
me_face_encoding,
]
known_face_names = [
"Joakim Eriksson",
]
cap = cv2.VideoCapture(0)
photo_time = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
print(face_locations)
name = "Unknown"
match = False
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
match = True
cut = frame[top:bottom, left:right]
cv2.rectangle(frame,(left, top), (right, bottom),(0,255,0),3)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, name,(left, top - 5), font, 0.7, (255,255,255),2,cv2.LINE_AA)
cv2.imshow('cut', cut)
print("Name: ", name)
if match == False:
print("no match")
# Display the resulting frame
cv2.imshow('frame', frame)
if time.time() - photo_time > 30.0:
print("the photo is old...")
known_face_encodings = known_face_encodings[0:1]
known_face_names = known_face_names[0:1]
key = cv2.waitKey(1) & 0xff
if key == ord('q'):
break
if key == ord('p'):
if(len(known_face_encodings) < 2):
print("Storing new encoding")
photo_time = time.time()
known_face_encodings = known_face_encodings + [face_encoding]
known_face_names = known_face_names + ["Newly Photoed"]
if key == ord('o'):
if name == "Newly Photoed":
print("Door will open for you!")
else:
print("Door is closed for you!")
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 32.459459 | 89 | 0.651957 |
0509cd66d3399c704328d6c31b4db43646200a86 | 8,576 | py | Python | sdtables/sdtables.py | cunningr/sdtables | a698acbeca30e5451ca3285292f089109b360a04 | [
"MIT"
] | null | null | null | sdtables/sdtables.py | cunningr/sdtables | a698acbeca30e5451ca3285292f089109b360a04 | [
"MIT"
] | 1 | 2020-06-25T08:55:00.000Z | 2021-03-22T12:54:48.000Z | sdtables/sdtables.py | cunningr/sdtables | a698acbeca30e5451ca3285292f089109b360a04 | [
"MIT"
] | 1 | 2020-07-03T10:18:58.000Z | 2020-07-03T10:18:58.000Z | # coding: utf-8
"""
xlTables - Load/generate table data with Excel
from python dictionary structures
cunningr - 2020
Requires openpyxl >= 2.6.2, jsonschema
"""
import os
import openpyxl
from openpyxl import Workbook
from sdtables import xlTables
from tabulate import tabulate
# Retrieve a list of schema names under a given worksheet
# list(filter(lambda item: "network_settings" in item.keys(), meme.schemanames)) | 40.262911 | 160 | 0.649021 |
050a2b44b8dd6b46945c916a81b519efe47b76fb | 2,473 | py | Python | solutions/dropbox/compare_functions.py | roman-kachanovsky/checkio | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | 1 | 2017-02-07T19:50:52.000Z | 2017-02-07T19:50:52.000Z | solutions/dropbox/compare_functions.py | roman-kachanovsky/checkio-python | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | null | null | null | solutions/dropbox/compare_functions.py | roman-kachanovsky/checkio-python | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | null | null | null | """ --- Compare Functions --- Simple
Two functions f and g are provided as inputs to checkio.
The first function f is the primary function and the second
function g is the backup. Use your coding skills to return
a third function h which returns the same output as f unless
f raises an exception or returns None. In this case h should
return the same output as g. If both f and g raise exceptions
or return None, then h should return None.
As a second output, h should return a status string indicating
whether the function values are the same and if either function
erred. A function errs if it raises an exception or returns
a null value (None).
The status string should be set to: "same" if f and g return
the same output and neither errs, "different" if f and g return
different outputs and neither errs, "f_error" if f errs but not g,
"g_error" if g errs but not f, or "both_error" if both err.
Input: Two functions: f (primary) and g (backup).
Output: A function h which takes arbitrary inputs
and returns a two-tuple.
How it is used: This is an exercise in working with functions
as first class objects.
Precondition: hasattr(f,'__call__');
hasattr(g,'__call__')
"""
| 32.973333 | 78 | 0.595633 |
050b0bea353171a3c51a6088825350acb0d9291f | 3,402 | py | Python | yawndb/sync.py | selectel/python-yawndb | 6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6 | [
"MIT"
] | null | null | null | yawndb/sync.py | selectel/python-yawndb | 6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6 | [
"MIT"
] | null | null | null | yawndb/sync.py | selectel/python-yawndb | 6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6 | [
"MIT"
] | null | null | null | """
yawndb.sync
~~~~~~~~~~~
Sync YAWNDB transport. Use standart socket object methods.
"""
import time
import json
import socket
import urllib2
import logging
from collections import deque
from yawndb._base import _YAWNDBBase
_logger = logging.getLogger(__name__)
| 29.076923 | 79 | 0.546149 |
050b23d1c21cc11db93c4c94dba0b845a1f1693e | 1,209 | py | Python | setup.py | ofek/depq | 370e3ad503d3e9cedc3c49dc64add393ba945764 | [
"MIT"
] | 1 | 2019-02-12T13:17:56.000Z | 2019-02-12T13:17:56.000Z | setup.py | ofek/depq | 370e3ad503d3e9cedc3c49dc64add393ba945764 | [
"MIT"
] | 4 | 2016-12-10T20:17:38.000Z | 2017-06-16T19:02:47.000Z | setup.py | ofek/depq | 370e3ad503d3e9cedc3c49dc64add393ba945764 | [
"MIT"
] | 5 | 2016-12-10T20:13:42.000Z | 2020-09-28T09:02:10.000Z | from setuptools import setup, find_packages
with open('README.rst', 'r') as infile:
read_me = infile.read()
setup(
packages=find_packages(),
name='depq',
version='1.5.5',
description='Double-ended priority queue',
long_description=read_me,
author='Ofek Lev',
author_email='ofekmeister@gmail.com',
maintainer='Ofek Lev',
maintainer_email='ofekmeister@gmail.com',
url='https://github.com/Ofekmeister/depq',
download_url='https://github.com/Ofekmeister/depq',
license='MIT',
platforms=None,
keywords=[
'double ended priority queue',
'depq',
'priority queue',
'data structure',
'scheduling',
'heuristic analysis',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 28.116279 | 71 | 0.612076 |
050bc5ae6e8eba8aac368023fb49c3014cb5ef03 | 880 | py | Python | tests/exact_tests/contour_tests/strategies.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | tests/exact_tests/contour_tests/strategies.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | tests/exact_tests/contour_tests/strategies.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | from hypothesis import strategies
from rithm import Fraction
from rene import MIN_CONTOUR_VERTICES_COUNT
from rene.exact import (Contour,
Point)
integers = strategies.integers()
non_zero_integers = integers.filter(bool)
scalars = (integers | strategies.fractions()
| strategies.builds(Fraction, integers, non_zero_integers)
| strategies.floats(allow_infinity=False,
allow_nan=False))
points = strategies.builds(Point, scalars, scalars)
contours_vertices = strategies.lists(points,
unique=True,
min_size=MIN_CONTOUR_VERTICES_COUNT)
invalid_count_contours_vertices = strategies.lists(
points,
unique=True,
max_size=MIN_CONTOUR_VERTICES_COUNT - 1
)
contours = strategies.builds(Contour, contours_vertices)
| 36.666667 | 73 | 0.664773 |
050d54bffebd30db922715e17f24f419261765d4 | 2,834 | py | Python | bendyprank.py | gazhay/bendyprank | 9af6b2536fb54001fe1681551362418f1ae78ed3 | [
"Unlicense"
] | null | null | null | bendyprank.py | gazhay/bendyprank | 9af6b2536fb54001fe1681551362418f1ae78ed3 | [
"Unlicense"
] | null | null | null | bendyprank.py | gazhay/bendyprank | 9af6b2536fb54001fe1681551362418f1ae78ed3 | [
"Unlicense"
] | null | null | null | /*
Bendy and the Ink Machine, BATIM, and all graphics and sounds are The Meatly
NOT AN OFFICIAL BENDY AND THE INK MACHINE PRODUCT. NOT APPROVED BY OR ASSOCIATED WITH THEMEATLY GAMES, LTD.
Code below released under GPLv2
*/
import wx
import subprocess
from random import randint
from time import sleep
IMAGE_PATH = 'Bendy.png'
WAKE_SPEAKERS = True
if __name__ == '__main__':
try:
app = wx.App(False)
if WAKE_SPEAKERS:
cmdstr = "aplay Silent.wav"
subprocess.call(cmdstr, shell=True)
cmdstr = "aplay SFX_Jumpscare_01.wav".split()
subprocess.Popen(cmdstr, stdin=None, stdout=None, stderr=None, close_fds=True)
ShapedFrame().Show()
app.MainLoop()
except KeyboardInterrupt:
exit(0)
| 34.560976 | 111 | 0.617855 |
050f7c817ca7e48958c01acf2a63b083dd36ff69 | 1,347 | py | Python | src/answer_key.py | quuu/ASCIImage | d276b9428b8b127069999ffe8e025e8e0ad43c0c | [
"MIT"
] | 2 | 2018-08-01T02:13:31.000Z | 2018-08-01T03:02:31.000Z | src/answer_key.py | quuu/Genetic-ASCIImage | d276b9428b8b127069999ffe8e025e8e0ad43c0c | [
"MIT"
] | 11 | 2018-08-12T13:28:19.000Z | 2018-09-08T19:15:30.000Z | src/answer_key.py | quuu/ASCIImage | d276b9428b8b127069999ffe8e025e8e0ad43c0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
from skimage import io
from skimage import color
from PIL import Image
#TODO Multi-thread
def avg_brightness(image_list):
"""
A list of grey scale images
"""
brightness_per_block=[]
for image in image_list:
img_shape = image.shape
img_Size = image.size
total=0
for i in range(0,img_shape[0]):
for j in range(0,img_shape[1]):
total+=image[i][j]
total/=img_Size
brightness_per_block.append(total)
return brightness_per_block
image_list = my_crop("picture.jpg",80,80,(220*220)/(80*80),"page_name")
images = make_image_list(image_list)
bright_per_block = avg_brightness(images)
print(bright_per_block)
| 26.94 | 71 | 0.626578 |
05108d99ff3259ead7d1205123464ffd5c4850a2 | 5,504 | py | Python | app.py | makerdao-data/gov-tracker | 52b7588e5c200b0af5b64a2891b276cbcc149ff1 | [
"Apache-2.0"
] | null | null | null | app.py | makerdao-data/gov-tracker | 52b7588e5c200b0af5b64a2891b276cbcc149ff1 | [
"Apache-2.0"
] | null | null | null | app.py | makerdao-data/gov-tracker | 52b7588e5c200b0af5b64a2891b276cbcc149ff1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public version #
from flask import Flask, request, jsonify
import atexit
from datetime import datetime
import csv
from io import StringIO
from werkzeug.wrappers import Response
from sqlalchemy import func
from deps import get_db
from utils.query import pull_filtered_data
from views.main_view import main_page_view, main_page_data
from views.address_views import address_page_view, address_data_view
from views.yay_views import yay_page_view, yay_data_view
from views.poll_views import poll_page_view, poll_data_view
from views.proxy_views import proxy_page_view, proxy_data_view
from views.protocol_parameters_views import parameters_page_view, parameters_data_view
from connectors.sf import sf, sf_disconnect
from models import ParameterEvent
from utils.query import pull_filtered_data
app = Flask(__name__)
app.config["JSON_SORT_KEYS"] = False
# HTML endpoints -------------------------------------------
# DATA endpoints -------------------------------------------
# @app.route("/data/protocol_parameters", methods=["GET"])
# def get_parameters_page_data():
# dataset = parameters_data_view(sf)
# return jsonify(dataset)
# cleanup tasks
def cleanup_task():
if not sf.is_closed():
sf_disconnect(sf)
print("SF connection closed.")
atexit.register(cleanup_task)
if __name__ == "__main__":
app.run(debug=False)
| 28.518135 | 117 | 0.677871 |
051130482cb3691a34b0be84581c86dd2a4ce54f | 3,280 | py | Python | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | [
"Apache-2.0"
] | null | null | null | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | [
"Apache-2.0"
] | null | null | null | open_spiel/python/mst/run_mst.py | BrandonKates/open_spiel | f820abe9bdfdbc4bd45c2e933439393d4ad3622a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import app
from absl import flags
import numpy as np
from scipy.spatial import distance_matrix
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "mst", "Name of the game")
flags.DEFINE_integer("num_nodes", None, "Number of nodes")
flags.DEFINE_string("load_state", None,
"A file containing a string to load a specific state")
if __name__ == "__main__":
app.run(main)
| 34.166667 | 129 | 0.695122 |
0511c5e889756be6d1498e4e5630fe4522e1af10 | 176 | py | Python | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | [
"MIT"
] | 1 | 2021-09-20T21:48:11.000Z | 2021-09-20T21:48:11.000Z | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | [
"MIT"
] | null | null | null | dzien1/p1_start.py | angelm1974/przyklady | ee0483ca69b789270641f3cd6c945b0cd0afbea9 | [
"MIT"
] | null | null | null |
from PyQt6.QtWidgets import QApplication, QWidget
import sys # komentarz
app = QApplication(sys.argv) # ([]) -bez argumentw
window = QWidget()
window.show()
app.exec()
| 14.666667 | 52 | 0.715909 |
0511cceb2ee442a4c70aeab49d84be0233b7fcac | 10,952 | py | Python | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | 1 | 2020-09-18T12:05:13.000Z | 2020-09-18T12:05:13.000Z | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | null | null | null | classify.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import subprocess
from typing import List, Sequence, Text
import textwrap
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
import data
import models
import ga
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained-model-name", default="roberta-base")
parser.add_argument("--label-col", default="namecalling")
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser("train")
train_parser.add_argument("model_path")
train_parser.add_argument("--train-data", dest="train_data_paths", nargs='+',
metavar="PATH", required=True)
train_parser.add_argument("--dev-data", dest="dev_data_paths", nargs='+',
metavar="PATH", required=True)
train_parser.add_argument("--qsub", action="store_true")
train_parser.add_argument("--time")
train_parser.add_argument("--no-gpu", dest="use_gpu", action="store_false")
train_parser.add_argument(
"--singularity-image",
default="/xdisk/bethard/hpc-ml_centos7-python3.7-transformers3.2.0.sif")
train_parser.add_argument("--n-rows", type=int)
train_parser.add_argument("--learning-rate", type=float, default=3e-5)
train_parser.add_argument("--batch-size", type=int, default=1)
train_parser.add_argument("--grad-accum-steps", type=int, default=1)
train_parser.add_argument("--n-epochs", type=int, default=10)
train_parser.set_defaults(func=train)
test_parser = subparsers.add_parser("test")
test_parser.add_argument("model_paths", nargs="+", metavar="model_path")
test_parser.add_argument("--test-data", dest="test_data_paths", nargs='+',
metavar="PATH", required=True)
test_parser.add_argument("--n-rows", type=int)
test_parser.add_argument("--batch-size", type=int, default=1)
test_parser.add_argument("--verbose", action="store_true")
test_parser.set_defaults(func=test)
predict_parser = subparsers.add_parser("predict")
predict_parser.add_argument("model_path")
predict_parser.add_argument("input_path")
predict_parser.add_argument("output_path")
predict_parser.add_argument("--text-col", default="tweet_text")
predict_parser.add_argument("--output-scores", action="store_true")
predict_parser.add_argument("--n-rows", type=int)
predict_parser.add_argument("--batch-size", type=int, default=1)
predict_parser.set_defaults(func=predict_csv)
args = parser.parse_args()
kwargs = vars(args)
kwargs.pop("func")(**kwargs)
| 38.975089 | 137 | 0.603543 |
051260c977d73217e66d8ef66398ae1931f7b899 | 814 | py | Python | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | [
"MIT"
] | null | null | null | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | [
"MIT"
] | null | null | null | p2/src/prove.py | ruimgf/AIDS | 72bc808ef5e21113f635f34581d18c0dc2c8c7da | [
"MIT"
] | null | null | null | import sys
from kb import *
#receives a list of setences if it is in test mode
if __name__ == '__main__':
print(main())
| 25.4375 | 69 | 0.5086 |
0513d4822718f78bada1c9c056ce41bfe1fb2ffe | 472 | py | Python | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | [
"ISC"
] | 387 | 2015-05-26T08:23:52.000Z | 2022-03-18T11:10:44.000Z | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | [
"ISC"
] | 138 | 2015-05-26T16:20:25.000Z | 2021-09-22T18:07:24.000Z | rest_framework_tracking/mixins.py | Zagrebelin/drf-tracking | 5fe102439e7baaffc886253e39c21dd96481391f | [
"ISC"
] | 121 | 2015-09-25T16:53:48.000Z | 2021-08-18T12:42:04.000Z | from .base_mixins import BaseLoggingMixin
from .models import APIRequestLog
| 21.454545 | 49 | 0.658898 |
0514df3dee36ec46f44f8239441b8f0b35d0374b | 758 | py | Python | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | [
"MIT"
] | null | null | null | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | [
"MIT"
] | null | null | null | stub_extractor/util.py | srittau/stub-extractor | f161c10a2f041a74040a04e00e0b0d33cb94a0fe | [
"MIT"
] | null | null | null | from typing import Iterator, List, Optional, Sequence, Tuple, TypeVar
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
def rzip_longest(
seq1: Sequence[_T1], seq2: Sequence[_T2]
) -> Iterator[Tuple[_T1, Optional[_T2]]]:
"""Make an iterator over tuples, with elements from the input sequences.
If the second sequence is shorter than the first by N elements,
the second element of the first N tuples is set to None.
>>> list(rzip_longest([1,2,3], ["a", "b"]))
[(1, None), (2, "a"), (3, "b")]
"""
len_diff = len(seq1) - len(seq2)
if len_diff < 0:
raise ValueError("seq2 can't be longer than seq1")
padded_seq2: List[Optional[_T2]] = [None] * len_diff
padded_seq2.extend(seq2)
return zip(seq1, padded_seq2)
| 30.32 | 76 | 0.647757 |
05167a6a94f7c83fc6497eed1db4333dd9bd4308 | 12,980 | py | Python | weibospider.py | Chiang97912/WeiboSpider | 2c426d2dfa8c6d418b66bd54002c292194872c88 | [
"MIT"
] | null | null | null | weibospider.py | Chiang97912/WeiboSpider | 2c426d2dfa8c6d418b66bd54002c292194872c88 | [
"MIT"
] | null | null | null | weibospider.py | Chiang97912/WeiboSpider | 2c426d2dfa8c6d418b66bd54002c292194872c88 | [
"MIT"
] | 1 | 2021-05-07T06:35:22.000Z | 2021-05-07T06:35:22.000Z | # -*- coding: UTF-8 -*-
import os
import json
import time
import rsa
import base64
import urllib
import binascii
import traceback
import requests
import pandas as pd
from lxml import etree
from datetime import datetime
def main():
blacklist_file = 'blacklist.txt' #
config = {
'query': '', #
'start_month': 1, #
'start_day': 1, #
'username': 'xxxxxxxxxxxx', #
'password': 'xxxxxxxxxxxx', #
}
years = ['2018', '2019']
config = Config(**config)
if not os.path.exists(blacklist_file):
open(blacklist_file, 'w').close() #
if not os.path.exists('./data'):
os.makedirs('./data')
for year in years:
for month in range(config.start_month, 13):
for day in range(config.start_day, 32):
with open(blacklist_file) as f:
blacklist = [line.strip() for line in f.readlines()]
if '{}-{}-{}'.format(year, month, day) in blacklist:
continue
config.year = year
config.month = month
config.day = day
ws = WeiboSpider(config)
ws.start()
with open(blacklist_file, 'a') as f:
f.write('{}-{}-{}\n'.format(year, month, day))
print("")
if __name__ == '__main__':
main()
| 39.938462 | 170 | 0.469106 |
0516e5d4fd543c80d6f16ba01f4a7586b969a893 | 3,783 | py | Python | spoty/commands/get_second_group.py | dy-sh/spoty | 431a392707c8754da713871e0e7747bcc4417274 | [
"MIT"
] | 2 | 2022-02-01T16:49:32.000Z | 2022-03-02T18:30:31.000Z | spoty/commands/get_second_group.py | dy-sh/spoty | 431a392707c8754da713871e0e7747bcc4417274 | [
"MIT"
] | null | null | null | spoty/commands/get_second_group.py | dy-sh/spoty | 431a392707c8754da713871e0e7747bcc4417274 | [
"MIT"
] | null | null | null | from spoty.commands.first_list_commands import \
count_command, \
export_command, \
print_command
from spoty.commands.second_list_commands import \
filter_second_group, \
find_duplicates_second_command,\
find_deezer_second_group, \
find_spotify_second_group
from spoty.commands import get_group
from spoty.utils import SpotyContext
import click
get_second.add_command(filter_second_group.filter_second)
get_second.add_command(count_command.count_tracks)
get_second.add_command(print_command.print_tracks)
get_second.add_command(export_command.export_tracks)
get_second.add_command(find_duplicates_second_command.find_duplicates_second)
get_second.add_command(find_deezer_second_group.find_deezer)
get_second.add_command(find_spotify_second_group.find_spotify) | 50.44 | 187 | 0.641819 |
05195432ec2c13cb2bd586385c70cb0f3fcc21ab | 19,804 | py | Python | jenkins_job_wrecker/modules/triggers.py | romanek-adam/jenkins-job-wrecker | db9379d852afe8b621c7688d34fd057d916de8f2 | [
"MIT"
] | 1 | 2020-06-05T06:36:50.000Z | 2020-06-05T06:36:50.000Z | jenkins_job_wrecker/modules/triggers.py | romanek-adam/jenkins-job-wrecker | db9379d852afe8b621c7688d34fd057d916de8f2 | [
"MIT"
] | 15 | 2020-05-18T07:37:06.000Z | 2020-08-24T09:16:08.000Z | jenkins_job_wrecker/modules/triggers.py | romanek-adam/jenkins-job-wrecker | db9379d852afe8b621c7688d34fd057d916de8f2 | [
"MIT"
] | null | null | null | # encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, Mapper
| 51.572917 | 119 | 0.569683 |
051d3484ddd9be778a5ba470d36fedfb5de63393 | 4,097 | py | Python | tools/clean-parallel.py | ZJaume/clean | 0c3c6bab8bf173687ec0bba6908097ef7bc38db2 | [
"MIT"
] | 1 | 2021-06-02T03:08:32.000Z | 2021-06-02T03:08:32.000Z | tools/clean-parallel.py | ZJaume/clean | 0c3c6bab8bf173687ec0bba6908097ef7bc38db2 | [
"MIT"
] | 1 | 2021-05-30T22:55:44.000Z | 2021-06-02T08:47:56.000Z | tools/clean-parallel.py | ZJaume/clean | 0c3c6bab8bf173687ec0bba6908097ef7bc38db2 | [
"MIT"
] | 2 | 2021-06-01T19:07:43.000Z | 2021-06-03T11:03:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import regex
import argparse
# The variables below need to be adjusted for a language pair and dataset.
# To add a new language, define the list of alpha characters in the dict below.
MIN_LENGTH = 1 # minimum number of words in a sentence
MAX_LENGTH = 200 # maximum number of words in a sentence
RATIO_LENGTH = 0.3 # maximum length difference between the source and target sentence
RATIO_ALPHA_WORDS = 0.4 # minimum fraction of "real" words in a source sentence
RATIO_ALPHA_CHARS = 0.5 # minimum fraction of alpha characters in a source sentence
CHARS = {
'bg': r'[kas]',
'cs': r'[a-z]',
'ca': r'[a-z]',
'da': r'[a-z]',
'de': r'[a-z]',
'en': r'[a-z]',
'el': r'[a-z]',
'es': r'[a-z]',
'et': r'[a-z]',
'eu': r'[a-z]',
'fi': r'[a-z]',
'fr': r'[a-z]',
'ga': r'[abcdefghilmnoprstu]',
'gl': r'[a-z]',
'hr': r'[abcdefghijklmnoprstuvz]',
'hu': r'[a-z]',
'is': r'[abdefghijklmnoprstuvxy]',
'it': r'[a-z]',
'lt': r'[abcdefghiyjklmnoprstuvz]',
'lv': r'[abcdefghijklmnoprstuvz]',
'mt': r'[abdefghiiejklmnopqrstuvwxz]',
'nb': r'[a-z]',
'nl': r'[a-z]',
'no': r'[a-z]',
'nn': r'[a-z]',
'pl': r'[a-z]',
'ro': r'[a-z]',
'sk': r'[a-z]',
'sl': r'[abcdefghijklmnoprstuvz]',
'sv': r'[a-z]',
}
middle_period = regex.compile(r'\w+[\.\?\!] \p{Lu}\w*,? ')
if __name__ == "__main__":
main()
| 32.515873 | 96 | 0.640469 |
051e064cf78fe1b3efaa1e563322f576984f94e9 | 24,624 | py | Python | rubika/client.py | Bahman-Ahmadi/rubika | 924e82434f9468cadf481af7b29695f642af7e99 | [
"MIT"
] | 23 | 2021-12-06T09:54:01.000Z | 2022-03-31T19:44:29.000Z | rubika/client.py | Bahman-Ahmadi/rubika | 924e82434f9468cadf481af7b29695f642af7e99 | [
"MIT"
] | 4 | 2022-01-08T19:27:40.000Z | 2022-03-30T13:18:23.000Z | rubika/client.py | Bahman-Ahmadi/rubika | 924e82434f9468cadf481af7b29695f642af7e99 | [
"MIT"
] | 13 | 2021-12-08T14:18:39.000Z | 2022-03-30T13:20:37.000Z | from pathlib import Path
from requests import post
from random import randint
from json import loads, dumps
import random, datetime, rubika.encryption
# because should be exist !
adminsAccess = {
"pin":"PinMessages",
"newAdmin":"SetAdmin",
"editInfo":"ChangeInfo",
"banMember":"BanMember",
"changeLink":"SetJoinLink",
"changeMembersAccess":"SetMemberAccess",
"deleteMessages":"DeleteGlobalAllMessages"
}
usersAccess = {
"addMember":"AddMember",
"viewAdmins":"ViewAdmins",
"viewMembers":"ViewMembers",
"sendMessage":"SendMessages"
} | 29.349225 | 2,034 | 0.634665 |
051f4dab5a5f1bed25333ea9cb6d58c8c48a834b | 424 | py | Python | lpyHardway/logic/ex2.py | oreanroy/learn_modules | fb1debc612940b65c409d8f5b35a3b4e16e67494 | [
"MIT"
] | null | null | null | lpyHardway/logic/ex2.py | oreanroy/learn_modules | fb1debc612940b65c409d8f5b35a3b4e16e67494 | [
"MIT"
] | 17 | 2019-12-01T16:56:29.000Z | 2022-03-02T04:49:51.000Z | lpyHardway/logic/ex2.py | oreanroy/learn_modules | fb1debc612940b65c409d8f5b35a3b4e16e67494 | [
"MIT"
] | 1 | 2019-09-28T00:43:54.000Z | 2019-09-28T00:43:54.000Z | people = 30
cars = 40
buses = 15
if cars > people:
print "We should take the cars."
elif cars < people:
print "we should not take the cars."
else:
print "we can't decide."
if buses > cars:
print " That's too many buses"
elif buses < cars:
print " Maybe we could take the bus."
else:
print "we stil can't decide."
if people > buses:
print " Alright lets take the buses."
else:
print "Fine, let's stay home then."
| 17.666667 | 38 | 0.676887 |
0520b1fd12c6c807e99e2585c0ad990c4a9c1185 | 3,001 | py | Python | undercrawler/crazy_form_submitter.py | abael/ScrapyGenericCrawler | 9d210fb862a7fddd58c548847d8f5c2d72eae5c1 | [
"MIT"
] | 88 | 2016-04-07T18:41:19.000Z | 2022-01-03T12:18:44.000Z | undercrawler/crazy_form_submitter.py | shekar9160/generic_scraper | e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d | [
"MIT"
] | 61 | 2016-04-06T18:31:45.000Z | 2021-07-15T12:10:23.000Z | undercrawler/crazy_form_submitter.py | shekar9160/generic_scraper | e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d | [
"MIT"
] | 31 | 2016-04-14T07:49:49.000Z | 2021-08-08T17:07:36.000Z | import logging
import random
import string
from scrapy.http.request.form import _get_inputs as get_form_data
logger = logging.getLogger(__name__)
SEARCH_TERMS = list(string.ascii_lowercase) + list('123456789 *%.?')
| 40.554054 | 78 | 0.638121 |
0520c8a0308bb129120ec328a9eacba21da937c0 | 277 | py | Python | python/pid.py | gin2018/test_tools | 46d911da6719ae2069ed4e87bdcc6922c21459a5 | [
"MIT"
] | null | null | null | python/pid.py | gin2018/test_tools | 46d911da6719ae2069ed4e87bdcc6922c21459a5 | [
"MIT"
] | null | null | null | python/pid.py | gin2018/test_tools | 46d911da6719ae2069ed4e87bdcc6922c21459a5 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
pid_file = open("pid.txt", "w")
x = np.linspace(0, 2 * np.pi, 100)
print x
pid_file.write(x)
y1, y2 = np.sin(x), np.cos(x)
plt.plot(x, y1, label='y = sin(x)')
plt.plot(x, y2, label='y = cos(x)')
plt.legend()
plt.show()
| 16.294118 | 35 | 0.624549 |
0524ab92ab97c6f8922dd3dd0c03bf3b79b8a0ee | 921 | py | Python | libs/libssh2/libssh2.py | simont77/craft-blueprints-kde | 3c0a40923c7c8e0341ad08afde22f86bb1517ddf | [
"BSD-2-Clause"
] | null | null | null | libs/libssh2/libssh2.py | simont77/craft-blueprints-kde | 3c0a40923c7c8e0341ad08afde22f86bb1517ddf | [
"BSD-2-Clause"
] | 1 | 2020-01-10T01:06:16.000Z | 2020-01-10T01:06:16.000Z | libs/libssh2/libssh2.py | simont77/craft-blueprints-kde | 3c0a40923c7c8e0341ad08afde22f86bb1517ddf | [
"BSD-2-Clause"
] | 2 | 2020-01-02T18:22:12.000Z | 2020-08-05T13:39:21.000Z | # -*- coding: utf-8 -*-
import info
from Package.CMakePackageBase import *
| 38.375 | 108 | 0.667752 |
05260b29fa65b53dc965a1c89ebcef95a1a96d54 | 396 | py | Python | test/config_generator_test.py | jnohlgard/projector-installer | 52aeaa936aa21d9fa6aee109d78e209fa068821b | [
"Apache-2.0"
] | null | null | null | test/config_generator_test.py | jnohlgard/projector-installer | 52aeaa936aa21d9fa6aee109d78e209fa068821b | [
"Apache-2.0"
] | null | null | null | test/config_generator_test.py | jnohlgard/projector-installer | 52aeaa936aa21d9fa6aee109d78e209fa068821b | [
"Apache-2.0"
] | null | null | null | """Test config_generator.py module"""
from unittest import TestCase
from projector_installer.config_generator import token_quote
| 30.461538 | 73 | 0.739899 |
05273ebf4b8d4eb6302f146e1b519e163f850d92 | 5,289 | py | Python | tooling/maven.py | AntonisGkortzis/Vulnerabilities-in-Reused-Software | 16b2087cb595b48446dadda8cae75dad6ef1433b | [
"MIT"
] | 3 | 2020-11-24T20:30:59.000Z | 2021-05-26T02:33:53.000Z | tooling/maven.py | AntonisGkortzis/Vulnerabilities-in-Reused-Software | 16b2087cb595b48446dadda8cae75dad6ef1433b | [
"MIT"
] | null | null | null | tooling/maven.py | AntonisGkortzis/Vulnerabilities-in-Reused-Software | 16b2087cb595b48446dadda8cae75dad6ef1433b | [
"MIT"
] | null | null | null | import os
import re
import logging
import zipfile
logger = logging.getLogger(__name__)
def get_compiled_modules(project_trees_file):
with open(project_trees_file) as f:
try:
str_trees = split_trees([l.rstrip() for l in f.readlines()])
except:
logger.error(f'File is malformed: {project_trees_file}')
return []
trees = []
for t in str_trees:
t = ArtifactTree.parse_tree_str('\n'.join(t))
if t.artifact.type in ['jar', 'war']:
t.filter_deps(lambda d : d.artifact.dep_type == 'compile' and d.artifact.type in ['jar', 'war'])
trees.append(t)
return [t for t in trees if not t.missing_m2_pkgs()]
def filter_mvn_output(mvn_tree_output):
    """Extract dependency-tree lines from a ``mvn dependency:tree`` log file.

    :param mvn_tree_output: path to a file containing raw Maven output
    :return: list of rstripped lines that look like dependency-tree
        elements (``[INFO]`` prefix, optional tree-drawing characters,
        then a ``group:artifact:type:version[:scope]`` coordinate)
    """
    re_tree_element = re.compile(r'^\[INFO\] (\||\\\-|\+\-| )*([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+?(:[a-zA-Z\-]+)?$')
    # Bug fix: the original opened the undefined name `tree_file` instead
    # of the `mvn_tree_output` parameter, raising NameError on every call.
    with open(mvn_tree_output, 'r') as f:
        tree_lines = [line.rstrip() for line in f if re_tree_element.match(line)]
    return tree_lines
def split_trees(tree_lines):
    """Group flat dependency-tree lines into one list per artifact tree.

    A line matching the artifact pattern (coordinate immediately after
    ``[INFO] ``, no tree-drawing prefix) starts a new tree; each
    following non-matching line is appended as one of its dependencies.

    :param tree_lines: Maven dependency-tree lines
    :return: list of trees, each ``[root_line, dep_line, ...]``; an empty
        list for empty input (the original raised TypeError here)
    :raises ValueError: if a dependency line precedes any root line
        (the original raised an obscure TypeError on ``None``)
    """
    re_artifact = re.compile(r'^\[INFO\] ([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+$')
    trees = []
    current = None
    for line in tree_lines:
        if re_artifact.match(line):
            # A root line starts a new tree; flush the previous one.
            if current is not None:
                trees.append(current)
            current = [line]
        else:
            if current is None:
                raise ValueError(f'dependency line before any artifact root: {line!r}')
            current.append(line)
    if current is not None:
        trees.append(current)
    return trees
| 33.687898 | 156 | 0.560219 |
0527ccd6baf873620f163e0b3ed2a44bfa92eff6 | 1,812 | py | Python | ptsites/sites/hares.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | ptsites/sites/hares.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | ptsites/sites/hares.py | kbnq/flexget_qbittorrent_mod | e52d9726b80aab94cf3d9ee6c382b6721b757d3b | [
"MIT"
] | null | null | null | from ..schema.nexusphp import Attendance
from ..schema.site_base import Work, SignState
from ..utils.net_utils import NetUtils
| 29.225806 | 82 | 0.400662 |
05299930cfe175dfdd505fa507a88544ad0e95c1 | 716 | py | Python | tests/garage/sampler/test_rl2_worker.py | blacksph3re/garage | b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507 | [
"MIT"
] | 1,500 | 2018-06-11T20:36:24.000Z | 2022-03-31T08:29:01.000Z | tests/garage/sampler/test_rl2_worker.py | blacksph3re/garage | b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507 | [
"MIT"
] | 2,111 | 2018-06-11T04:10:29.000Z | 2022-03-26T14:41:32.000Z | tests/garage/sampler/test_rl2_worker.py | blacksph3re/garage | b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507 | [
"MIT"
] | 309 | 2018-07-24T11:18:48.000Z | 2022-03-30T16:19:48.000Z | from garage.envs import GymEnv
from garage.tf.algos.rl2 import RL2Worker
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.policies import DummyPolicy
| 32.545455 | 50 | 0.655028 |
052a76693b3fb6c307548d396e0accbc369737c8 | 660 | py | Python | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | #Uche Ogbuji exercises format-number on Brad Marshall's behalf
from Xml.Xslt import test_harness
sheet_1 = """\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match = "/">
<xsl:value-of select='format-number(10000000000.75 + 10000000000.50, "##.##")'/>
</xsl:template>
</xsl:stylesheet>"""
#"
source_1 = "<spam/>"
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
20000000001.25"""
| 24.444444 | 84 | 0.671212 |
052bebc9ce249268deadd50cd183873b6f1a799a | 2,697 | py | Python | tests/test_connection.py | daniel-herrero/fastapi-mailman | a174d0ec777d3330dc5464f71fafa7829db07bf1 | [
"MIT"
] | 6 | 2021-10-08T10:20:37.000Z | 2022-03-30T08:56:10.000Z | tests/test_connection.py | daniel-herrero/fastapi-mailman | a174d0ec777d3330dc5464f71fafa7829db07bf1 | [
"MIT"
] | 2 | 2021-11-11T11:44:29.000Z | 2022-03-08T06:54:54.000Z | tests/test_connection.py | daniel-herrero/fastapi-mailman | a174d0ec777d3330dc5464f71fafa7829db07bf1 | [
"MIT"
] | 1 | 2022-03-04T14:43:22.000Z | 2022-03-04T14:43:22.000Z | import typing as t
import pytest as pt
from fastapi_mailman import BadHeaderError, EmailMessage
if t.TYPE_CHECKING:
from fastapi_mailman import Mail
| 28.389474 | 102 | 0.629218 |