hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d1c635399d92b3e1526049c9830b5922d5577a91 | 17,587 | py | Python | src/data/tree_matches.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/data/tree_matches.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/data/tree_matches.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | 1 | 2021-08-19T15:21:50.000Z | 2021-08-19T15:21:50.000Z | import glob
import os
import pandas as pd
import json
import ast
from tqdm import tqdm
import click
import pickle
from multiprocessing import Pool, cpu_count, Queue
from functools import partial
import itertools
import sys
sys.setrecursionlimit(15000)
import logging
# Append-only run log written into the current working directory.
logpath = "./tree_matches.log"
# Use a named logger (not the root logger) so logging configured by other
# modules is not affected; INFO and above is recorded.
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.FileHandler(logpath)
# ch.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(ch)
# def get_matching_cells(kernel_trees,diff_versions = False, key = None):
# matches = []
# all_cells = []
# for slug,versions in kernel_trees.items():
# all_version_cells = []
# for version_id, cells in versions.items():
# if cells:
# for cell in cells:
# all_version_cells.append(cell)
# n = len(all_version_cells)
# if n == 1:
# continue
# for i in range(n):
# for j in range(i+1,n):
# cell_i = all_version_cells[i]
# cell_j = all_version_cells[j]
# if diff_versions:
# if cell_i.version_id == cell_j.version_id:
# continue
# diff = cell_i.coral_diff(cell_j,key=key)
# if diff == 1:
# matches.append((cell_i,cell_j))
# all_cells = all_cells + all_version_cells
# return matches
# def get_competition_matches(competition_path):
# slugs = [os.path.basename(x) for x in glob.glob(os.path.join(competition_path,"*"))]
# matches = []
# for slug in slugs:
# matches = matches + get_slug_matches(competition_path,slug)
# logger.info("Done with {}".format(competition_path))
# return matches
# def get_competition_matcher(ignore_function_args,length_threshold,remove_exact_duplicates,
# ignore_strings):
# def get_competition_matches(ignore_function_args,length_threshold,remove_exact_duplicates,
# ignore_strings, competition_path):
# slugs = [os.path.basename(x) for x in glob.glob(os.path.join(competition_path,"*"))]
# matches = []
# for slug in slugs:
# matches = matches + get_slug_matches(competition_path,slug,ignore_function_args,
# remove_exact_duplicates, length_threshold, ignore_strings)
# logger.info("Done with {}".format(competition_path))
# return matches
# return get_competition_matches
# Script entry point.
# NOTE(review): main() is not defined anywhere in this excerpt (all helper
# functions above are commented out) -- confirm it exists in the full file
# before running this as a script.
if __name__ == '__main__':
    main()
| 36.112936 | 189 | 0.591289 |
d1c6823ee90be6b6904c09a99dc9b3ef3c77d40d | 3,273 | py | Python | opencdms/process/r_instat.py | dannyparsons/pyopencdms | 94addc5009a0a68e17fb443607d876540a46afcc | [
"MIT"
] | null | null | null | opencdms/process/r_instat.py | dannyparsons/pyopencdms | 94addc5009a0a68e17fb443607d876540a46afcc | [
"MIT"
] | 11 | 2021-07-28T09:18:20.000Z | 2022-02-24T09:48:53.000Z | opencdms/process/r_instat.py | dannyparsons/pyopencdms | 94addc5009a0a68e17fb443607d876540a46afcc | [
"MIT"
] | 2 | 2021-12-19T19:38:06.000Z | 2022-01-14T16:46:36.000Z | # =================================================================
#
# Authors: Stephen Lloyd
# Ian Edwards
#
# Copyright (c) 2020, OpenCDMS Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
def windrose(
    speed,
    direction,
    facet,
    n_directions=12,
    n_speeds=5,
    speed_cuts="NA",
    col_pal="GnBu",
    ggtheme="grey",
    legend_title="Wind Speed",
    calm_wind=0,
    variable_wind=990,
    n_col=1,
):
    """Plot a windrose of wind speed/direction for each facet using ggplot2.

    This is currently a placeholder: it documents the intended
    ``clifro::windrose`` call (reproduced in the comment below) but performs
    no plotting and always returns ``None``.

    Args:
        speed: numeric vector of wind speeds.
        direction: numeric vector of wind directions.
        facet: character or factor vector assigning each observation to the
            windrose (facet) it belongs to.

    Kwargs:
        n_directions: number of direction bins (petals on the rose),
            default 12.
        n_speeds: number of equally spaced wind-speed bins; only used when
            speed_cuts is "NA" (default 5).
        speed_cuts: numeric cut points for the wind-speed intervals, or the
            string "NA" to fall back to n_speeds equal bins (default "NA").
        col_pal: name of the brewer.pal.info colour palette used for
            plotting (default "GnBu").
        ggtheme: ggplot theme, (partially) matching one of "grey", "gray",
            "bw", "linedraw", "light", "minimal", "classic"
            (default "grey").
        legend_title: text used as the legend title (default "Wind Speed").
        calm_wind: upper limit of wind speed considered calm (default 0).
        variable_wind: numeric code for variable winds, if applicable
            (default 990).
        n_col: number of columns of plots (default 1).

    Returns:
        None -- the underlying R call is not wired up yet.
    """
    # Intended R invocation (clifro package):
    #   clifro::windrose(
    #       speed, direction, facet, n_directions=12, n_speeds=5,
    #       speed_cuts=NA, col_pal="GnBu",
    #       ggtheme=c("grey", "gray", "bw", "linedraw", "light",
    #                 "minimal", "classic"),
    #       legend_title="Wind Speed", calm_wind=0, variable_wind=990,
    #       n_col=1, ...)
    return None
| 34.09375 | 79 | 0.643752 |
d1c81880771dc78be0ce9b1719c11a105c654a6c | 663 | py | Python | examples/accessibility/test_sa11y.py | echo2477/demo-python | adc55aa8075dbd46f94d1ae68f2acfd8f20720d5 | [
"MIT"
] | 42 | 2019-02-27T03:28:52.000Z | 2022-01-25T21:18:45.000Z | examples/accessibility/test_sa11y.py | echo2477/demo-python | adc55aa8075dbd46f94d1ae68f2acfd8f20720d5 | [
"MIT"
] | 12 | 2019-05-10T23:43:55.000Z | 2021-11-05T21:20:02.000Z | examples/accessibility/test_sa11y.py | echo2477/demo-python | adc55aa8075dbd46f94d1ae68f2acfd8f20720d5 | [
"MIT"
] | 38 | 2019-02-27T03:28:52.000Z | 2022-02-17T07:27:08.000Z | import os
from selenium import webdriver
from sa11y.analyze import Analyze
import urllib3
urllib3.disable_warnings()
| 22.862069 | 69 | 0.612368 |
d1ca40f0376f7b0e97f60f4e474395644c035a44 | 653 | py | Python | 275_hindex_ii.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 275_hindex_ii.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 275_hindex_ii.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # 275. H-Index II
# Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
| 29.681818 | 117 | 0.509954 |
d1cad5eb72fd592bce4b7879f6c49c197729b99c | 6,172 | py | Python | base/site-packages/news/templatetags/news_tags.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/news/templatetags/news_tags.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/news/templatetags/news_tags.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | from django.conf import settings
from django import template
from news.models import NewsItem, NewsAuthor, NewsCategory
register = template.Library()
def parse_token(token):
    """
    Parses a token into 'slug', 'limit', and 'varname' values.
    Token must follow format {% tag_name <slug> [<limit>] as <varname> %}

    :param token: template token object providing ``split_contents()``
    :raises template.TemplateSyntaxError: if the argument count is wrong or
        the ``as`` keyword is missing
    :return: tuple of (slug, limit or None, varname); limit is the absolute
        value of the parsed integer, or None when absent/unparseable
    """
    bits = token.split_contents()
    if len(bits) == 5:
        # A limit was passed in -- try to parse / validate it.  Only a failed
        # int() conversion should fall back to "no limit"; the previous bare
        # ``except:`` also swallowed SystemExit/KeyboardInterrupt.
        try:
            limit = abs(int(bits[2]))
        except (TypeError, ValueError):
            limit = None
    elif len(bits) == 4:
        # No limit was specified.
        limit = None
    else:
        # Syntax is wrong.
        raise template.TemplateSyntaxError("Wrong number of arguments: format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
    if bits[-2].lower() != 'as':
        raise template.TemplateSyntaxError("Missing 'as': format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
    return (bits[1], limit, bits[-1])
class MonthNode(template.Node): | 30.107317 | 137 | 0.70431 |
d1cc9f6841588916d3d185d0c46b0a187fc51e4e | 1,731 | py | Python | generics/models.py | morfat/falcon-quick-start | e2940d7bbf2f687627fcc18aa9440abc144f3e5c | [
"MIT"
] | null | null | null | generics/models.py | morfat/falcon-quick-start | e2940d7bbf2f687627fcc18aa9440abc144f3e5c | [
"MIT"
] | null | null | null | generics/models.py | morfat/falcon-quick-start | e2940d7bbf2f687627fcc18aa9440abc144f3e5c | [
"MIT"
] | null | null | null | import math
import falcon
import jsonschema
| 21.6375 | 122 | 0.601964 |
d1d0fe4d85e8f06718ad484d0653f8f1487b2d32 | 155 | py | Python | main.py | adael/goldminer | 47571c71c7f815eccb455a7d9e11d0e3892e9a5d | [
"MIT"
] | 2 | 2016-11-08T14:32:40.000Z | 2018-06-12T11:44:24.000Z | main.py | adael/goldminer | 47571c71c7f815eccb455a7d9e11d0e3892e9a5d | [
"MIT"
] | null | null | null | main.py | adael/goldminer | 47571c71c7f815eccb455a7d9e11d0e3892e9a5d | [
"MIT"
] | null | null | null | import os
from goldminer import game
def _main():
    """Report start-up information, then hand control to the game loop."""
    print("Initializing")
    print("Working directory: " + os.getcwd())
    game.start()


if __name__ == "__main__":
    _main()
| 19.375 | 46 | 0.670968 |
d1d19c31d7a08cd05475c969fbf2328d027248cd | 15,337 | py | Python | zed-align.py | zyndagj/zed-align | 143b0043b0bfc88f553dc141f4873715bfabc379 | [
"BSD-3-Clause"
] | 1 | 2017-03-17T15:57:04.000Z | 2017-03-17T15:57:04.000Z | zed-align.py | zyndagj/ZED-bsmap-align | 143b0043b0bfc88f553dc141f4873715bfabc379 | [
"BSD-3-Clause"
] | null | null | null | zed-align.py | zyndagj/ZED-bsmap-align | 143b0043b0bfc88f553dc141f4873715bfabc379 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from math import ceil
import os
import sys
import argparse
import multiprocessing
import subprocess as sp
import re
#from pprint import pprint
from array import array
from yaml import load, dump
contexts = ('CG','CHG','CHH')
def ParseFai(inFile):
    '''
    Parses a fa.fai (samtools faidx index) into a python dictionary
    mapping each sequence name to its length.

    Parameters
    ================================
    inFile  FILE  fai file

    Returns
    ================================
    dict of {sequence name (str): sequence length (int)}
    '''
    # A fai record is tab-separated: NAME, LENGTH, OFFSET, LINEBASES,
    # LINEWIDTH -- only the first two fields are needed here.  The
    # with-block closes the handle deterministically (the original
    # open(...).readlines() left the file open until garbage collection).
    with open(inFile, 'r') as fai:
        lengths = {}
        for line in fai:
            if not line.strip():
                continue  # tolerate blank/trailing lines
            fields = line.split('\t')
            lengths[fields[0]] = int(fields[1])
        return lengths
def makeTabStr(C, CT, nSites):
    '''
    Generates the tab-separated column string (ratio, C, CT, nSites)
    appended to one row of the .tab file.
    '''
    if not C:
        # No methylated reads observed: the ratio column is a literal 0.
        return '\t0\t%i\t%i\t%i' % (C, CT, nSites)
    ratio = float(C) / float(CT)
    return '\t%.2f\t%i\t%i\t%i' % (ratio, C, CT, nSites)
def makeDataArrays(offset):
    '''
    Function for creating the three unsigned-short counter arrays
    (C, CT, nSites) that accumulate data from methratio.py output,
    each of length offset*3 (one slot per context per position).

    >>> makeDataArrays(1)
    (array('H', [0, 0, 0]), array('H', [0, 0, 0]), array('H', [0, 0, 0]))
    '''
    size = offset * 3
    # Build three independent zero-filled arrays ('H' = unsigned short).
    return tuple(array('H', [0] * size) for _ in range(3))
# Script entry point.
# NOTE(review): main() is not defined in this excerpt -- it presumably lives
# in the portion of the file not shown here; confirm against the full source.
if __name__ == "__main__":
    main()
| 43.447592 | 207 | 0.57671 |
d1d212dc12933a4a0f21c68d34b67d74f7e46ad2 | 4,316 | py | Python | tests/test_metadata_model.py | statisticsnorway/microdata-validator | c6b6788ab3ba7a3dad889db9120ad2decc598e76 | [
"Apache-2.0"
] | 1 | 2022-03-23T09:15:51.000Z | 2022-03-23T09:15:51.000Z | tests/test_metadata_model.py | statisticsnorway/microdata-validator | c6b6788ab3ba7a3dad889db9120ad2decc598e76 | [
"Apache-2.0"
] | 4 | 2022-02-17T08:41:30.000Z | 2022-02-28T14:08:47.000Z | tests/test_metadata_model.py | statisticsnorway/microdata-validator | c6b6788ab3ba7a3dad889db9120ad2decc598e76 | [
"Apache-2.0"
] | null | null | null | import json
import pytest
from microdata_validator import Metadata, PatchingError
RESOURCE_DIR = 'tests/resources/metadata_model'
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described.json') as f:
TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_update.json') as f:
UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated.json') as f:
ENUMERATED_TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_update.json') as f:
ENUMERATED_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_patched.json') as f:
PATCHED_ENUMERATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_patched.json') as f:
PATCHED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_illegal_update.json') as f:
# New variable name on line 18
ILLEGALLY_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_deleted_object.json') as f:
# Deleted keyType object line 34
DELETED_OBJECT_METADATA = json.load(f)
def test_patch_metadata_illegal_fields_changes():
    """
    The "updated" contains randomly chosen fields that are not allowed to be changed.

    Patching must refuse changes to protected metadata fields and name
    exactly which fields were illegally modified.
    """
    # NOTE(review): load_file() is not defined or imported in this excerpt --
    # presumably a helper defined elsewhere in this module; confirm.
    updated = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_illegal_update.json')
    original = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated.json')
    # Applying the illegal update onto the original must raise PatchingError.
    with pytest.raises(PatchingError) as e:
        orig = Metadata(original)
        orig.patch(Metadata(updated))
    # The error message lists the offending fields.
    assert 'Can not change these metadata fields [name, temporality, languageCode]' in str(e)
| 37.530435 | 96 | 0.765524 |
d1d24bde4b14a7385a88eadfd5830d39f6ecfb75 | 127 | py | Python | metrics/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | metrics/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | metrics/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | # metrics/__init__.py
# author: Playinf
# email: playinf@stu.xmu.edu.cn
from .metrics import create_tagger_evaluation_metrics
| 21.166667 | 53 | 0.80315 |
d1d273fedbebba3a9ba1430c685e07560c2562dd | 680 | py | Python | tests/platforms/macOS/dmg/test_mixin.py | chuckyQ/briefcase | 06e84e7b1c3af016c828a5a640d277809de6644b | [
"BSD-3-Clause"
] | 3 | 2020-09-29T15:32:35.000Z | 2021-11-08T09:41:04.000Z | tests/platforms/macOS/dmg/test_mixin.py | CuPidev/briefcase | 35619cbe4b512c8521ad3733341e6bc3422efb58 | [
"BSD-3-Clause"
] | null | null | null | tests/platforms/macOS/dmg/test_mixin.py | CuPidev/briefcase | 35619cbe4b512c8521ad3733341e6bc3422efb58 | [
"BSD-3-Clause"
] | 1 | 2021-03-26T11:52:02.000Z | 2021-03-26T11:52:02.000Z | import sys
import pytest
from briefcase.platforms.macOS.dmg import macOSDmgCreateCommand
# DMG creation is macOS-only (the command under test is the macOS dmg
# backend), so skip collection of this whole module on other platforms.
if sys.platform != 'darwin':
    pytest.skip("requires macOS", allow_module_level=True)
| 29.565217 | 76 | 0.772059 |
d1d2b7418ea4360c01e3e7cac48267d8b72eae4a | 336 | py | Python | app/__init__.py | Lijah-Tech-Solution/flask_structure | f1c31043f5756db66624f47b6ae4e7f869064d19 | [
"MIT"
] | 1 | 2020-07-22T15:00:53.000Z | 2020-07-22T15:00:53.000Z | app/__init__.py | Lijah-Tech-Solution/flask_structure | f1c31043f5756db66624f47b6ae4e7f869064d19 | [
"MIT"
] | null | null | null | app/__init__.py | Lijah-Tech-Solution/flask_structure | f1c31043f5756db66624f47b6ae4e7f869064d19 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
# Select the configuration class from app.config["ENV"] (Flask populates
# this from the FLASK_ENV environment variable); anything other than
# "production" or "testing" falls back to the development config.
if app.config["ENV"] == "production":
    app.config.from_object("config.ProductionConfig")
elif app.config["ENV"] == "testing":
    app.config.from_object("config.TestingConfig")
else:
    app.config.from_object("config.DevelopmentConfig")
# Bottom-of-file import: presumably the views module imports `app` back, so
# importing it here (after `app` exists) avoids a circular import -- confirm.
from app import views
from app import admin_views | 17.684211 | 51 | 0.752976 |
d1d4630b4a1d77b92aebe2079bfb6cc0bd824f76 | 674 | py | Python | meutils/clis/conf.py | Jie-Yuan/MeUtils | 2bb191b0d35b809af037c0f65b37570b8828bea3 | [
"Apache-2.0"
] | 3 | 2020-12-03T07:30:02.000Z | 2021-02-07T13:37:33.000Z | meutils/clis/conf.py | Jie-Yuan/MeUtils | 2bb191b0d35b809af037c0f65b37570b8828bea3 | [
"Apache-2.0"
] | null | null | null | meutils/clis/conf.py | Jie-Yuan/MeUtils | 2bb191b0d35b809af037c0f65b37570b8828bea3 | [
"Apache-2.0"
] | 1 | 2021-02-07T13:37:38.000Z | 2021-02-07T13:37:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : conf
# @Time : 2021/1/31 10:20
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from meutils.pipe import *
#
#
#
def conf_cli():
    """CLI entry point: expose ``run`` through python-fire.

    Example: <conf_cli> --epoch 11 --batch_size 111

    NOTE(review): ``fire`` and ``run`` are not defined in this excerpt and
    presumably come from ``from meutils.pipe import *`` above -- confirm.
    """
    # PEP 8 (E731): a def instead of a lambda assignment gives the entry
    # point a real __name__/__doc__ for console-script registration.
    return fire.Fire(run)
# fire.Fire()
| 18.216216 | 75 | 0.587537 |
d1d6ddc5133d35a051f353823254a3acc14e9b2b | 888 | py | Python | BeautifulSoup/request.py | madhubalajain/code_snippets | 7cd4f79d94ced097efcc651dd0fd878a52fffad1 | [
"MIT"
] | null | null | null | BeautifulSoup/request.py | madhubalajain/code_snippets | 7cd4f79d94ced097efcc651dd0fd878a52fffad1 | [
"MIT"
] | null | null | null | BeautifulSoup/request.py | madhubalajain/code_snippets | 7cd4f79d94ced097efcc651dd0fd878a52fffad1 | [
"MIT"
] | null | null | null | install Request module
# This demo script requires the third-party "requests" package:
#   pip install requests
# (the two lines above were previously bare text and made the file a
# SyntaxError before any code could run)
import requests
r = requests.get('https://xkcd.com/353/')
print(r)
print(r.text)
#Download image
r = requests.get('https://xkcd.com/comics/python.png')
print(r.content)
with open('comic.png', 'wb') as f:
    f.write(r.content)
print(r.status_code)
print(r.ok) # Print True for any response <400
print(r.headers)
# Handy endpoint for experimenting with HTTP: https://httpbin.org
# How to pass query parameters
payload = {'page': 2, 'count': 25}
r = requests.get('https://httpbin.org/get', params=payload)
print(r.text)
####### Post
payload = {'username': 'madhu', 'password': 'testing'}
r = requests.post('https://httpbin.org/post', data=payload)
r_dict = r.json()
print(r_dict['form'])
## timeout
r = requests.get('https://xkcd.com/comics/python.png', timeout=3)
# raises requests.exceptions.Timeout if no response arrives within 3 seconds
| 17.76 | 66 | 0.643018 |
d1d6ec176f56d2655e5c7c5a56574d4a35207716 | 1,231 | py | Python | facturador/facturador/urls.py | crodriguezud/Facturador | 1a1e08072ae1d54f3f7963cdd202444618a0fa2e | [
"Apache-2.0"
] | null | null | null | facturador/facturador/urls.py | crodriguezud/Facturador | 1a1e08072ae1d54f3f7963cdd202444618a0fa2e | [
"Apache-2.0"
] | 9 | 2020-06-05T17:25:18.000Z | 2022-03-11T23:15:36.000Z | facturador/facturador/urls.py | crodriguezud/Facturador | 1a1e08072ae1d54f3f7963cdd202444618a0fa2e | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
# Configuration API Router
from rest_framework import routers
#router = routers.DefaultRouter()
#router.register(r'artists', ArtistViewSet)
#router.register(r'albums', AlbumViewSet)
#router.register(r'songs', SongViewSet)
# URL routing table: root app, Django admin, auth (allauth + JWT endpoints)
# and the domain apps (usuario / stock / contabilidad).
urlpatterns = [
    url(r'^', include('index.urls')),
    url(r'^admin/', admin.site.urls),
    #url(r'^api/', include(router.urls)),
    # AUTH
    url(r'^cuenta/', include('allauth.urls')),
    url(r'^api-token-auth/', obtain_jwt_token),      # issue a JWT
    url(r'^api-token-refresh/', refresh_jwt_token),  # refresh an unexpired JWT
    url(r'^api-token-verify/', verify_jwt_token),    # validate a JWT
    # Apps
    url(r'^usuario/', include('usuario.urls')),
    url(r'^stock/', include('stock.urls')),
    url(r'^contabilidad/', include('contabilidad.urls')),
]
from django.conf import settings
from django.conf.urls.static import static
# Serve uploaded media and static files from Django itself only in DEBUG
# mode; in production these are expected to be served by the web server.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 32.394737 | 82 | 0.736799 |
d1d7dba700dc5f0d195179566af837041f1113d5 | 13,432 | py | Python | dynaphopy/analysis/fitting/fitting_functions.py | faradaymahe/DynaPhopy | 8519ff616386651acf71166bee02c1a2aef89312 | [
"MIT"
] | 76 | 2015-02-24T02:55:09.000Z | 2022-03-31T09:38:09.000Z | dynaphopy/analysis/fitting/fitting_functions.py | jianskerh/DynaPhoPy | e1201f6de62b4303c68a7808ed19175364409586 | [
"MIT"
] | 14 | 2017-07-21T12:37:28.000Z | 2021-09-15T08:50:55.000Z | dynaphopy/analysis/fitting/fitting_functions.py | jianskerh/DynaPhoPy | e1201f6de62b4303c68a7808ed19175364409586 | [
"MIT"
] | 38 | 2015-07-02T01:17:27.000Z | 2022-03-25T14:24:33.000Z | import numpy as np
from scipy.optimize import curve_fit, minimize_scalar
# Physical constants expressed in eV-based units (see inline unit tags),
# with time in picoseconds.
h_planck = 4.135667662e-3 # eV/ps
h_planck_bar = 6.58211951e-4 # eV/ps
kb_boltzmann = 8.6173324e-5 # eV/K
# Registry mapping a user-selectable integer index to a fitting function.
# NOTE(review): Lorentzian, Lorentzian_asymmetric and Damped_harmonic are
# not defined in this excerpt -- presumably declared elsewhere in this
# module; confirm against the full source.
fitting_functions = {
    0: Lorentzian,
    1: Lorentzian_asymmetric,
    2: Damped_harmonic,
}
# Test for automatic detection (order can change)
# import sys, inspect
# list_fitting = inspect.getmembers(sys.modules[__name__], inspect.isclass)
# Fitting_functions = {}
# for i, p in enumerate(list_fitting):
# Fitting_functions[i] = p[1]
| 34.979167 | 112 | 0.542064 |
d1d82814692baf55384c0af692ceedac9c370b19 | 4,517 | py | Python | edualgo/circular-linked-list.py | VaishnaviNandakumar/eduAlgo | 5eb24058d969ab6dae2cbd19f9048ea1a353b48e | [
"MIT"
] | 22 | 2021-02-25T04:35:57.000Z | 2022-02-14T13:33:19.000Z | edualgo/circular-linked-list.py | VaishnaviNandakumar/eduAlgo | 5eb24058d969ab6dae2cbd19f9048ea1a353b48e | [
"MIT"
] | 40 | 2021-02-26T06:59:41.000Z | 2021-11-10T07:40:29.000Z | edualgo/circular-linked-list.py | VaishnaviNandakumar/eduAlgo | 5eb24058d969ab6dae2cbd19f9048ea1a353b48e | [
"MIT"
] | 17 | 2021-02-25T00:58:57.000Z | 2021-11-08T23:46:06.000Z | from __init__ import print_msg_box
#creating object
#list = singleLinkedList()
#list.insertLast(50, 60,70)
#list.display()
'''
It shows the entered things at last
output:
=======
50
60
70
50...
'''
#list.insertFirst(10,20,30)
#list.display()
'''
It shows the entered things at first then remaining
output:
=======
10
20
30
50
60
70
10...
'''
#print(list.insertMiddle.__annotations__)
#list.insertMiddle(40,4)
#list.display()
'''
It shows the inserted element at nth position
output:
=======
10
20
30
40
50
60
70
10...
'''
#list.delete(6)
#list.display()
'''
It shows the list after deleting it
output:
=======
10
20
30
40
50
60
10...
'''
| 23.404145 | 75 | 0.579367 |
d1de025379609a12a3f05f1bd0a39e4f01a64269 | 407 | py | Python | core/migrations/0007_item_paystack_link.py | adesiyanoladipo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | 6 | 2020-09-03T20:05:00.000Z | 2021-07-02T11:49:46.000Z | core/migrations/0007_item_paystack_link.py | adesiyan-ifedayo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | null | null | null | core/migrations/0007_item_paystack_link.py | adesiyan-ifedayo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | 4 | 2020-09-03T10:52:20.000Z | 2021-01-13T16:13:45.000Z | # Generated by Django 2.2.14 on 2020-08-23 10:13
from django.db import migrations, models
| 21.421053 | 73 | 0.604423 |
d1deb1b97db88859b62d8246e63346725b35b7ec | 798 | py | Python | messenger/server/src/auth/decorators.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | null | null | null | messenger/server/src/auth/decorators.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | null | null | null | messenger/server/src/auth/decorators.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | 1 | 2020-02-27T08:08:26.000Z | 2020-02-27T08:08:26.000Z | """Decorators for auth module."""
from functools import wraps
from src.protocol import make_response
from src.database import session_scope
from .models import Session
def login_required(func):
    """Check that the user is logged in, based on a valid token in the request."""
    # NOTE(review): `wrapper` is not defined in this excerpt -- the
    # decorator's inner function (normally declared here, wrapping `func`)
    # appears to be missing from this view of the file; as written this
    # raises NameError when used. Confirm against the full source.
    return wrapper
| 33.25 | 98 | 0.689223 |
d1def20a029952342126d505b499c5a421976187 | 3,607 | py | Python | proj06_functions/proj06.py | hawiab/VSA18 | 852902f96f97d62e4cfbc8e997c96b305754bf5b | [
"MIT"
] | null | null | null | proj06_functions/proj06.py | hawiab/VSA18 | 852902f96f97d62e4cfbc8e997c96b305754bf5b | [
"MIT"
] | null | null | null | proj06_functions/proj06.py | hawiab/VSA18 | 852902f96f97d62e4cfbc8e997c96b305754bf5b | [
"MIT"
] | null | null | null | # Name:
# Date:
# proj05: functions and lists
# Part I
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest.

    For num < 1 (zero or negative input) the result is an empty list,
    matching the original while-loop behaviour.

    :param num: int
    :return: list (int)
    """
    # range(1, num + 1) is empty when num < 1, so no special-casing is
    # needed; this replaces the while loop with its duplicated increment.
    return [d for d in range(1, num + 1) if num % d == 0]
def prime(num):
    """
    Takes a number and returns True if the number is prime, otherwise False.

    :param num: int
    :return: bool
    """
    # Trial division up to sqrt(num): O(sqrt n) with no list allocation,
    # instead of building the complete divisor list via divisors() (O(n)).
    # Numbers below 2 (including 0 and negatives) are not prime, matching
    # the original len(divisors(num)) == 2 test.
    if num < 2:
        return False
    d = 2
    while d * d <= num:
        if num % d == 0:
            return False
        d += 1
    return True
# Part II:
# REVIEW: Conditionals, for loops, lists, and functions
#
# INSTRUCTIONS:
#
# 1. Make the string "sentence_string" into a list called "sentence_list" sentence_list
# should be a list of each letter in the string: ['H', 'e', 'l', 'l', 'o', ',', ' ', 'm',
# 'y', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'M', 'o', 'n', 't', 'y', ' ', 'P',
# 'y', 't', 'h', 'o', 'n', '.']
#
# Hint: Use a for loop and with an append function: list.append(letter)
#
# sentence_string = "Hello, my name is Monty Python."
# sentencelist = []
# counter = 0
# for item in sentence_string:
# letter = sentence_string[counter]
# sentencelist.append(letter)
# counter = counter + 1
# print sentencelist
# 2. Print every item of sentence_list on a separate line using a for loop, like this:
# H
# e
# l
# l
# o
# ,
#
# m
# y
# .... keeps going on from here.
# 3: Write a for loop that goes through each letter in the list vowels. If the current
# letter is 'b', print out the index of the current letter (should print out the
# number 1).
# vowels = ['a', 'b', 'i', 'o', 'u', 'y']
# counter = 0
# while counter <= len(vowels):
# if vowels[counter] == "b":
# break
# else:
# counter = counter + 1
# print counter
# 4: use the index found to change the list vowels so that the b is replaced with an e.
# for letter in vowels:
# vowels[1]="e"
# print vowels
# 5: Loop through each letter in the sentence_string. For each letter, check to see if the
# number is in the vowels list. If the letter is in the vowels list, add one to a
# counter. Print out the counter at the end of the loop. This counter should show how
# many vowels are in sentence_string.
# counter = 0
# for letter in sentence_string:
# if letter in vowels:
# counter = counter + 1
# print counter
# 6: Make a new function called "vowelFinder" that will return a list of the vowels
# found in a list (no duplicates).The function's parameters should be "list" and "vowels."
# Demo data for exercise 6.
vowels = ['a', 'e', 'i', 'o', 'u', 'y']
sentence = ["H","e","l","l","o","h","o","w","a","r","e","y","o","u"]
# NOTE(review): Python 2 print statement, and vowelFinder is never defined
# in this file (exercise 6 asks the student to write it) -- this line is a
# SyntaxError under Python 3 and a NameError under Python 2 as-is.
print vowelFinder(sentence, vowels)
# Example:
# vowelList = vowelFinder(sentence_list, vowels)
# print vowelList
# ['a', 'e', 'i', 'o', 'y']
# def vowelFinder(sentence_list, vowels):
# return [] | 26.91791 | 90 | 0.609093 |
d1df1905cca6f1b65e50adab041641c51732a082 | 2,034 | py | Python | bqplot/__init__.py | jasongrout/bqplot | 2416a146296419340b8d5998bf9d1538e6750579 | [
"Apache-2.0"
] | null | null | null | bqplot/__init__.py | jasongrout/bqplot | 2416a146296419340b8d5998bf9d1538e6750579 | [
"Apache-2.0"
] | 1 | 2019-04-16T04:54:14.000Z | 2019-04-16T09:13:08.000Z | bqplot/__init__.py | jasongrout/bqplot | 2416a146296419340b8d5998bf9d1538e6750579 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
==============
BQPlot Package
==============
.. currentmodule:: bqplot
Each plot starts with a `Figure` object. A `Figure` has a number of `Axis` objects (horizontal and vertical) and a number of `Mark` objects. Each `Mark` object is a visual representation of the data. Each `Axis` and `Mark` has a `Scale` object. The `Scale` objects transform data into a visual property (typically a location in pixel space, but could be a color, etc.). An `Axis` draws an axis associated with the scale. ::
from bqplot import *
from IPython.display import display
x_data = range(10)
y_data = [i ** 2 for i in x_data]
x_sc = LinearScale()
y_sc = LinearScale()
ax_x = Axis(label='Test X', scale=x_sc, tick_format='0.0f')
ax_y = Axis(label='Test Y', scale=y_sc, orientation='vertical', tick_format='0.2f')
line = Lines(x=x_data,
y=y_data,
scales={'x':x_sc, 'y':y_sc},
colors=['red', 'yellow'])
fig = Figure(axes=[ax_x, ax_y], marks=[line])
display(fig)
.. automodule:: bqplot.figure
.. automodule:: bqplot.scales
.. automodule:: bqplot.marks
.. automodule:: bqplot.axes
.. automodule:: bqplot.market_map
.. automodule:: bqplot.interacts
.. automodule:: bqplot.traits
.. automodule:: bqplot.map
.. automodule:: bqplot.pyplot
"""
from .figure import *
from .axes import *
from .marks import *
from .scales import *
from .default_tooltip import *
| 31.292308 | 426 | 0.681416 |
d1e1409d73a3d66b1d9667d3a5d80cc9f1d444f5 | 1,915 | py | Python | modele/Case.py | JordanSamhi/BricksBreaker | e2efb28e5ec43056e9665479920523576c692a6b | [
"MIT"
] | null | null | null | modele/Case.py | JordanSamhi/BricksBreaker | e2efb28e5ec43056e9665479920523576c692a6b | [
"MIT"
] | null | null | null | modele/Case.py | JordanSamhi/BricksBreaker | e2efb28e5ec43056e9665479920523576c692a6b | [
"MIT"
] | null | null | null | '''
Une case est definie par sa couleur,
ses coordonnees et un acces a la grille
pour recuperer ses voisins
'''
| 26.232877 | 79 | 0.515927 |
d1e1bcedb2edbb2d5f4a7e0929b4350832d56cb6 | 1,280 | py | Python | keypoints_SIFT_Descriptor.py | praxitelisk/OpenCV_Image_Mining | 8fb6af58a677e9acd9711164080910e4f62f7de8 | [
"MIT"
] | null | null | null | keypoints_SIFT_Descriptor.py | praxitelisk/OpenCV_Image_Mining | 8fb6af58a677e9acd9711164080910e4f62f7de8 | [
"MIT"
] | null | null | null | keypoints_SIFT_Descriptor.py | praxitelisk/OpenCV_Image_Mining | 8fb6af58a677e9acd9711164080910e4f62f7de8 | [
"MIT"
] | null | null | null | #import Libraries
import cv2
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
##################################################
'''
This example illustrates how to extract interesting key points
as features from an image
Usage:
keypointsSIFTDescriptor.py [<image_name>]
image argument defaults to fruits.jpg
'''
#Read from input
try:
fn = sys.argv[1]
except IndexError:
fn = "img/home.jpg"
##################################################
#Read image and plot it
img_original = mpimg.imread(fn)
img = mpimg.imread(fn)
plt.subplot(121), plt.imshow(img)
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
#grayscale it
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
##################################################
#use SIFT descriptor for image key points feature extraction
sift = cv2.xfeatures2d.SIFT_create()
(kps, sift) = sift.detectAndCompute(gray, None)
##################################################
#draw the keypoints
img = cv2.drawKeypoints(gray,kps,None,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.subplot(122), plt.imshow(img)
plt.title('Image with extracted keypoints'), plt.xticks([]), plt.yticks([])
plt.show()
################################################## | 28.444444 | 92 | 0.603906 |
d1e232b6f4bcb98d057d8080fd878bcc9a488c24 | 1,103 | py | Python | lib/getHostInfoResponse.py | jacksitlab/esxi-client | 0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e | [
"MIT"
] | null | null | null | lib/getHostInfoResponse.py | jacksitlab/esxi-client | 0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e | [
"MIT"
] | null | null | null | lib/getHostInfoResponse.py | jacksitlab/esxi-client | 0d9c815a2638fb9ed2c559a6ec9bdeb6ff9f033e | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
from .baseVmWareXmlResponse import BaseVmWareXmlResponse
| 40.851852 | 95 | 0.676337 |
d1e30a27cb089668fb5805462b206c1f85c6621d | 2,900 | py | Python | tests/test_markdown.py | tripleee/ChatExchange | 5509c7ec1efd5b55d4051d6966bcae7d72e84620 | [
"Apache-2.0",
"MIT"
] | 64 | 2015-02-26T02:56:57.000Z | 2021-11-07T20:40:11.000Z | tests/test_markdown.py | tripleee/ChatExchange | 5509c7ec1efd5b55d4051d6966bcae7d72e84620 | [
"Apache-2.0",
"MIT"
] | 53 | 2015-01-29T03:37:23.000Z | 2021-08-15T11:09:05.000Z | tests/test_markdown.py | tripleee/ChatExchange | 5509c7ec1efd5b55d4051d6966bcae7d72e84620 | [
"Apache-2.0",
"MIT"
] | 39 | 2015-02-11T16:37:40.000Z | 2021-01-12T18:53:40.000Z | import sys
import logging
if sys.version_info[:2] <= (2, 6):
logging.Logger.getChild = lambda self, suffix:\
self.manager.getLogger('.'.join((self.name, suffix)) if self.root is not self else suffix)
import pytest
from chatexchange.markdown_detector import markdown
logger = logging.getLogger(__name__)
| 48.333333 | 98 | 0.671724 |
d1e35468812dfeba245515055cc9981eeb5b168b | 313 | py | Python | test.py | QBitor/Neuromorphic_AE_Tools | b20f5f931e82888dfc4eebd7b19b2746d142d4bb | [
"MIT"
] | 4 | 2019-02-19T14:19:14.000Z | 2019-07-29T02:46:47.000Z | test.py | ndouard/Neuromorphic_AE_Tools | b20f5f931e82888dfc4eebd7b19b2746d142d4bb | [
"MIT"
] | null | null | null | test.py | ndouard/Neuromorphic_AE_Tools | b20f5f931e82888dfc4eebd7b19b2746d142d4bb | [
"MIT"
] | 2 | 2018-03-05T22:52:53.000Z | 2018-09-13T21:47:00.000Z | x_indexes = [i for i, j in enumerate(xaddr)] if j == myxaddr]
y_indexes = [i for i, j in enumerate(yaddr)] if j == myyaddr]
print('x_indexes: ' + str(x_indexes))
print('y_indexes:' +str(y_indexes))
# keep common indexes
common = [i for i, j in zip(x_indexes, y_indexes) if i == j]
print('common: ' + str(common))
| 39.125 | 61 | 0.670927 |
d1e50fb8283a579fbdd6f28ea13ffe7026e7416d | 1,651 | py | Python | pyefriend_api/app/v1/setting/router.py | softyoungha/pyefriend | 43a9db224be50308458f0b939ac0181b3bd63d0b | [
"MIT"
] | 8 | 2021-11-26T14:22:21.000Z | 2022-03-26T03:32:51.000Z | pyefriend_api/app/v1/setting/router.py | softyoungha/pyefriend | 43a9db224be50308458f0b939ac0181b3bd63d0b | [
"MIT"
] | 1 | 2021-12-19T13:08:26.000Z | 2021-12-19T13:22:28.000Z | pyefriend_api/app/v1/setting/router.py | softyoungha/pyefriend | 43a9db224be50308458f0b939ac0181b3bd63d0b | [
"MIT"
] | 5 | 2022-01-12T17:54:40.000Z | 2022-03-25T10:22:36.000Z | import os
from typing import Optional, List
from fastapi import APIRouter, Request, Response, status, Depends
from pyefriend_api.models.setting import Setting as SettingModel
from pyefriend_api.app.auth import login_required
from .schema import SettingOrm, SettingUpdate
r = APIRouter(prefix='/setting',
tags=['setting'])
| 30.018182 | 78 | 0.65536 |
d1e715c85a2185a84c7545eb4958d65bd238b0ac | 20,031 | py | Python | dsi/tests/test_multi_analysis.py | mongodb/dsi | 8cfc845156561d698fb01da93464392caca40644 | [
"Apache-2.0"
] | 9 | 2020-05-19T21:39:44.000Z | 2022-02-11T10:03:36.000Z | dsi/tests/test_multi_analysis.py | mongodb/dsi | 8cfc845156561d698fb01da93464392caca40644 | [
"Apache-2.0"
] | 1 | 2021-03-25T23:37:22.000Z | 2021-03-25T23:37:22.000Z | dsi/tests/test_multi_analysis.py | mongodb/dsi | 8cfc845156561d698fb01da93464392caca40644 | [
"Apache-2.0"
] | 3 | 2020-03-05T10:49:10.000Z | 2021-03-02T11:15:45.000Z | """Unit tests for util/multi_analysis.py"""
import os
import unittest
from dsi.multi_analysis import MultiEvergreenAnalysis, main
from test_lib.fixture_files import FixtureFiles
from test_lib.test_requests_parent import TestRequestsParent
FIXTURE_FILES = FixtureFiles()
if __name__ == "__main__":
unittest.main()
| 45.421769 | 97 | 0.403924 |
d1e75fc6ed9f9190b3412688570aefced2173499 | 6,356 | py | Python | src/tratamientos/migrations/0001_initial.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | src/tratamientos/migrations/0001_initial.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | 32 | 2016-05-09T19:37:08.000Z | 2022-01-13T01:00:52.000Z | src/tratamientos/migrations/0001_initial.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-08 03:42
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
| 67.617021 | 209 | 0.645374 |
d1e84365ce59a1577648a2034fb246335760f7cf | 7,308 | py | Python | panel/tasks/tests/test_utils.py | freejooo/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 137 | 2021-03-26T18:19:45.000Z | 2022-03-06T07:48:23.000Z | panel/tasks/tests/test_utils.py | rrosajp/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 11 | 2021-03-28T00:07:00.000Z | 2021-05-04T12:54:58.000Z | panel/tasks/tests/test_utils.py | rrosajp/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 16 | 2021-03-27T23:58:53.000Z | 2022-03-20T14:52:13.000Z | from typing import Any, List, Dict
RAW_INFO: Dict[str, List[Dict[str, Any]]] = {
"streams": [
{
"index": 0,
"codec_name": "h264",
"codec_long_name": "H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10",
"profile": "High",
"codec_type": "video",
"codec_time_base": "1001/48000",
"codec_tag_string": "avc1",
"codec_tag": "0x31637661",
"width": 1920,
"height": 800,
"coded_width": 1920,
"coded_height": 800,
"has_b_frames": 2,
"sample_aspect_ratio": "1:1",
"display_aspect_ratio": "12:5",
"pix_fmt": "yuv420p",
"level": 41,
"chroma_location": "left",
"refs": 1,
"is_avc": "true",
"nal_length_size": "4",
"r_frame_rate": "24000/1001",
"avg_frame_rate": "24000/1001",
"time_base": "1/24000",
"start_pts": 0,
"start_time": "0.000000",
"duration_ts": 168240072,
"duration": "7010.003000",
"bit_rate": "2150207",
"bits_per_raw_sample": "8",
"nb_frames": "168072",
"disposition": {
"default": 1,
"dub": 0,
"original": 0,
"comment": 0,
"lyrics": 0,
"karaoke": 0,
"forced": 0,
"hearing_impaired": 0,
"visual_impaired": 0,
"clean_effects": 0,
"attached_pic": 0,
"timed_thumbnails": 0,
},
"tags": {"language": "und", "handler_name": "VideoHandler"},
},
{
"index": 1,
"codec_name": "aac",
"codec_long_name": "AAC (Advanced Audio Coding)",
"profile": "LC",
"codec_type": "audio",
"codec_time_base": "1/48000",
"codec_tag_string": "mp4a",
"codec_tag": "0x6134706d",
"sample_fmt": "fltp",
"sample_rate": "48000",
"channels": 2,
"channel_layout": "stereo",
"bits_per_sample": 0,
"r_frame_rate": "0/0",
"avg_frame_rate": "0/0",
"time_base": "1/48000",
"start_pts": 0,
"start_time": "0.000000",
"duration_ts": 336480768,
"duration": "7010.016000",
"bit_rate": "143882",
"max_bit_rate": "143882",
"nb_frames": "328597",
"disposition": {
"default": 1,
"dub": 0,
"original": 0,
"comment": 0,
"lyrics": 0,
"karaoke": 0,
"forced": 0,
"hearing_impaired": 0,
"visual_impaired": 0,
"clean_effects": 0,
"attached_pic": 0,
"timed_thumbnails": 0,
},
"tags": {"language": "und", "handler_name": "SoundHandler"},
},
]
}
TORRENTS: List[Dict[str, Any]] = [
{
"added_on": 1612534456,
"amount_left": 0,
"auto_tmm": False,
"availability": -1,
"category": "1",
"completed": 1227921990,
"completion_on": 1612542927,
"content_path": "/home/user/Downloads/2021-01-11-raspios-buster-armhf.zip",
"dl_limit": -1,
"dlspeed": 0,
"downloaded": 1243692499,
"downloaded_session": 0,
"eta": 8640000,
"f_l_piece_prio": False,
"force_start": False,
"hash": "9005f3068fff382eca98cdd6380f08599319520f",
"last_activity": 0,
"magnet_uri": "magnet:?xt=urn:btih:9005f3068fff382eca98cdd6380f08599319520f&dn=2021-01-11-raspios-buster-armhf.zip&tr=http%3a%2f%2ftracker.raspberrypi.org%3a6969%2fannounce",
"max_ratio": -1,
"max_seeding_time": -1,
"name": "2021-01-11-raspios-buster-armhf.zip",
"num_complete": 0,
"num_incomplete": 615,
"num_leechs": 0,
"num_seeds": 0,
"priority": 0,
"progress": 1,
"ratio": 6.351007187348165e-05,
"ratio_limit": -2,
"save_path": "/home/user/Downloads/",
"seeding_time_limit": -2,
"seen_complete": -3600,
"seq_dl": False,
"size": 1227921990,
"state": "pausedUP",
"super_seeding": False,
"tags": "",
"time_active": 14334,
"total_size": 1227921990,
"tracker": "",
"trackers_count": 1,
"up_limit": -1,
"uploaded": 78987,
"uploaded_session": 0,
"upspeed": 0,
},
{
"added_on": 1612746101,
"amount_left": 1741422592,
"auto_tmm": False,
"availability": 0,
"category": "2",
"completed": 0,
"completion_on": -3600,
"content_path": "/home/user/Downloads/xubuntu-20.04.2-desktop-amd64.iso",
"dl_limit": -1,
"dlspeed": 0,
"downloaded": 0,
"downloaded_session": 0,
"eta": 8640000,
"f_l_piece_prio": False,
"force_start": False,
"hash": "5d6bf814125b1660f29a6841dbb5f6e277eb02cc",
"last_activity": 1612746105,
"magnet_uri": "magnet:?xt=urn:btih:5d6bf814125b1660f29a6841dbb5f6e277eb02cc&dn=xubuntu-20.04.2-desktop-amd64.iso&tr=https%3a%2f%2ftorrent.ubuntu.com%2fannounce",
"max_ratio": -1,
"max_seeding_time": -1,
"name": "xubuntu-20.04.2-desktop-amd64.iso",
"num_complete": 0,
"num_incomplete": 0,
"num_leechs": 0,
"num_seeds": 0,
"priority": 4,
"progress": 0,
"ratio": 0,
"ratio_limit": -2,
"save_path": "/home/user/Downloads/",
"seeding_time_limit": -2,
"seen_complete": -3600,
"seq_dl": False,
"size": 1741422592,
"state": "stalledDL",
"super_seeding": False,
"tags": "",
"time_active": 0,
"total_size": 1741422592,
"tracker": "",
"trackers_count": 1,
"up_limit": -1,
"uploaded": 0,
"uploaded_session": 0,
"upspeed": 0,
},
]
MOVIEDB: Dict[str, Any] = {
"movie_results": [
{
"genre_ids": [18],
"original_language": "en",
"original_title": "12 Angry Men",
"poster_path": "/wh0f80G6GZvYBNiYmvqFngt3IYq.jpg",
"video": False,
"vote_average": 8.5,
"overview": "The defense and the prosecution have rested and the jury is filing into the jury room to decide if a young Spanish-American is guilty or innocent of murdering his father. What begins as an open and shut case soon becomes a mini-drama of each of the jurors' prejudices and preconceptions about the trial, the accused, and each other.",
"release_date": "1957-04-10",
"vote_count": 5322,
"title": "12 Angry Men",
"adult": False,
"backdrop_path": "/qqHQsStV6exghCM7zbObuYBiYxw.jpg",
"id": 389,
"popularity": 20.461,
}
],
"person_results": [],
"tv_results": [],
"tv_episode_results": [],
"tv_season_results": [],
}
| 33.369863 | 359 | 0.491516 |
d1e88bdba0945c9b9cc4455b24e5747284f786b4 | 368 | py | Python | circular_rings.py | irahorecka/Diffraction-Simulations--Angular-Spectrum-Method | c2eb1de944685018f887c7861301f7098354e9f5 | [
"MIT"
] | 1 | 2021-01-04T17:04:55.000Z | 2021-01-04T17:04:55.000Z | circular_rings.py | irahorecka/Diffraction-Simulations--Angular-Spectrum-Method | c2eb1de944685018f887c7861301f7098354e9f5 | [
"MIT"
] | null | null | null | circular_rings.py | irahorecka/Diffraction-Simulations--Angular-Spectrum-Method | c2eb1de944685018f887c7861301f7098354e9f5 | [
"MIT"
] | null | null | null | from simulator import PolychromaticField, cf, mm
F = PolychromaticField(
spectrum=1.5 * cf.illuminant_d65,
extent_x=12.0 * mm,
extent_y=12.0 * mm,
Nx=1200,
Ny=1200,
)
F.add_aperture_from_image(
"./apertures/circular_rings.jpg", pad=(9 * mm, 9 * mm), Nx=1500, Ny=1500
)
rgb = F.compute_colors_at(z=1.5)
F.plot(rgb, xlim=[-8, 8], ylim=[-8, 8])
| 23 | 76 | 0.649457 |
d1ee95da457c4546117cb03bfe6b449dcdd2ad26 | 2,581 | py | Python | res/scripts/client/gui/mods/ScoreViewTools_Init.py | JoshuaEN/World-of-Tanks-ScoreViewTools-Data-Export-Mods | fb424b5bfa3a1e212ef39805f9b3afb750cec82f | [
"MIT"
] | null | null | null | res/scripts/client/gui/mods/ScoreViewTools_Init.py | JoshuaEN/World-of-Tanks-ScoreViewTools-Data-Export-Mods | fb424b5bfa3a1e212ef39805f9b3afb750cec82f | [
"MIT"
] | null | null | null | res/scripts/client/gui/mods/ScoreViewTools_Init.py | JoshuaEN/World-of-Tanks-ScoreViewTools-Data-Export-Mods | fb424b5bfa3a1e212ef39805f9b3afb750cec82f | [
"MIT"
] | null | null | null | from items import vehicles, _xml
from gui.Scaleform.daapi.view.lobby.trainings.training_room import TrainingRoom;
from helpers.statistics import StatisticsCollector;
from game import init
import ScoreViewTools
old_noteHangarLoadingState = StatisticsCollector.noteHangarLoadingState
StatisticsCollector.noteHangarLoadingState = new_noteHangarLoadingState
print dir(TrainingRoom)
old_onSettingUpdated = TrainingRoom.onSettingUpdated
old_onRostersChanged = TrainingRoom.onRostersChanged
old_onPlayerStateChanged = TrainingRoom.onPlayerStateChanged
old__TrainingRoomBase__showSettings = TrainingRoom._TrainingRoomBase__showSettings
old_showRosters = TrainingRoom._showRosters
first = True
TrainingRoom.onSettingUpdated = new_onSettingUpdated
TrainingRoom.onRostersChanged = new_onRostersChanged
TrainingRoom.onPlayerStateChanged = new_onPlayerStateChanged
TrainingRoom._TrainingRoomBase__showSettings = new__TrainingRoomBase__showSettings
TrainingRoom._showRosters = new_showRosters
| 40.328125 | 86 | 0.798915 |
d1efcc031c8bf6f3a8fed9857aad8b4235615828 | 897 | py | Python | merge-sort.py | bauluk/algorithms | 9020d2a6150e58ad26d18b8fede32ded966f8a8b | [
"MIT"
] | null | null | null | merge-sort.py | bauluk/algorithms | 9020d2a6150e58ad26d18b8fede32ded966f8a8b | [
"MIT"
] | null | null | null | merge-sort.py | bauluk/algorithms | 9020d2a6150e58ad26d18b8fede32ded966f8a8b | [
"MIT"
] | null | null | null | import random
numbers = []
for i in range(0, 100):
numbers.append(random.randint(1, 100))
numbers = mergeSort(numbers)
print(numbers)
| 19.933333 | 43 | 0.518395 |
d1f02ab69517e03a599a2beb69e3009f8624f7cc | 1,586 | py | Python | W2/task4.py | mcv-m6-video/mcv-m6-2021-team6 | 701fc1420930342f3b3733e8f8fc4675c21d8f3f | [
"Unlicense"
] | null | null | null | W2/task4.py | mcv-m6-video/mcv-m6-2021-team6 | 701fc1420930342f3b3733e8f8fc4675c21d8f3f | [
"Unlicense"
] | 2 | 2021-03-23T10:34:33.000Z | 2021-03-23T18:54:28.000Z | W2/task4.py | mcv-m6-video/mcv-m6-2021-team6 | 701fc1420930342f3b3733e8f8fc4675c21d8f3f | [
"Unlicense"
] | 1 | 2021-03-08T21:13:15.000Z | 2021-03-08T21:13:15.000Z | from utilsw2 import *
from Reader import *
from Adapted_voc_evaluation import *
import glob
path_to_video = 'datasets/AICity_data/train/S03/c010/vdo.avi'
path_to_frames = 'datasets/frames/'
results_path = 'Results/Task1_1'
if __name__ == '__main__':
colors = [cv2.COLOR_BGR2HSV, cv2.COLOR_BGR2RGB, cv2.COLOR_BGR2YCrCb, cv2.COLOR_BGR2LAB]
for c in colors:
task4(c,f"W2/task4_1/mu{str(c)}.pkl",f"W2/task4_1/sigma{str(c)}.pkl")
| 38.682927 | 167 | 0.655107 |
d1f0cff2e554ccf456ca71299fa80fb9f25a8ffe | 3,207 | py | Python | src/dictstore/file_handler.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | 1 | 2021-12-21T14:23:50.000Z | 2021-12-21T14:23:50.000Z | src/dictstore/file_handler.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | null | null | null | src/dictstore/file_handler.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sai Sampath Kumar Balivada
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
file handler reads and writes datastore entries to and from the disk.
file paths are case sensitive.
"""
import os.path
import datetime
from pathlib import Path
from dictstore.exceptions import InvalidFileExtension
def generate_file_header_string() -> str:
"""Generates file header string for the data file"""
header = '// Python Dictstore File\n'
date_string = str(datetime.datetime.now())
header += '// Last Rewrite: ' + date_string + '\n'
return header
| 33.061856 | 74 | 0.640474 |
d1f1be9cfd0e8788923ad96d397bd4e298d8a339 | 2,432 | py | Python | tests/mappers/test_action_mapper.py | mik-laj/oozie-to-airflow | c04952ddc8354bcafa340703b30f7ff33f844f4e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/mappers/test_action_mapper.py | mik-laj/oozie-to-airflow | c04952ddc8354bcafa340703b30f7ff33f844f4e | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-07-01T21:57:45.000Z | 2019-07-01T21:57:45.000Z | tests/mappers/test_action_mapper.py | mik-laj/oozie-to-airflow | c04952ddc8354bcafa340703b30f7ff33f844f4e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests action mapper"""
import unittest
from o2a.converter.relation import Relation
from o2a.converter.task import Task
from o2a.mappers.action_mapper import ActionMapper
TEST_MAPPER_NAME = "mapper_name"
TEST_DAG_NAME = "dag_name"
| 41.931034 | 106 | 0.702303 |
d1f1e91e085496f9d5527679e19a038eaba7f62a | 1,265 | py | Python | euclidean_gcd/Python/euclidean_gcd.py | parammittal16/Algorithms | b9c3b6086ebf9f96bacaa55c2c29961be42676f6 | [
"MIT"
] | 1 | 2018-10-04T13:10:23.000Z | 2018-10-04T13:10:23.000Z | euclidean_gcd/Python/euclidean_gcd.py | Rajeev00021/Algorithms | 2aeeff13b63f17bae2145ffc9583dacbe2070994 | [
"MIT"
] | 2 | 2019-10-15T06:31:33.000Z | 2019-10-15T06:32:19.000Z | euclidean_gcd/Python/euclidean_gcd.py | Rajeev00021/Algorithms | 2aeeff13b63f17bae2145ffc9583dacbe2070994 | [
"MIT"
] | 1 | 2019-10-05T18:24:04.000Z | 2019-10-05T18:24:04.000Z | def euclidean_gcd(first, second):
"""
Calculates GCD of two numbers using the division-based Euclidean Algorithm
:param first: First number
:param second: Second number
"""
while(second):
first, second = second, first % second
return first
def euclidean_gcd_recursive(first, second):
"""
Calculates GCD of two numbers using the recursive Euclidean Algorithm
:param first: First number
:param second: Second number
"""
if not second:
return first
return euclidean_gcd_recursive(second, first % second)
if __name__ == '__main__':
main()
| 34.189189 | 79 | 0.480632 |
d1f402dc0bcbd7349f6046e391a89f06ba005aeb | 1,627 | py | Python | util/metrics/covariance.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | util/metrics/covariance.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | util/metrics/covariance.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | """ Distances metrics based on the covariance matrix (mostly in the context of merging and compress)"""
import torch
import numpy as np
import torch.nn.functional as F
np.random.seed(0)
def cov(m, y=None):
"""computes covariance of m"""
if y is not None:
m = torch.cat((m, y), dim=0)
m_exp = torch.mean(m, dim=1)
x = m - m_exp[:, None]
cov = 1 / (x.size(1) - 1) * x.mm(x.t())
return cov
def cov_norm(m, y):
"""computes similarity of x, y covariance matrices"""
m = (m - m.mean(dim=0)) / m.std(dim=0)
y = (y - y.mean(dim=0)) / y.std(dim=0)
# print(m.size())
# print(y.size())
m = cov(m)
y = cov(y)
return torch.norm(m) - torch.norm(y)
def cov_eig(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
s1, s2 = get_svd(m, y)
d = (s1 - s2) if k is None else (s1[:k] - s2[:k])
d = d.sum().abs()
return d
def cov_eig_kl(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
s1, s2 = get_svd(m, y)
if k is not None: s1, s2 = s1[:k] - s2[:k]
d = F.kl_div(F.softmax(s1) - F.softmax(s2))
return d
def cov_kl(m, y, k=None):
"""computes similarity of x, y covariance matrices"""
m_p = F.softmax(m.flatten())
y_p = F.softmax(y.flatten())
d = F.kl_div(m_p, y_p)
return d
if __name__ == "__main__":
x = torch.randn((100, 20))
y = torch.randn((100, 50))
print(cov_norm(x, y))
| 23.926471 | 103 | 0.562999 |
d1f4b4fbb3b683f57ba6d1034a8a600f1e9bf050 | 3,415 | py | Python | tfhub_context.py | thingumajig/simple_flask_tfhub | 75daae03299b43310b674664d41c273b6e3994c0 | [
"Apache-2.0"
] | null | null | null | tfhub_context.py | thingumajig/simple_flask_tfhub | 75daae03299b43310b674664d41c273b6e3994c0 | [
"Apache-2.0"
] | 6 | 2020-01-28T22:42:39.000Z | 2022-02-10T00:10:23.000Z | tfhub_context.py | thingumajig/simple_flask_tfhub | 75daae03299b43310b674664d41c273b6e3994c0 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
def get_use_embedding(texts):
use_embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-large/3")
# Reduce logging output.
# tf.logging.set_verbosity(tf.logging.ERROR)
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
texts_embeddings = session.run(use_embed(texts))
for i, message_embedding in enumerate(np.array(texts_embeddings).tolist()):
print("Message: {}".format(texts[i]))
print("Embedding size: {}".format(len(message_embedding)))
message_embedding_snippet = ", ".join(
(str(x) for x in message_embedding[:3]))
print("Embedding: [{}, ...]\n".format(message_embedding_snippet))
return texts_embeddings
if __name__ == '__main__':
emb = ElmoTFHubContext(type='default')
tt = emb.get_embedding(['This is a sentence.', 'This is another sentence.'])
print(tt.shape) | 36.72043 | 103 | 0.685505 |
d1f6e12efd38a6684f9d520d31da3aa92782ab6e | 117 | py | Python | netmiko/endace/__init__.py | jcinma/netmiko | 0cf0aa6a57719c78f2cdd54d98951d5dc8189654 | [
"MIT"
] | null | null | null | netmiko/endace/__init__.py | jcinma/netmiko | 0cf0aa6a57719c78f2cdd54d98951d5dc8189654 | [
"MIT"
] | null | null | null | netmiko/endace/__init__.py | jcinma/netmiko | 0cf0aa6a57719c78f2cdd54d98951d5dc8189654 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from netmiko.endace.endace_ssh import EndaceSSH
__all__ = ['EndaceSSH']
| 23.4 | 48 | 0.803419 |
d1f8ab1e5dcd509c7bb1c75102e032a178319bb7 | 1,020 | py | Python | src/genemap/main/map_ids.py | jrderuiter/genemap | 0413474294cae9e17252d88c8b9ff1382e4a2f0f | [
"MIT"
] | null | null | null | src/genemap/main/map_ids.py | jrderuiter/genemap | 0413474294cae9e17252d88c8b9ff1382e4a2f0f | [
"MIT"
] | 2 | 2018-05-25T17:28:21.000Z | 2019-01-07T19:14:01.000Z | src/genemap/main/map_ids.py | jrderuiter/genemap | 0413474294cae9e17252d88c8b9ff1382e4a2f0f | [
"MIT"
] | 3 | 2018-05-25T16:49:13.000Z | 2018-05-25T16:51:45.000Z | # -*- coding: utf-8 -*-
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from genemap.mappers import get_mappers
def main(args):
"""Main function."""
mapper = args.mapper.from_args(args)
mapped = mapper.map_ids(args.ids)
print(' '.join(mapped))
def configure_subparser(subparser):
"""Configures subparser for subcommand."""
parser = subparser.add_parser('map_ids')
parser.set_defaults(main=main)
mapper_subparser = parser.add_subparsers(dest='mapper')
mapper_subparser.required = True
mappers = get_mappers(with_command_line=True).items()
for name, class_ in mappers:
mapper_parser = mapper_subparser.add_parser(name)
class_.configure_parser(mapper_parser)
mapper_parser.add_argument('ids', nargs='+')
mapper_parser.set_defaults(mapper=class_)
| 28.333333 | 74 | 0.732353 |
d1f8f6e84f58dfa799a34b9718329b0459fc7d49 | 3,463 | py | Python | project_gendl/splice42.py | KorfLab/datacore | f6eb04650d8257a8e2eecd44928a60368d374d38 | [
"MIT"
] | null | null | null | project_gendl/splice42.py | KorfLab/datacore | f6eb04650d8257a8e2eecd44928a60368d374d38 | [
"MIT"
] | null | null | null | project_gendl/splice42.py | KorfLab/datacore | f6eb04650d8257a8e2eecd44928a60368d374d38 | [
"MIT"
] | null | null | null | import gzip
import random
import subprocess
import sys
#############
# 42 nt set # 20 nt upstream and downstream of canonical GT|AG
#############
genomes = ('at', 'ce', 'dm')
for gen in genomes:
# observed
eie = f'eie.{gen}.txt.gz'
dons = get_donors(eie)
accs = get_acceptors(eie)
write_fasta(f'splice42/{gen}.don.fa', 'don', dons)
write_fasta(f'splice42/{gen}.acc.fa', 'acc', accs)
# negative 1 - totally random
nd = make_negative1(dons)
na = make_negative1(accs)
write_fasta(f'splice42/{gen}.n1don.fa', 'n1don', nd)
write_fasta(f'splice42/{gen}.n1acc.fa', 'n1acc', na)
# negative 2 - compositional but not positional
nd = make_negative2(dons)
na = make_negative2(accs)
write_fasta(f'splice42/{gen}.n2don.fa', 'n2don', nd)
write_fasta(f'splice42/{gen}.n2acc.fa', 'n2acc', na)
# negative 3 - compositional and positional
nd = make_negative3(dons)
na = make_negative3(accs)
write_fasta(f'splice42/{gen}.n3don.fa', 'n3don', nd)
write_fasta(f'splice42/{gen}.n3acc.fa', 'n3acc', na)
write_fasta(f'data42/{gen}.n3don.fa', 'n3don', nd)
write_fasta(f'data42/{gen}.n3acc.fa', 'n3acc', na)
# negative 4 - sequences from the opposite strand
nd, na = make_negative4(eie)
| 24.913669 | 70 | 0.634421 |
d1f924e262151141ecf3892ae5654b295df1f760 | 1,300 | py | Python | old-stuff/crimes/atividade.py | paulopieczarka/DataScience-Uni | 4013fe97f2a40da8923f11a8ce5907423ed8addd | [
"MIT"
] | null | null | null | old-stuff/crimes/atividade.py | paulopieczarka/DataScience-Uni | 4013fe97f2a40da8923f11a8ce5907423ed8addd | [
"MIT"
] | null | null | null | old-stuff/crimes/atividade.py | paulopieczarka/DataScience-Uni | 4013fe97f2a40da8923f11a8ce5907423ed8addd | [
"MIT"
] | null | null | null | from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
main()
| 23.636364 | 81 | 0.707692 |
d1f9ffd286225f029ae5ec4ee93beabb47e019b6 | 783 | py | Python | example/testapi/migrations/0001_initial.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 1 | 2019-06-20T00:11:58.000Z | 2019-06-20T00:11:58.000Z | example/testapi/migrations/0001_initial.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2020-02-11T23:46:14.000Z | 2021-06-10T21:10:37.000Z | example/testapi/migrations/0001_initial.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2019-12-13T06:53:18.000Z | 2021-06-04T07:12:56.000Z | # Generated by Django 2.1.4 on 2018-12-12 16:51
from django.db import migrations, models
| 30.115385 | 114 | 0.573436 |
d1fb7ac3548bddd8881f407edfa6134b66678d18 | 19,216 | py | Python | search_sampler/__init__.py | gserapio/search_sampler | 38c8a5c7414edb21126e767ea70e7cd355223f2a | [
"MIT"
] | 1 | 2021-02-09T19:50:17.000Z | 2021-02-09T19:50:17.000Z | search_sampler/__init__.py | gserapio/search_sampler | 38c8a5c7414edb21126e767ea70e7cd355223f2a | [
"MIT"
] | null | null | null | search_sampler/__init__.py | gserapio/search_sampler | 38c8a5c7414edb21126e767ea70e7cd355223f2a | [
"MIT"
] | null | null | null | import os
import pandas
import time
from datetime import datetime, timedelta
from collections import defaultdict
from copy import deepcopy
from googleapiclient.discovery import build
"""
All functions that are used for querying, processing, and saving
the data are located here.
"""
VALID_PERIOD_LENGTHS = ["day", "week", "month"]
| 40.454737 | 119 | 0.585762 |
d1fd6f1f588ff407c01adf35cb99c44793ba7e08 | 659 | py | Python | ami/config/__init__.py | NCKU-CCS/energy-blockchain | 1b87b74579d2e5d658b92bb7ee656a246e4b2380 | [
"MIT"
] | null | null | null | ami/config/__init__.py | NCKU-CCS/energy-blockchain | 1b87b74579d2e5d658b92bb7ee656a246e4b2380 | [
"MIT"
] | 4 | 2019-08-15T11:54:35.000Z | 2020-11-26T10:56:02.000Z | ami/config/__init__.py | NCKU-CCS/energy-blockchain | 1b87b74579d2e5d658b92bb7ee656a246e4b2380 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Cryptodome.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5
from Cryptodome.PublicKey import RSA
load_dotenv()
API_URI = os.environ.get("API_URI", "https://nodes.thetangle.org:443").split(",")
API_OPEN = os.environ.get("API_OPEN", "https://nodes.thetangle.org:443")
# encrypt
PLAT_RSA_PUB_KEY = RSA.importKey(open("rsa/plat_rsa_public.pem").read())
AMI_CIPHER = Cipher_pkcs1_v1_5.new(PLAT_RSA_PUB_KEY)
# signature
AMI_RSA_PRI_KEY = RSA.importKey(open("rsa/ami_rsa_private.pem").read())
AMI_SIGNER = Signature_pkcs1_v1_5.new(AMI_RSA_PRI_KEY)
| 29.954545 | 81 | 0.798179 |
d1fdd3005698252bde84e97c3ad5be6bf947e18b | 3,620 | py | Python | google-cloud-sdk/lib/surface/compute/users/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/users/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/compute/users/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting users."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.users import client as users_client
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
| 33.518519 | 80 | 0.690331 |
d1ff7ff41f8abda906716eb125ac0014f5c4aa8f | 34 | py | Python | graph_rl/global_algorithms/__init__.py | nicoguertler/graphrl | 21a1cefc53e5c457745570460de0d99e68622e57 | [
"MIT"
] | 1 | 2022-01-04T15:21:55.000Z | 2022-01-04T15:21:55.000Z | graph_rl/global_algorithms/__init__.py | nicoguertler/graph_rl | 21a1cefc53e5c457745570460de0d99e68622e57 | [
"MIT"
] | null | null | null | graph_rl/global_algorithms/__init__.py | nicoguertler/graph_rl | 21a1cefc53e5c457745570460de0d99e68622e57 | [
"MIT"
] | null | null | null | from .global_hac import GlobalHAC
| 17 | 33 | 0.852941 |
d1fff7908412416073cac969804d096355f1b2f7 | 3,195 | py | Python | hexomino-core/gen_hexos/gen.py | chmnchiang/hexomino | 483a86c11bc0ccf9cdaae4ad6e102168be3cf320 | [
"Apache-2.0",
"MIT"
] | null | null | null | hexomino-core/gen_hexos/gen.py | chmnchiang/hexomino | 483a86c11bc0ccf9cdaae4ad6e102168be3cf320 | [
"Apache-2.0",
"MIT"
] | null | null | null | hexomino-core/gen_hexos/gen.py | chmnchiang/hexomino | 483a86c11bc0ccf9cdaae4ad6e102168be3cf320 | [
"Apache-2.0",
"MIT"
] | null | null | null | from dataclasses import dataclass
from functools import total_ordering
from collections import Counter
import typing
import textwrap
Poly = typing.Tuple[Point, ...]
def generate(n: int) -> typing.List[Poly]:
    """Return all distinct polyominoes made of *n* tiles.

    Recursive construction: every size-n shape is obtained by extending
    some size-(n-1) shape via generate_from_poly; a set deduplicates
    shapes reachable through more than one growth path.
    """
    # Base case: the single tile at the origin.
    if n == 1:
        return [(Point(0, 0),)]
    grown = set()
    for smaller in generate(n - 1):
        grown.update(generate_from_poly(smaller))
    return list(grown)
def hexo_borders(poly: Poly) -> typing.List[typing.Tuple[Point, Point]]:
    """Return the outer border edges of *poly* as corner-point pairs.

    Each tile contributes its four unit edges in a canonical corner
    order.  An edge shared by two tiles is counted twice and therefore
    interior; the border is exactly the set of edges counted once.
    """
    corners = tuple(Point(x, y) for x, y in ((0, 0), (0, 1), (1, 1), (1, 0)))
    edge_counts = Counter()
    for tile in poly:
        # Pair each corner with its successor (wrapping around).
        for a, b in zip(corners, corners[1:] + corners[:1]):
            # Canonicalize the order so shared edges hash identically.
            if a < b:
                a, b = b, a
            edge_counts[(tile + a, tile + b)] += 1
    return [edge for edge, seen in edge_counts.items() if seen == 1]
def hexo_to_repr(poly: Poly) -> str:
    """Render a six-tile polyomino as Rust source for one `__Hexo` value."""
    assert len(poly) == 6

    def pos(p):
        # Rust struct-literal form of a single coordinate.
        return f'Pos {{ x: {p.x}, y: {p.y} }}'

    tiles_str = ', '.join(pos(p) for p in poly)
    borders_str = ', '.join(
        f'({pos(a)}, {pos(b)})' for (a, b) in hexo_borders(poly))
    return (
        f'''__Hexo {{
    tiles: [{tiles_str}],
    borders: &[{borders_str}],
    }}''')
if __name__ == '__main__':
    # Rust source template for the generated hexomino table; the
    # #[cfg(not(test))] attributes keep the large constant out of test builds.
    codegen_template = textwrap.dedent(
        '''\
        #[cfg(not(test))]
        pub const N_HEXOS: usize = {n_hexos};
        #[cfg(not(test))]
        pub const HEXOS: [__Hexo; {n_hexos}] = [
            {hexos}
        ];
        '''
    )
    # The straight vertical hexomino (the "I" piece), excluded below.
    I = tuple(Point(0, y) for y in range(6))
    hexos = [poly for poly in generate(6) if poly != I]
    # One Rust literal per hexomino, joined to match the template's indent.
    hexos_str = ',\n    '.join(hexo_to_repr(hexo) for hexo in hexos)
    print(codegen_template.format(n_hexos = len(hexos), hexos = hexos_str))
| 27.782609 | 75 | 0.553678 |
06011cb6cfe74f34fcd631c875b63cc52bf2717f | 3,270 | py | Python | beartype_test/a00_unit/data/hint/data_hintref.py | jonathanmorley/beartype | 0d1207210220807d5c5848033d13657afa307983 | [
"MIT"
] | null | null | null | beartype_test/a00_unit/data/hint/data_hintref.py | jonathanmorley/beartype | 0d1207210220807d5c5848033d13657afa307983 | [
"MIT"
] | null | null | null | beartype_test/a00_unit/data/hint/data_hintref.py | jonathanmorley/beartype | 0d1207210220807d5c5848033d13657afa307983 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype forward reference data submodule.**
This submodule exercises **forward reference type hints** (i.e., strings whose
values are the names of classes and tuples of classes, one or more of which
typically have yet to be defined) support implemented in the
:func:`beartype.beartype` decorator. This support can *only* be fully exercised
from within an independent data submodule rather than the body of a unit test.
Why? Because:
* That decorator is only safely importable from within the body of a unit test.
* Forward reference type hints can only refer to objects defined at module
scope rather than from within the body of a unit test.
* Forward reference type hints referring to objects previously defined at
module scope fail to exercise the deferred nature of forward references.
* Ergo, callables that are decorated by that decorator, annotated by one or
more forward reference type hints, and both declared and called from within
the body of a unit test fail to exercise this deferred nature.
* Ergo, only callables that are decorated by that decorator, annotated by one or
more forward reference type hints, and both declared and called at module
scope before their referents exercise this deferred nature.
'''
# ....................{ IMPORTS }....................
from beartype import beartype
from typing import Union
# ....................{ CALLABLES }....................
# Decorated callable annotated by a PEP-noncompliant fully-qualified forward
# reference referring to a type that has yet to be declared.
TheDarkestForwardRefOfTheYear = (
'beartype_test.a00_unit.data.hint.data_hintref.TheDarkestEveningOfTheYear')
# Decorated callable annotated by a PEP-noncompliant tuple containing both
# standard types and a fully-qualified forward reference referring to a type
# that has yet to be declared.
TheDarkestTupleOfTheYear = (complex, TheDarkestForwardRefOfTheYear, bool)
# Decorated callable annotated by a PEP-compliant unnested unqualified forward
# reference referring to a type that has yet to be declared.
# Decorated callable annotated by a PEP-compliant nested unqualified forward
# reference referring to a type that has yet to be declared.
TheDarkestUnionOfTheYear = Union[complex, 'TheDarkestEveningOfTheYear', bytes]
# ....................{ CLASSES }....................
# User-defined class previously referred to by forward references above.
class TheDarkestEveningOfTheYear(str): pass
| 44.794521 | 80 | 0.731804 |
06031868a0bff21742dab627fcdc748961bfd19b | 1,701 | py | Python | pywss/statuscode.py | CzaOrz/Pyws | 4b5e9ba6244ea348321446ea5c491f5c19a1d389 | [
"MIT"
] | 25 | 2019-10-16T02:57:54.000Z | 2021-08-05T06:52:05.000Z | pywss/statuscode.py | CzaOrz/Pyws | 4b5e9ba6244ea348321446ea5c491f5c19a1d389 | [
"MIT"
] | 7 | 2019-11-16T04:06:39.000Z | 2021-04-11T06:24:45.000Z | pywss/statuscode.py | CzaOrz/Pyws | 4b5e9ba6244ea348321446ea5c491f5c19a1d389 | [
"MIT"
] | 7 | 2019-12-02T02:57:38.000Z | 2021-02-05T16:54:22.000Z | # coding: utf-8
# Symbolic names for HTTP response status codes (Go-style StatusXxx naming).
# 1xx: informational
StatusContinue = 100
StatusSwitchingProtocols = 101
StatusProcessing = 102
StatusEarlyHints = 103
# 2xx: success
StatusOK = 200
StatusCreated = 201
StatusAccepted = 202
StatusNonAuthoritativeInfo = 203
StatusNoContent = 204
StatusResetContent = 205
StatusPartialContent = 206
StatusMultiStatus = 207
StatusAlreadyReported = 208
StatusIMUsed = 226
# 3xx: redirection (no constant defined for 306)
StatusMultipleChoices = 300
StatusMovedPermanently = 301
StatusFound = 302
StatusSeeOther = 303
StatusNotModified = 304
StatusUseProxy = 305
StatusTemporaryRedirect = 307
StatusPermanentRedirect = 308
# 4xx: client error
StatusBadRequest = 400
StatusUnauthorized = 401
StatusPaymentRequired = 402
StatusForbidden = 403
StatusNotFound = 404
StatusMethodNotAllowed = 405
StatusNotAcceptable = 406
StatusProxyAuthRequired = 407
StatusRequestTimeout = 408
StatusConflict = 409
StatusGone = 410
StatusLengthRequired = 411
StatusPreconditionFailed = 412
StatusRequestEntityTooLarge = 413
StatusRequestURITooLong = 414
StatusUnsupportedMediaType = 415
StatusRequestedRangeNotSatisfiable = 416
StatusExpectationFailed = 417
StatusTeapot = 418
StatusMisdirectedRequest = 421
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusTooEarly = 425
StatusUpgradeRequired = 426
StatusPreconditionRequired = 428
StatusTooManyRequests = 429
StatusRequestHeaderFieldsTooLarge = 431
StatusUnavailableForLegalReasons = 451
# 5xx: server error
StatusInternalServerError = 500
StatusNotImplemented = 501
StatusBadGateway = 502
StatusServiceUnavailable = 503
StatusGatewayTimeout = 504
StatusHTTPVersionNotSupported = 505
StatusVariantAlsoNegotiates = 506
StatusInsufficientStorage = 507
StatusLoopDetected = 508
StatusNotExtended = 510
StatusNetworkAuthenticationRequired = 511
| 24.652174 | 41 | 0.847737 |
0603e6bbd9ecddad191163178ca4161b1b3decfd | 1,064 | py | Python | digsby/src/oscar/snac/family_x0a.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | digsby/src/oscar/snac/family_x0a.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | digsby/src/oscar/snac/family_x0a.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | import logging
import oscar
# Human-readable name of SNAC family 0x0a.
x0a_name="User lookup"
log = logging.getLogger('oscar.snac.x0a')
# Maps family-specific error subcodes to human-readable messages.
subcodes = {}
def x0a_x01(o, sock, data):
    '''
    SNAC (xa, x1): User lookup Family Error

    Decodes the family-level error payload and raises it as a SnacError.

    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_01.html}
    '''
    errcode, errmsg, subcode = oscar.snac.error(data)
    # Read-only lookup: the previous setdefault() call permanently inserted
    # an 'Unknown' entry into the module-level `subcodes` table as a side
    # effect; get() yields the same message without mutating the table.
    submsg = subcodes.get(subcode, 'Unknown') if subcode else None
    raise oscar.snac.SnacError(0x0a, (errcode, errmsg), (subcode, submsg))
def x0a_x02(email):
    '''
    Build the SNAC (xa, x2) request payload: search users by e-mail.

    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_02.html}
    '''
    family = 0x0a
    subtype = 0x02
    return family, subtype, email
def x0a_x03(o, sock, data):
    '''
    SNAC (xa, x3): Search response

    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_03.html}
    '''
    # The payload is a single TLV list; each TLV value is one matching name.
    fmt = (('tlvs', 'tlv_list'),)
    name_tlvs, data = oscar.unpack(fmt, data)
    # The TLV list must consume the entire payload.
    assert not data
    names = [tlv.v for tlv in name_tlvs]
    # NOTE(review): `names` is computed but neither returned nor dispatched
    # here -- confirm the intended use against the full module.
| 25.95122 | 75 | 0.62594 |
060485709baa0b9492d85e40f90068c48154acf0 | 2,928 | py | Python | setup.py | rochacon/punch | 7f6fb81221049ab74ef561fb40a4174bdb3e77ef | [
"MIT"
] | null | null | null | setup.py | rochacon/punch | 7f6fb81221049ab74ef561fb40a4174bdb3e77ef | [
"MIT"
] | null | null | null | setup.py | rochacon/punch | 7f6fb81221049ab74ef561fb40a4174bdb3e77ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""setup.py
Defines the setup instructions for the punch framework
Copyright (C) 2016 Rodrigo Chacon
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Convert the Markdown README to reStructuredText for the PyPI long
# description; fall back to an empty string when pypandoc/pandoc is
# unavailable or the conversion fails.
try:
    import pypandoc
    readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
    readme = ''
# NOTE(review): cmdclass references a PyTest command class (and its
# extra_kwargs attribute) that must be defined elsewhere in this file --
# confirm against the full source.
setup(name='punch',
      version='0.0.1',
      description='A Python framework focused (but not limited) in JSON APIs.',
      long_description=readme,
      author='Rodrigo Chacon',
      author_email='rochacon@gmail.com',
      url='https://github.com/rochacon/punch',
      license='MIT',
      packages=['punch'],
      requires=['webob'],
      install_requires=['webob'],
      cmdclass={'test': PyTest},
      keywords='Web, Python, Python3, Refactoring, REST, Framework, RPC',
      classifiers=['Development Status :: 6 - Mature',
                   'Intended Audience :: Developers',
                   'Natural Language :: English',
                   'Environment :: Console',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.2',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: 3.4',
                   'Programming Language :: Python :: 3.5',
                   'Topic :: Software Development :: Libraries',
                   'Topic :: Utilities'],
      **PyTest.extra_kwargs)
| 39.04 | 112 | 0.663934 |
060586b3e64df98e8e03ed3e370e39181a0e31b4 | 13,664 | py | Python | wellAnalysis.py | Jeffalltogether/well_decline_curve_analysis | 0507813d85bdabbf52c4d92afec6af06e5228b26 | [
"Apache-2.0"
] | 53 | 2018-03-25T03:29:44.000Z | 2022-01-28T16:18:14.000Z | wellAnalysis.py | Jeffalltogether/well_decline_curve_analysis | 0507813d85bdabbf52c4d92afec6af06e5228b26 | [
"Apache-2.0"
] | null | null | null | wellAnalysis.py | Jeffalltogether/well_decline_curve_analysis | 0507813d85bdabbf52c4d92afec6af06e5228b26 | [
"Apache-2.0"
] | 34 | 2018-05-26T21:15:59.000Z | 2021-11-11T09:07:56.000Z | '''
Drilling info analysis
This program reads well header data and production logs (e.g. exported from Drilling Info as .csv files) and
walks the user through the genreation of decline curves for each well provided in the input data. Decine curves
are fit with a the hyperbolic curve that is estimated using an iterative least squares method.
Copyright 2018 Jeffrey E. Thatcher
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
### Boiler-plate imports and code
import sys
sys.path.append('./utils/')
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from geopy.distance import vincenty
# import tools and custom code
from tools import load_merge_header_and_production_csv, swap_production_dates_for_time_delta
from tools import current_selection, decline_curve, handle_numerical_variables, handle_dateTime_variables
from tools import handle_object_variables, plot_map, fit_decline_curve, add_BOE_per_day_column, nominal_decline
if __name__ == '__main__':
    ### well data files
    # Default inputs: well header metadata and production time series
    # exported as CSV (per the module docstring, from Drilling Info).
    headerCSV = './data/Well_header_data.csv'
    productionCSV = './data/Production_Time_Series.CSV'
    # NOTE(review): main() is defined elsewhere in the full file.
    main(headerCSV, productionCSV)
| 35.490909 | 137 | 0.663422 |
0607341543b37f814977e95ae2726476134dd618 | 2,745 | py | Python | manage.py | Zauberer2/touchresume | c558f6383722f289cf8087a15f6e049b4213c010 | [
"MIT"
] | 3 | 2020-02-25T04:18:22.000Z | 2021-12-25T17:03:50.000Z | manage.py | Zauberer2/touchresume | c558f6383722f289cf8087a15f6e049b4213c010 | [
"MIT"
] | 3 | 2019-09-02T07:49:35.000Z | 2021-12-19T17:46:31.000Z | manage.py | Zauberer2/touchresume | c558f6383722f289cf8087a15f6e049b4213c010 | [
"MIT"
] | 1 | 2021-12-23T18:11:07.000Z | 2021-12-23T18:11:07.000Z | #!/usr/bin/env python
import os
import re
import unittest
from git import Repo
from semver import match
from click import option, argument, echo, ClickException
from touchresume.cli import cli
from touchresume import __version__
# Run the Click command group when invoked as a script.
if __name__ == '__main__':
    cli()
| 32.294118 | 78 | 0.668488 |
06076fc2131eb37f5f2f55c95d8358153da24655 | 485 | py | Python | reb/scrape.py | vibya/Economic-Downturn | 03df854f4c314d5a944cd99474b980a95f088f39 | [
"MIT"
] | 1 | 2018-09-18T01:07:53.000Z | 2018-09-18T01:07:53.000Z | reb/scrape.py | aidinhass/reb | 33fc9d9781e2c0fce8faa6240ec2d56899ee2c07 | [
"MIT"
] | null | null | null | reb/scrape.py | aidinhass/reb | 33fc9d9781e2c0fce8faa6240ec2d56899ee2c07 | [
"MIT"
] | 3 | 2018-09-18T01:08:01.000Z | 2019-03-10T10:06:41.000Z |
from reb.src import pynyt
from reb.conf import APIKEY_NYT_ARTICLE
nyt = pynyt.ArticleSearch(APIKEY_NYT_ARTICLE)
nytArchive = pynyt.ArchiveApi(APIKEY_NYT_ARTICLE)
# # get 1000 news articles from the Foreign newsdesk from 1987
# results_obama = nyt.query(
# q='obama',
# begin_date="20170101",
# end_date="20170102",
# # facet_field=['source', 'day_of_week'],
# # facet_filter = True,
# verbose=True)
arch = nytArchive.query(
year="2012",
month="1"
) | 23.095238 | 62 | 0.692784 |
06086e5e3711066ed31d842f20d1b8ffa81bf793 | 1,403 | py | Python | cm/services/data/cvmfs.py | almahmoud/cloudman | 41067dfd66c6334313069874f5f26e5a06884b71 | [
"MIT"
] | 1 | 2021-02-28T18:59:50.000Z | 2021-02-28T18:59:50.000Z | cm/services/data/cvmfs.py | almahmoud/cloudman | 41067dfd66c6334313069874f5f26e5a06884b71 | [
"MIT"
] | null | null | null | cm/services/data/cvmfs.py | almahmoud/cloudman | 41067dfd66c6334313069874f5f26e5a06884b71 | [
"MIT"
] | null | null | null | """A file system service for managing CVMFS-based client file systems."""
import os
from cm.services import service_states
import logging
log = logging.getLogger('cloudman')
| 28.632653 | 79 | 0.605132 |
0609649120551f07f42eaf40f08ee2c468af7cdf | 3,240 | py | Python | regional.py | relet/pygeohashing | aa04b167f1f0d5a26a011073d3e97013328f209c | [
"MIT"
] | 4 | 2018-06-13T22:28:20.000Z | 2021-07-21T10:59:45.000Z | regional.py | relet/pygeohashing | aa04b167f1f0d5a26a011073d3e97013328f209c | [
"MIT"
] | 3 | 2016-12-14T20:34:25.000Z | 2021-10-29T23:43:13.000Z | regional.py | relet/pygeohashing | aa04b167f1f0d5a26a011073d3e97013328f209c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re, web, datetime, hashlib, struct, yaml, sys, wikipedia
import xml.etree.ElementTree as ET
# Integer "lat lon" pair (whole graticule coordinates).
re_NUMERIC = re.compile("(-?\d+)[ ,]+(-?\d+)")
re_NUMERICF = re.compile("(-?[\.\d]+)[ ,]+(-?[\.\d]+)") #fractions allowed
# Expedition wiki links of the form [[YYYY-MM-DD lat lon ...]].
re_EXPEDITION = re.compile('\[\[(\d{4}-\d{2}-\d{2} -?\d+ -?\d+)')
site = wikipedia.getSite()
# The wiki user name is the only (required) command-line argument.
if len(sys.argv)>1:
    user = sys.argv[1]
else:
    print "usage:\n./regional username"
    sys.exit(1)
page = wikipedia.Page(site, "User:"+user)
data = page.get()
expeditions = re_EXPEDITION.findall(data)
# regionals: feature code -> region name -> {"lat lon" graticule: True}.
regionals = {}
count = 0
for exp in expeditions:
    date, glat, glon = exp.split()
    # NOTE(review): exp2latlon/geolookup are defined elsewhere --
    # presumably they convert an expedition title to coordinates and
    # reverse-geocode them into a {feature_code: region_name} mapping;
    # confirm against the full file.
    lat, lon = exp2latlon(exp)
    place = geolookup(lat,lon)
    print place
    for fcode, name in place.iteritems():
        if fcode:
            if not fcode in regionals:
                regionals[fcode]={}
            if not name in regionals[fcode]:
                regionals[fcode][name]={}
            regionals[fcode][name][glat+" "+glon]=True
# Report each region with its distinct-graticule count; list the
# graticules themselves for regions visited in more than three.
for fcode, names in regionals.iteritems():
    for name, grats in names.iteritems():
        num = len(grats)
        print "%s %s - %i graticules" % (fcode, name, num)
        if num>3:
            for grat in grats:
                print grat+";",
            print
0609bb1f315036f7099bd541b54241e33b6fa051 | 1,034 | py | Python | challenges/stack/nearest_smallest_element.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | 13 | 2017-04-24T23:27:16.000Z | 2020-05-25T22:41:42.000Z | challenges/stack/nearest_smallest_element.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | null | null | null | challenges/stack/nearest_smallest_element.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | 2 | 2017-05-27T08:55:28.000Z | 2018-08-11T08:54:51.000Z | def nearest_smallest_element(arr):
"""
Given an array arr, find the nearest smaller element for each element.
The index of the smaller element must be smaller than the current element.
"""
smaller_numbers = []
return [nearest(n) for n in arr]
| 33.354839 | 85 | 0.636364 |
060a86f44e032bdb0deaf25d27674c930c7491c8 | 3,385 | py | Python | hooks/relations.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | null | null | null | hooks/relations.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | null | null | null | hooks/relations.py | projectcalico/charm-bird | 3224e887329c527f6bed2520346e66fb4e795fe8 | [
"Apache-2.0"
] | 1 | 2022-03-16T16:12:32.000Z | 2022-03-16T16:12:32.000Z | # -*- coding: utf-8 -*-
'''
Relations for BIRD.
'''
import socket
import netaddr
import netifaces
from charmhelpers.core import hookenv
from charmhelpers.core.services.helpers import RelationContext
def resolve_domain_name(name, ip_version=4):
    '''
    Takes a domain name and resolves it to an IP address
    of a given version, returned as a string.  Returns the empty string
    when the name has no address of the requested version.

    Currently only ever returns one address.
    '''
    results = socket.getaddrinfo(name, None)
    addresses = (netaddr.IPAddress(r[4][0]) for r in results)
    filtered = (a for a in addresses if a.version == ip_version)
    # next() with a default replaces the Python-2-only generator.next()
    # call plus the StopIteration handler, and also works on Python 3.
    return str(next(filtered, ''))
def local_ipv6_address():
    '''
    Return the first usable IPv6 address of this machine as a string.
    Link-local and loopback addresses are skipped; implicitly returns
    None when no interface carries a suitable address.

    Currently only returns the first valid IPv6 address found.
    '''
    for interface in netifaces.interfaces():
        inet6_entries = netifaces.ifaddresses(interface).get(
            netifaces.AF_INET6, [])
        for entry in inet6_entries:
            # Drop any "%<zone>" interface specifier before parsing.
            candidate = netaddr.IPAddress(entry['addr'].split('%')[0])
            if candidate.is_link_local() or candidate.is_loopback():
                continue
            return str(candidate)
| 27.298387 | 74 | 0.578139 |
060b2a571442e70a179db487667f330e3647e19a | 1,136 | py | Python | common/cache.py | govtrack/django-lorien-common | 27241ff72536b442dfd64fad8589398b8a6e9f4d | [
"BSD-3-Clause"
] | 1 | 2020-08-17T06:24:56.000Z | 2020-08-17T06:24:56.000Z | common/cache.py | govtrack/django-lorien-common | 27241ff72536b442dfd64fad8589398b8a6e9f4d | [
"BSD-3-Clause"
] | null | null | null | common/cache.py | govtrack/django-lorien-common | 27241ff72536b442dfd64fad8589398b8a6e9f4d | [
"BSD-3-Clause"
] | null | null | null | from hashlib import sha1
from django.core.cache import cache
from django.utils.encoding import smart_str
def cached(key=None, timeout=300):
"""
Cache the result of function call.
Args:
key: the key with which value will be saved. If key is None
then it is calculated automatically
timeout: number of seconds after which the cached value would be purged.
"""
_key = key
return func_wrapper
| 32.457143 | 80 | 0.564261 |
060d03c63bb8152f4e45ecb98502c75a5900990a | 1,417 | py | Python | dtecsv.py | varnav/dte-usage-plotter | cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca | [
"MIT"
] | null | null | null | dtecsv.py | varnav/dte-usage-plotter | cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca | [
"MIT"
] | null | null | null | dtecsv.py | varnav/dte-usage-plotter | cfeca2db8ccb4c4f0564d9f0b493edd26f68e1ca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
1. Go to:
https://usage.dteenergy.com/?interval=hour
2. Download CSV
3. Run:
python dtecsv.py .\electric_usage_report_05-31-2021_to_06-05-2021.csv
"""
import csv
import datetime
import click
import matplotlib.pyplot as plt
x = []
y = []
if __name__ == '__main__':
main()
| 23.616667 | 104 | 0.614679 |
060ddb65bbe8989145f472ee9db47a8d7aff5843 | 12,598 | py | Python | model_navigator/model_analyzer/profiler.py | triton-inference-server/model_navigator | ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7 | [
"ECL-2.0",
"Apache-2.0"
] | 49 | 2021-04-09T18:32:07.000Z | 2022-03-29T07:32:24.000Z | model_navigator/model_analyzer/profiler.py | triton-inference-server/model_navigator | ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2021-07-13T09:00:12.000Z | 2021-11-15T17:16:35.000Z | model_navigator/model_analyzer/profiler.py | triton-inference-server/model_navigator | ec2915f4f5a6b9ed7e1b59290899e2b56b98bcc7 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2021-04-09T18:31:56.000Z | 2022-03-01T08:08:04.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import List, Optional
import yaml
from model_navigator.converter import DatasetProfileConfig
from model_navigator.exceptions import ModelNavigatorProfileException
from model_navigator.kubernetes.yaml import CustomDumper
from model_navigator.model_analyzer import ModelAnalyzer, ModelAnalyzerProfileConfig
from model_navigator.model_analyzer.config import BaseConfigGenerator, ModelAnalyzerTritonConfig
from model_navigator.model_analyzer.model_analyzer import ModelAnalyzerMode
from model_navigator.model_analyzer.model_analyzer_config import ModelAnalyzerConfig
from model_navigator.perf_analyzer import PerfMeasurementConfig
from model_navigator.triton import DeviceKind
from model_navigator.triton.model_config import TritonModelConfigGenerator
from model_navigator.triton.utils import get_shape_params
from model_navigator.utils import Workspace
LOGGER = logging.getLogger(__name__)
# Resolve the installed triton-model-analyzer package version once, at
# import time.  importlib.metadata exists only from Python 3.8 onward, so
# older interpreters fall back to the pkg_resources API.
# NOTE(review): sys.version is the full banner string (e.g.
# "3.8.10 (default, ...)"); LooseVersion happens to order it correctly
# against "3.8.0" here, but platform.python_version() would be the
# precise input -- confirm before changing.
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
    from importlib.metadata import version
    TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
    import pkg_resources
    TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
| 44.992857 | 115 | 0.710192 |
061117f2066d00451f5045f7338796a6dddd1a21 | 906 | py | Python | IOPool/Input/test/PrePool2FileInputTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | IOPool/Input/test/PrePool2FileInputTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | IOPool/Input/test/PrePool2FileInputTest_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # The following comments couldn't be translated into the new config version:
# Test storing OtherThing as well
# Configuration file for PrePoolInputTest
# Reads file:PoolInputOther.root, runs OtherThingProducer over it, and
# writes PoolInput2FileTest.root keeping everything except Thing's products.
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST2ND")
# Per the module name, makes framework exceptions fatal so failures abort.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
#process.maxEvents = cms.untracked.PSet(
#    input = cms.untracked.int32(11)
#)
#process.Thing = cms.EDProducer("ThingProducer")
# Output module: persist all products except those labelled "Thing".
process.output = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('keep *',
        'drop *_Thing_*_*'),
    fileName = cms.untracked.string('PoolInput2FileTest.root')
)
process.OtherThing = cms.EDProducer("OtherThingProducer")
# Input: the ROOT file produced by an earlier step of this test.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring("file:PoolInputOther.root") )
process.p = cms.Path(process.OtherThing)
process.ep = cms.EndPath(process.output)
| 28.3125 | 91 | 0.733996 |
0611b8f8b1f08d15f75771f8b58463a12ef35fc0 | 24,165 | py | Python | scripts/old_scripts/compare_svo_multiple.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 1 | 2021-11-02T15:16:17.000Z | 2021-11-02T15:16:17.000Z | scripts/old_scripts/compare_svo_multiple.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 5 | 2021-04-14T17:08:59.000Z | 2021-05-27T21:41:02.000Z | scripts/old_scripts/compare_svo_multiple.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 2 | 2022-02-07T08:16:05.000Z | 2022-03-09T23:30:17.000Z | import datetime
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import casadi as cas
##### For viewing the videos in Jupyter Notebook
import io
import base64
from IPython.display import HTML
# from ..</src> import car_plotting
# from .import src.car_plotting
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import src.MPC_Casadi as mpc
import src.car_plotting as cplot
import src.TrafficWorld as tw
np.set_printoptions(precision=2)
import src.IterativeBestResponseMPCMultiple as mibr
import pickle
SAVE = False
PLOT = False
rounds_ibr = 225
n_other_cars = 4
N = 50
###### LATEX Dimensions (Not currently Working)
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
fig_size = [6, 4]
#################33
#### STEP 1: Sort all the files into the correct SVO
all_subdir = [
"20200301_215332random_ego",
"20200301_215346random_pro",
"20200301_215432random_altru",
"20200301_215520random_pro",
"20200301_215526random_altru",
"20200301_215537random_ego",
"20200301_215551random_pro",
"20200301_215602random_altru",
"20200301_215608random_ego",
"20200301_215623random_pro",
"20200301_215629random_altru",
"20200301_215636random_ego",
"20200301_215652random_pro",
"20200301_215658random_altru",
"20200301_215703random_ego",
"20200301_215713random_pro",
"20200301_215724random_altru",
"20200301_215742random_ego",
"20200301_215751random_pro",
"20200301_215757random_altru",
"20200301_215806random_ego",
"20200302_104840random_1p",
"20200302_104913random_2p",
"20200302_104916random_3p",
"20200302_104920random_4p",
"20200302_104926random_1e",
"20200302_104941random_2e",
"20200302_104946random_3e",
"20200302_105002random_4e",
"20200302_105059random_1a",
"20200302_105101random_2a",
"20200302_105104random_3a",
"20200302_105108random_4a",
"20200302_114834random_5e",
"20200302_114839random_6e",
"20200302_114841random_7e",
"20200302_114844random_8e",
"20200302_114853random_5p",
"20200302_114856random_6p",
"20200302_114859random_7p",
"20200302_114902random_8p",
"20200302_114909random_5a",
"20200302_114912random_6a",
"20200302_114914random_7a",
"20200302_114916random_8a",
"20200227_133704less_kxdotlarger",
"20200228_114359random_pro",
"20200228_114437random_pro",
"20200228_114440random_pro",
"20200228_114443random_pro",
"20200228_114448random_pro",
"20200228_114450random_pro",
"20200228_114913random_pro",
"20200228_114914random_pro",
"20200228_114916random_pro",
"20200228_114917random_pro",
"20200227_142916pi_01_ego",
"20200228_114517random_ego",
"20200228_114518random_ego",
"20200228_114528random_ego",
"20200228_114532random_ego",
"20200228_114547random_ego",
"20200228_114551random_ego",
"20200228_114803random_ego",
"20200228_114805random_ego",
"20200228_114806random_ego",
"20200227_141954pi2_5altru",
"20200228_114501random_altru",
"20200228_114503random_altru",
"20200228_114505random_altru",
"20200228_114506random_altru",
"20200228_114507random_altru",
"20200228_114509random_altru",
"20200228_114850random_altru",
"20200228_114851random_altru",
"20200228_114852random_altru",
]
subdir_name_prosocial_list = []
subdir_name_ego_list = []
subdir_name_altruistic_list = []
altr_theta = []
ego_theta = []
pro_theta = []
NO_GRASS = False
world = tw.TrafficWorld(2, 0, 1000)
for subdir in all_subdir:
try:
file_name = "results/" + subdir+"/data/"+"mpc3.p"
mpc = pickle.load(open(file_name,'rb'))
if mpc.min_y < -999999 or mpc.max_y > 9999999:
print("Messed up ymin/max", file_name)
continue
elif mpc.min_y > world.y_min + 0.000001:
print("Grass is NOT allowed!", file_name)
if not NO_GRASS:
print("Too grass lmmited, ignored", file_name)
continue
elif mpc.min_y <= world.y_min + 0.00001:
print("Grass is allowed!", file_name)
if NO_GRASS:
print("NO Grass, dataset ignored", file_name)
continue
if mpc.theta_iamb > np.pi/3:
subdir_name_altruistic_list += [subdir]
altr_theta += [mpc.theta_iamb]
elif mpc.theta_iamb <= np.pi/6.0:
subdir_name_ego_list += [subdir]
ego_theta += [mpc.theta_iamb]
else:
subdir_name_prosocial_list += [subdir]
pro_theta += [mpc.theta_iamb]
except FileNotFoundError:
print("Not found:", file_name)
print("Atruistic np.pi/2 = 1.5ish")
print(subdir_name_altruistic_list)
print(altr_theta)
print("Egoistic 0")
print(subdir_name_ego_list)
print(ego_theta)
print("Pro-Social", np.pi/2)
print(subdir_name_prosocial_list)
print(pro_theta)
# subdir_name_prosocial_list = [
# "20200227_133704less_kxdotlarger",
# "20200228_114359random_pro",
# "20200228_114437random_pro",
# "20200228_114440random_pro",
# "20200228_114443random_pro",
# "20200228_114448random_pro",
# "20200228_114450random_pro",
# "20200228_114913random_pro",
# "20200228_114914random_pro",
# "20200228_114916random_pro",
# "20200228_114917random_pro",
# ]
# subdir_name_prosocial = "20200227_133704less_kxdotlarger"
# folder_prosocial = "results/" + subdir_name_prosocial + "/"
# subdir_name_ego_list = [
# "20200227_142916pi_01_ego",
# "20200228_114517random_ego",
# "20200228_114518random_ego",
# "20200228_114528random_ego",
# "20200228_114532random_ego",
# "20200228_114547random_ego",
# "20200228_114551random_ego",
# "20200228_114803random_ego",
# "20200228_114805random_ego",
# "20200228_114806random_ego",
# ]
# subdir_name_ego = "20200227_142916pi_01_ego"
# folder_ego = "results/" + subdir_name_ego + "/"
# subdir_name_altruistic_list = [
# "20200227_141954pi2_5altru",
# "20200228_114501random_altru",
# "20200228_114503random_altru",
# "20200228_114505random_altru",
# "20200228_114506random_altru",
# "20200228_114507random_altru",
# "20200228_114509random_altru",
# "20200228_114850random_altru",
# "20200228_114851random_altru",
# "20200228_114852random_altru"]
# subdir_name_altruistic = "20200227_141954pi2_5altru"
# folder_altruistic = "results/" + subdir_name_altruistic + "/"
################ Analyze Results
all_xamb_pro = []
all_uamb_pro = []
all_other_x_pro = []
all_other_u_pro = []
ibr_brounds_array_pro = []
all_xamb_ego = []
all_uamb_ego = []
all_other_x_ego = []
all_other_u_ego = []
ibr_brounds_array_ego = []
all_xamb_altru = []
all_uamb_altru = []
all_other_x_altru = []
all_other_u_altru = []
ibr_brounds_array_altru = []
all_tfinalamb_pro = []
all_tfinalamb_ego = []
all_tfinalamb_altru = []
for sim_i in range(3):
if sim_i==0:
subdir_name_list = subdir_name_prosocial_list
elif sim_i==1:
subdir_name_list = subdir_name_ego_list
else:
subdir_name_list = subdir_name_altruistic_list
for folder in subdir_name_list:
n_full_rounds = 0 # rounods that the ambulance planned
n_all_rounds = 0
all_xamb = np.zeros((6, N+1, rounds_ibr))
all_uamb = np.zeros((2, N, rounds_ibr))
all_xcost = np.zeros((3, rounds_ibr))
all_tfinalamb = np.zeros((1, rounds_ibr))
all_other_x = [np.zeros((6, N+1, rounds_ibr)) for i in range(n_other_cars)]
all_other_u = [np.zeros((2, N, rounds_ibr)) for i in range(n_other_cars)]
all_other_cost = [np.zeros((3, rounds_ibr)) for i in range(n_other_cars)]
all_other_tfinal = [np.zeros((1, rounds_ibr)) for i in range(n_other_cars)]
for amb_ibr_i in range(rounds_ibr):
if (amb_ibr_i % (n_other_cars + 1) == 1) and amb_ibr_i>51: # We only look at sims when slack activated
ibr_prefix = '%03d'%amb_ibr_i
try:
xamb, uamb, xamb_des, xothers, uothers, xothers_des = mibr.load_state("results/" + folder + "/" + "data/" + ibr_prefix, n_other_cars)
all_xamb[:,:,n_full_rounds] = xamb
all_uamb[:,:,n_full_rounds] = uamb
x_goal = 130
all_tfinalamb[:, n_full_rounds] = find_t_final(xamb, x_goal)
for i in range(n_other_cars):
all_other_x[i][:,:,n_full_rounds] = xothers[i]
all_other_u[i][:,:,n_full_rounds] = uothers[i]
# all_other_tfinal[i][:,n_full_rounds] = find_t_final(xothers[i], 120)
n_full_rounds += 1
except FileNotFoundError:
# print("amb_ibr_i %d missing"%amb_ibr_i)
pass
n_all_rounds += 1
### Clip the extra dimension
all_xamb = all_xamb[:,:,:n_full_rounds]
all_uamb = all_uamb[:,:,:n_full_rounds]
all_tfinalamb = all_tfinalamb[:,:n_full_rounds]
for i in range(n_other_cars):
all_other_x[i] = all_other_x[i][:,:,:n_full_rounds]
all_other_u[i] = all_other_u[i][:,:,:n_full_rounds]
ibr_brounds_array = np.array(range(1, n_full_rounds +1))
if n_full_rounds > 0 : # only save those that meet slack requirement
if sim_i==0: #prosocial directory
all_xamb_pro += [all_xamb]
all_uamb_pro += [all_uamb]
all_other_x_pro += [all_other_x]
all_other_u_pro += [all_other_u]
ibr_brounds_array_pro += [ibr_brounds_array]
all_tfinalamb_pro += [all_tfinalamb]
elif sim_i==1: #egoistic directory
all_xamb_ego += [all_xamb]
all_uamb_ego += [all_uamb]
all_other_x_ego += [all_other_x]
all_other_u_ego += [all_other_u]
ibr_brounds_array_ego += [ibr_brounds_array]
all_tfinalamb_ego += [all_tfinalamb]
else: #altruistic directory
all_xamb_altru += [all_xamb]
all_uamb_altru += [all_uamb]
all_other_x_altru += [all_other_x]
all_other_u_altru += [all_other_u]
ibr_brounds_array_altru += [ibr_brounds_array]
all_tfinalamb_altru += [all_tfinalamb]
else:
print("No slack eligible", folder)
### SAVING IN PROSOCIAL'S DIRECTORy
folder = "random" #<----
fig_trajectory, ax_trajectory = plt.subplots(1,1)
ax_trajectory.set_title("Ambulance Trajectories")
# fig_trajectory.set_figheight(fig_height)
# fig_trajectory.set_figwidth(fig_width)
fig_trajectory.set_size_inches((8,6))
print(len(all_xamb_pro))
print(all_xamb_pro[0].shape)
ax_trajectory.plot(all_xamb_pro[0][0,:,-1], all_xamb_pro[0][1,:,-1], '-o', label="Prosocial")
ax_trajectory.plot(all_xamb_ego[0][0,:,-1], all_xamb_ego[0][1,:,-1], '-o', label="Egoistic")
ax_trajectory.plot(all_xamb_altru[0][0,:,-1], all_xamb_altru[0][1,:,-1], '-o', label="Altruistic")
ax_trajectory.set_xlabel("X [m]")
ax_trajectory.set_ylabel("Y [m]")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig1_amb_trajectory.eps'
fig_trajectory.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
##########################################333333
svo_labels = ["Egoistic", "Prosocial", "Altruistic"]
fig_uamb, ax_uamb = plt.subplots(3,1)
fig_uamb.set_size_inches((8,8))
fig_uamb.suptitle("Ambulance Control Input over IBR Iterations")
# ax_uamb[0].plot(ibr_brounds_array, np.sum(all_uamb[0,:,:] * all_uamb[0,:,:], axis=0), '-o')
ax_uamb[0].bar(range(3), [
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_ego]),
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_pro]),
np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[0].set_xlabel("IBR Iteration")
ax_uamb[0].set_ylabel(r"$\sum u_{\delta}^2$")
ax_uamb[0].set_xticks(range(3))
ax_uamb[0].set_xticklabels(svo_labels)
ax_uamb[1].bar(range(3), [
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_ego]),
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_pro]),
np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[1].set_xlabel("IBR Iteration")
ax_uamb[1].set_ylabel(r"$\sum u_{v}^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
# ax_uamb[2].bar(range(3), [
# np.sum(all_uamb_ego[0,:,-1] * all_uamb_ego[0,:,-1],axis=0) + np.sum(all_uamb_ego[1,:,-1] * all_uamb_ego[1,:,-1],axis=0),
# np.sum(all_uamb_pro[0,:,-1] * all_uamb_pro[1,:,-1], axis=0) + np.sum(all_uamb_pro[1,:,-1] * all_uamb_pro[1,:,-1], axis=0),
# np.sum(all_uamb_altru[0,:,-1] * all_uamb_altru[0,:,-1],axis=0) + np.sum(all_uamb_altru[1,:,-1] * all_uamb_altru[1,:,-1],axis=0)],)
# ax_uamb[2].set_xlabel("Vehicles' Social Value Orientation")
# ax_uamb[2].set_ylabel("$\sum ||u||^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig2_amb_ctrl_iterations.eps'
fig_uamb.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
##########################################################
#### Convergence
#########################################################
fig_reluamb, ax_reluamb = plt.subplots(2,1)
# fig_reluamb.set_figheight(fig_height)
# fig_reluamb.set_figwidth(fig_width)
fig_reluamb.set_size_inches((8,6))
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
ax_reluamb[0].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1])*(all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1]), axis=0), '-o', label=label)
ax_reluamb[1].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1])*(all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1]), axis=0), '-o', label=label)
ax_reluamb[0].set_ylabel("$\sum (u_{v\delta,t}-u_{\delta,t-1})^2$")
ax_reluamb[1].set_xlabel("IBR Iteration")
ax_reluamb[1].set_ylabel("$\sum (u_{v,t}-u_{v,t-1})^2$")
ax_reluamb[0].legend()
ax_reluamb[1].legend()
fig_reluamb.suptitle("Change in Ambulance Control Input over IBR Iterations")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig3_change_amb_ctrl_iterations.eps'
fig_reluamb.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
###################################################################3
##################################################################
fig_xfinal, ax_xfinal = plt.subplots(2,1)
fig_xfinal.suptitle("Final Ambulance State Over Iterations")
fig_xfinal.set_size_inches((8,6))
# fig_xfinal.set_figheight(fig_height)
# fig_xfinal.set_figwidth(fig_width)
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
all_xamb = all_xamb_ego
all_other_x = all_other_x_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
all_xamb = all_xamb_pro
all_other_x = all_other_x_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_xamb = all_xamb_altru
all_other_x = all_other_x_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
ax_xfinal[0].plot(ibr_brounds_array[0], all_xamb[0][0,-1,:], '-o', label=label)
ax_xfinal[1].plot(ibr_brounds_array[0], all_xamb[0][2,-1,:], '-o', label=label)
# ax_reluamb[0].set_xlabel("IBR Iteration")
ax_xfinal[0].set_ylabel("$x_{final}$")
ax_xfinal[0].legend()
ax_xfinal[1].set_xlabel("IBR Iteration")
ax_xfinal[1].set_ylabel(r"$\Theta_{final}$")
ax_xfinal[1].legend()
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig4_iterations_ambperformance.eps'
fig_xfinal.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
################################################################################
###################### NOW PLOTTING THE OTHER VEHICLES #########################
fig_xfinal_all, ax_xfinal_all = plt.subplots(3,1)
fig_xfinal_all.suptitle("Comparing Distance Travel for the Vehicles")
fig_xfinal_all.set_size_inches((8,8))
# fig_xfinal_all.set_figheight(fig_height)
# fig_xfinal_all.set_figwidth(fig_width)
for sim_i in range(3):
if sim_i==0: #prosocial directory
all_uamb = all_uamb_ego
all_xamb = all_xamb_ego
all_other_x = all_other_x_ego
label = "Egoistic"
ibr_brounds_array = ibr_brounds_array_ego
elif sim_i==1: #egoistic directory
all_uamb = all_uamb_pro
all_xamb = all_xamb_pro
all_other_x = all_other_x_pro
label = "Prosocial"
ibr_brounds_array = ibr_brounds_array_pro
else: #altruistic directory
all_uamb = all_uamb_altru
all_xamb = all_xamb_altru
all_other_x = all_other_x_altru
all_other_u = all_other_u_altru
label = "Altruistic"
ibr_brounds_array = ibr_brounds_array_altru
bar_width = 0.5
inter_car_width = 2*bar_width
width_offset = bar_width*sim_i
ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
# print(len(all_ither_x))
# ax_xfinal_all[0].bar(ticks,
# [np.mean([all_x[0, -1, -1] - all_x[0, 0, -1] for all_x in all_xamb])] + [np.mean(all_o_x[i][0,-1,-1] - all_o_x[i][0,0,-1]) for i in range(n_other_cars) for all_o_x in all_other_x],
# bar_width, label=label)
# ax_xfinal_all[0].set_xticks(range(n_other_cars + 1))
# ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
# ax_xfinal_all[1].bar(ticks,
# [all_xamb[-1, -1, -1] - all_xamb[-1, 0, -1]] + [all_other_x[i][-1,-1,-1] - all_other_x[i][-1,0,-1] for i in range(n_other_cars)],
# bar_width, label=label)
# # ax_xfinal_all[1].set_xticks(range(n_other_cars + 1))
# # ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
# ax_xfinal_all[2].bar(ticks,
# [np.sum(all_xamb[2,:,-1]*all_xamb[2,:,-1])] + [np.sum(all_other_x[i][2,:,-1]*all_other_x[i][2,:,-1]) for i in range(n_other_cars)],
# bar_width, label=label)
width_offset = bar_width*1
ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
ax_xfinal_all[2].legend()
ax_xfinal_all[2].set_xticks(ticks)
ax_xfinal_all[2].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[0].set_ylabel("Horizontal Displacement $\Delta x$")
ax_xfinal_all[0].legend()
ax_xfinal_all[0].set_xticks(ticks)
ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[1].set_ylabel("Total Distance $s_f - s_i$")
ax_xfinal_all[1].legend()
ax_xfinal_all[1].set_xticks(ticks)
ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[2].set_ylabel("Angular Deviation $\sum_{t} \Theta_t^2$")
if SAVE:
fig_file_name = folder + 'plots/' + 'cfig5_vehicles_comparison.eps'
fig_xfinal_all.savefig(fig_file_name, dpi=95, format='eps')
print("Save to....", fig_file_name)
#########################Let's Reproduce the Table ####################33
print("Amb X Final Avg. Min. Max. ")
final_metric_ego = [all_x[0,-1,-1] for all_x in all_xamb_ego]
final_metric_pro = [all_x[0,-1,-1] for all_x in all_xamb_pro]
final_metric_altru = [all_x[0,-1,-1] for all_x in all_xamb_altru]
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru)))
final_metric_ego = [t_final[:,-1] for t_final in all_tfinalamb_ego]
final_metric_pro = [t_final[:,-1] for t_final in all_tfinalamb_pro]
final_metric_altru = [t_final[:,-1] for t_final in all_tfinalamb_altru]
# print(all_tfinalamb_ego[0].shape)
# print(final_metric_ego)
# print(final_metric_ego.shape)
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
print("Time To "+str(x_goal)+"m")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego),len(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro),len(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru),len(final_metric_altru)))
print("Veh 1 Final Avg. Min. Max. ")
i = 0
veh_displace_ego = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_ego]
veh_displace_pro = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_pro]
veh_displace_altru = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_altru]
print(" ")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(veh_displace_ego), np.std(veh_displace_ego), np.min(veh_displace_ego), np.max(veh_displace_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f "%(np.mean(veh_displace_pro), np.std(veh_displace_pro), np.min(veh_displace_pro), np.max(veh_displace_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f "%( np.mean(veh_displace_altru), np.std(veh_displace_altru), np.min(veh_displace_altru), np.max(veh_displace_altru)))
if PLOT:
plt.show()
| 39.679803 | 192 | 0.665798 |
0613ddb7599b3120261ade10d3011d5c27649921 | 2,082 | py | Python | AI_maker/celule_leucemie.py | pamintandrei/Tiroidaptinfoed | 2671f219de2ef8ecf68ae7a932ed82462365d889 | [
"MIT"
] | 5 | 2019-06-10T10:42:22.000Z | 2019-07-10T14:05:13.000Z | AI_maker/celule_leucemie.py | pamintandrei/Tiroidaptinfoed | 2671f219de2ef8ecf68ae7a932ed82462365d889 | [
"MIT"
] | null | null | null | AI_maker/celule_leucemie.py | pamintandrei/Tiroidaptinfoed | 2671f219de2ef8ecf68ae7a932ed82462365d889 | [
"MIT"
] | 2 | 2018-08-30T14:36:20.000Z | 2019-06-17T13:07:18.000Z | import numpy as np
from tensorflow.keras.callbacks import TensorBoard
import cv2
import sys
import threading
import keras
from keras.layers import Conv2D,Dense,MaxPooling2D,Flatten,BatchNormalization,Dropout
from IPython.display import display
from PIL import Image
import tensorflow as tf
np.random.seed(1)
with tf.device('/gpu:0'):
keras_data=keras.preprocessing.image.ImageDataGenerator()
path1="D:\\tiroida\\celule\\leucemie_train"
date1 = keras_data.flow_from_directory(path1, target_size = (450, 450),batch_size=32, classes = ["normal","leucemie"], class_mode = "binary")
path2="D:\\tiroida\\celule\\leucemie_test"
date2 = keras_data.flow_from_directory(path2, target_size = (450, 450),batch_size=100, classes = ["normal","leucemie"], class_mode = "binary")
tfmodel=keras.models.Sequential()
tfmodel.add(Conv2D(filters=4,kernel_size=(3,3), padding='same',activation="relu",input_shape=(450,450,3)))
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(BatchNormalization())
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(Conv2D(filters=16, kernel_size=(3,3), activation="relu",padding='same'))
tfmodel.add(BatchNormalization())
tfmodel.add(MaxPooling2D(pool_size=(2,2)))
tfmodel.add(Flatten())
tfmodel.add(Dense(16, activation="relu"))
tfmodel.add(Dense(1, activation="sigmoid"))
tfmodel.compile(optimizer='Adam',loss="binary_crossentropy", metrics=["accuracy"])
checkpoint = keras.callbacks.ModelCheckpoint(filepath='leucemie.h5', save_best_only=True,monitor='val_acc')
tfmodel.fit_generator(date1,validation_data=date2,epochs=10,steps_per_epoch=100,validation_steps=1,callbacks=[checkpoint])
model=keras.models.load_model('leucemie.h5')
print(model.evaluate_generator(date2,steps=1))
input() | 50.780488 | 146 | 0.739193 |
061412d3ce5243bc277fe70e0a5760f272906364 | 233 | py | Python | Django/env_python3.6.1/Lib/site-packages/setupfiles/__init__.py | archu2020/python-2 | 19c626ca9fd37168db8a7ac075fd80c8e2971313 | [
"Apache-2.0"
] | 48 | 2017-12-24T12:19:55.000Z | 2022-02-26T13:14:27.000Z | Django/env_python3.6.1/Lib/site-packages/setupfiles/__init__.py | 17610178081/python | 3975c678d985c468deecd03560d882e9d316bb63 | [
"Apache-2.0"
] | 6 | 2017-11-10T19:45:18.000Z | 2017-11-12T14:50:42.000Z | Django/env_python3.6.1/Lib/site-packages/setupfiles/__init__.py | 17610178081/python | 3975c678d985c468deecd03560d882e9d316bb63 | [
"Apache-2.0"
] | 113 | 2017-08-09T03:10:04.000Z | 2022-03-26T16:05:01.000Z | #!/usr/bin/env python
import distutils
from setupfiles.dist import DistributionMetadata
from setupfiles.setup import setup
__all__ = ["setup"]
distutils.dist.DistributionMetadata = DistributionMetadata
distutils.core.setup = setup
| 23.3 | 58 | 0.824034 |
06155bb97d79c4a708e108ac4d37d0955dc2bd9c | 3,002 | py | Python | test.py | mricaldone/Gramatica | a7e2ff933fe875f5b8a95338c2c312f403ba5679 | [
"MIT"
] | null | null | null | test.py | mricaldone/Gramatica | a7e2ff933fe875f5b8a95338c2c312f403ba5679 | [
"MIT"
] | null | null | null | test.py | mricaldone/Gramatica | a7e2ff933fe875f5b8a95338c2c312f403ba5679 | [
"MIT"
] | null | null | null | import Gramatica
testSeparadorDeSilabas("AprEnDer", "A-prEn-Der")
testSeparadorDeSilabas("piCo", "-pi-Co")
testSeparadorDeSilabas("PDIO", "P-DIO")
testSeparadorDeSilabas("aprender", "a-pren-der")
testSeparadorDeSilabas("tabla", "ta-bla")
testSeparadorDeSilabas("ratn", "ra-tn")
testSeparadorDeSilabas("pico", "-pi-co")
testSeparadorDeSilabas("brocha", "bro-cha") # grupos consonanticos br, cr, dr, gr, fr, kr, tr, bl, cl, gl, fl, kl, pl son inseparables
testSeparadorDeSilabas("abrazo", "a-bra-zo")
testSeparadorDeSilabas("submarino", "sub-ma-ri-no") # los prefijos pueden o no separarse
testSeparadorDeSilabas("perspicacia", "pers-pi-ca-cia") # 3 consonantes consecutivas, 2 van a la silaba anterior y 1 a la siguiente
testSeparadorDeSilabas("conspirar", "cons-pi-rar")
testSeparadorDeSilabas("obscuro", "obs-cu-ro")
testSeparadorDeSilabas("irreal", "i-rre-al") # no se pueden separar las rr
testSeparadorDeSilabas("acallar", "a-ca-llar") # no se pueden separar las ll
testSeparadorDeSilabas("abstracto", "abs-trac-to") # 4 consonantes consecutivas, 2 van a la silaba anterior y 2 a la siguiente
testSeparadorDeSilabas("rubia", "ru-bia") # los diptongos no se separan
testSeparadorDeSilabas("labio", "la-bio")
testSeparadorDeSilabas("caigo", "cai-go")
testSeparadorDeSilabas("oigo", "oi-go")
testSeparadorDeSilabas("descafeinado", "des-ca-fei-na-do")
testSeparadorDeSilabas("diurno", "diur-no")
testSeparadorDeSilabas("ruido", "rui-do")
testSeparadorDeSilabas("pdio", "p-dio")
testSeparadorDeSilabas("aplanar", "a-pla-nar")
testSeparadorDeSilabas("ocre", "o-cre")
testSeparadorDeSilabas("archi", "ar-chi")
testSeparadorDeSilabas("leer", "le-er")
testSeparadorDeSilabas("caos", "ca-os")
testSeparadorDeSilabas("bal", "ba-l")
testSeparadorDeSilabas("ambiguo", "am-bi-guo")
testSeparadorDeSilabas("antifaz", "an-ti-faz")
testSeparadorDeSilabas("transplantar", "trans-plan-tar")
testSeparadorDeSilabas("substraer", "subs-tra-er")
testSeparadorDeSilabas("abstraer", "abs-tra-er")
testSeparadorDeSilabas("abstracto", "abs-trac-to")
testSeparadorDeSilabas("pingino", "pin-gi-no")
testSeparadorDeSilabas("vergenza", "ver-gen-za")
testSeparadorDeSilabas("bilinge", "bi-lin-ge")
testSeparadorDeSilabas("bal ocre", "ba-l o-cre")
testSeparadorDeSilabas("", "")
testSeparadorDeSilabas(" ", " ")
testSeparadorDeSilabas(" ", " ")
testSeparadorDeSilabas("k", "k")
testSeparadorDeSilabas("1", "1")
testSeparadorDeSilabas("abstraer abstracto", "abs-tra-er abs-trac-to") | 50.033333 | 134 | 0.72052 |
061561270f389e6138b7861cea448dfbc7f9b7ae | 1,201 | py | Python | web/scripts/minify_json.py | albertomh/SqueezeCompass | 30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f | [
"MIT"
] | null | null | null | web/scripts/minify_json.py | albertomh/SqueezeCompass | 30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f | [
"MIT"
] | null | null | null | web/scripts/minify_json.py | albertomh/SqueezeCompass | 30365fd6f1bf8ceca2c2fa7e4c8e15d4d9a85f1f | [
"MIT"
] | null | null | null | #
# Minify JSON data files in the `/dist` directory.
# Script invoked by the npm postbuild script after building the project with `npm run build`.
#
from os import (
path,
listdir,
fsdecode
)
import json
from datetime import datetime
if __name__ == '__main__':
minifier = JSONMinifier()
minifier.minify_json(minifier.DIST_CONSTITUENT_DATA_DIRECTORY)
minifier.minify_json(minifier.DIST_SNAPSHOT_DATA_DIRECTORY)
| 34.314286 | 117 | 0.623647 |
ae044bb52fdc9d56a4ae83f40e90c43b75adb5a4 | 13,751 | py | Python | CPU-Name.py | acidburn0zzz/CPU-Name | 2322da712a9ac47f38f22a43bf9bcbc0240e062b | [
"MIT"
] | 1 | 2021-11-30T18:35:46.000Z | 2021-11-30T18:35:46.000Z | CPU-Name.py | acidburn0zzz/CPU-Name | 2322da712a9ac47f38f22a43bf9bcbc0240e062b | [
"MIT"
] | null | null | null | CPU-Name.py | acidburn0zzz/CPU-Name | 2322da712a9ac47f38f22a43bf9bcbc0240e062b | [
"MIT"
] | null | null | null | import subprocess
import platform
from Scripts import plist, utils
c = CPUName()
c.main()
| 49.464029 | 172 | 0.563304 |
ae046c38a2e79a1620b18d8e95f3afd8af8e8031 | 3,853 | py | Python | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | solvcon/parcel/gasplus/probe.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2016, Yung-Yu Chen <yyc@solvcon.net>
# BSD 3-Clause License, see COPYING
import os
import numpy as np
import solvcon as sc
# vim: set ff=unix fenc=utf8 ft=python nobomb et sw=4 ts=4 tw=79:
| 30.101563 | 79 | 0.534908 |
ae059eac36d79675fbab914a2bbf4174d3306bb6 | 8,600 | py | Python | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | [
"Apache-2.0"
] | 9 | 2020-09-02T09:53:04.000Z | 2022-01-16T11:16:57.000Z | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | [
"Apache-2.0"
] | null | null | null | data/dataset.py | 1chimaruGin/EfficientDet | 8adf636db1f7c5c64b65c1e897a0d18f682e6251 | [
"Apache-2.0"
] | 1 | 2021-06-15T15:55:46.000Z | 2021-06-15T15:55:46.000Z | """ COCO dataset (quick and dirty)
Hacked together by Ross Wightman
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import os
import cv2
import random
import torch
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
| 37.391304 | 136 | 0.546395 |
ae06e563dacfb2f601bc91857ad8c0255bdbcc8b | 1,292 | py | Python | env/Lib/site-packages/OpenGL/GLES2/EXT/sRGB_write_control.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | env/Lib/site-packages/OpenGL/GLES2/EXT/sRGB_write_control.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | env/Lib/site-packages/OpenGL/GLES2/EXT/sRGB_write_control.py | 5gconnectedbike/Navio2 | 8c3f2b5d8bbbcea1fc08739945183c12b206712c | [
"BSD-3-Clause"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension EXT.sRGB_write_control
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.sRGB_write_control to provide a more
Python-friendly API
Overview (from the spec)
This extension's intent is to expose new functionality which allows an
application the ability to decide if the conversion from linear space to
sRGB is necessary by enabling or disabling this conversion at framebuffer
write or blending time. An application which passes non-linear vector data
to a shader may not want the color conversion occurring, and by disabling
conversion the application can be simplified, sometimes in very significant
and more optimal ways.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/sRGB_write_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.sRGB_write_control import *
from OpenGL.raw.GLES2.EXT.sRGB_write_control import _EXTENSION_NAME
def glInitSrgbWriteControlEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | 39.151515 | 76 | 0.813467 |
ae074bc52a086a244bf599cb6b758a858b0ae56e | 241 | py | Python | cgn_framework/imagenet/models/__init__.py | anonymous-user-256/mlrc-cgn | 64f43fcb89b3a13c0ae46db4f19060d9f204a6b1 | [
"MIT"
] | 78 | 2021-01-15T09:22:21.000Z | 2022-03-06T12:15:36.000Z | cgn_framework/imagenet/models/__init__.py | anonymous-user-256/mlrc-cgn | 64f43fcb89b3a13c0ae46db4f19060d9f204a6b1 | [
"MIT"
] | 3 | 2021-03-26T07:33:16.000Z | 2022-01-17T14:49:51.000Z | cgn_framework/imagenet/models/__init__.py | anonymous-user-256/mlrc-cgn | 64f43fcb89b3a13c0ae46db4f19060d9f204a6b1 | [
"MIT"
] | 14 | 2021-01-17T10:08:49.000Z | 2022-01-14T06:32:11.000Z | from imagenet.models.biggan import BigGAN
from imagenet.models.u2net import U2NET
from imagenet.models.cgn import CGN
from imagenet.models.classifier_ensemble import InvariantEnsemble
__all__ = [
CGN, InvariantEnsemble, BigGAN, U2NET
]
| 26.777778 | 65 | 0.821577 |
ae07a130b3eed404ad6c84e0c2e825a8a33c151b | 595 | bzl | Python | source/bazel/deps/osdialog/get.bzl | luxe/unilang | 6c8a431bf61755f4f0534c6299bd13aaeba4b69e | [
"MIT"
] | 33 | 2019-05-30T07:43:32.000Z | 2021-12-30T13:12:32.000Z | source/bazel/deps/osdialog/get.bzl | luxe/unilang | 6c8a431bf61755f4f0534c6299bd13aaeba4b69e | [
"MIT"
] | 371 | 2019-05-16T15:23:50.000Z | 2021-09-04T15:45:27.000Z | source/bazel/deps/osdialog/get.bzl | luxe/unilang | 6c8a431bf61755f4f0534c6299bd13aaeba4b69e | [
"MIT"
] | 6 | 2019-08-22T17:37:36.000Z | 2020-11-07T07:15:32.000Z | # Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
| 35 | 106 | 0.714286 |
ae091378dcfbc21471f5642e52eb0d041d0b3c94 | 529 | py | Python | src/dnsblock_update/config.py | raynigon/dnsblock-update | 258ca7c9934d21e6367ab2b282b24be5c06d9116 | [
"Apache-2.0"
] | null | null | null | src/dnsblock_update/config.py | raynigon/dnsblock-update | 258ca7c9934d21e6367ab2b282b24be5c06d9116 | [
"Apache-2.0"
] | null | null | null | src/dnsblock_update/config.py | raynigon/dnsblock-update | 258ca7c9934d21e6367ab2b282b24be5c06d9116 | [
"Apache-2.0"
] | null | null | null | from yaml import safe_load
from .blocklist import Blocklist | 35.266667 | 82 | 0.644612 |
ae09bb3a14c1ed49e2d5726423fbf824ac0d0220 | 5,532 | py | Python | pySPACE/run/scripts/md_creator.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/run/scripts/md_creator.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/run/scripts/md_creator.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 18 | 2015-09-28T07:16:38.000Z | 2021-01-20T13:52:19.000Z | """ Create meta data file 'metadata.yaml' for :class:`~pySPACE.resources.dataset_defs.feature_vector.FeatureVectorDataset`
Used for external files, which can not be read directly in pySPACE.
Eg. csv files without names.
To be called in the dataset directory.
"""
def get_numerical_user_input(msg):
""" Request input, split it by ',' and parse it for '-' """
tmp_info = raw_input(msg)
tmp_info = tmp_info.replace(' ', '').split(',')
return parse_list(tmp_info)
def get_user_input(msg):
""" Request input """
return raw_input(msg)
def parse_list(input_list):
""" Replace range by explicit numbers """
info = []
for index in input_list:
if type(index) == int:
info.append(index)
if not type(index) == str:
info.append(int(index))
# zero is not an accepted index
if index == '0' or index == '':
continue
# replacing '-' with actual indices
if '-' in str(index):
index_split = index.split('-')
# to handle -1 input
if index_split[0] == '':
info.append(int(index))
continue
low = int(index_split[0])
high = int(index_split[1])
rnge = high - low
new_index = [low]
for i in range(rnge):
new_index.append(low + i + 1)
info = info.extend(new_index)
else:
info.append(int(index))
return info
def check_delimiter(data):
""" Checks delimiter to have length one """
delimiter = data["delimiter"]
if len(delimiter) == 0:
# add the deleted spaces
data["delimiter"]=' '
return True
elif len(delimiter)==1:
# tabulator is included here
return True
else:
import warnings
warnings.warn('To long delimiter. Only 1 sign allowed. Please try again.')
def generate_meta_data(data):
""" Map data to the metadata.yaml string and set defaults """
meta_data = "author: " + os.environ['USER'] + '\n' + \
"date: " + time.strftime("%Y%m%d")+ '\n' + \
"type: feature_vector" + "\n"
for item in data.items():
if item[1] != '':
if item[0] == 'file_name':
meta_data += "file_name: " + str(data["file_name"]) + "\n"
elif item[0] == 'format':
meta_data += "storage_format: [" + str(data["format"]) + ', real]' + "\n"
elif item[0] == 'rows':
meta_data += "ignored_rows: " + str(data["rows"]) + "\n"
elif item[0] == 'columns':
meta_data += "ignored_columns: " + str(data["columns"]) + "\n"
elif item[0] == 'label':
meta_data += "label_column: " + str(data["label"]) + "\n"
else: # set defaults
if item[0] == 'file_name':
meta_data += "file_name: " + "file_name.csv" + "\n"
elif item[0] == 'format':
meta_data += "storage_format: [" + "csv" + ', real]' + "\n"
elif item[0] == 'rows':
meta_data += "ignored_rows: " + "[]" + "\n"
elif item[0] == 'columns':
meta_data += "ignored_columns: " + "[]" + "\n"
elif item[0] == 'label':
meta_data += "label_column: " + str(-1) + "\n"
return meta_data
import os, time, sys
if __name__ == "__main__":
info_string = "\nRunning meta data creator ... \n"
give_info(info_string)
md_file = "metadata.yaml"
if not os.path.isfile(md_file):
main(md_file)
else:
msg = "'metadata.yaml' already exists! \n"
give_info(msg)
yes_no = raw_input("Overwrite? y/n: ")
if yes_no == "y":
main(md_file)
else:
msg = "Exiting ... \n"
give_info(msg)
sys.exit(0)
| 33.731707 | 122 | 0.537419 |
ae0b04625ca9a862eb715fd13d3b553a6fb19211 | 12,715 | py | Python | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | [
"BSD-3-Clause"
] | 1 | 2021-06-21T13:35:20.000Z | 2021-06-21T13:35:20.000Z | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | [
"BSD-3-Clause"
] | null | null | null | test/abstract_lut_test.py | sgtm/ColorPipe-tools | 971b546f77b0d1a6e5ee3aa7e4077a9d41c6e59b | [
"BSD-3-Clause"
] | null | null | null | """ Testing Abstract LUT model
"""
import unittest
import os
import shutil
import tempfile
from PyOpenColorIO.Constants import INTERP_LINEAR, INTERP_TETRAHEDRAL
from utils import lut_presets as presets
from utils.lut_presets import PresetException, OUT_BITDEPTH
import utils.abstract_lut_helper as alh
from utils.colorspaces import REC709, SGAMUTSLOG, ALEXALOGCV3
from utils.csp_helper import CSP_HELPER
from utils.cube_helper import CUBE_HELPER
from utils.threedl_helper import THREEDL_HELPER, SHAPER, MESH
from utils.spi_helper import SPI_HELPER
from utils.ascii_helper import ASCII_HELPER, AsciiHelperException
from utils.clcc_helper import CLCC_HELPER
from utils.json_helper import JSON_HELPER
from utils.ocio_helper import create_ocio_processor
from utils.lut_utils import get_input_range
DISPLAY = False
if __name__ == '__main__':
unittest.main()
| 42.811448 | 83 | 0.533464 |
ae0e1342adc959978ce2df9edec93bd093cab6fe | 4,704 | py | Python | booktracker.py | stonewell/booktracker | 8fc324f10b4bc9d8a0a22a40871282bbef00e5ad | [
"MIT"
] | null | null | null | booktracker.py | stonewell/booktracker | 8fc324f10b4bc9d8a0a22a40871282bbef00e5ad | [
"MIT"
] | null | null | null | booktracker.py | stonewell/booktracker | 8fc324f10b4bc9d8a0a22a40871282bbef00e5ad | [
"MIT"
] | null | null | null | import argparse
import sys
import logging
import json
if __name__ == '__main__':
parser = args_parser().parse_args()
if parser.verbose >= 1:
logging.getLogger('').setLevel(logging.DEBUG)
if parser.urls_file is None and parser.url is None:
args_parser().print_usage()
sys.exit()
urls = set()
if parser.urls_file:
try:
urls = parse_urls_file_json(parser.urls_file)
except:
logging.exception('urls file:%s is not json try text file', parser.urls_file)
parser.urls_file.seek(0)
urls = parse_urls_file_txt(parser.urls_file)
if parser.url:
urls.add((parser.url,
parser.author,
parser.title,
tuple(parser.headers) if parser.headers else tuple([]))
)
for url, author, title, headers in sorted(urls):
try:
if url.find('piaotian') > 0 or url.find('ptwxz') > 0:
from piaotian.book_tracker import Tracker as PiaoTianTracker
tracker = PiaoTianTracker(url, author, title, parser.output, parser.timeout)
elif url.find('23us') > 0:
from dingdian.book_tracker import Tracker as DingDianTracker
tracker = DingDianTracker(url, author, title, parser.output, parser.timeout)
elif url.find('youdubook') > 0:
from youdu.book_tracker import Tracker as YouduTracker
tracker = YouduTracker(url, author, title, parser.output, parser.timeout)
elif url.find('shuku') > 0:
from shuku.book_tracker import Tracker as ShuKuTracker
tracker = ShuKuTracker(url, author, title, parser.output, parser.timeout)
elif url.find('uukanshu') > 0:
from uukanshu.book_tracker import Tracker as UUKanShuTracker
tracker = UUKanShuTracker(url, author, title, parser.output, parser.timeout)
if not tracker:
raise ValueError("tracker not found")
tracker.headers = list(headers)
update_count = tracker.refresh()
print(tracker.title, 'update count:', update_count)
if parser.epub:
tracker.gen_epub()
except:
logging.exception("update failed:{}".format(url))
| 40.904348 | 199 | 0.60119 |
ae0e3edf6f720a4fb2dd231e188dd1e1fa7fe663 | 667 | py | Python | 06-python-functions-1.py | reysmerwvr/python-playgrounds | 1e039639d96044986ba5cc894a210180cc2b08e0 | [
"MIT"
] | null | null | null | 06-python-functions-1.py | reysmerwvr/python-playgrounds | 1e039639d96044986ba5cc894a210180cc2b08e0 | [
"MIT"
] | null | null | null | 06-python-functions-1.py | reysmerwvr/python-playgrounds | 1e039639d96044986ba5cc894a210180cc2b08e0 | [
"MIT"
] | null | null | null | import math
print(circle_area(5))
print(intermediate_number(-24, 24))
evens, odds = separate([6, 5, 2, 1, 7])
print(evens)
print(odds)
| 16.675 | 39 | 0.610195 |
ae0ef85218f1bd293decfce58f18a3dbb6559d3c | 3,647 | py | Python | cloudfront/resource.py | iPlantCollaborativeOpenSource/iPlant-Atmosphere | d67b953561e813dd30ffa52c8440af7cc2d990cf | [
"Unlicense"
] | 1 | 2017-10-05T08:03:37.000Z | 2017-10-05T08:03:37.000Z | cloudfront/resource.py | iPlantCollaborativeOpenSource/iPlant-Atmosphere | d67b953561e813dd30ffa52c8440af7cc2d990cf | [
"Unlicense"
] | null | null | null | cloudfront/resource.py | iPlantCollaborativeOpenSource/iPlant-Atmosphere | d67b953561e813dd30ffa52c8440af7cc2d990cf | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2010, iPlant Collaborative, University of Arizona, Cold Spring Harbor Laboratories, University of Texas at Austin
# This software is licensed under the CC-GNU GPL version 2.0 or later.
# License: http://creativecommons.org/licenses/GPL/2.0/
#
# Author: Seung-jin Kim
# Contact: seungjin@email.arizona.edu
# Twitter: @seungjin
#
import logging
import httplib
import urllib
from urlparse import urlparse
import string
import datetime
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.contrib.auth import logout
from django.http import HttpResponseNotFound
from django.http import HttpResponseForbidden
from django.utils import simplejson
from atmosphere.cloudfront.models import *
| 32.855856 | 158 | 0.716753 |
ae0f034944c35cf482cef502709dd21969753521 | 554 | py | Python | py/jsontoimgmd_all.py | zhouhaixian/Twikoo-Magic | e5ff88bfb58ab97ffa9c395ab302e696ddefc66f | [
"MIT"
] | 59 | 2021-01-06T01:32:07.000Z | 2022-03-26T04:56:46.000Z | py/jsontoimgmd_all.py | zhouhaixian/Twikoo-Magic | e5ff88bfb58ab97ffa9c395ab302e696ddefc66f | [
"MIT"
] | 5 | 2021-01-14T17:31:12.000Z | 2022-03-26T05:25:40.000Z | py/jsontoimgmd_all.py | zhouhaixian/Twikoo-Magic | e5ff88bfb58ab97ffa9c395ab302e696ddefc66f | [
"MIT"
] | 22 | 2021-02-15T12:06:59.000Z | 2022-02-11T05:51:43.000Z | import json
import os
classlist = os.listdir("./image/")
for classname in classlist:
# "./Classification/"+classname+"/"
try: os.mkdir("./Classification/"+classname+"/")
except: pass
filenamelist = os.listdir("./image/"+classname)
url = "https://cdn.jsdelivr.net/gh/2x-ercha/twikoo-magic/image/" + classname + "/"
with open("./Classification/"+classname+"/README.md", "w", encoding="utf-8") as f:
f.write(classname+"\n\n")
for filename in filenamelist:
f.write("\n")
| 30.777778 | 86 | 0.606498 |
ae0f418d25ef8016cb9f505cbfcc08043b51e1d4 | 4,964 | py | Python | calculator.py | xizhongzhao/challenge5 | fd4535479a0466eb0dec3c5f0078efea5fa40401 | [
"BSD-3-Clause"
] | null | null | null | calculator.py | xizhongzhao/challenge5 | fd4535479a0466eb0dec3c5f0078efea5fa40401 | [
"BSD-3-Clause"
] | null | null | null | calculator.py | xizhongzhao/challenge5 | fd4535479a0466eb0dec3c5f0078efea5fa40401 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
from multiprocessing import Queue,Process,Lock
from datetime import datetime
import getopt
import configparser
que1 = Queue()
que2 = Queue()
if __name__ == '__main__':
main()
| 27.88764 | 91 | 0.52357 |
ae0f8d2404360860d62fb249f2d3aa6934c5170c | 1,730 | py | Python | scripts/financials.py | pwaring/125-accounts | a8d577110184e5f833368977c36b1e407c7357f6 | [
"MIT"
] | null | null | null | scripts/financials.py | pwaring/125-accounts | a8d577110184e5f833368977c36b1e407c7357f6 | [
"MIT"
] | 7 | 2017-04-30T11:11:26.000Z | 2020-09-24T15:23:24.000Z | scripts/financials.py | pwaring/125-accounts | a8d577110184e5f833368977c36b1e407c7357f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import yaml
import pathlib
import decimal
import datetime
import os
decimal.getcontext().prec = 10
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='path to data directory', required=True)
args = parser.parse_args()
script_path = os.path.dirname(os.path.realpath(__file__))
config_path = script_path + '/../config'
# Configuration
config = {}
with open(config_path + '/tax.yaml') as f:
config['tax'] = yaml.safe_load(f.read())
# Find current tax year
today = datetime.date.today()
config['current_tax'] = next(x for x in config['tax'] if x['start_date'] <= today and x['end_date'] >= today)
# Data
total_sales = decimal.Decimal(0.00)
total_payments = decimal.Decimal(0.00)
data_directory = str(args.data)
data_path = pathlib.Path(data_directory)
invoice_files = list(data_path.glob('data/invoices/*.yaml'))
for invoice_file in invoice_files:
fp = invoice_file.open()
invoice_data = yaml.safe_load(fp.read())
fp.close()
if invoice_data['issue_date'] >= config['current_tax']['start_date'] and invoice_data['issue_date'] <= config['current_tax']['end_date'] and invoice_data['issue_date'] <= today:
print(invoice_data['number'])
total_sales += decimal.Decimal(invoice_data['total'])
print(invoice_data['total'])
# Subtract any payments from accounts receivable
if 'payments' in invoice_data:
for payment in invoice_data['payments']:
print(payment['amount'])
total_payments += decimal.Decimal(payment['amount'])
print()
print("Total sales: %.2f" % total_sales)
print("Total payments: %.2f" % total_payments)
# Calculate tax and national insurance
| 28.833333 | 181 | 0.695954 |
ae10738b2828081524171edff4d9e154279c3a52 | 4,131 | py | Python | index.py | welshonion/GB_Tweet_Eraser | 5ba77864e12bbdfc0f44fd417e1584a672120dd6 | [
"MIT"
] | null | null | null | index.py | welshonion/GB_Tweet_Eraser | 5ba77864e12bbdfc0f44fd417e1584a672120dd6 | [
"MIT"
] | null | null | null | index.py | welshonion/GB_Tweet_Eraser | 5ba77864e12bbdfc0f44fd417e1584a672120dd6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#for local
#import config
#config.write_environ()
import os,json
from flask import Flask, render_template, request, redirect, url_for, session
from requests_oauthlib import OAuth1Session
from datetime import timedelta
import twitter_auth
import twitter_delete
import postTweet
import databaseIO
app = Flask(__name__)
app.secret_key = os.environ['APP_SECRET_KEY']
app.permanent_session_lifetime = timedelta(minutes=5)
#session.permanent = True
#scheduler = BackgroundScheduler(daemon = True)
##################################################################
##
CK = os.environ.get('CONSUMER_KEY', '0')
CS = os.environ.get('CONSUMER_SECRET', '0')
##################################################################
is_verified = False
name = ""
screen_name = ""
w = ('stop','running')
"""@app.route('/verified')
def verified():
is_verified,name,screen_name = twitter_auth.user_verified()
#return redirect('http://127.0.0.1:5000/')
return render_template('verified.html',is_verified = is_verified,name=name,screen_name=screen_name)
@app.route('/setting_authenticate')
def authenticate():
authenticate_url = twitter_auth.user_authenticate_setting()
return redirect(authenticate_url)
#return #render_template('tweet.html',message=message,title=title)
"""
if __name__ == '__main__':
#app.debug = True
app.run(threaded=True)
| 27 | 139 | 0.641007 |
ae111d2701de32e61ae648826dd4e4b4b1370654 | 882 | py | Python | angr-management/angrmanagement/ui/menus/disasm_insn_context_menu.py | Ruide/angr-dev | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | [
"BSD-2-Clause"
] | null | null | null | angr-management/angrmanagement/ui/menus/disasm_insn_context_menu.py | Ruide/angr-dev | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | [
"BSD-2-Clause"
] | null | null | null | angr-management/angrmanagement/ui/menus/disasm_insn_context_menu.py | Ruide/angr-dev | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | [
"BSD-2-Clause"
] | null | null | null |
from PySide.QtGui import QKeySequence
from PySide.QtCore import Qt
from .menu import Menu, MenuEntry, MenuSeparator
| 32.666667 | 107 | 0.717687 |
ae11598e927b79f190c3f53d990ca4e8744816b6 | 21,209 | py | Python | shades/shades.py | benrrutter/Shades | 06c1d2e9b7ba6044892a6bf7529e706574fb923c | [
"MIT"
] | 1 | 2020-11-28T19:41:39.000Z | 2020-11-28T19:41:39.000Z | shades/shades.py | benrrutter/Shades | 06c1d2e9b7ba6044892a6bf7529e706574fb923c | [
"MIT"
] | null | null | null | shades/shades.py | benrrutter/Shades | 06c1d2e9b7ba6044892a6bf7529e706574fb923c | [
"MIT"
] | null | null | null | """
shades
contains classes and functions relating to Shades' shade object
"""
from abc import ABC, abstractmethod
from typing import Tuple, List
import numpy as np
from PIL import Image
from .noise_fields import NoiseField, noise_fields
from .utils import color_clamp
| 34.768852 | 95 | 0.588571 |
ae131115e85d42f0478a7f770cbcfcd854b30f6f | 4,104 | py | Python | BCAWT/CA.py | AliYoussef96/BCAW-Tool | a296a52f8795325f08e0c6f00838b9e851f9459e | [
"MIT"
] | 3 | 2019-10-22T07:08:40.000Z | 2021-07-27T14:12:25.000Z | BCAWT/CA.py | AliYoussef96/BCAW-Tool | a296a52f8795325f08e0c6f00838b9e851f9459e | [
"MIT"
] | 13 | 2019-06-26T07:21:25.000Z | 2021-07-23T15:01:31.000Z | BCAWT/CA.py | AliYoussef96/BCAW-Tool | a296a52f8795325f08e0c6f00838b9e851f9459e | [
"MIT"
] | 3 | 2019-07-25T00:13:36.000Z | 2020-09-25T01:58:34.000Z | def CA(file):
"""correspondence analysis.
Args:
file (directory): csv file contains genes' RSCU values
Returns:
- csv file contains genes' values for the first 4 axes of the correspondence analysis result
- csv file contains codons' values for the first 4 axes of the correspondence analysis result
- plot the genes first 2 axes values of the correspondence analysis result
- plot the codons first 2 axes values of the correspondence analysis result
"""
import pandas as pd
import prince
import matplotlib.pyplot as plt
file = str(file)
df = pd.read_csv(file)
df.set_index(df.iloc[:,0] , inplace=True)# to make the first column is the index
df.drop(df.columns[0], axis=1,inplace= True)
df.replace(0,0.0000001,inplace=True)
#with prince # make onle CA for 2 axis
ca = prince.CA(
n_components=4,
n_iter=3,
copy=True,
check_input=True,
engine='auto',
random_state=42
)
df.columns.rename('Gene Name', inplace=True)
df.index.rename('Codons', inplace=True)
ca = ca.fit(df)
codons = ca.row_coordinates(df) # for Codons
genes = ca.column_coordinates(df) #for genes
#ca.eigenvalues_
ca.total_inertia_ #total inertia
ca.explained_inertia_ #inertia for each axis
inertia = ca.explained_inertia_
#save information
file_genes = file.replace(".csv",'')
file_genes = file_genes + "genes"
file_genes = file_genes + ".csv"
genes.rename(columns={genes.columns[0]: 'axis 1', genes.columns[1]: 'axis 2', genes.columns[2]: 'axis 3', genes.columns[3]: 'axis 4'}, inplace=True)
genes.to_csv(file_genes,sep=',', index=True, header=True) # return csv file for genes ca result
file_codons = file.replace(".csv",'')
file_codons = file_codons+ "codons"
file_codons = file_codons + ".csv"
codons.rename(columns={codons.columns[0]: 'axis 1', codons.columns[1]: 'axis 2', codons.columns[2]: 'axis 3', codons.columns[3]: 'axis 4'},inplace=True)
codons.to_csv(file_codons, sep=',', index=True, header=True) # return csv file for codon ca result
file_inertia = file.replace('.csv','.txt')
with open(file_inertia, 'a') as f:
f.write("explained inertia" + "\n")
for i in range(len(inertia)):
i_count = i + 1
with open(file_inertia,'a') as f:
f.write ("axis " + str(i_count) + " = " + str(inertia[i]) + "\n" )
with open(file_inertia,'a') as f:
f.write("Total Inertia = " + str(ca.total_inertia_))
#plot For genes
plt.style.use('seaborn-dark-palette')
fig = plt.figure()
plt.xlabel("Axis 1")
plt.ylabel("Axis 2")
plt.title("CA-plot")
plt.scatter(genes['axis 1'],genes['axis 2'],s=10,marker ='o')
plt.axhline(0, color='black', linestyle='-')
plt.axvline(0, color='black', linestyle='-')
save_file_name__ca_plot = file + "_CA_gens_plot.png"
plt.savefig(save_file_name__ca_plot) # return plot file for gene ca result
#for codons
plt.style.use('seaborn-dark-palette')
fig3 = plt.figure()
plt.xlabel("Axis 1")
plt.ylabel("Axis 2")
plt.title("CA-plot")
plt.scatter(codons['axis 1'],codons['axis 2'], s=10,marker ='o')
plt.axhline(0, color='black', linestyle='-')
plt.axvline(0, color='black', linestyle='-')
if len(codons) < 200:
for x , y , t in zip(codons['axis 1'],codons['axis 2'] , codons.index.values):
x = x * (1 + 0.01)
y = y * (1 + 0.01)
plt.text(x,y,t)
file = file.replace('.csv','')
save_file_name__ca_codons_plot = file + "_CA_codos_plot.png"
plt.savefig(save_file_name__ca_codons_plot) # return plot file for codon ca result
read_genes_file = pd.read_csv(file_genes)
read_genes_file.rename(columns={genes.columns[0]: 'gene id', genes.columns[1]: 'axis 1', genes.columns[2]: 'axis 2'}, inplace=True)
return read_genes_file
| 32.832 | 157 | 0.615497 |
ae131e4cfe7f41c7e3b760f7d7833d99b7a223bd | 32 | py | Python | renderchan/__init__.py | decipher-media/RenderChan | 6aa6b90403f87e8aa41cc487c62ad8e4ac149a6a | [
"BSD-3-Clause"
] | 30 | 2015-02-12T13:21:30.000Z | 2019-12-09T07:29:47.000Z | renderchan/__init__.py | decipher-media/RenderChan | 6aa6b90403f87e8aa41cc487c62ad8e4ac149a6a | [
"BSD-3-Clause"
] | 53 | 2015-12-20T17:04:00.000Z | 2019-11-11T07:54:50.000Z | renderchan/__init__.py | decipher-media/RenderChan | 6aa6b90403f87e8aa41cc487c62ad8e4ac149a6a | [
"BSD-3-Clause"
] | 7 | 2015-08-10T01:38:28.000Z | 2020-02-14T20:06:28.000Z | """
Main RenderChan package
"""
| 8 | 23 | 0.65625 |
ae149f58a8d124a1863b191cb6116f6a91fb3bc3 | 5,110 | py | Python | test/test_package.py | TheJacksonLaboratory/chia_rep | fe774259bfa3a045cc5189c61110a07c8f5eaa26 | [
"MIT"
] | 1 | 2019-09-14T02:44:40.000Z | 2019-09-14T02:44:40.000Z | test/test_package.py | TheJacksonLaboratory/chia_rep | fe774259bfa3a045cc5189c61110a07c8f5eaa26 | [
"MIT"
] | null | null | null | test/test_package.py | TheJacksonLaboratory/chia_rep | fe774259bfa3a045cc5189c61110a07c8f5eaa26 | [
"MIT"
] | 1 | 2021-07-10T12:00:05.000Z | 2021-07-10T12:00:05.000Z | import sys
import os
import shutil
sys.path.append('.')
import chia_rep
| 39.921875 | 79 | 0.626027 |
ae14d95fbddd637652559526a0abec1bcbb1d2a1 | 4,343 | py | Python | src/jibo_animation_ui.py | marketneutral/jibo-teleop | dce5e131a364b2dc8108dd766a74cb7547077eed | [
"MIT"
] | 3 | 2019-06-03T15:12:15.000Z | 2019-06-24T03:44:40.000Z | src/jibo_animation_ui.py | marketneutral/jibo-teleop | dce5e131a364b2dc8108dd766a74cb7547077eed | [
"MIT"
] | null | null | null | src/jibo_animation_ui.py | marketneutral/jibo-teleop | dce5e131a364b2dc8108dd766a74cb7547077eed | [
"MIT"
] | 1 | 2019-04-24T13:15:57.000Z | 2019-04-24T13:15:57.000Z | # Jacqueline Kory Westlund
# May 2016
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Personal Robots Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PySide import QtGui # basic GUI stuff
from jibo_msgs.msg import JiboAction # ROS msgs
from jibo_teleop_ros import jibo_teleop_ros
from functools import partial
| 39.844037 | 125 | 0.686162 |
ae14fd8d5a20f5e39dfb519bebc015197b1abd83 | 7,470 | py | Python | scans/migrations/0001_initial.py | Cashiuus/nmap-manager | 6d53bb4464f6b74ca40d5685a44f36942e5462b0 | [
"MIT"
] | null | null | null | scans/migrations/0001_initial.py | Cashiuus/nmap-manager | 6d53bb4464f6b74ca40d5685a44f36942e5462b0 | [
"MIT"
] | 9 | 2022-01-25T05:27:42.000Z | 2022-03-31T05:30:02.000Z | scans/migrations/0001_initial.py | Cashiuus/nmap-manager | 6d53bb4464f6b74ca40d5685a44f36942e5462b0 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-01-11 19:00
from django.db import migrations, models
import django.db.models.deletion
import scans.models
| 63.305085 | 158 | 0.6 |
ae160d8656b4e6e4a094903dfd38d5d1ed77aedf | 1,447 | py | Python | es_common/command/check_reservations_command.py | ES-TUDelft/interaction-design-tool-ir | d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082 | [
"MIT"
] | 1 | 2021-03-07T12:36:13.000Z | 2021-03-07T12:36:13.000Z | es_common/command/check_reservations_command.py | ES-TUDelft/interaction-design-tool-ir | d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082 | [
"MIT"
] | null | null | null | es_common/command/check_reservations_command.py | ES-TUDelft/interaction-design-tool-ir | d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082 | [
"MIT"
] | 1 | 2021-02-20T15:10:37.000Z | 2021-02-20T15:10:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **
#
# ======================== #
# CHECK_RESERVATIONS_COMMAND #
# ======================== #
# Command for checking reservations.
#
# @author ES
# **
import logging
from collections import OrderedDict
from es_common.command.es_command import ESCommand
from es_common.enums.command_enums import ActionCommand
| 24.525424 | 91 | 0.591569 |
ae16f26a49eb3ff276ad91bfaa98b238072f3c5f | 2,471 | py | Python | mr/hermes/tests.py | dokai/mr.hermes | a7809af6ebeebc7e2df4aea7d69c571e78abce03 | [
"MIT"
] | null | null | null | mr/hermes/tests.py | dokai/mr.hermes | a7809af6ebeebc7e2df4aea7d69c571e78abce03 | [
"MIT"
] | null | null | null | mr/hermes/tests.py | dokai/mr.hermes | a7809af6ebeebc7e2df4aea7d69c571e78abce03 | [
"MIT"
] | null | null | null | # coding: utf-8
from email.mime.text import MIMEText
from email.parser import Parser
import os
import pytest
def test_mails_filename_order(debugsmtp):
me = 'bar@example.com'
you = 'foo@example.com'
for i in range(10):
msg = MIMEText('Mail%02i.' % i)
msg['Subject'] = 'Test'
msg['From'] = me
msg['To'] = you
debugsmtp.process_message(('localhost', 0), me, [you], msg.as_string())
mail_content = []
path = os.path.join(debugsmtp.path, 'foo@example.com')
for filename in os.listdir(path):
with open(os.path.join(path, filename)) as f:
msg = Parser().parsestr(f.read())
mail_content.append(msg.get_payload())
assert mail_content == [
'Mail00.', 'Mail01.', 'Mail02.', 'Mail03.', 'Mail04.',
'Mail05.', 'Mail06.', 'Mail07.', 'Mail08.', 'Mail09.']
def test_functional(sendmail, email_msg, tmpdir):
sendmail(email_msg)
(receiver,) = tmpdir.listdir()
assert receiver.basename == 'receiver@example.com'
(email_path,) = receiver.listdir()
assert email_path.basename.endswith('.eml')
with email_path.open() as f:
email = Parser().parsestr(f.read())
body = email.get_payload(decode=True)
body = body.decode(email.get_content_charset())
assert email['Subject'] == 'Testmail'
assert email['From'] == 'sender@example.com'
assert email['To'] == 'receiver@example.com'
assert u'Sme text' in body
| 29.070588 | 79 | 0.631728 |
ae18dc2b432f7078f03eeb502869d0c99af4f1dd | 21,967 | py | Python | src/lib/pipeline.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | src/lib/pipeline.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | 1 | 2022-03-02T14:54:27.000Z | 2022-03-02T14:54:27.000Z | src/lib/pipeline.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
import warnings
import importlib
import traceback
import subprocess
from io import StringIO
from pathlib import Path
from functools import partial
from multiprocessing import cpu_count
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import yaml
import numpy
import requests
from pandas import DataFrame, Int64Dtype, isnull, isna, read_csv, NA
from tqdm import tqdm
from .anomaly import detect_anomaly_all, detect_stale_columns
from .cast import column_convert
from .concurrent import process_map
from .net import download_snapshot
from .io import read_file, fuzzy_text, export_csv
from .utils import (
ROOT,
CACHE_URL,
combine_tables,
drop_na_records,
filter_output_columns,
infer_new_and_total,
stratify_age_and_sex,
)
def run(
self,
pipeline_name: str,
output_folder: Path,
process_count: int = cpu_count(),
verify: str = "simple",
progress: bool = True,
) -> DataFrame:
"""
Main method which executes all the associated [DataSource] objects and combines their
outputs.
"""
# Read the cache directory from our cloud storage
try:
cache = requests.get("{}/sitemap.json".format(CACHE_URL)).json()
except:
cache = {}
warnings.warn("Cache unavailable")
# Read the auxiliary input files into memory
aux = {name: read_file(file_name) for name, file_name in self.auxiliary_tables.items()}
# Precompute some useful transformations in the auxiliary input files
aux["metadata"]["match_string_fuzzy"] = aux["metadata"].match_string.apply(fuzzy_text)
for category in ("country", "subregion1", "subregion2"):
for suffix in ("code", "name"):
column = "{}_{}".format(category, suffix)
aux["metadata"]["{}_fuzzy".format(column)] = aux["metadata"][column].apply(
fuzzy_text
)
# Get all the pipeline outputs
# This operation is parallelized but output order is preserved
# Make a copy of the auxiliary table to prevent modifying it for everyone, but this way
# we allow for local modification (which might be wanted for optimization purposes)
aux_copy = {name: df.copy() for name, df in aux.items()}
# Create a function to be used during mapping. The nestedness is an unfortunate outcome of
# the multiprocessing module's limitations when dealing with lambda functions, coupled with
# the "sandboxing" we implement to ensure resiliency.
run_func = partial(DataPipeline._run_wrapper, output_folder, cache, aux_copy)
# If the process count is less than one, run in series (useful to evaluate performance)
data_sources_count = len(self.data_sources)
progress_label = f"Run {pipeline_name} pipeline"
if process_count <= 1 or data_sources_count <= 1:
map_func = tqdm(
map(run_func, self.data_sources),
total=data_sources_count,
desc=progress_label,
disable=not progress,
)
else:
map_func = process_map(
run_func, self.data_sources, desc=progress_label, disable=not progress
)
# Save all intermediate results (to allow for reprocessing)
intermediate_outputs = output_folder / "intermediate"
intermediate_outputs_files = []
for data_source, result in zip(self.data_sources, map_func):
data_source_class = data_source.__class__
data_source_config = str(data_source.config)
source_full_name = f"{data_source_class.__module__}.{data_source_class.__name__}"
intermediate_name = uuid.uuid5(
uuid.NAMESPACE_DNS, f"{source_full_name}.{data_source_config}"
)
intermediate_file = intermediate_outputs / f"{intermediate_name}.csv"
intermediate_outputs_files += [intermediate_file]
if result is not None:
export_csv(result, intermediate_file)
# Reload all intermediate results from disk
# In-memory results are discarded, this ensures reproducibility and allows for data sources
# to fail since the last successful intermediate result will be used in the combined output
pipeline_outputs = []
for source_output in intermediate_outputs_files:
try:
pipeline_outputs += [read_file(source_output)]
except Exception as exc:
warnings.warn(f"Failed to read intermediate file {source_output}. Error: {exc}")
# Get rid of all columns which are not part of the output to speed up data combination
pipeline_outputs = [
source_output[filter_output_columns(source_output.columns, self.schema)]
for source_output in pipeline_outputs
]
# Combine all pipeline outputs into a single DataFrame
if not pipeline_outputs:
warnings.warn("Empty result for pipeline chain {}".format(pipeline_name))
data = DataFrame(columns=self.schema.keys())
else:
progress_label = pipeline_name if progress else None
data = combine_tables(pipeline_outputs, ["date", "key"], progress_label=progress_label)
# Return data using the pipeline's output parameters
data = self.output_table(data)
# Skip anomaly detection unless requested
if verify == "simple":
# Validate that the table looks good
detect_anomaly_all(self.schema, data, [pipeline_name])
if verify == "full":
# Perform stale column detection for each known key
map_iter = data.key.unique()
map_func = lambda key: detect_stale_columns(
self.schema, data[data.key == key], (pipeline_name, key)
)
progress_label = f"Verify {pipeline_name} pipeline"
if process_count <= 1 or len(map_iter) <= 1:
map_func = tqdm(
map(map_func, map_iter),
total=len(map_iter),
desc=progress_label,
disable=not progress,
)
else:
map_func = process_map(
map_func, map_iter, desc=progress_label, disable=not progress
)
# Show progress as the results arrive if requested
if progress:
map_func = tqdm(
map_func, total=len(map_iter), desc=f"Verify {pipeline_name} pipeline"
)
# Consume the results
_ = list(map_func)
return data
| 42.489362 | 99 | 0.628488 |
ae1ad8c506c36a888f234786efecf582422e3003 | 35 | py | Python | src/artifice/scraper/supervisor/__init__.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | null | null | null | src/artifice/scraper/supervisor/__init__.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | 5 | 2019-09-18T19:17:14.000Z | 2021-03-20T01:46:06.000Z | src/artifice/scraper/supervisor/__init__.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | null | null | null | from .supervisor import Supervisor
| 17.5 | 34 | 0.857143 |
ae1b1c2f48b9a90d658a39990474e0ffceef271d | 366 | py | Python | Entradas/migrations/0012_auto_20200521_1931.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | null | null | null | Entradas/migrations/0012_auto_20200521_1931.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | 4 | 2021-03-30T13:26:38.000Z | 2021-06-10T19:20:56.000Z | Entradas/migrations/0012_auto_20200521_1931.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-21 17:31
from django.db import migrations
| 19.263158 | 47 | 0.587432 |
ae1dba2c9332b1aaf3dca98391c5242cc59d4eab | 322 | py | Python | jd/api/rest/ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest.py | jof2jc/jd | 691bf22c68ed88fb3fb32bfb43dd6da75024994a | [
"MIT"
] | null | null | null | jd/api/rest/ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest.py | jof2jc/jd | 691bf22c68ed88fb3fb32bfb43dd6da75024994a | [
"MIT"
] | null | null | null | jd/api/rest/ComJdQlBasicWsGlscGlscBasicSecondaryWSGetAssortByFidRequest.py | jof2jc/jd | 691bf22c68ed88fb3fb32bfb43dd6da75024994a | [
"MIT"
] | null | null | null | from jd.api.base import RestApi
| 20.125 | 80 | 0.776398 |
ae209fc837cb7fa92d358e927f5a60ae96f43be3 | 682 | py | Python | tensorflow_gnn/tools/generate_training_data_test.py | mattdangerw/gnn | f39d3ea0d8fc6e51cf58814873fc1502c12554ae | [
"Apache-2.0"
] | 611 | 2021-11-18T06:04:10.000Z | 2022-03-29T11:46:42.000Z | tensorflow_gnn/tools/generate_training_data_test.py | mattdangerw/gnn | f39d3ea0d8fc6e51cf58814873fc1502c12554ae | [
"Apache-2.0"
] | 25 | 2021-11-18T17:21:12.000Z | 2022-03-31T06:36:55.000Z | tensorflow_gnn/tools/generate_training_data_test.py | mattdangerw/gnn | f39d3ea0d8fc6e51cf58814873fc1502c12554ae | [
"Apache-2.0"
] | 52 | 2021-11-18T23:12:30.000Z | 2022-03-27T06:31:08.000Z | """Unit tests for generate training data test."""
from os import path
from absl import flags
import tensorflow as tf
from tensorflow_gnn.tools import generate_training_data
from tensorflow_gnn.utils import test_utils
FLAGS = flags.FLAGS
if __name__ == "__main__":
tf.test.main()
| 26.230769 | 76 | 0.781525 |
ae2166a391abaacff03859c883ab005463fa8d39 | 561 | py | Python | vsenvs.py | KaoruShiga/geister_rl | a0dbf6bd7f79b0366727664da6d9f1cf3060190e | [
"MIT"
] | 8 | 2021-03-12T00:06:44.000Z | 2022-01-15T20:09:51.000Z | vsenvs.py | KaoruShiga/geister_rl | a0dbf6bd7f79b0366727664da6d9f1cf3060190e | [
"MIT"
] | null | null | null | vsenvs.py | KaoruShiga/geister_rl | a0dbf6bd7f79b0366727664da6d9f1cf3060190e | [
"MIT"
] | 1 | 2021-10-04T07:42:01.000Z | 2021-10-04T07:42:01.000Z | import random as rnd
import numpy as np
from random_agent import RandomAgent
from geister2 import Geister2
from vsenv import VsEnv
| 29.526316 | 67 | 0.716578 |