hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ee30562de746fae8c4e7f911bc276f4521628762 | 886 | py | Python | 1 - companies_list_downloader.py | B-Jugurtha/Project-01--Web-scraping---Data-cleaning | 981ec207c6c2d55efb10f137fec0bbf06df50cbb | [
"MIT"
] | null | null | null | 1 - companies_list_downloader.py | B-Jugurtha/Project-01--Web-scraping---Data-cleaning | 981ec207c6c2d55efb10f137fec0bbf06df50cbb | [
"MIT"
] | null | null | null | 1 - companies_list_downloader.py | B-Jugurtha/Project-01--Web-scraping---Data-cleaning | 981ec207c6c2d55efb10f137fec0bbf06df50cbb | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as bs
from pathlib import Path
import os
import glob
import time
import random
import requests
pwd = os.getcwd()
page_counter = 1
URL = "https://www.example.com/companies/?page="
# Creating 'pages' folder if this one exists deletes it's content
try:
Path(pwd + '/pages').mkdir(parents=True, exist_ok=False)
except FileExistsError:
print("File Already exists, Deleting it's content...")
files = glob.glob(pwd + '/pages/*')
for f in files:
os.remove(f)
time.sleep(5)
while page_counter <= 400:
page = requests.get(URL+str(page_counter))
soup = bs(page.content, "html.parser")
if(page_counter % 10 == 0):
time.sleep(random.randrange(8, 13))
print(page_counter)
with open('pages/'+str(page_counter)+".html", "w", encoding='utf-8') as file:
file.write(str(soup))
page_counter += 1
| 23.945946 | 81 | 0.667043 |
ee327f2d0e55a21aaeef4a1ec21efda3fb98ce69 | 892 | py | Python | app/db_models/main.py | KatlehoGxagxa/kk_secure | 043dfbe9511a3b950686240ae36a6e676b009390 | [
"MIT"
] | null | null | null | app/db_models/main.py | KatlehoGxagxa/kk_secure | 043dfbe9511a3b950686240ae36a6e676b009390 | [
"MIT"
] | null | null | null | app/db_models/main.py | KatlehoGxagxa/kk_secure | 043dfbe9511a3b950686240ae36a6e676b009390 | [
"MIT"
] | null | null | null | import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
Base = declarative_base()
from app.db_models.accounts import Accounts
from app.db_models.association_table import AssociationTable
from app.db_models.organisations import Organisations
from app.db_models.users import User
# We are using ORM from sqlalchemy so that we
# can have a better representation of our relationships
# To avoid overwriting a database
if os.path.exists(str(os.getcwd())+'/x_system_db.db') == False:
engine = create_engine('sqlite:///'+str(os.getcwd()) +'\database.db')
Base.metadata.create_all(bind=engine)
# Connecting to an existing database
engine = create_engine('sqlite:///'+str(os.getcwd())+'/database.db', echo=False)
Session = sessionmaker(bind=engine)
session = Session()
| 27.030303 | 80 | 0.778027 |
ee33559773aa94f6134aaa49252ad4b6b825ef37 | 791 | py | Python | tests/test_apps/test_covid_preprocessing.py | jtrauer/AuTuMN | 2e1defd0104bbecfe667b8ea5ecaf4bc6741905c | [
"BSD-2-Clause-FreeBSD"
] | 14 | 2020-03-11T06:15:30.000Z | 2022-03-09T03:38:35.000Z | tests/test_apps/test_covid_preprocessing.py | jtrauer/AuTuMN | 2e1defd0104bbecfe667b8ea5ecaf4bc6741905c | [
"BSD-2-Clause-FreeBSD"
] | 96 | 2020-01-29T05:10:29.000Z | 2022-03-31T01:48:46.000Z | tests/test_apps/test_covid_preprocessing.py | monash-emu/AuTuMN | fa3b81ef54cf561e0e7364a48f4ff96585dc3310 | [
"BSD-2-Clause-FreeBSD"
] | 10 | 2020-04-24T00:38:00.000Z | 2021-08-19T16:19:03.000Z | import numpy as np
from autumn.models.covid_19.detection import create_cdr_function
def test_cdr_intercept():
"""
Test that there is zero case detection when zero tests are performed
"""
for cdr_at_1000_tests in np.linspace(0.05, 0.5, 10):
cdr_function = create_cdr_function(1000.0, cdr_at_1000_tests)
assert cdr_function(0.0) == 0.0
def test_cdr_values():
"""
Test that CDR is always a proportion, bounded by zero and one
"""
for cdr_at_1000_tests in np.linspace(0.05, 0.5, 10):
cdr_function = create_cdr_function(1000.0, cdr_at_1000_tests)
for i_tests in list(np.linspace(0.0, 1e3, 11)) + list(np.linspace(0.0, 1e5, 11)):
assert cdr_function(i_tests) >= 0.0
assert cdr_function(i_tests) <= 1.0
| 30.423077 | 89 | 0.672566 |
ee350ea74f60bf255d219e07c176125875586383 | 5,339 | py | Python | nessussearch.py | canidorichard/nessussearch | 7b4623f0b3a3fb60706eb39785ea4f7a1cebf800 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-06-30T15:53:03.000Z | 2020-06-30T15:53:03.000Z | nessussearch.py | canidorichard/nessussearch | 7b4623f0b3a3fb60706eb39785ea4f7a1cebf800 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | nessussearch.py | canidorichard/nessussearch | 7b4623f0b3a3fb60706eb39785ea4f7a1cebf800 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2020-08-05T23:25:36.000Z | 2020-09-26T10:01:18.000Z | #!/usr/bin/env python3
# Copyright (c) 2019, Richard Hughes All rights reserved.
# Released under the BSD license. Please see LICENSE.md for more information.
import sys
import os
import argparse
import glob
import xml.dom.minidom
import re
# Define command line arguments
parms=argparse.ArgumentParser()
parms.add_argument("-f", "--file", type=str, required=False, default="*.nessus", help="Specify input file(s)")
parms.add_argument("-c", "--case_sensitive", required=False, action="store_true", help="Case sensitive search")
parms.add_argument("-d", "--debug", required=False, action="store_true", help="Debug output")
parms.add_argument("-o", "--output", type=str, required=False, default="xml_min", choices=['xml','xml_min','ipv4',"mac","mac+ipv4","ports","script"], help="Specify output format")
parms.add_argument("-p", "--path", type=str, required=False, default=".", help="Specify location of file(s)")
parms.add_argument("-r", "--regex", type=str, required=True, help="Search expression")
args = vars(parms.parse_args())
# Globals
errorsexist = False
# Main processing
# Process file
# Process document
if __name__ == '__main__':
# Execute main method
main(args)
| 33.36875 | 179 | 0.617906 |
ee35e8695f1106242572ccc2f67ad6599a4046fc | 7,889 | py | Python | tests/test_format.py | Hassanchenganasseri/yara-language-server | 9ba4d820f1cabb738921e17d4489706966cb290b | [
"Apache-2.0"
] | 1 | 2021-11-13T02:04:55.000Z | 2021-11-13T02:04:55.000Z | tests/test_format.py | Hassanchenganasseri/yara-language-server | 9ba4d820f1cabb738921e17d4489706966cb290b | [
"Apache-2.0"
] | 4 | 2020-12-12T23:41:40.000Z | 2021-01-18T17:00:53.000Z | tests/test_format.py | Hassanchenganasseri/yara-language-server | 9ba4d820f1cabb738921e17d4489706966cb290b | [
"Apache-2.0"
] | 1 | 2022-02-21T15:36:21.000Z | 2022-02-21T15:36:21.000Z | ''' Format Provider Tests '''
from textwrap import dedent
import pytest
from yarals import helpers
from yarals.base import protocol
from yarals.base import errors as ce
# don't care about pylint(protected-access) warnings since these are just tests
# pylint: disable=W0212
| 31.556 | 100 | 0.615667 |
ee36deec1ce296c7314b585757c03cbcb17ed182 | 5,109 | py | Python | pykitml/fceux.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 34 | 2020-03-06T07:53:43.000Z | 2022-03-13T06:12:29.000Z | pykitml/fceux.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 6 | 2021-06-08T22:43:23.000Z | 2022-03-08T13:57:33.000Z | pykitml/fceux.py | RainingComputers/pykitml | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | [
"MIT"
] | 1 | 2020-11-30T21:20:32.000Z | 2020-11-30T21:20:32.000Z | import warnings
import socket
if(__name__ == '__main__'):
server = FCEUXServer(on_frame)
print(server.info)
server.start()
| 26.609375 | 78 | 0.533372 |
ee383dc672da93812bf7e31171a62575ecb25dfe | 905 | py | Python | orchestra/forms/fields.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 68 | 2015-02-09T10:28:44.000Z | 2022-03-12T11:08:36.000Z | orchestra/forms/fields.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 17 | 2015-05-01T18:10:03.000Z | 2021-03-19T21:52:55.000Z | orchestra/forms/fields.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 29 | 2015-03-31T04:51:03.000Z | 2022-02-17T02:58:50.000Z | from django import forms
from .widgets import SpanWidget
| 30.166667 | 82 | 0.667403 |
ee38deebe1bb8166653f041ac6b217f4fdba49db | 5,480 | py | Python | gossipcat/dev/CAT.py | Ewen2015/GossipCat | 6792c2ddee16515d9724583c9b57f332cff4b206 | [
"Apache-2.0"
] | 2 | 2017-12-17T06:24:43.000Z | 2018-01-17T08:27:52.000Z | gossipcat/dev/CAT.py | Ewen2015/GossipCat | 6792c2ddee16515d9724583c9b57f332cff4b206 | [
"Apache-2.0"
] | null | null | null | gossipcat/dev/CAT.py | Ewen2015/GossipCat | 6792c2ddee16515d9724583c9b57f332cff4b206 | [
"Apache-2.0"
] | 1 | 2017-12-12T16:00:48.000Z | 2017-12-12T16:00:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: Ewen Wang
email: wolfgangwong2012@gmail.com
license: Apache License 2.0
"""
import warnings
warnings.filterwarnings('ignore')
import random
random.seed(0)
import time
import json
import pandas as pd
import matplotlib.pyplot as plt
import catboost as cb | 33.82716 | 144 | 0.578102 |
ee39158935f040d9514500c148f834c9e0815920 | 4,698 | py | Python | kiss.py | QuantumEF/AX25-Chat | d2f8f8b5b3a968c6982dd013c5860aab461e4dc6 | [
"MIT"
] | null | null | null | kiss.py | QuantumEF/AX25-Chat | d2f8f8b5b3a968c6982dd013c5860aab461e4dc6 | [
"MIT"
] | null | null | null | kiss.py | QuantumEF/AX25-Chat | d2f8f8b5b3a968c6982dd013c5860aab461e4dc6 | [
"MIT"
] | 1 | 2020-09-16T03:19:18.000Z | 2020-09-16T03:19:18.000Z | #!/usr/bin/python
import sys
import socket
import asyncio
import select
from hexdump import hexdump
KISS_FEND = 0xC0 # Frame start/end marker
KISS_FESC = 0xDB # Escape character
KISS_TFEND = 0xDC # If after an escape, means there was an 0xC0 in the source message
KISS_TFESC = 0xDD # If after an escape, means there was an 0xDB in the source message
#Code below here slightly modified from https://thomask.sdf.org/blog/2018/12/15/sending-raw-ax25-python.html
# Addresses must be 6 bytes plus the SSID byte, each character shifted left by 1
# If it's the final address in the header, set the low bit to 1
# Ignoring command/response for simple example
#send_kiss("kn4vhm","km4yhi","hi") | 33.557143 | 108 | 0.686037 |
ee3a458bc0b453128e0670c2c4878e148f4fd267 | 2,903 | py | Python | wall/models.py | viral85/test_wall_app | 5487297e3dcd5971c4f8778fe0bc49e35efad587 | [
"MIT"
] | null | null | null | wall/models.py | viral85/test_wall_app | 5487297e3dcd5971c4f8778fe0bc49e35efad587 | [
"MIT"
] | null | null | null | wall/models.py | viral85/test_wall_app | 5487297e3dcd5971c4f8778fe0bc49e35efad587 | [
"MIT"
] | null | null | null | from django.db import models
from users.models import User, BaseModel
# Create your models here.
class Comment(BaseModel, CommonModel):
"""
Comment class is define for the keep the Comment details and other information.
:param CommonModel:CommonModel which has common attribute for the
application.
:param BaseModel: Base class which has common attribute for the
application.
"""
wall = models.ForeignKey(Wall, on_delete=models.CASCADE, blank=True, null=True, related_name="comments")
comment_content = models.CharField(max_length=200)
class Like(BaseModel):
"""
Like class is define for the keep the Like details and other information.
:param BaseModel: Base class which has common attribute for the
application.
"""
wall = models.OneToOneField(Wall, related_name="likes", on_delete=models.CASCADE)
users = models.ManyToManyField(User, related_name='requirement_wall_likes', blank=True)
class DisLike(BaseModel):
"""
DisLike class is define for the keep the DisLike details and other information.
:param BaseModel: Base class which has common attribute for the
application.
"""
wall = models.OneToOneField(Wall, related_name="dis_likes", on_delete=models.CASCADE)
users = models.ManyToManyField(User, related_name='requirement_wall_dis_likes', blank=True)
| 32.255556 | 108 | 0.695143 |
ee3b32d88b8afd4ca09f0005b74b567acf3d93ca | 2,171 | py | Python | src/main/python/generate.py | gpanther/fastutil-guava-tests | 8606eb1126874695e58f263610787b7775c98ffb | [
"Apache-2.0"
] | 1 | 2016-03-16T08:36:51.000Z | 2016-03-16T08:36:51.000Z | src/main/python/generate.py | gpanther/fastutil-guava-tests | 8606eb1126874695e58f263610787b7775c98ffb | [
"Apache-2.0"
] | null | null | null | src/main/python/generate.py | gpanther/fastutil-guava-tests | 8606eb1126874695e58f263610787b7775c98ffb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import re
import jinja2
kind_metadata = {
k: v for (k, v) in [
generate_metadata('Byte'),
generate_metadata('Char', {'boxed_class': 'Character'}),
generate_metadata('Double'),
generate_metadata('Float'),
generate_metadata('Int', {'boxed_class': 'Integer'}),
generate_metadata('Long'),
generate_metadata('Short'),
generate_metadata('Object', {'primitive': None, 'boxed_class': 'String'}),
# This is mostly the same as Object with the difference that equality is checked
# using "==" instead of equals
generate_metadata('Reference', {
'package': 'it.unimi.dsi.fastutil.objects',
'primitive': None,
'boxed_class': 'String',
}),
]
}
script_dir = os.path.dirname(os.path.realpath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(script_dir), autoescape=False, trim_blocks=True)
env.globals['map_cast'] = map_cast
test_template = env.get_template('collection_tests.j')
for kind in kind_metadata.keys():
output = test_template.render(
kinds=kind_metadata.keys(),
kind=kind, metadata=kind_metadata[kind], metadatas=kind_metadata)
output = re.sub(r'(new (?:Object|Reference)\w+?(?:Set|List))(?=\()', r'\1<String>', output)
output = re.sub(r'\(((?:Object|Reference)2\w+Map)\) ', r'(\1<String>) ', output)
with open('%sCollectionsTest.java' % kind, 'w') as f:
f.write(output)
| 35.590164 | 104 | 0.634731 |
ee3b8a11298be38df1b9a7e27b57f8559c0985a3 | 60 | py | Python | app/tests/__init__.py | victorlomi/News-Catchup | 214b4e92b0cf90c7e4906c3b2316578918645dac | [
"Unlicense"
] | null | null | null | app/tests/__init__.py | victorlomi/News-Catchup | 214b4e92b0cf90c7e4906c3b2316578918645dac | [
"Unlicense"
] | null | null | null | app/tests/__init__.py | victorlomi/News-Catchup | 214b4e92b0cf90c7e4906c3b2316578918645dac | [
"Unlicense"
] | null | null | null | from tests import test_article
from tests import test_source | 30 | 30 | 0.883333 |
ee3cb7c19d0619f9abd1c5afe9d9065a4239aee4 | 7,451 | py | Python | Tree_test.py | nelliesnoodles/PythonBinaryTree | a5964cbb991cbd5007a5253bd48bc83eb56dc0ca | [
"MIT"
] | null | null | null | Tree_test.py | nelliesnoodles/PythonBinaryTree | a5964cbb991cbd5007a5253bd48bc83eb56dc0ca | [
"MIT"
] | null | null | null | Tree_test.py | nelliesnoodles/PythonBinaryTree | a5964cbb991cbd5007a5253bd48bc83eb56dc0ca | [
"MIT"
] | null | null | null | from random import randint
from BST_version_3 import BinaryTreeNode, BinaryTree
# I have to keep the build of lists under 3,000 total
# my computer starts to freak out about memory at 10,000
# it slows at 3000.
# recursion depth happens on count at 2000 items
| 27.596296 | 80 | 0.68219 |
ee3d2db0fde263da3c3eb73afde2fa65a46b2a9d | 445 | py | Python | models/batchnorm.py | JingweiJ/JointActorActionSeg | d33904f3f2c02094bb0a32bfec3105affff59426 | [
"MIT"
] | 11 | 2018-12-12T00:44:09.000Z | 2022-01-24T13:25:37.000Z | models/batchnorm.py | JingweiJ/JointActorActionSeg | d33904f3f2c02094bb0a32bfec3105affff59426 | [
"MIT"
] | 1 | 2019-04-24T08:25:12.000Z | 2019-04-24T08:25:12.000Z | models/batchnorm.py | JingweiJ/JointActorActionSeg | d33904f3f2c02094bb0a32bfec3105affff59426 | [
"MIT"
] | 3 | 2018-12-21T08:13:20.000Z | 2020-07-08T22:54:09.000Z | import keras.layers as KL | 37.083333 | 78 | 0.732584 |
ee3d3b33e7d00e0c127a259c954eabf576b5a09b | 523 | py | Python | shadow-hunters/tests/integration_tests/test_utils.py | dolphonie/shadow-hunters | 2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9 | [
"MIT"
] | 17 | 2019-05-04T13:25:33.000Z | 2022-01-22T14:50:49.000Z | shadow-hunters/tests/integration_tests/test_utils.py | dolphonie/shadow-hunters | 2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9 | [
"MIT"
] | 25 | 2020-05-24T03:29:42.000Z | 2021-03-29T07:07:47.000Z | shadow-hunters/tests/integration_tests/test_utils.py | dolphonie/shadow-hunters | 2257a67f965cf43e1e5c9c8e7af87fe9ae16f5c9 | [
"MIT"
] | 7 | 2019-05-30T00:15:58.000Z | 2022-01-16T14:37:25.000Z | import pytest
from utils import make_hash_sha256
# test_utils.py
| 34.866667 | 65 | 0.548757 |
ee3f1dab5bbae9fc4ca6f9d759f36e46c7e4b97c | 62 | py | Python | SECRET-TEMPLATE.py | matousc89/nk-bot | bde87d74ea00b6f947641242b282f2ce40cfc7b9 | [
"MIT"
] | null | null | null | SECRET-TEMPLATE.py | matousc89/nk-bot | bde87d74ea00b6f947641242b282f2ce40cfc7b9 | [
"MIT"
] | null | null | null | SECRET-TEMPLATE.py | matousc89/nk-bot | bde87d74ea00b6f947641242b282f2ce40cfc7b9 | [
"MIT"
] | null | null | null | TOKEN = ""
GUILD = ""
# crypto bot
API_KEY_COINMARKETCAP = "" | 12.4 | 26 | 0.645161 |
ee404e6979393de5363253ef67e712dee62d98a1 | 543 | py | Python | loans/migrations/0002_product_organization.py | prateekmohanty63/microfinance | 39839c0d378be4ccc40a9dde5dc38a10773a38a1 | [
"MIT"
] | 1 | 2022-02-25T18:39:44.000Z | 2022-02-25T18:39:44.000Z | loans/migrations/0002_product_organization.py | prateekmohanty63/microfinance | 39839c0d378be4ccc40a9dde5dc38a10773a38a1 | [
"MIT"
] | null | null | null | loans/migrations/0002_product_organization.py | prateekmohanty63/microfinance | 39839c0d378be4ccc40a9dde5dc38a10773a38a1 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2022-02-25 05:41
from django.db import migrations, models
import django.db.models.deletion
| 25.857143 | 126 | 0.64825 |
ee407797b83ac396b3980aeaad4d8b956d5e4e23 | 4,026 | py | Python | writeups/2020/CyberStakes/party-roppin/solve.py | welchbj/ctf | fd4e2cea692b134163cc9bd66c2b4796bdefed8c | [
"MIT"
] | 65 | 2019-10-07T01:29:16.000Z | 2022-03-18T14:20:40.000Z | writeups/2020/CyberStakes/party-roppin/solve.py | welchbj/ctf | fd4e2cea692b134163cc9bd66c2b4796bdefed8c | [
"MIT"
] | null | null | null | writeups/2020/CyberStakes/party-roppin/solve.py | welchbj/ctf | fd4e2cea692b134163cc9bd66c2b4796bdefed8c | [
"MIT"
] | 12 | 2020-05-04T01:16:53.000Z | 2022-01-02T14:33:41.000Z | #!/usr/bin/env python2
"""
Run exploit locally with:
./solve.py
./solve.py REMOTE HOST=challenge.acictf.com PORT=45110
"""
import ast
import struct
import subprocess
from pwn import *
PROG_PATH = './challenge'
PROT_RWX = constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC
EGG_SIZE = 0x1000
if __name__ == '__main__':
init_pwntools_context()
io = init_io()
if args['PAUSE']:
raw_input('PAUSED...')
win(io)
| 23.682353 | 102 | 0.619722 |
ee40c9e058cfde444787c36d7ef8a53e584dde9c | 403 | py | Python | run_tests.py | andres-nieves-endava/djsonb | 5fc1ef3a10cb313af7e1c04c25acac81e81c7096 | [
"BSD-3-Clause"
] | 3 | 2016-03-08T20:55:54.000Z | 2019-06-13T14:31:50.000Z | run_tests.py | andres-nieves-endava/djsonb | 5fc1ef3a10cb313af7e1c04c25acac81e81c7096 | [
"BSD-3-Clause"
] | 13 | 2015-10-07T18:21:37.000Z | 2018-07-30T12:51:40.000Z | run_tests.py | andres-nieves-endava/djsonb | 5fc1ef3a10cb313af7e1c04c25acac81e81c7096 | [
"BSD-3-Clause"
] | 4 | 2016-03-14T18:12:33.000Z | 2020-07-08T15:41:50.000Z | #!/usr/bin/env python
import os
import sys
import time
import django
sys.path.insert(0, './tests')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
if __name__ == '__main__':
from django.core.management import execute_from_command_line
args = sys.argv
args.insert(1, 'test')
if len(args) == 2:
args.insert(2, 'djsonb_fields')
execute_from_command_line(args)
| 19.190476 | 64 | 0.704715 |
ee42ab06df137bb5158c466e211b9c061a500ecf | 1,485 | py | Python | utils/mongo_seed/csv_to_json.py | Abdoul1996/superteam2 | 3ba558f9dfd652007a1f80ee01543c266c87bc0d | [
"MIT"
] | null | null | null | utils/mongo_seed/csv_to_json.py | Abdoul1996/superteam2 | 3ba558f9dfd652007a1f80ee01543c266c87bc0d | [
"MIT"
] | null | null | null | utils/mongo_seed/csv_to_json.py | Abdoul1996/superteam2 | 3ba558f9dfd652007a1f80ee01543c266c87bc0d | [
"MIT"
] | null | null | null | from os import path
import csv
import json
import random
# Our dataset was created from http://www2.informatik.uni-freiburg.de/~cziegler/BX/ and reduced down to 1,000 records
# The CSV file has semicolon delimiters due to book titles containing commas
SCRIPT_DIR = path.dirname(path.realpath(__file__)) + '/'
DB_FILE = SCRIPT_DIR + 'cscl_db.csv'
OUTPUT_FILE = SCRIPT_DIR + 'cscl_db.json'
# Original headers: "ISBN";"Book-Title";"Book-Author";"Year-Of-Publication";"Publisher";"Image-URL-S";"Image-URL-M";"Image-URL-L"
with open(DB_FILE, 'r') as file:
reader = csv.DictReader(file,
delimiter=';',
fieldnames=[
'isbn', 'title', 'author', 'publication_year',
'publisher', 'image_url_s', 'image_url_m',
'image_url_l'
])
with open(OUTPUT_FILE, 'w') as o_file:
for line in reader:
copies = random.randrange(1,10)
available = random.randrange(0,copies)
line['copies'] = copies
line['available'] = available
# Convert publication_year from string to int
line['publication_year'] = int(line['publication_year'])
json.dump(line, o_file)
o_file.write('\n')
print(
'\n----------\nFinished converting {} from CSV to JSON.\nFile can be found at {}'
.format(DB_FILE, OUTPUT_FILE))
| 38.076923 | 129 | 0.576431 |
ee4520a20396d2ea6e82b1e5331b895e66992d37 | 286 | py | Python | service/routing.py | ademuk/features-service | 2c5b448435c7491c0952fd38f31dd8cc987788c4 | [
"MIT"
] | null | null | null | service/routing.py | ademuk/features-service | 2c5b448435c7491c0952fd38f31dd8cc987788c4 | [
"MIT"
] | null | null | null | service/routing.py | ademuk/features-service | 2c5b448435c7491c0952fd38f31dd8cc987788c4 | [
"MIT"
] | null | null | null | from channels import route
from .features import consumers
path = r'^/api/projects/(?P<id>[0-9a-f-]+)/stream/$'
channel_routing = [
route("websocket.connect", consumers.connect_to_project, path=path),
route("websocket.receive", consumers.disconnect_from_project, path=path)
]
| 28.6 | 76 | 0.737762 |
ee464501d973897e3ed2b4d73bd1727f9c46ec63 | 4,923 | py | Python | hearts_navigation/scripts/location_goal.py | HeartsBRL/hearts_navigation | 0f1434675bd200741ea8b21381f35b83692de986 | [
"MIT"
] | null | null | null | hearts_navigation/scripts/location_goal.py | HeartsBRL/hearts_navigation | 0f1434675bd200741ea8b21381f35b83692de986 | [
"MIT"
] | null | null | null | hearts_navigation/scripts/location_goal.py | HeartsBRL/hearts_navigation | 0f1434675bd200741ea8b21381f35b83692de986 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# https://github.com/studioimaginaire/phue
import rospy
from geometry_msgs.msg import PoseStamped, Pose, Pose2D
from std_msgs.msg import String
import json
import io
import os
# pose = Pose value
# location = string value
if __name__ == '__main__':
rospy.init_node("task_controller", anonymous=True)
loc = Location()
rospy.spin()
| 27.198895 | 109 | 0.549868 |
ee46f59058bfd66eb8f015628cb6a304ce257ecc | 3,471 | py | Python | scripts/kinova_joy_teleop.py | Gregory-Baker/kinova_joy_teleop | 42666022662fdcf7985ca5d4598eecb5e18eb8b6 | [
"MIT"
] | null | null | null | scripts/kinova_joy_teleop.py | Gregory-Baker/kinova_joy_teleop | 42666022662fdcf7985ca5d4598eecb5e18eb8b6 | [
"MIT"
] | null | null | null | scripts/kinova_joy_teleop.py | Gregory-Baker/kinova_joy_teleop | 42666022662fdcf7985ca5d4598eecb5e18eb8b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Node to convert joystick commands to kinova arm cartesian movements
"""
import rospy
from sensor_msgs.msg import Joy
#from geometry_msgs.msg import Pose
from kortex_driver.msg import TwistCommand, Finger, Empty, Pose
from kortex_driver.srv import SendGripperCommand, SendGripperCommandRequest, GetMeasuredCartesianPose, GetMeasuredCartesianPoseResponse
max_linear_speed = 0.1
max_angular_speed = 0.4
gripper_speed = 0.05
cartesian_min_limit_x = 0.3
restricted_mode = False
joy_topic = "joy"
arm_ns = ""
if __name__ == '__main__':
try:
joy_listener()
except rospy.ROSInterruptException:
pass
| 35.418367 | 135 | 0.675886 |
ee476c7b28e95c420c92669fa0909df9dee5dae3 | 576 | py | Python | ausgesondert/dammitJim.py | Coding-for-the-Arts/drawbot-samples-solutions | 7191610d6efd7d788056070e7826d255b7ef496b | [
"CC0-1.0"
] | null | null | null | ausgesondert/dammitJim.py | Coding-for-the-Arts/drawbot-samples-solutions | 7191610d6efd7d788056070e7826d255b7ef496b | [
"CC0-1.0"
] | null | null | null | ausgesondert/dammitJim.py | Coding-for-the-Arts/drawbot-samples-solutions | 7191610d6efd7d788056070e7826d255b7ef496b | [
"CC0-1.0"
] | null | null | null | kraftausdruecke = [
"Mist",
"Verdammt",
"Mannmannmann",
"Herrgottnochmal",
"Echt jetzt",
"Zum Teufel"
]
berufe = [
"Baggerfhrer",
"Velokurier",
"Tierrztin",
"Verkehrspolizist",
"Schreinerin",
"Apotheker",
"Komponist",
"Physikerin",
"Buchhndlerin"
]
a = choice(kraftausdruecke)
# pick random element in list
# find out its index
# pop it from the list, so it cant be picked again
b = berufe.pop(berufe.index(choice(berufe)))
c = choice(berufe)
print(a, "Erwin" + ",", "ich bin", b, "und nicht", c + "!")
| 20.571429 | 59 | 0.604167 |
ee4a673bdc3ecbf54bdd00a403e289703d72c886 | 2,429 | py | Python | python/652_find_duplicated_subtrees.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 17 | 2016-03-01T22:40:53.000Z | 2021-04-19T02:15:03.000Z | python/652_find_duplicated_subtrees.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | null | null | null | python/652_find_duplicated_subtrees.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 3 | 2019-03-07T03:48:43.000Z | 2020-04-05T01:11:36.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| 26.402174 | 83 | 0.539728 |
ee4ac13afb88b80f6571f8b3cdd5af07771ebb6c | 3,391 | py | Python | main.py | rajanant49/Streamlit-Demo-App | 894e0e2dbdf33148bccc6abc791221f6e7b01036 | [
"Apache-2.0"
] | null | null | null | main.py | rajanant49/Streamlit-Demo-App | 894e0e2dbdf33148bccc6abc791221f6e7b01036 | [
"Apache-2.0"
] | null | null | null | main.py | rajanant49/Streamlit-Demo-App | 894e0e2dbdf33148bccc6abc791221f6e7b01036 | [
"Apache-2.0"
] | null | null | null | import streamlit as st
from PIL import Image
import cv2
import numpy as np
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
st.title("Streamlit Demo App")
st.write("""
# Explorling different classifier on different datasets
""")
dataset_name= st.selectbox("Select Dataset",("","IRIS","BreastCancer","WineDataset"))
if dataset_name!="":
classifier_name=st.selectbox("Select Classifier",("","KNN","RandomForest","SVM"))
if classifier_name!="":
X,y=get_dataset(dataset_name)
st.write("Shape of the dataset",X.shape)
st.write("Number of classes",len(np.unique(y)))
params=add_parameter_ui(classifier_name)
clf=get_classifier(classifier_name,params)
#Classification
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
acc=accuracy_score(y_pred,y_test)
st.write("Classifier = ",classifier_name)
st.write("Accuracy = ",np.round(acc*100,2),"%")
pca=PCA(2)
X_projected=pca.fit_transform(X)
x1=X_projected[:,0]
x2=X_projected[:,1]
fig=plt.figure()
plt.scatter(x1,x2,c=y,alpha=0.8,cmap='viridis')
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.colorbar()
st.pyplot(fig)
# def load_image(image_file):
# img = Image.open(image_file)
# return img
#
# image_file = st.file_uploader("Upload Image",type=['png','jpeg','jpg'])
# if image_file is not None:
# file_details = {"Filename":image_file.name,"FileType":image_file.type,"FileSize":image_file.size}
# st.write(file_details)
#
# img = load_image(image_file)
# st.image(img,width=250,height=250)
# image_array=np.asarray(img)
# st.image(image_array,width=100,height=100)
| 30.54955 | 125 | 0.6243 |
ee4ba609d0784a1c68fa7c4cd767173c1520196d | 3,485 | py | Python | Face-Pixelizer/res/python/src/pixelize.py | spil3141/Pixelizer-Face | c234fe998727435d88f4b860432945c2e6a957ef | [
"MIT"
] | null | null | null | Face-Pixelizer/res/python/src/pixelize.py | spil3141/Pixelizer-Face | c234fe998727435d88f4b860432945c2e6a957ef | [
"MIT"
] | null | null | null | Face-Pixelizer/res/python/src/pixelize.py | spil3141/Pixelizer-Face | c234fe998727435d88f4b860432945c2e6a957ef | [
"MIT"
] | null | null | null | """
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import numpy as np
import PIL
import matplotlib.pyplot as plt
import matplotlib.image as matimage
if __name__ == '__main__':
main() | 34.50495 | 100 | 0.667432 |
ee4d585ac0fdab34831b9549bd00bfc84fbe7647 | 4,905 | py | Python | model_zoo/official/cv/centerface/postprocess.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 1 | 2021-07-03T06:52:20.000Z | 2021-07-03T06:52:20.000Z | model_zoo/official/cv/centerface/postprocess.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/centerface/postprocess.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
from dependency.evaluate.eval import evaluation
dct_map = {'16': '16--Award_Ceremony', '26': '26--Soldier_Drilling', '29': '29--Students_Schoolkids',
'30': '30--Surgeons', '52': '52--Photographers', '59': '59--people--driving--car', '44': '44--Aerobics',
'50': '50--Celebration_Or_Party', '19': '19--Couple', '38': '38--Tennis', '37': '37--Soccer',
'48': '48--Parachutist_Paratrooper', '53': '53--Raid', '6': '6--Funeral', '40': '40--Gymnastics',
'5': '5--Car_Accident', '39': '39--Ice_Skating', '47': '47--Matador_Bullfighter', '61': '61--Street_Battle',
'56': '56--Voter', '18': '18--Concerts', '1': '1--Handshaking', '2': '2--Demonstration',
'28': '28--Sports_Fan', '4': '4--Dancing', '43': '43--Row_Boat', '49': '49--Greeting', '12': '12--Group',
'24': '24--Soldier_Firing', '33': '33--Running', '11': '11--Meeting', '36': '36--Football',
'45': '45--Balloonist', '15': '15--Stock_Market', '51': '51--Dresses', '7': '7--Cheering',
'32': '32--Worker_Laborer', '58': '58--Hockey', '35': '35--Basketball', '22': '22--Picnic',
'55': '55--Sports_Coach_Trainer', '3': '3--Riot', '23': '23--Shoppers', '34': '34--Baseball',
'8': '8--Election_Campain', '9': '9--Press_Conference', '17': '17--Ceremony', '13': '13--Interview',
'20': '20--Family_Group', '25': '25--Soldier_Patrol', '42': '42--Car_Racing', '0': '0--Parade',
'14': '14--Traffic', '41': '41--Swimming', '46': '46--Jockey', '10': '10--People_Marching',
'54': '54--Rescue', '57': '57--Angler', '31': '31--Waiter_Waitress', '27': '27--Spa', '21': '21--Festival'}
if __name__ == '__main__':
cal_acc(config.result_path, config.label_file, config.meta_file, config.save_path)
| 59.096386 | 119 | 0.589602 |
ee4ea53c9f59142caa780fc1889e82f9447f0d50 | 1,231 | py | Python | myapp/multiplication.py | TomokiEmmei/kadai | eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555 | [
"MIT"
] | null | null | null | myapp/multiplication.py | TomokiEmmei/kadai | eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555 | [
"MIT"
] | null | null | null | myapp/multiplication.py | TomokiEmmei/kadai | eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555 | [
"MIT"
] | null | null | null | """
2018.Jan
@author: Tomoki Emmei
description: program to show multiplication and addition table
"""
import sys #read command line argument
# Display the multiplication table
# Display the addition table
if __name__ == '__main__':
main()
| 27.355556 | 77 | 0.570268 |
ee4f325d1a129d74b4f20d86d9a69e407bc823af | 1,524 | py | Python | iliad/integrators/states/riemannian_leapfrog_state.py | JamesBrofos/Iliad | 2220e1e519f479e402072f80f4bc67e419842c4e | [
"MIT"
] | 1 | 2022-03-24T20:32:54.000Z | 2022-03-24T20:32:54.000Z | iliad/integrators/states/riemannian_leapfrog_state.py | JamesBrofos/Iliad | 2220e1e519f479e402072f80f4bc67e419842c4e | [
"MIT"
] | null | null | null | iliad/integrators/states/riemannian_leapfrog_state.py | JamesBrofos/Iliad | 2220e1e519f479e402072f80f4bc67e419842c4e | [
"MIT"
] | null | null | null | from typing import Callable
import numpy as np
from iliad.integrators.states.lagrangian_leapfrog_state import LagrangianLeapfrogState
from iliad.integrators.fields import riemannian
from iliad.linalg import solve_psd
from odyssey.distribution import Distribution
| 41.189189 | 119 | 0.740157 |
ee4ffc00f5aac29ae91942bab254cf9e630e3326 | 2,002 | py | Python | PyRods/examples/modify_user_password.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | PyRods/examples/modify_user_password.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | PyRods/examples/modify_user_password.py | kaldrill/irodspython | 9a1018429acf9e86af8fb7ea6f37fb397e0010da | [
"CNRI-Python"
] | null | null | null | # Copyright (c) 2013, University of Liverpool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author : Jerome Fuselier
#
from irods import *
# Credentials for the throw-away test account exercised below.
USER = "testModify"
PW = "1Password"
# Admin workflow: create (or fetch) a test user, change its password,
# verify a login with the new password, then clean up.
if __name__ == "__main__":
    status, myEnv = getRodsEnv()
    # This has to be a user in the rodsadmin group
    conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
                             myEnv.rodsUserName, myEnv.rodsZone)
    status = clientLogin(conn)
    # Create a user with the name and the group
    user = createUser(conn, USER, "rodsuser")
    delete_user_after = True
    if not user:
        delete_user_after = False # If the user exists we won't delete it
        user = getUser(conn, USER)
    #print setPassword(conn, user.getName(), PW)
    # Change the password and report the returned status code.
    print "status for modification: ", user.setPassword(PW)
    conn.disconnect()
    # Test connection for our modified user
    conn, errMsg = rcConnect("localhost", 1247, USER, "tempZone")
    status = clientLoginWithPassword(conn, PW)
    print "Status for the connection with our modified user %s: %d" % (USER, status)
    conn.disconnect()
    # Clean up: remove the test user only if this script created it.
    if delete_user_after:
        conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
                                 myEnv.rodsUserName, myEnv.rodsZone)
        status = clientLogin(conn)
        deleteUser(conn, USER)
        conn.disconnect()
| 35.75 | 84 | 0.675824 |
ee524291fb83e3e5c6e37a9185fb2243d6f26277 | 4,727 | py | Python | nucleus/io/bedgraph.py | gaybro8777/nucleus | 3bd27ac076a6f3f93e49a27ed60661858e727dda | [
"BSD-3-Clause"
] | 721 | 2018-03-30T14:34:17.000Z | 2022-03-23T00:09:18.000Z | nucleus/io/bedgraph.py | aktaseren/nucleus | 3cc9412be81ed86a99fd7eb086ee94afe852759b | [
"Apache-2.0"
] | 38 | 2018-03-31T09:02:23.000Z | 2022-03-23T21:16:41.000Z | nucleus/io/bedgraph.py | aktaseren/nucleus | 3cc9412be81ed86a99fd7eb086ee94afe852759b | [
"Apache-2.0"
] | 123 | 2018-03-30T21:51:18.000Z | 2021-12-13T06:59:31.000Z | # Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for reading and writing BedGraph files.
The BedGraph format is described at
https://genome.ucsc.edu/goldenpath/help/bedgraph.html
API for reading:
```python
from nucleus.io import bedgraph
# Iterate through all records.
with bed.BedGraphReader(input_path) as reader:
for record in reader:
print(record)
```
where `record` is a `nucleus.genomics.v1.BedGraphRecord` protocol buffer.
API for writing:
```python
from nucleus.io import bedgraph
from nucleus.protos import bedgraph_pb2
# records is an iterable of nucleus.genomics.v1.BedGraphRecord protocol buffers.
records = ...
# Write all records to the desired output path.
with bed.BedGraphWriter(output_path) as writer:
for record in records:
writer.write(record)
```
For both reading and writing, if the path provided to the constructor contains
'.tfrecord' as an extension, a `TFRecord` file is assumed and attempted to be
read or written. Otherwise, the filename is treated as a true BedGraph file.
Files that end in a '.gz' suffix cause the file to be treated as compressed
(with BGZF if it is a BedGraph file, and with gzip if it is a TFRecord file).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nucleus.io import genomics_reader
from nucleus.io import genomics_writer
from nucleus.io.python import bedgraph_reader
from nucleus.io.python import bedgraph_writer
from nucleus.protos import bedgraph_pb2
| 33.524823 | 80 | 0.767929 |
ee5342a6017572637126ba2afb48e284377203df | 7,625 | py | Python | gui/qt/openswap_priceinfo.py | ComputerCraftr/openswap | 7de04aa80dab79bebe4b64483011dad70a48694c | [
"MIT"
] | 16 | 2018-11-05T13:19:02.000Z | 2021-04-06T12:11:49.000Z | gui/qt/openswap_priceinfo.py | ComputerCraftr/openswap | 7de04aa80dab79bebe4b64483011dad70a48694c | [
"MIT"
] | 9 | 2018-09-19T03:37:26.000Z | 2019-04-17T21:58:27.000Z | gui/qt/openswap_priceinfo.py | ComputerCraftr/openswap | 7de04aa80dab79bebe4b64483011dad70a48694c | [
"MIT"
] | 5 | 2018-11-05T13:19:02.000Z | 2020-10-20T09:15:54.000Z | from functools import partial
import math
from electroncash.i18n import _
from electroncash.address import Address
import electroncash.web as web
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .util import *
from .qrtextedit import ShowQRTextEdit
from electroncash import bchmessage
from electroncash import openswap
from electroncash.util import format_satoshis_plain_nofloat, get_satoshis_nofloat
from electroncash.openswap import cryptos, crypto_list_by_bytes, crypto_list_by_str
def invert(x):
    """Return the reciprocal 1/x, mapping zero to signed infinity.

    Python raises ZeroDivisionError instead of returning inf, so zero
    (including -0.0) is mapped to +/-inf with the sign taken from x.
    """
    if x:
        return 1. / x
    return math.copysign(math.inf, x)
| 36.658654 | 83 | 0.617574 |
ee54b64f9bc555511d62a6158fb2e8ffda3d1cc6 | 2,906 | py | Python | commons/triggering_training/retraining_defect_type_triggering.py | jibby0/service-auto-analyzer | 79a0dbf6650693a3559b484c51e97e6fac5cc3ba | [
"Apache-2.0"
] | null | null | null | commons/triggering_training/retraining_defect_type_triggering.py | jibby0/service-auto-analyzer | 79a0dbf6650693a3559b484c51e97e6fac5cc3ba | [
"Apache-2.0"
] | null | null | null | commons/triggering_training/retraining_defect_type_triggering.py | jibby0/service-auto-analyzer | 79a0dbf6650693a3559b484c51e97e6fac5cc3ba | [
"Apache-2.0"
] | null | null | null | """
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
from commons.object_saving.object_saver import ObjectSaver
from commons.triggering_training import abstract_triggering_training
logger = logging.getLogger("analyzerApp.retraining_defect_type_triggering")
| 47.639344 | 109 | 0.770131 |
ee56a41665eea1578283e8ab5f16b946f7b0fc97 | 1,091 | py | Python | cloudmesh-exercises/e-cloudmesh-shell-3.py | cloudmesh-community/fa19-516-159 | 4b327d28ebe9cf8bdd3ef23f1819c0ebfbd7ddbe | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/e-cloudmesh-shell-3.py | cloudmesh-community/fa19-516-159 | 4b327d28ebe9cf8bdd3ef23f1819c0ebfbd7ddbe | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/e-cloudmesh-shell-3.py | cloudmesh-community/fa19-516-159 | 4b327d28ebe9cf8bdd3ef23f1819c0ebfbd7ddbe | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.docopts_example.api.manager import Manager
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from pprint import pprint
from cloudmesh.common.debug import VERBOSE
| 26.609756 | 97 | 0.64528 |
ee5de97647ec1a5a844d776fae68ad8d234a3b9c | 2,790 | py | Python | tests/test_dvg_util_funcs.py | tos-kamiya/dvg | eb2df7f4b9850543098003a07f565227cdbf11fa | [
"BSD-2-Clause"
] | null | null | null | tests/test_dvg_util_funcs.py | tos-kamiya/dvg | eb2df7f4b9850543098003a07f565227cdbf11fa | [
"BSD-2-Clause"
] | null | null | null | tests/test_dvg_util_funcs.py | tos-kamiya/dvg | eb2df7f4b9850543098003a07f565227cdbf11fa | [
"BSD-2-Clause"
] | null | null | null | from typing import *
import unittest
import contextlib
import os
import sys
import tempfile
from dvg.dvg import prune_overlapped_paragraphs, expand_file_iter
if __name__ == "__main__":
unittest.main()
| 28.469388 | 103 | 0.506093 |
ee61b12145d3742ab35520782261592ec9adb889 | 832 | py | Python | server/api/health.py | lab-grid/labflow | 967c0ac041e2b1000a2624fc3adf1b27fce135c9 | [
"MIT"
] | 4 | 2020-10-20T00:23:28.000Z | 2021-09-14T10:31:02.000Z | server/api/health.py | lab-grid/flow | 967c0ac041e2b1000a2624fc3adf1b27fce135c9 | [
"MIT"
] | 104 | 2020-10-17T22:41:18.000Z | 2021-04-13T18:52:58.000Z | server/api/health.py | lab-grid/labflow | 967c0ac041e2b1000a2624fc3adf1b27fce135c9 | [
"MIT"
] | 2 | 2020-10-17T21:27:52.000Z | 2021-01-10T08:02:47.000Z | from typing import Union
from fastapi import Depends
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
from authorization import get_all_roles
from server import app, get_db
from settings import settings
from models import HealthCheck
| 26.83871 | 98 | 0.682692 |
ee62497549e11786eed94ddaf1b321e00e07b0ad | 43 | py | Python | MultiSourceDataFeeds/Providers/Factal/factal/__init__.py | Esri/ArcGIS-Solutions-for-Business | 306b778bb6246f13766ce14245c6ba2aab42ba08 | [
"Apache-2.0"
] | 1 | 2021-01-30T04:43:31.000Z | 2021-01-30T04:43:31.000Z | MultiSourceDataFeeds/Providers/Factal/factal/__init__.py | Esri/ArcGIS-Solutions-for-Business | 306b778bb6246f13766ce14245c6ba2aab42ba08 | [
"Apache-2.0"
] | null | null | null | MultiSourceDataFeeds/Providers/Factal/factal/__init__.py | Esri/ArcGIS-Solutions-for-Business | 306b778bb6246f13766ce14245c6ba2aab42ba08 | [
"Apache-2.0"
] | null | null | null | from .factal import *
from .schema import * | 21.5 | 21 | 0.744186 |
ee625190ee933eb34ed6aced294e275dfea0c9b7 | 1,482 | py | Python | SecretPlots/managers/location/_base.py | secretBiology/SecretPlots | eca1d0e0932e605df49d1f958f98a1f41200d589 | [
"MIT"
] | null | null | null | SecretPlots/managers/location/_base.py | secretBiology/SecretPlots | eca1d0e0932e605df49d1f958f98a1f41200d589 | [
"MIT"
] | null | null | null | SecretPlots/managers/location/_base.py | secretBiology/SecretPlots | eca1d0e0932e605df49d1f958f98a1f41200d589 | [
"MIT"
] | 1 | 2022-01-14T05:43:49.000Z | 2022-01-14T05:43:49.000Z | # SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:52 PM
#
# All Location Managers will go here
from SecretPlots.managers._axis import AxisManager
from SecretPlots.managers._object import ObjectManager
from SecretPlots.objects import Data
from SecretPlots.utils import Log
| 23.15625 | 69 | 0.653171 |
ee6420717483b3976c5a090488575b8372f61f62 | 5,279 | py | Python | scenes/flip06_obstacle.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 158 | 2018-06-24T17:42:13.000Z | 2022-03-12T13:29:43.000Z | scenes/flip06_obstacle.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 5 | 2018-09-05T07:30:48.000Z | 2020-07-01T08:56:28.000Z | scenes/flip06_obstacle.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 35 | 2018-06-13T04:05:42.000Z | 2022-03-29T16:55:24.000Z | #
# This FLIP example combines narrow band flip, 2nd order wall boundary conditions, and
# adaptive time stepping.
#
from manta import *
# Solver setup: cubic grid with `res` cells per side (z collapsed in 2-D).
dim = 3
res = 64
#res = 124
gs = vec3(res,res,res)
if (dim==2):
	gs.z=1
s = Solver(name='main', gridSize = gs, dim=dim)
# Width (in cells) of the narrow band of FLIP particles around the surface.
narrowBand = 3
# Target particle count per cell (2^dim).
minParticles = pow(2,dim)
saveParts = False
frames = 200
# Adaptive time stepping
s.frameLength = 0.8 # length of one frame (in "world time")
s.cfl = 3.0 # maximal velocity per cell and timestep, 3 is fairly strict
s.timestep = s.frameLength
s.timestepMin = s.frameLength / 4. # time step range
s.timestepMax = s.frameLength * 4.
# prepare grids and particles
flags = s.create(FlagGrid)
phi = s.create(LevelsetGrid)
phiParts = s.create(LevelsetGrid)
phiObs = s.create(LevelsetGrid)
vel = s.create(MACGrid)
velOld = s.create(MACGrid)
velParts = s.create(MACGrid)
#mapWeights= s.create(MACGrid)
pressure = s.create(RealGrid)
fractions = s.create(MACGrid)
tmpVec3 = s.create(VecGrid)
pp = s.create(BasicParticleSystem)
pVel = pp.create(PdataVec3)
mesh = s.create(Mesh)
# acceleration data for particle nbs
pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)
# scene setup
bWidth=1
flags.initDomain(boundaryWidth=bWidth, phiWalls=phiObs )
fluidVel = 0
fluidSetVel = 0
# Initialize the fluid level set to "far outside" before joining in shapes.
phi.setConst(999.)
# standing dam
fluidbox1 = Box( parent=s, p0=gs*vec3(0,0,0), p1=gs*vec3(1.0,0.3,1))
phi.join( fluidbox1.computeLevelset() )
fluidbox2 = Box( parent=s, p0=gs*vec3(0.1,0,0), p1=gs*vec3(0.2,0.75,1))
phi.join( fluidbox2.computeLevelset() )
if 1:
	# Spherical obstacle inside the domain.
	sphere = Sphere( parent=s , center=gs*vec3(0.66,0.3,0.5), radius=res*0.2)
	phiObs.join( sphere.computeLevelset() )
	#obsbox = Box( parent=s, p0=gs*vec3(0.4,0.2,0), p1=gs*vec3(0.7,0.4,1))
	#obsbox = Box( parent=s, p0=gs*vec3(0.3,0.2,0), p1=gs*vec3(0.7,0.6,1))
	#phiObs.join( obsbox.computeLevelset() )
flags.updateFromLevelset(phi)
# Carve the obstacle out of the fluid region, then seed FLIP particles.
phi.subtract( phiObs );
sampleLevelsetWithParticles( phi=phi, flags=flags, parts=pp, discretization=2, randomness=0.05 )
if fluidVel!=0:
	# set initial velocity
	fluidVel.applyToGrid( grid=vel , value=fluidSetVel )
	mapGridToPartsVec3(source=vel, parts=pp, target=pVel )
# also sets boundary flags for phiObs
updateFractions( flags=flags, phiObs=phiObs, fractions=fractions, boundaryWidth=bWidth )
setObstacleFlags(flags=flags, phiObs=phiObs, fractions=fractions)
lastFrame = -1
if 1 and (GUI):
	gui = Gui()
	gui.show()
	#gui.pause()
# save reference any grid, to automatically determine grid size
if saveParts:
	pressure.save( 'ref_flipParts_0000.uni' );
#main loop
while s.frame < frames:
	# Adapt the time step to the current maximum velocity (CFL condition).
	maxVel = vel.getMax()
	s.adaptTimestep( maxVel )
	mantaMsg('\nFrame %i, time-step size %f' % (s.frame, s.timestep))
	# FLIP
	pp.advectInGrid(flags=flags, vel=vel, integrationMode=IntRK4, deleteInObstacle=False, stopInObstacle=False )
	pushOutofObs( parts=pp, flags=flags, phiObs=phiObs )
	advectSemiLagrange(flags=flags, vel=vel, grid=phi, order=1) # first order is usually enough
	advectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2)
	# create level set of particles
	gridParticleIndex( parts=pp , flags=flags, indexSys=pindex, index=gpi )
	unionParticleLevelset( pp, pindex, flags, gpi, phiParts )
	# combine level set of particles with grid level set
	phi.addConst(1.); # shrink slightly
	phi.join( phiParts );
	extrapolateLsSimple(phi=phi, distance=narrowBand+2, inside=True )
	extrapolateLsSimple(phi=phi, distance=3 )
	phi.setBoundNeumann(0) # make sure no particles are placed at outer boundary, warning - larger values can delete thin sheets at outer walls...
	flags.updateFromLevelset(phi)
	# combine particles velocities with advected grid velocities
	mapPartsToMAC(vel=velParts, flags=flags, velOld=velOld, parts=pp, partVel=pVel, weight=tmpVec3)
	extrapolateMACFromWeight( vel=velParts , distance=2, weight=tmpVec3 )
	combineGridVel(vel=velParts, weight=tmpVec3 , combineVel=vel, phi=phi, narrowBand=(narrowBand-1), thresh=0)
	velOld.copyFrom(vel)
	# forces & pressure solve
	addGravity(flags=flags, vel=vel, gravity=(0,-0.001,0))
	extrapolateMACSimple( flags=flags, vel=vel , distance=2, intoObs=True )
	setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
	solvePressure(flags=flags, vel=vel, pressure=pressure, phi=phi, fractions=fractions )
	extrapolateMACSimple( flags=flags, vel=vel , distance=4, intoObs=True )
	setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
	if (dim==3):
		# mis-use phiParts as temp grid to close the mesh
		phiParts.copyFrom(phi)
		phiParts.setBound(0.5,0)
		phiParts.createMesh(mesh)
	# set source grids for resampling, used in adjustNumber!
	pVel.setSource( vel, isMAC=True )
	adjustNumber( parts=pp, vel=vel, flags=flags, minParticles=1*minParticles, maxParticles=2*minParticles, phi=phi, exclude=phiObs, narrowBand=narrowBand )
	flipVelocityUpdate(vel=vel, velOld=velOld, flags=flags, parts=pp, partVel=pVel, flipRatio=0.97 )
	s.step()
	if (lastFrame!=s.frame):
		# generate data for flip03_gen.py surface generation scene
		if saveParts:
			pp.save( 'flipParts_%04d.uni' % s.frame );
		if 0 and (GUI):
			gui.screenshot( 'flip06_%04d.png' % s.frame );
		#s.printMemInfo()
		lastFrame = s.frame;
| 32.99375 | 154 | 0.722675 |
ee642f13bf49e8b1507609caffabf3cbde051eab | 369 | py | Python | main.py | Sisyphus141/Password-Encoded | 3353874e0dd8665bb36891616f809aca465f9047 | [
"MIT"
] | null | null | null | main.py | Sisyphus141/Password-Encoded | 3353874e0dd8665bb36891616f809aca465f9047 | [
"MIT"
] | null | null | null | main.py | Sisyphus141/Password-Encoded | 3353874e0dd8665bb36891616f809aca465f9047 | [
"MIT"
] | null | null | null | import hashlib
# Read the secret key and the message to encode from the user.
key = str(input("KEY>>> "))
message = str(input("MESSAGE>>> "))

# Sandwich the message between two copies of the key as a crude salt,
# then encode to bytes for hashing.
salted_bytes = (key + message + key).encode("utf-8")
print(salted_bytes)

# SHA-256 the salted bytes and show the hex digest.
digest = hashlib.sha256()
digest.update(salted_bytes)
print(digest.hexdigest())
| 19.421053 | 41 | 0.672087 |
ee646ecd75eb338880899b14fe5eafbb53b55cd1 | 38,214 | py | Python | gewittergefahr/gg_io/myrorss_and_mrms_io.py | dopplerchase/GewitterGefahr | 4415b08dd64f37eba5b1b9e8cc5aa9af24f96593 | [
"MIT"
] | 26 | 2018-10-04T01:07:35.000Z | 2022-01-29T08:49:32.000Z | gewittergefahr/gg_io/myrorss_and_mrms_io.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 4 | 2017-12-25T02:01:08.000Z | 2018-12-19T01:54:21.000Z | gewittergefahr/gg_io/myrorss_and_mrms_io.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 11 | 2017-12-10T23:05:29.000Z | 2022-01-29T08:49:33.000Z | """IO methods for radar data from MYRORSS or MRMS.
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
MRMS = Multi-radar Multi-sensor
"""
import os
import glob
import warnings
import numpy
import pandas
from netCDF4 import Dataset
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
# Column names used in the original (MYRORSS/MRMS NetCDF) files.
NW_GRID_POINT_LAT_COLUMN_ORIG = 'Latitude'
NW_GRID_POINT_LNG_COLUMN_ORIG = 'Longitude'
LAT_SPACING_COLUMN_ORIG = 'LatGridSpacing'
LNG_SPACING_COLUMN_ORIG = 'LonGridSpacing'
NUM_LAT_COLUMN_ORIG = 'Lat'
NUM_LNG_COLUMN_ORIG = 'Lon'
NUM_PIXELS_COLUMN_ORIG = 'pixel'
HEIGHT_COLUMN_ORIG = 'Height'
UNIX_TIME_COLUMN_ORIG = 'Time'
FIELD_NAME_COLUMN_ORIG = 'TypeName'
SENTINEL_VALUE_COLUMNS_ORIG = ['MissingData', 'RangeFolded']
# Column names used in GewitterGefahr (new) format, and their originals.
GRID_ROW_COLUMN = 'grid_row'
GRID_COLUMN_COLUMN = 'grid_column'
NUM_GRID_CELL_COLUMN = 'num_grid_cells'
GRID_ROW_COLUMN_ORIG = 'pixel_x'
GRID_COLUMN_COLUMN_ORIG = 'pixel_y'
NUM_GRID_CELL_COLUMN_ORIG = 'pixel_count'
# Time formats (for strftime/strptime) used in file names and log messages.
TIME_FORMAT_SECONDS = '%Y%m%d-%H%M%S'
TIME_FORMAT_MINUTES = '%Y%m%d-%H%M'
TIME_FORMAT_FOR_LOG_MESSAGES = '%Y-%m-%d-%H%M%S'
TIME_FORMAT_SECONDS_REGEX = (
    '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]')
# Unit conversions and tolerances.
MINUTES_TO_SECONDS = 60
METRES_TO_KM = 1e-3
SENTINEL_TOLERANCE = 10.
LATLNG_MULTIPLE_DEG = 1e-4
# Default max offsets (seconds) between desired and actual valid times.
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC = 240
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC = 180
# Raw files are NetCDF, optionally gzipped.
ZIPPED_FILE_EXTENSION = '.gz'
UNZIPPED_FILE_EXTENSION = '.netcdf'
AZIMUTHAL_SHEAR_FIELD_NAMES = [
    radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME]
# Dictionary keys returned by file-finding routines.
RADAR_FILE_NAMES_KEY = 'radar_file_name_matrix'
UNIQUE_TIMES_KEY = 'unique_times_unix_sec'
SPC_DATES_AT_UNIQUE_TIMES_KEY = 'spc_dates_at_unique_times_unix_sec'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_asl'
def _get_pathless_raw_file_pattern(unix_time_sec):
    """Returns glob pattern matching the pathless name of a raw file.

    The valid time is formatted to the nearest minute, and the trailing
    wildcards let the pattern match both zipped and unzipped files.  The
    result is meant for input to `glob.glob`; see
    `_get_pathless_raw_file_name` for the exact-name version.

    :param unix_time_sec: Valid time.
    :return: pathless_raw_file_pattern: Pathless glob pattern for raw file.
    """
    minute_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_MINUTES)

    return minute_string + '*' + UNZIPPED_FILE_EXTENSION + '*'
def _get_pathless_raw_file_name(unix_time_sec, zipped=True):
    """Returns pathless name for a raw file.

    :param unix_time_sec: Valid time.
    :param zipped: Boolean flag.  If True, the name is for a gzipped file;
        otherwise for an unzipped file.
    :return: pathless_raw_file_name: Pathless name for raw file.
    """
    time_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_SECONDS)

    # The gzip extension, when present, goes after the NetCDF extension.
    suffix = UNZIPPED_FILE_EXTENSION
    if zipped:
        suffix += ZIPPED_FILE_EXTENSION

    return time_string + suffix
def _remove_sentinels_from_sparse_grid(
        sparse_grid_table, field_name, sentinel_values):
    """Removes rows with sentinel values from a sparse radar grid.

    :param sparse_grid_table: pandas DataFrame with columns produced by
        `read_data_from_sparse_grid_file`.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: sparse_grid_table: Same as input, except that rows with a sentinel
        value are removed.
    """
    field_values = sparse_grid_table[field_name].values
    is_sentinel = numpy.zeros(len(sparse_grid_table.index), dtype=bool)

    # A row is dropped if its field value is close to ANY sentinel value.
    for this_value in sentinel_values:
        is_sentinel = numpy.logical_or(
            is_sentinel,
            numpy.isclose(field_values, this_value, atol=SENTINEL_TOLERANCE)
        )

    bad_rows = numpy.where(is_sentinel)[0]
    return sparse_grid_table.drop(
        sparse_grid_table.index[bad_rows], axis=0, inplace=False)
def _remove_sentinels_from_full_grid(field_matrix, sentinel_values):
    """Replaces sentinel values in a full radar grid with NaN.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with radar field.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: field_matrix: Same as input, except that sentinel values are
        replaced with NaN.
    """
    num_rows, num_columns = field_matrix.shape[0], field_matrix.shape[1]
    flat_field = numpy.reshape(field_matrix, num_rows * num_columns)

    # A value is a sentinel if it is close to ANY of the sentinel values.
    is_sentinel = numpy.zeros(flat_field.size, dtype=bool)
    for this_value in sentinel_values:
        is_sentinel = numpy.logical_or(
            is_sentinel,
            numpy.isclose(flat_field, this_value, atol=SENTINEL_TOLERANCE)
        )

    flat_field[numpy.where(is_sentinel)[0]] = numpy.nan
    return numpy.reshape(flat_field, (num_rows, num_columns))
def get_relative_dir_for_raw_files(field_name, data_source, height_m_asl=None):
    """Generates relative path for raw files.

    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param height_m_asl: Radar height (metres above sea level).  Used only for
        reflectivity; other fields have a single valid height, which is looked
        up instead.
    :return: relative_directory_name: Relative path for raw files.
    """
    if field_name == radar_utils.REFL_NAME:
        # Reflectivity exists at many heights, so the given one is validated.
        radar_utils.check_heights(
            data_source=data_source, heights_m_asl=numpy.array([height_m_asl]),
            field_name=radar_utils.REFL_NAME)
    else:
        # Non-reflectivity fields have exactly one valid height.
        height_m_asl = radar_utils.get_valid_heights(
            data_source=data_source, field_name=field_name)[0]

    field_name_orig = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=data_source)
    height_km = float(height_m_asl) * METRES_TO_KM

    return '{0:s}/{1:05.2f}'.format(field_name_orig, height_km)
def find_raw_file(
        unix_time_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, raise_error_if_missing=True):
    """Finds raw file.

    File should contain one field at one time step (e.g., MESH at 123502 UTC,
    reflectivity at 500 m above sea level and 123502 UTC).

    :param unix_time_sec: Valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and file is missing,
        this method will raise an error.  If False and file is missing, will
        return *expected* path to raw file.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if raise_error_if_missing = True and file is missing.
    """
    # Input-checking.
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_string(top_directory_name)
    error_checking.assert_is_boolean(raise_error_if_missing)

    directory_name = '/'.join([
        top_directory_name, spc_date_string[:4], spc_date_string,
        get_relative_dir_for_raw_files(
            field_name=field_name, height_m_asl=height_m_asl,
            data_source=data_source)
    ])

    # Prefer the zipped file name; without an existence check, it is returned
    # as-is.
    raw_file_name = '{0:s}/{1:s}'.format(
        directory_name, _get_pathless_raw_file_name(unix_time_sec, zipped=True)
    )
    if not raise_error_if_missing or os.path.isfile(raw_file_name):
        return raw_file_name

    # Zipped file is missing: fall back to the unzipped name.
    raw_file_name = '{0:s}/{1:s}'.format(
        directory_name, _get_pathless_raw_file_name(unix_time_sec, zipped=False)
    )
    if os.path.isfile(raw_file_name):
        return raw_file_name

    raise ValueError(
        'Cannot find raw file. Expected at: "{0:s}"'.format(raw_file_name)
    )
def raw_file_name_to_time(raw_file_name):
    """Parses valid time from the name of a raw file.

    :param raw_file_name: Path to raw file.
    :return: unix_time_sec: Valid time.
    """
    error_checking.assert_is_string(raw_file_name)

    pathless_file_name = os.path.split(raw_file_name)[-1]

    # Strip the gzip extension first, then the NetCDF extension, leaving only
    # the time string.
    for this_extension in [ZIPPED_FILE_EXTENSION, UNZIPPED_FILE_EXTENSION]:
        pathless_file_name = pathless_file_name.replace(this_extension, '')

    return time_conversion.string_to_unix_sec(
        pathless_file_name, TIME_FORMAT_SECONDS)
def find_raw_file_inexact_time(
        desired_time_unix_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, max_time_offset_sec=None,
        raise_error_if_missing=False):
    """Finds raw file at inexact time.

    If you know the exact valid time, use `find_raw_file`.

    :param desired_time_unix_sec: Desired valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Field name in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param max_time_offset_sec: Maximum offset between actual and desired valid
        time.

    For example, if `desired_time_unix_sec` is 162933 UTC 5 Jan 2018 and
    `max_time_offset_sec` = 60, this method will look for az-shear at valid
    times from 162833...163033 UTC 5 Jan 2018.

    If None, this defaults to `DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC` for
    azimuthal-shear fields and `DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC` for
    all other fields.

    :param raise_error_if_missing: Boolean flag.  If no file is found and
        raise_error_if_missing = True, this method will error out.  If no file
        is found and raise_error_if_missing = False, will return None.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if no file is found and raise_error_if_missing = True.
    """

    # Error-checking.  The SPC-date conversion is called only to validate the
    # format of `spc_date_string`; its return value is unused.
    error_checking.assert_is_integer(desired_time_unix_sec)
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_boolean(raise_error_if_missing)
    radar_utils.check_field_name(field_name)

    # Azimuthal-shear fields get a different default search window than other
    # fields.
    if max_time_offset_sec is None:
        if field_name in AZIMUTHAL_SHEAR_FIELD_NAMES:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC
        else:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC

    error_checking.assert_is_integer(max_time_offset_sec)
    error_checking.assert_is_greater(max_time_offset_sec, 0)

    # Round both ends of the search window down to whole minutes, since raw
    # files are globbed per minute below.
    # NOTE(review): `numpy.round(int(...))` is redundant (int() has already
    # truncated) and yields a NumPy scalar rather than a Python int; also both
    # bounds use floor_to_nearest -- confirm the asymmetry is intentional.
    first_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec - max_time_offset_sec),
        MINUTES_TO_SECONDS)))
    last_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec + max_time_offset_sec),
        MINUTES_TO_SECONDS)))

    allowed_minutes_unix_sec = time_periods.range_and_interval_to_list(
        start_time_unix_sec=first_allowed_minute_unix_sec,
        end_time_unix_sec=last_allowed_minute_unix_sec,
        time_interval_sec=MINUTES_TO_SECONDS, include_endpoint=True).astype(int)

    relative_directory_name = get_relative_dir_for_raw_files(
        field_name=field_name, data_source=data_source,
        height_m_asl=height_m_asl)

    # Glob for candidate files in every allowed minute of the search window.
    raw_file_names = []
    for this_time_unix_sec in allowed_minutes_unix_sec:
        this_pathless_file_pattern = _get_pathless_raw_file_pattern(
            this_time_unix_sec)
        this_file_pattern = '{0:s}/{1:s}/{2:s}/{3:s}/{4:s}'.format(
            top_directory_name, spc_date_string[:4], spc_date_string,
            relative_directory_name, this_pathless_file_pattern
        )
        raw_file_names += glob.glob(this_file_pattern)

    # Among all candidates, pick the one whose valid time is closest to the
    # desired time.
    file_times_unix_sec = []
    for this_raw_file_name in raw_file_names:
        file_times_unix_sec.append(raw_file_name_to_time(this_raw_file_name))

    if len(file_times_unix_sec):
        file_times_unix_sec = numpy.array(file_times_unix_sec)
        time_differences_sec = numpy.absolute(
            file_times_unix_sec - desired_time_unix_sec)
        nearest_index = numpy.argmin(time_differences_sec)
        min_time_diff_sec = time_differences_sec[nearest_index]
    else:
        # No candidates at all: infinite difference forces the not-found path.
        min_time_diff_sec = numpy.inf

    if min_time_diff_sec > max_time_offset_sec:
        if raise_error_if_missing:
            desired_time_string = time_conversion.unix_sec_to_string(
                desired_time_unix_sec, TIME_FORMAT_FOR_LOG_MESSAGES)
            error_string = (
                'Could not find "{0:s}" file within {1:d} seconds of {2:s}.'
            ).format(field_name, max_time_offset_sec, desired_time_string)
            raise ValueError(error_string)
        return None

    return raw_file_names[nearest_index]
def find_raw_files_one_spc_date(
        spc_date_string, field_name, data_source, top_directory_name,
        height_m_asl=None, raise_error_if_missing=True):
    """Finds all raw files for one field and one SPC date.

    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and no files are
        found, will raise error.
    :return: raw_file_names: 1-D list of paths to raw files.
    :raises: ValueError: if raise_error_if_missing = True and no files are
        found.
    """

    error_checking.assert_is_boolean(raise_error_if_missing)

    # Construct the expected path of a file valid at the start of the SPC
    # date, then generalize that path into a glob pattern matching any valid
    # time and either (zipped or unzipped) extension.
    anchor_time_unix_sec = time_conversion.spc_date_string_to_unix_sec(
        spc_date_string)
    anchor_file_name = find_raw_file(
        unix_time_sec=anchor_time_unix_sec, spc_date_string=spc_date_string,
        field_name=field_name, data_source=data_source,
        top_directory_name=top_directory_name, height_m_asl=height_m_asl,
        raise_error_if_missing=False)

    directory_name, pathless_anchor_file_name = os.path.split(anchor_file_name)
    anchor_time_string = time_conversion.unix_sec_to_string(
        anchor_time_unix_sec, TIME_FORMAT_SECONDS)

    pathless_file_pattern = pathless_anchor_file_name.replace(
        anchor_time_string, TIME_FORMAT_SECONDS_REGEX
    ).replace(ZIPPED_FILE_EXTENSION, '*')

    raw_file_pattern = '{0:s}/{1:s}'.format(
        directory_name, pathless_file_pattern)
    raw_file_names = glob.glob(raw_file_pattern)

    if raw_file_names or not raise_error_if_missing:
        return raw_file_names

    error_string = (
        'Could not find any files with the following pattern: {0:s}'
    ).format(raw_file_pattern)
    raise ValueError(error_string)
def find_many_raw_files(
        desired_times_unix_sec, spc_date_strings, data_source, field_names,
        top_directory_name, reflectivity_heights_m_asl=None,
        max_time_offset_for_az_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC,
        max_time_offset_for_non_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC):
    """Finds raw file for each field/height pair and time step.

    N = number of input times
    T = number of unique input times
    F = number of field/height pairs

    :param desired_times_unix_sec: length-N numpy array with desired valid
        times.
    :param spc_date_strings: length-N list of corresponding SPC dates (format
        "yyyymmdd").
    :param data_source: Data source ("myrorss" or "mrms").
    :param field_names: 1-D list of field names.
    :param top_directory_name: Name of top-level directory with radar data from
        the given source.
    :param reflectivity_heights_m_asl: 1-D numpy array of heights (metres above
        sea level) for the field "reflectivity_dbz".  If "reflectivity_dbz" is
        not in `field_names`, leave this as None.
    :param max_time_offset_for_az_shear_sec: Max time offset (between desired
        and actual valid time) for azimuthal-shear fields.
    :param max_time_offset_for_non_shear_sec: Max time offset (between desired
        and actual valid time) for non-azimuthal-shear fields.
    :return: file_dictionary: Dictionary with the following keys.
    file_dictionary['radar_file_name_matrix']: T-by-F numpy array of paths to
        raw files.
    file_dictionary['unique_times_unix_sec']: length-T numpy array of unique
        valid times.
    file_dictionary['spc_date_strings_for_unique_times']: length-T numpy array
        of corresponding SPC dates.
    file_dictionary['field_name_by_pair']: length-F list of field names.
    file_dictionary['height_by_pair_m_asl']: length-F numpy array of heights
        (metres above sea level).
    """

    # Expand `field_names` into one (field, height) pair per matrix column.
    field_name_by_pair, height_by_pair_m_asl = (
        myrorss_and_mrms_utils.fields_and_refl_heights_to_pairs(
            field_names=field_names, data_source=data_source,
            refl_heights_m_asl=reflectivity_heights_m_asl)
    )
    num_fields = len(field_name_by_pair)

    error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)
    error_checking.assert_is_numpy_array(
        desired_times_unix_sec, num_dimensions=1)
    num_times = len(desired_times_unix_sec)

    error_checking.assert_is_string_list(spc_date_strings)
    error_checking.assert_is_numpy_array(
        numpy.array(spc_date_strings),
        exact_dimensions=numpy.array([num_times]))

    spc_dates_unix_sec = numpy.array(
        [time_conversion.spc_date_string_to_unix_sec(s)
         for s in spc_date_strings])

    # Pair each desired time with its SPC date, then deduplicate the pairs.
    # NOTE(review): `numpy.vstack` over a set relies on set iteration order,
    # but the argsort below makes the final ordering deterministic (up to ties
    # in valid time).
    time_matrix = numpy.hstack((
        numpy.reshape(desired_times_unix_sec, (num_times, 1)),
        numpy.reshape(spc_dates_unix_sec, (num_times, 1))
    ))
    unique_time_matrix = numpy.vstack(
        {tuple(this_row) for this_row in time_matrix}
    ).astype(int)
    unique_times_unix_sec = unique_time_matrix[:, 0]
    spc_dates_at_unique_times_unix_sec = unique_time_matrix[:, 1]

    # Sort unique times chronologically, keeping SPC dates aligned.
    sort_indices = numpy.argsort(unique_times_unix_sec)
    unique_times_unix_sec = unique_times_unix_sec[sort_indices]
    spc_dates_at_unique_times_unix_sec = spc_dates_at_unique_times_unix_sec[
        sort_indices]

    num_unique_times = len(unique_times_unix_sec)
    radar_file_name_matrix = numpy.full(
        (num_unique_times, num_fields), '', dtype=object)

    for i in range(num_unique_times):
        this_spc_date_string = time_conversion.time_to_spc_date_string(
            spc_dates_at_unique_times_unix_sec[i])

        for j in range(num_fields):
            # A missing azimuthal-shear file only triggers a warning below,
            # while a missing non-shear file is an error.
            if field_name_by_pair[j] in AZIMUTHAL_SHEAR_FIELD_NAMES:
                this_max_time_offset_sec = max_time_offset_for_az_shear_sec
                this_raise_error_flag = False
            else:
                this_max_time_offset_sec = max_time_offset_for_non_shear_sec
                this_raise_error_flag = True

            # Zero offset means an exact-time lookup; otherwise search a
            # window around the desired time.
            if this_max_time_offset_sec == 0:
                radar_file_name_matrix[i, j] = find_raw_file(
                    unix_time_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    raise_error_if_missing=this_raise_error_flag)
            else:
                radar_file_name_matrix[i, j] = find_raw_file_inexact_time(
                    desired_time_unix_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    max_time_offset_sec=this_max_time_offset_sec,
                    raise_error_if_missing=this_raise_error_flag)

            if radar_file_name_matrix[i, j] is None:
                this_time_string = time_conversion.unix_sec_to_string(
                    unique_times_unix_sec[i], TIME_FORMAT_FOR_LOG_MESSAGES)
                warning_string = (
                    'Cannot find file for "{0:s}" at {1:d} metres ASL and '
                    '{2:s}.'
                ).format(
                    field_name_by_pair[j], int(height_by_pair_m_asl[j]),
                    this_time_string
                )
                warnings.warn(warning_string)

    return {
        RADAR_FILE_NAMES_KEY: radar_file_name_matrix,
        UNIQUE_TIMES_KEY: unique_times_unix_sec,
        SPC_DATES_AT_UNIQUE_TIMES_KEY: spc_dates_at_unique_times_unix_sec,
        FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
        HEIGHT_BY_PAIR_KEY: numpy.round(height_by_pair_m_asl).astype(int)
    }
def read_metadata_from_raw_file(
        netcdf_file_name, data_source, raise_error_if_fails=True):
    """Reads metadata from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param data_source: Data source (string).
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None.
    :return: metadata_dict: Dictionary with the following keys.
    metadata_dict['nw_grid_point_lat_deg']: Latitude (deg N) of
        northwesternmost grid point.
    metadata_dict['nw_grid_point_lng_deg']: Longitude (deg E) of
        northwesternmost grid point.
    metadata_dict['lat_spacing_deg']: Spacing (deg N) between meridionally
        adjacent grid points.
    metadata_dict['lng_spacing_deg']: Spacing (deg E) between zonally adjacent
        grid points.
    metadata_dict['num_lat_in_grid']: Number of rows (unique grid-point
        latitudes).
    metadata_dict['num_lng_in_grid']: Number of columns (unique grid-point
        longitudes).
    metadata_dict['height_m_asl']: Radar height (metres above sea level).
    metadata_dict['unix_time_sec']: Valid time.
    metadata_dict['field_name']: Name of radar field in GewitterGefahr format.
    metadata_dict['field_name_orig']: Name of radar field in original (either
        MYRORSS or MRMS) format.
    metadata_dict['sentinel_values']: 1-D numpy array of sentinel values.
    """

    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if netcdf_dataset is None:
        return None

    field_name_orig = str(getattr(netcdf_dataset, FIELD_NAME_COLUMN_ORIG))

    metadata_dict = {
        radar_utils.NW_GRID_POINT_LAT_COLUMN:
            getattr(netcdf_dataset, NW_GRID_POINT_LAT_COLUMN_ORIG),
        # Longitudes are normalized to positive-in-west convention.
        radar_utils.NW_GRID_POINT_LNG_COLUMN:
            lng_conversion.convert_lng_positive_in_west(
                getattr(netcdf_dataset, NW_GRID_POINT_LNG_COLUMN_ORIG),
                allow_nan=False),
        radar_utils.LAT_SPACING_COLUMN:
            getattr(netcdf_dataset, LAT_SPACING_COLUMN_ORIG),
        radar_utils.LNG_SPACING_COLUMN:
            getattr(netcdf_dataset, LNG_SPACING_COLUMN_ORIG),
        # NOTE(review): the raw files apparently store one fewer dimension
        # element than grid points, hence the "+ 1" -- confirm against the
        # MYRORSS/MRMS format spec.
        radar_utils.NUM_LAT_COLUMN:
            netcdf_dataset.dimensions[NUM_LAT_COLUMN_ORIG].size + 1,
        radar_utils.NUM_LNG_COLUMN:
            netcdf_dataset.dimensions[NUM_LNG_COLUMN_ORIG].size + 1,
        radar_utils.HEIGHT_COLUMN:
            getattr(netcdf_dataset, HEIGHT_COLUMN_ORIG),
        radar_utils.UNIX_TIME_COLUMN:
            getattr(netcdf_dataset, UNIX_TIME_COLUMN_ORIG),
        FIELD_NAME_COLUMN_ORIG: field_name_orig,
        radar_utils.FIELD_NAME_COLUMN: radar_utils.field_name_orig_to_new(
            field_name_orig=field_name_orig, data_source_name=data_source)
    }

    latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
    longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]

    # TODO(thunderhoser): The following "if" condition is a hack.  The purpose
    # is to change grid corners only for actual MYRORSS data, not GridRad data
    # in MYRORSS format.
    if latitude_spacing_deg < 0.011 and longitude_spacing_deg < 0.011:
        # Snap the NW corner onto the grid: floor the latitude, ceiling the
        # (positive-in-west) longitude.
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] = (
            rounder.floor_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
                metadata_dict[radar_utils.LAT_SPACING_COLUMN]))
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] = (
            rounder.ceiling_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
                metadata_dict[radar_utils.LNG_SPACING_COLUMN]))

    sentinel_values = []
    for this_column in SENTINEL_VALUE_COLUMNS_ORIG:
        sentinel_values.append(getattr(netcdf_dataset, this_column))

    metadata_dict.update({
        radar_utils.SENTINEL_VALUE_COLUMN: numpy.array(sentinel_values)})
    netcdf_dataset.close()

    return metadata_dict
def read_data_from_sparse_grid_file(
        netcdf_file_name, field_name_orig, data_source, sentinel_values,
        raise_error_if_fails=True):
    """Reads sparse radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param field_name_orig: Name of radar field in original (either MYRORSS or
        MRMS) format.
    :param data_source: Data source (string).
    :param sentinel_values: 1-D numpy array of sentinel values.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None.
    :return: sparse_grid_table: pandas DataFrame with the following columns.
        Each row corresponds to one grid point.
    sparse_grid_table.grid_row: Row index.
    sparse_grid_table.grid_column: Column index.
    sparse_grid_table.<field_name>: Radar measurement (column name is produced
        by _field_name_orig_to_new).
    sparse_grid_table.num_grid_cells: Number of consecutive grid points with
        the same radar measurement.  Counting is row-major (to the right along
        the row, then down to the next column if necessary).
    """

    error_checking.assert_file_exists(netcdf_file_name)
    error_checking.assert_is_numpy_array_without_nan(sentinel_values)
    error_checking.assert_is_numpy_array(sentinel_values, num_dimensions=1)

    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if netcdf_dataset is None:
        return None

    field_name = radar_utils.field_name_orig_to_new(
        field_name_orig=field_name_orig, data_source_name=data_source)

    # An empty file yields a DataFrame with the right columns but no rows.
    if len(netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG]) > 0:
        these_variables = netcdf_dataset.variables
        sparse_grid_dict = {
            GRID_ROW_COLUMN: these_variables[GRID_ROW_COLUMN_ORIG][:],
            GRID_COLUMN_COLUMN: these_variables[GRID_COLUMN_COLUMN_ORIG][:],
            NUM_GRID_CELL_COLUMN:
                these_variables[NUM_GRID_CELL_COLUMN_ORIG][:],
            field_name: these_variables[field_name_orig][:]
        }
    else:
        sparse_grid_dict = {
            GRID_ROW_COLUMN: numpy.array([], dtype=int),
            GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
            NUM_GRID_CELL_COLUMN: numpy.array([], dtype=int),
            field_name: numpy.array([])
        }

    netcdf_dataset.close()

    return _remove_sentinels_from_sparse_grid(
        pandas.DataFrame.from_dict(sparse_grid_dict), field_name=field_name,
        sentinel_values=sentinel_values)
def read_data_from_full_grid_file(
        netcdf_file_name, metadata_dict, raise_error_if_fails=True):
    """Reads full radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param metadata_dict: Dictionary created by `read_metadata_from_raw_file`.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None for all output vars.
    :return: field_matrix: M-by-N numpy array with radar field.  Latitude
        increases while moving up each column, and longitude increases while
        moving right along each row.
    :return: grid_point_latitudes_deg: length-M numpy array of grid-point
        latitudes (deg N).  This array is monotonically decreasing.
    :return: grid_point_longitudes_deg: length-N numpy array of grid-point
        longitudes (deg E).  This array is monotonically increasing.
    """

    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if netcdf_dataset is None:
        return None, None, None

    # Materialize the data with [:] *before* closing the dataset.  netCDF
    # variables are read lazily, so keeping only the Variable object (as the
    # previous version did) would make the flipud/sentinel-removal steps below
    # fail on a closed dataset.
    field_matrix = netcdf_dataset.variables[
        metadata_dict[FIELD_NAME_COLUMN_ORIG]][:]
    netcdf_dataset.close()

    # Southernmost grid-point latitude, derived from the NW corner.
    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] * (
            metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))

    grid_point_latitudes_deg, grid_point_longitudes_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))

    field_matrix = _remove_sentinels_from_full_grid(
        field_matrix, metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])

    # Flip rows so latitude decreases with row index, and reverse the latitude
    # vector to match.
    return (numpy.flipud(field_matrix), grid_point_latitudes_deg[::-1],
            grid_point_longitudes_deg)
def write_field_to_myrorss_file(
        field_matrix, netcdf_file_name, field_name, metadata_dict,
        height_m_asl=None):
    """Writes field to MYRORSS-formatted file.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with one radar variable at one
        time.  Latitude should increase down each column, and longitude should
        increase to the right along each row.
    :param netcdf_file_name: Path to output file.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param metadata_dict: Dictionary created by either
        `gridrad_io.read_metadata_from_full_grid_file` or
        `read_metadata_from_raw_file`.
    :param height_m_asl: Height of radar field (metres above sea level).
    """

    # Reflectivity is the only field for which `height_m_asl` is passed
    # through; other fields get their canonical height from the lookup.
    if field_name == radar_utils.REFL_NAME:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID,
                refl_heights_m_asl=numpy.array([height_m_asl])))
    else:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID))

    field_name = list(field_to_heights_dict_m_asl.keys())[0]
    radar_height_m_asl = field_to_heights_dict_m_asl[field_name][0]

    # Echo tops are written in km, per MYRORSS convention.
    if field_name in radar_utils.ECHO_TOP_NAMES:
        field_matrix = METRES_TO_KM * field_matrix

    field_name_myrorss = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=radar_utils.MYRORSS_SOURCE_ID)

    file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
    netcdf_dataset = Dataset(
        netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')

    # Global attributes expected by MYRORSS readers.
    netcdf_dataset.setncattr(
        FIELD_NAME_COLUMN_ORIG, field_name_myrorss)
    netcdf_dataset.setncattr('DataType', 'SparseLatLonGrid')
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LAT_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LNG_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            LATLNG_MULTIPLE_DEG))

    # Use the builtin `float` here (and for 'SubType-value' below):
    # `numpy.float` was deprecated in NumPy 1.20 and removed in NumPy 1.24.
    # It was only ever an alias of builtin float, so behaviour is unchanged.
    netcdf_dataset.setncattr(
        HEIGHT_COLUMN_ORIG,
        METRES_TO_KM * float(radar_height_m_asl))
    netcdf_dataset.setncattr(
        UNIX_TIME_COLUMN_ORIG,
        numpy.int32(metadata_dict[radar_utils.UNIX_TIME_COLUMN]))
    netcdf_dataset.setncattr('FractionalTime', 0.)
    netcdf_dataset.setncattr('attributes', ' ColorMap SubType Unit')
    netcdf_dataset.setncattr('ColorMap-unit', 'dimensionless')
    netcdf_dataset.setncattr('ColorMap-value', '')
    netcdf_dataset.setncattr('SubType-unit', 'dimensionless')
    netcdf_dataset.setncattr('SubType-value', float(radar_height_m_asl))
    netcdf_dataset.setncattr('Unit-unit', 'dimensionless')
    netcdf_dataset.setncattr('Unit-value', 'dimensionless')
    netcdf_dataset.setncattr(
        LAT_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        LNG_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[0], numpy.double(-99000.))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[1], numpy.double(-99001.))

    # Reconstruct the lat-long grid from the NW corner and grid spacings.
    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] *
        (metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
    unique_grid_point_lats_deg, unique_grid_point_lngs_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))

    num_grid_rows = len(unique_grid_point_lats_deg)
    num_grid_columns = len(unique_grid_point_lngs_deg)
    field_vector = numpy.reshape(field_matrix, num_grid_rows * num_grid_columns)

    grid_point_lat_matrix, grid_point_lng_matrix = (
        grids.latlng_vectors_to_matrices(
            unique_grid_point_lats_deg, unique_grid_point_lngs_deg))
    grid_point_lat_vector = numpy.reshape(
        grid_point_lat_matrix, num_grid_rows * num_grid_columns)
    grid_point_lng_vector = numpy.reshape(
        grid_point_lng_matrix, num_grid_rows * num_grid_columns)

    # Sparse format: only non-NaN values are written.
    real_value_indices = numpy.where(numpy.invert(numpy.isnan(field_vector)))[0]

    netcdf_dataset.createDimension(
        NUM_LAT_COLUMN_ORIG, num_grid_rows - 1)
    netcdf_dataset.createDimension(
        NUM_LNG_COLUMN_ORIG, num_grid_columns - 1)
    netcdf_dataset.createDimension(
        NUM_PIXELS_COLUMN_ORIG, len(real_value_indices))

    row_index_vector, column_index_vector = radar_utils.latlng_to_rowcol(
        grid_point_lat_vector, grid_point_lng_vector,
        nw_grid_point_lat_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
        nw_grid_point_lng_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
        lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
        lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN])

    netcdf_dataset.createVariable(
        field_name_myrorss, numpy.single, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_ROW_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_COLUMN_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        NUM_GRID_CELL_COLUMN_ORIG, numpy.int32, (NUM_PIXELS_COLUMN_ORIG,))

    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'BackgroundValue', numpy.int32(-99900))
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'units', 'dimensionless')
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'NumValidRuns', numpy.int32(len(real_value_indices)))

    netcdf_dataset.variables[field_name_myrorss][:] = field_vector[
        real_value_indices]
    netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:] = (
        row_index_vector[real_value_indices])
    netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:] = (
        column_index_vector[real_value_indices])
    # Each written pixel is its own run of length 1.
    netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:] = (
        numpy.full(len(real_value_indices), 1, dtype=int))

    netcdf_dataset.close()
| 42.365854 | 80 | 0.731041 |
ee66be524d32778f359946d067c84065472b72da | 94 | py | Python | node-runner-cli/setup/__init__.py | stuartbain/node-runner | 89d10986dbc79da06df402cb17f3edec736f3709 | [
"Apache-2.0"
] | 18 | 2018-11-26T13:22:10.000Z | 2022-03-28T12:41:44.000Z | node-runner-cli/setup/__init__.py | stuartbain/node-runner | 89d10986dbc79da06df402cb17f3edec736f3709 | [
"Apache-2.0"
] | 30 | 2018-09-12T06:40:03.000Z | 2021-09-24T13:46:59.000Z | node-runner-cli/setup/__init__.py | stuartbain/node-runner | 89d10986dbc79da06df402cb17f3edec736f3709 | [
"Apache-2.0"
] | 12 | 2018-09-24T01:57:02.000Z | 2022-03-07T17:55:13.000Z | from setup.Base import Base
from setup.Docker import Docker
from setup.SystemD import SystemD
| 23.5 | 33 | 0.840426 |
ee6793056d92226902cff484562e9055263810e1 | 10,325 | bzl | Python | config/bazel/repositories.bzl | nala-cub/coda | 581608cfc4d9b485182c6f5f40dd2ab7540cec66 | [
"Apache-2.0"
] | 1 | 2021-11-13T06:19:22.000Z | 2021-11-13T06:19:22.000Z | config/bazel/repositories.bzl | nala-cub/coda | 581608cfc4d9b485182c6f5f40dd2ab7540cec66 | [
"Apache-2.0"
] | 1 | 2021-12-21T17:56:58.000Z | 2021-12-21T18:16:27.000Z | config/bazel/repositories.bzl | nala-cub/coda | 581608cfc4d9b485182c6f5f40dd2ab7540cec66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Cory Paik. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Research repositories """
load("//tools:maybe_http.bzl", "http_archive")
def research_repositories():
    """Declares external repositories (HTTP archives) used by the workspace."""

    # Override tensorflow @rules_python version. As of 2021-09-21, the only
    # target for which tensorflow uses @rules_python is:
    # @org_tensorflow//tensorflow/platform/python/platform:platform
    # This uses @rules_python//python/runfiles, which still exists in v0.4.0.
    http_archive(
        name = "rules_python",
        sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
            "https://github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
        ],
    )
    ############################################################################
    # JAX & TensorFlow
    # TensorFlow is pinned to a commit and patched: JAX's upstream patch plus
    # local sqlite/pyconfig patches from //third_party.
    http_archive(
        name = "org_tensorflow",
        patch_args = ["-p1"],
        patches = [
            "@com_google_jax//third_party:tensorflow.patch",
            Label("//third_party:tensorflow-sqlite.patch"),
            Label("//third_party:tensorflow-pyconfig.patch"),
        ],
        sha256 = "6b14b66a74728736359afcb491820fa3e713ea4a74bff0defe920f3453a3a0f0",
        strip_prefix = "tensorflow-b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee",
        urls = [
            "https://github.com/tensorflow/tensorflow/archive/b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee.tar.gz",
        ],
    )
    # JAX, from a fork pinned to a commit.
    http_archive(
        name = "com_google_jax",
        sha256 = "a2f6e35e0d1b5d2bed88e815d27730338072601003fce93e6c49442afa3d8d96",
        strip_prefix = "jax-c3bacb49489aac6eb565611426022b3dd2a430fa",
        urls = [
            "https://github.com/corypaik/jax/archive/c3bacb49489aac6eb565611426022b3dd2a430fa.tar.gz",
        ],
    )
    ############################################################################
    # General build tooling.
    http_archive(
        name = "bazel_gazelle",
        sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
            "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
        ],
    )
    http_archive(
        name = "com_github_bazelbuild_buildtools",
        sha256 = "b8b69615e8d9ade79f3612311b8d0c4dfe01017420c90eed11db15e9e7c9ff3c",
        strip_prefix = "buildtools-4.2.1",
        url = "https://github.com/bazelbuild/buildtools/archive/4.2.1.tar.gz",
    )
    # we rely on dbx_build_tools for the built-in python interpreter deps.
    http_archive(
        name = "dbx_build_tools",
        patch_args = ["-p1"],
        sha256 = "151b77cf5d1b06884bc2da350322e33ef5289237622196467988894c57616a0c",
        strip_prefix = "dbx_build_tools-a5ae53031f11d9114cdbc40da8a84b5d28af58f7",
        urls = ["https://github.com/dropbox/dbx_build_tools/archive/a5ae53031f11d9114cdbc40da8a84b5d28af58f7.tar.gz"],
    )
    # zstd is built from source at fetch time (patch_cmds runs `make zstd`)
    # and exposes only the resulting binary.
    http_archive(
        name = "facebook_zstd",
        build_file_content = """exports_files(["zstd"])""",
        patch_cmds = ["make zstd"],
        sha256 = "5194fbfa781fcf45b98c5e849651aa7b3b0a008c6b72d4a0db760f3002291e94",
        strip_prefix = "zstd-1.5.0",
        urls = ["https://github.com/facebook/zstd/releases/download/v1.5.0/zstd-1.5.0.tar.gz"],
    )
    http_archive(
        name = "io_bazel_stardoc",
        sha256 = "cd3d1e483eddf9f73db2bd466f329e1d10d65492272820eda57540767c902fe2",
        strip_prefix = "stardoc-0.5.0",
        urls = ["https://github.com/bazelbuild/stardoc/archive/0.5.0.tar.gz"],
    )
    # Overwrite @dbx_build_tools version of cpython3.8. Note that we use the
    # same version, just with a different BUILD file. We could (and used to)
    # just use a patch, but it becomes frustrating to make fixes and we'd like
    # to avoid another having yet another submodule.
    http_archive(
        name = "org_python_cpython_38",
        build_file = _clean_dep("//third_party/cpython:python38.BUILD"),
        sha256 = "75894117f6db7051c1b34f37410168844bbb357c139a8a10a352e9bf8be594e8",
        strip_prefix = "Python-3.8.1",
        urls = ["https://www.python.org/ftp/python/3.8.1/Python-3.8.1.tar.xz"],
    )
    _py_repositories()
    # for specific projects
    _coda_repositories()
| 43.200837 | 132 | 0.670799 |
ee682792cda511c74c606d02749117ec478e5c63 | 177 | py | Python | src/apps/dive_log/apps.py | GotlingSystem/apnea | 6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1 | [
"MIT"
] | null | null | null | src/apps/dive_log/apps.py | GotlingSystem/apnea | 6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1 | [
"MIT"
] | 3 | 2015-02-14T18:51:19.000Z | 2015-02-24T07:44:05.000Z | src/apps/dive_log/apps.py | GotlingSystem/apnea | 6b2c0bdaa3733b5ec19456aae6177da4a13ab7d1 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
| 25.285714 | 55 | 0.768362 |
ee699a71ac54286cafed23dd6c6819d85173b00b | 3,051 | py | Python | app/core/settings/settings.py | Radarslan/stocks | d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f | [
"MIT"
] | null | null | null | app/core/settings/settings.py | Radarslan/stocks | d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f | [
"MIT"
] | null | null | null | app/core/settings/settings.py | Radarslan/stocks | d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f | [
"MIT"
] | null | null | null | import json
import logging
import sys
from decouple import config
# general
ENVIRONMENT: str = config("ENVIRONMENT", "docker")
API_VERSION: str = config("API_VERSION", "/api")
PROJECT_NAME: str = config("PROJECT_NAME", "Stocks")
BACKEND_CORS_ORIGINS: str = config("BACKEND_CORS_ORIGINS", "*")
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# logging
MILLISECONDS_LENGTH = 3
MODULE_NAME_LENGTH = 20
LINE_NUMBER_LENGTH = 5
LOGGING_LEVEL_NAME_LENGTH = 8
LOG_FORMAT = (
f"[%(asctime)s"
f".%(msecs){MILLISECONDS_LENGTH}d] "
f"[%(module){MODULE_NAME_LENGTH}s] "
f"[%(lineno){LINE_NUMBER_LENGTH}d] "
f"[%(levelname){LOGGING_LEVEL_NAME_LENGTH}s]: "
f"%(message)s"
)
logging.basicConfig(
datefmt=DATETIME_FORMAT,
format=LOG_FORMAT,
level=logging.DEBUG,
stream=sys.stdout,
force=True,
)
# time periods
HALF_AN_HOUR = 1800
# database
DATABASE_PASSWORD: str = config("DATABASE_PASSWORD", "gibberish")
DATABASE_HOST: str = config(
"DATABASE_HOST", "database" if ENVIRONMENT == "docker" else "127.0.0.1"
)
DATABASE_PORT: int = config("DATABASE_PORT", 5005, cast=int)
DATABASE_NAME: int = config("DATABASE_NAME", 0, cast=int)
TIME_TO_LIVE_IN_SECONDS: int = config(
"TIME_TO_LIVE_IN_SECONDS", HALF_AN_HOUR, cast=int
)
# sockets
BINANCE_WEB_SOCKET_URL: str = config(
"BINANCE_WEB_SOCKET_URL",
"wss://stream.binance.com:9443/stream?streams=!miniTicker@arr",
)
SOCKET_MESSAGE_LENGTH: int = config("SOCKET_MESSAGE_LENGTH", 4096, cast=int)
SOCKET_DISCONNECT_MESSAGE: str = config(
"SOCKET_DISCONNECT_MESSAGE", "DISCONNECTED!"
)
ENCODING_FORMAT: str = "utf-8"
LOCAL_APP_CFG = """
{
"SOCKET_CONNECTIONS": [
{
"url_slug": "dxfeed",
"source_type": "dxfeed",
"HOST": "127.0.0.1",
"PORT": 1234
},
{
"url_slug": "dxfeed",
"source_type": "mc_fix",
"HOST": "127.0.0.1",
"PORT": 4321
}
]
}
"""
LOCAL_APP_CFG = """
{
"SOCKET_CONNECTIONS": [
{
"url_slug": "dxfeed",
"source_type": "dxfeed",
"HOST": "127.0.0.1",
"PORT": 1234
},
{
"url_slug": "dxfeed",
"source_type": "mc_fix",
"HOST": "127.0.0.1",
"PORT": 4321
}
]
}
"""
APP_CFG = config("APP_CFG", LOCAL_APP_CFG)
try:
if ENVIRONMENT == "localhost":
SOCKET_CONNECTIONS = json.loads(LOCAL_APP_CFG).get(
"SOCKET_CONNECTIONS"
)
else:
SOCKET_CONNECTIONS = json.loads(APP_CFG).get("SOCKET_CONNECTIONS")
SOCKET_SOURCE_TYPES = {
f"{connection.get('PORT')}": connection.get("source_type")
for connection in SOCKET_CONNECTIONS
}
except Exception as e:
logging.error("failed to get socket connections configuration")
logging.error(e)
sys.exit(1)
# data validation
ASSET_DECIMAL_PLACES = 10
| 25.855932 | 76 | 0.59587 |
ee6ce037321b65c4af02a18fbc8b39f5c8feab5e | 3,673 | py | Python | DCP_13.py | sgorlick/dailycodingproblem.com-solns | b7e006070fab3c69b0e6a95bd1ce51e642d7f0a0 | [
"MIT"
] | null | null | null | DCP_13.py | sgorlick/dailycodingproblem.com-solns | b7e006070fab3c69b0e6a95bd1ce51e642d7f0a0 | [
"MIT"
] | null | null | null | DCP_13.py | sgorlick/dailycodingproblem.com-solns | b7e006070fab3c69b0e6a95bd1ce51e642d7f0a0 | [
"MIT"
] | null | null | null | #This problem was asked by Amazon.
#Given an integer k and a string s, find the length of the longest substring that contains at most k distinct characters.
#For example, given s = "abcba" and k = 2, the longest substring with k distinct characters is "bcb".
#test example from prompt
s = "abcba"
k = 2
DCP_13(s,k)
#3
#test where k = total number of elements in s
s = "abcba"
k = 3
DCP_13(s,k)
#5
#test where k > total number of elements in s
s = "abcba"
k = 4
DCP_13(s,k)
#5
#test repeated values
s = "bbcba"
k = 2
DCP_13(s,k)
#4
#test longer strings
s = 'find the length of the longest substring that contains at most k distinct characters'
k = 5
DCP_13(s,k)
#8
#test karger values of k
s = 'Given an integer k and a string s, find the length of the longest substring that contains at most k distinct characters.'
k = 16
DCP_13(s,k)
#64
#solve time : 1h50m incl notes,comments
#the main challenges here were making the function robust to repeated elements.
#i tried to implement dynamic programming to speed up excecution time.
##beta soln's, complex and dont handle special cases (repeated digits, etc.)
#def DCP_13(s,k):
# #trivial if k=1
# if k == 1:
# print(1)
# else:
# #count rightmost digit index added to sol'n
# i=0
# #count leftmost digit index saved to sol'n
# j=0
# #starting sol'n w at the beginning of the string
# left=s[i:k+i]
# #save sol'n value
# out=str()
# #iterate over all sections of the string
# while i+k < len(s):
# i=i+1
# #store the next possible starting point w/ k-elements
# right=s[j:k+i]
# if len(set(right)) > k :
# j=j+1
# right=s[j:k+i]
# #test whether the adjacent starting points have the same elements
# if set(left).issubset( set(right) ) :#set(left) == set(right):
# left=s[j:k+i]
# out=left
# else:
# left=s[i:k+i]
# j=j+1
# print(len(out))
#finish:10:58
#
#def DCP_13(s,k):
# i=0
# j=0
# left=str()
# out=str()
# while j+1 != i :
# while len(set(left)) <= k:
# left=s[j:i+1]
# i=i+1
# out = left[:-1]
# j=j+len(out)
# left=str()
# return print(len(out))
| 26.615942 | 159 | 0.555949 |
ee6e8e289a9de7e4f9d0b9c903a761ab4c91411d | 4,049 | py | Python | Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | import base64
import hashlib
import re
import string
import itertools
from crypto_commons.netcat.netcat_commons import receive_until_match, nc, send, receive_until
from crypto_commons.symmetrical.symmetrical import set_byte_cbc, set_cbc_payload_for_block
main()
| 44.988889 | 141 | 0.594221 |
ee700eb445091218a56c4ea006f8edce90b03bd2 | 8,170 | py | Python | tarpn/settings.py | tarpn/tarpn-node-controller | f5841e9181f24935c507993500d22e9b313c0f23 | [
"MIT"
] | 4 | 2020-07-26T20:33:09.000Z | 2022-02-14T00:17:27.000Z | tarpn/settings.py | tarpn/tarpn-node-controller | f5841e9181f24935c507993500d22e9b313c0f23 | [
"MIT"
] | 9 | 2020-08-01T21:29:55.000Z | 2022-01-03T00:45:17.000Z | tarpn/settings.py | tarpn/tarpn-node-controller | f5841e9181f24935c507993500d22e9b313c0f23 | [
"MIT"
] | 3 | 2020-08-26T18:37:55.000Z | 2022-02-14T00:18:01.000Z | import configparser
import re
import sys
import os
from typing import Optional, Mapping, Iterator, Any, List, Dict
_default_settings = {
"node": {
"id.message": "Terrestrial Amateur Radio Packet Network node ${node.alias} op is ${node.call}",
"id.interval": 600,
"admin.enabled": False,
"admin.listen": "0.0.0.0",
"admin.port": 8888
},
"network": {
"netrom.ttl": 7,
"netrom.obs.min": 4,
"netrom.obs.init": 6,
"netrom.nodes.quality.min": 73,
"netrom.nodes.interval": 300
}
}
_default_port_settings = {
"port.enabled": True,
"serial.timeout": 0.100
}
| 30.599251 | 103 | 0.59645 |
ee709ac2d49de9a25f6994afec04b8339c1c352a | 1,952 | py | Python | mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import rebuild_tree | 43.377778 | 111 | 0.76332 |
ee70bc7fa006c6b656696699e7b20490a6b297e1 | 1,709 | py | Python | gui/web.py | irfanchahyadi/Scraping-komikid | 79db8f4e617b489a31f4c0161d665e0d3bd47d07 | [
"MIT"
] | 3 | 2021-06-20T15:26:42.000Z | 2021-09-13T08:20:47.000Z | gui/web.py | irfanchahyadi/Scraping-komikid | 79db8f4e617b489a31f4c0161d665e0d3bd47d07 | [
"MIT"
] | 1 | 2021-11-20T11:09:41.000Z | 2021-11-20T11:09:41.000Z | gui/web.py | irfanchahyadi/Scraping-komikid | 79db8f4e617b489a31f4c0161d665e0d3bd47d07 | [
"MIT"
] | 2 | 2021-09-04T11:49:13.000Z | 2021-11-03T11:01:47.000Z |
"""
Web GUI
Author: Irfan Chahyadi
Source: github.com/irfanchahyadi/Scraping-Manga
"""
# IMPORT REQUIRED PACKAGE
from flask import Flask, render_template, request, redirect, url_for, Response
import os, webbrowser, time
from gui import web_api
import main
app = Flask(__name__)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
webbrowser.open_new_tab('http://localhost:5000/')
app.run(host='0.0.0.0')
| 24.768116 | 136 | 0.675834 |
ee7114274f05df3d5f9b0b4f95761fdb8ac8dbcd | 4,144 | py | Python | Python/index_finder.py | jgruselius/misc | ae4aa6c72cebed1ef0160f95488e3827fbf706c9 | [
"Apache-2.0"
] | 1 | 2018-09-28T12:12:17.000Z | 2018-09-28T12:12:17.000Z | Python/index_finder.py | jgruselius/misc | ae4aa6c72cebed1ef0160f95488e3827fbf706c9 | [
"Apache-2.0"
] | null | null | null | Python/index_finder.py | jgruselius/misc | ae4aa6c72cebed1ef0160f95488e3827fbf706c9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Author: Joel Gruselius, Dec 2018
# Script for checking index clashes
# Input one or several nucleotide sequences and print any matches found in
# the index reference file. This version is only good for checking for
# full matches.
# It is pretty useful though to list overlapping indexes in the reference file.
# Usage:
# index_finder --ref <reference_list> <index_seq>...
# TODO: Show sequences matching the first six bases not just complete matches
# TODO: Specify cache dir
import sys
import argparse
import re
import hashlib
import json
import os
import errno
COMPL_MAP = {"A": "T", "T": "A", "C": "G", "G": "C"}
# Build a dict of know index sequences from a text file:
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Find index clashes")
g = p.add_mutually_exclusive_group(required=True)
g.add_argument("--seqs", nargs="+", help="All sequences to search for")
g.add_argument("--list", action="store_true", default=False,
help="Print non-unique indexes in the reference list")
p.add_argument("--ref", required=True, help="Reference text file containing"
" known index sequences")
p.add_argument("--rebuild", action="store_true", help="Don't use any cached"
" reference object")
p.add_argument("--length", type=int, choices=range(4,8), help="Set the "
"number of letters to consider, both in the query strings and "
"when building the reference")
main(p.parse_args())
| 33.152 | 80 | 0.602799 |
ee7168cfeda4b2b5f89ba093e9c94ab09fd2c935 | 658 | py | Python | AwardsApp/migrations/0007_auto_20210720_2237.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | AwardsApp/migrations/0007_auto_20210720_2237.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | AwardsApp/migrations/0007_auto_20210720_2237.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-20 19:37
import cloudinary.models
from django.db import migrations
| 26.32 | 92 | 0.630699 |
ee71bab2985f3631fdcc1485aee64b956e3c9e71 | 1,022 | py | Python | Lesson 1/lesson-1-quiz.py | 2series/Data-Structures-and-Algorithms | 68d26dd1e5a21a414321fe70e6fedefd561b0ee9 | [
"MIT"
] | 1 | 2020-02-23T07:40:39.000Z | 2020-02-23T07:40:39.000Z | Lesson 1/lesson-1-quiz.py | 2series/Data-Structures-and-Algorithms | 68d26dd1e5a21a414321fe70e6fedefd561b0ee9 | [
"MIT"
] | null | null | null | Lesson 1/lesson-1-quiz.py | 2series/Data-Structures-and-Algorithms | 68d26dd1e5a21a414321fe70e6fedefd561b0ee9 | [
"MIT"
] | 3 | 2019-12-05T11:04:58.000Z | 2020-02-26T10:42:08.000Z | """input manatees: a list of "manatees", where one manatee is represented by a dictionary
a single manatee has properties like "name", "age", et cetera
n = the number of elements in "manatees"
m = the number of properties per "manatee" (i.e. the number of keys in a manatee dictionary)"""
# Efficiency: O(n)
# Efficiency: O(1)
# Efficiency: O(n*m)
# Efficiency: O(n^2) | 32.967742 | 95 | 0.657534 |
ee721578168ba6c38ea84e55b427798b1b341a75 | 695 | py | Python | warehouse/tests.py | thegangtechnology/thairod-django | b073186a4b5bc42dfef99685b3da30abf8e42862 | [
"MIT"
] | null | null | null | warehouse/tests.py | thegangtechnology/thairod-django | b073186a4b5bc42dfef99685b3da30abf8e42862 | [
"MIT"
] | 3 | 2021-07-27T13:11:36.000Z | 2021-08-10T22:54:55.000Z | warehouse/tests.py | thegangtechnology/thairod-django | b073186a4b5bc42dfef99685b3da30abf8e42862 | [
"MIT"
] | null | null | null | from django.urls import reverse
from address.models import Address
from core.tests import BaseTestSimpleApiMixin
from thairod.utils.test_util import APITestCase
from warehouse.models import Warehouse
| 31.590909 | 81 | 0.676259 |
ee727f9a4edfd776db2f6156ac18429d56618c95 | 4,490 | py | Python | src/meltano/api/app.py | dotmesh-io/meltano | 4616d44ded9dff4e9ad19a9004349e9baa16ddd5 | [
"MIT"
] | 4 | 2019-12-01T12:47:58.000Z | 2021-02-09T00:42:52.000Z | src/meltano/api/app.py | dotmesh-io/meltano | 4616d44ded9dff4e9ad19a9004349e9baa16ddd5 | [
"MIT"
] | 38 | 2019-12-09T06:53:33.000Z | 2022-03-29T22:29:19.000Z | src/meltano/api/app.py | dotmesh-io/meltano | 4616d44ded9dff4e9ad19a9004349e9baa16ddd5 | [
"MIT"
] | 1 | 2020-11-23T20:47:18.000Z | 2020-11-23T20:47:18.000Z | import datetime
import logging
import logging.handlers
import os
import atexit
from flask import Flask, request, g
from flask_login import current_user
from flask_cors import CORS
from importlib import reload
from urllib.parse import urlsplit
import meltano
from meltano.core.project import Project
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.plugin.error import PluginMissingError
from meltano.core.plugin.settings_service import (
PluginSettingsService,
PluginSettingMissingError,
)
from meltano.core.config_service import ConfigService
from meltano.core.compiler.project_compiler import ProjectCompiler
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.db import project_engine
logger = logging.getLogger(__name__)
| 29.346405 | 81 | 0.704677 |
ee7343721934bb1607af511c0969882332910b83 | 24,456 | py | Python | rsTools/utils/openMaya/deformer.py | robertstratton630/rigTools | cdc9530bf12ac46654860443c2c264fce619dbd0 | [
"MIT"
] | null | null | null | rsTools/utils/openMaya/deformer.py | robertstratton630/rigTools | cdc9530bf12ac46654860443c2c264fce619dbd0 | [
"MIT"
] | null | null | null | rsTools/utils/openMaya/deformer.py | robertstratton630/rigTools | cdc9530bf12ac46654860443c2c264fce619dbd0 | [
"MIT"
] | null | null | null | import maya.cmds as cmds
import re
import rsTools.utils.openMaya.dataUtils as dUtils
import maya.OpenMayaAnim as OpenMayaAnimOld
import maya.OpenMaya as OpenMayaOld
import maya.api.OpenMaya as om
import maya.api.OpenMayaAnim as oma
'''
isDeformer("rig_normalPushq")
getDeformerList("pSphere1",nodeType='geometryFilter')
getDeformerFn("rig_normalPushq")
getDeformerSet("rig_normalPushq")
getDeformerSetFn("rig_normalPushq")
q = getDeformerSetMembers("rig_normalPushq")
p = getDeformerSetMemberStrList("rig_normalPushq")
s = getAffectedGeometry("rig_normalPushq")
weights = getWeights("rig_normalPushq")
'''
def getDeformerSetMembers(deformer, geometry=''):
'''
Return the deformer set members of the specified deformer.
You can specify a shape name to query deformer membership for.
Otherwise, membership for the first affected geometry will be returned.
Results are returned as a list containing an MDagPath to the affected shape and an MObject for the affected components.
@param deformer: Deformer to query set membership for
@type deformer: str
@param geometry: Geometry to query deformer set membership for. Optional.
@type geometry: str
'''
# Get deformer function sets
deformerSetFn = getDeformerSetFn(deformer)
# Get deformer set members
deformerSetSel = deformerSetFn.getMembers(True)
# Get geometry index
if geometry:
geomIndex = getGeomIndex(geometry, deformer)
else:
geomIndex = 0
# Get number of selection components
deformerSetLen = deformerSetSel.length()
if geomIndex >= deformerSetLen:
raise Exception('Geometry index out of range! (Deformer: "'+deformer+'", Geometry: "' +
geometry+'", GeoIndex: '+str(geomIndex)+', MaxIndex: '+str(deformerSetLen)+')')
# Get deformer set members
data = deformerSetSel.getDagPath(geomIndex)
# Return result
return data
def getDeformerSetMemberStrList(deformer, geometry=''):
'''
Return the deformer set members of the specified deformer as a list of strings.
You can specify a shape name to query deformer membership for.
Otherwise, membership for the first affected geometry will be returned.
@param deformer: Deformer to query set membership for
@type deformer: str
@param geometry: Geometry to query deformer set membership for. Optional.
@type geometry: str
'''
# Get deformer function sets
deformerSetFn = getDeformerSetFn(deformer)
# Get deformer set members
deformerSetSel = om.MSelectionList()
deformerSetFn.getMembers(deformerSetSel, True)
# Convert to list of strings
setMemberStr = []
deformerSetSel.getSelectionStrings(setMemberStr)
setMemberStr = cmds.ls(setMemberStr, fl=True)
# Return Result
return setMemberStr
def getGeomIndex(geometry, deformer):
'''
Returns the geometry index of a shape to a specified deformer.
@param geometry: Name of shape or parent transform to query
@type geometry: str
@param deformer: Name of deformer to query
@type deformer: str
'''
# Verify input
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check geometry
geo = geometry
if cmds.objectType(geometry) == 'transform':
try:
geometry = cmds.listRelatives(
geometry, s=True, ni=True, pa=True)[0]
except:
raise Exception('Object "'+geo+'" is not a valid geometry!')
geomObj = dUtils.getMObject(geometry)
# Get geometry index
deformerObj = dUtils.getMObject(deformer)
deformerFn = oma.MFnGeometryFilter(deformerObj)
try:
geomIndex = deformerFn.indexForOutputShape(geomObj)
except:
raise Exception('Object "'+geometry +
'" is not affected by deformer "'+deformer+'"!')
# Retrun result
return geomIndex
def findInputShape(shape):
'''
Return the input shape ('...ShapeOrig') for the specified shape node.
This function assumes that the specified shape is affected by at least one valid deformer.
@param shape: The shape node to find the corresponding input shape for.
@type shape: str
'''
# Checks
if not cmds.objExists(shape):
raise Exception('Shape node "'+shape+'" does not exist!')
# Get inMesh connection
inMeshConn = cmds.listConnections(
shape+'.inMesh', source=True, destination=False, shapes=True)
if not inMeshConn:
return shape
# Check direct mesh (outMesh -> inMesh) connection
if str(cmds.objectType(inMeshConn[0])) == 'mesh':
return inMeshConn[0]
# Find connected deformer
deformerObj = dUtils.getMObject(inMeshConn[0])
if not deformerObj.hasFn(om.MFn.kGeometryFilt):
deformerHist = cmds.ls(cmds.listHistory(shape), type='geometryFilter')
if not deformerHist:
print('findInputShape.py: Shape node "'+shape +
'" has incoming inMesh connections but is not affected by any valid deformers! Returning "'+shape+'"!')
return shape
#raise Exception('Shape node "'+shape+'" is not affected by any valid deformers!')
else:
deformerObj = dUtils.getMObject(deformerHist[0])
# Get deformer function set
deformerFn = oma.MFnGeometryFilter(deformerObj)
# Get input shape for deformer
shapeObj = dUtils.getMObject(shape)
geomIndex = deformerFn.indexForOutputShape(shapeObj)
inputShapeObj = deformerFn.inputShapeAtIndex(geomIndex)
# Return result
return om.MFnDependencyNode(inputShapeObj).name()
def renameDeformerSet(deformer, deformerSetName=''):
'''
Rename the deformer set connected to the specified deformer
@param deformer: Name of the deformer whose deformer set you want to rename
@type deformer: str
@param deformerSetName: New name for the deformer set. If left as default, new name will be (deformer+"Set")
@type deformerSetName: str
'''
# Verify input
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check deformer set name
if not deformerSetName:
deformerSetName = deformer+'Set'
# Rename deformer set
deformerSet = cmds.listConnections(
deformer+'.message', type='objectSet')[0]
if deformerSet != deformerSetName:
deformerSetName = cmds.rename(deformerSet, deformerSetName)
# Retrun result
return deformerSetName
def bindPreMatrix(deformer, bindPreMatrix='', parent=True):
'''
Create a bindPreMatrix transform for the specified deformer.
@param deformer: Deformer to create bind pre matrix transform for
@type deformer: str
@param bindPreMatrix: Specify existing transform for bind pre matrix connection. If empty, create a new transform
@type bindPreMatrix: str
@param parent: Parent the deformer handle to the bind pre matrix transform
@type deformer: bool
'''
# Check deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
if not cmds.objExists(deformer+'.bindPreMatrix'):
raise Exception('Deformer "'+deformer +
'" does not accept bindPreMatrix connections!')
# Get deformer handle
deformerHandle = cmds.listConnections(deformer+'.matrix', s=True, d=False)
if deformerHandle:
deformerHandle = deformerHandle[0]
else:
raise Exception('Unable to find deformer handle!')
# Check bindPreMatrix
if bindPreMatrix:
if not cmds.objExists(bindPreMatrix):
bindPreMatrix = cmds.createNode('transform', n=bindPreMatrix)
else:
# Build bindPreMatrix transform
prefix = deformerHandle.replace(deformerHandle.split('_')[-1], '')
bindPreMatrix = cmds.createNode('transform', n=prefix+'bindPreMatrix')
# Match transform and pivot
cmds.xform(bindPreMatrix, ws=True, matrix=cmds.xform(
deformerHandle, q=True, ws=True, matrix=True))
cmds.xform(bindPreMatrix, ws=True, piv=cmds.xform(
deformerHandle, q=True, ws=True, rp=True))
# Connect inverse matrix to localize cluster
cmds.connectAttr(
bindPreMatrix+'.worldInverseMatrix[0]', deformer+'.bindPreMatrix', f=True)
# Parent
if parent:
cmds.parent(deformerHandle, bindPreMatrix)
# Return result
return bindPreMatrix
def pruneWeights(deformer, geoList=[], threshold=0.001):
'''
Set deformer component weights to 0.0 if the original weight value is below the set threshold
@param deformer: Deformer to removed components from
@type deformer: str
@param geoList: The geometry objects whose components are checked for weight pruning
@type geoList: list
@param threshold: The weight threshold for removal
@type threshold: str
'''
# Check deformer
if not cmds.objExists(deformer):
raise Exception('Deformer "'+deformer+'" does not exist!')
# Check geometry
if type(geoList) == str:
geoList = [geoList]
if not geoList:
geoList = cmds.deformer(deformer, q=True, g=True)
if not geoList:
raise Exception('No geometry to prune weight for!')
for geo in geoList:
if not cmds.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# For each geometry
for geo in geoList:
# Get deformer member indices
memberIndexList = getDeformerSetMemberIndices(deformer, geo)
# Get weight list
weightList = getWeights(deformer, geo)
# Prune weights
pWeightList = [wt if wt > threshold else 0.0 for wt in weightList]
# Apply pruned weight list
setWeights(deformer, pWeightList, geo)
def pruneMembershipByWeights(deformer, geoList=[], threshold=0.001):
'''
Remove components from a specified deformer set if there weight value is below the set threshold
@param deformer: Deformer to removed components from
@type deformer: str
@param geoList: The geometry objects whose components are checked for removal
@type geoList: list
@param threshold: The weight threshold for removal
@type threshold: str
'''
# Check deformer
if not cmds.objExists(deformer):
raise Exception('Deformer "'+deformer+'" does not exist!')
# Check geometry
if type(geoList) == str:
geoList = [geoList]
if not geoList:
geoList = cmds.deformer(deformer, q=True, g=True)
if not geoList:
raise Exception('No geometry to prune weight for!')
for geo in geoList:
if not cmds.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# Get deformer set
deformerSet = getDeformerSet(deformer)
# For each geometry
allPruneList = []
for geo in geoList:
# Get Component Type
geoType = glTools.utils.geometry.componentType(geo)
# Get Deformer Member Indices
memberIndexList = getDeformerSetMemberIndices(deformer, geo)
# Get Weights
weightList = getWeights(deformer, geo)
# Get Prune List
pruneList = [memberIndexList[i] for i in range(
len(memberIndexList)) if weightList[i] <= threshold]
for i in range(len(pruneList)):
if type(pruneList[i]) == str or type(pruneList[i]) == unicode or type(pruneList[i]) == int:
pruneList[i] = '['+str(pruneList[i])+']'
elif type(pruneList[i]) == list:
pruneList[i] = [str(p) for p in pruneList[i]]
pruneList[i] = '['+']['.join(pruneList[i])+']'
pruneList[i] = geo+'.'+geoType+str(pruneList[i])
allPruneList.extend(pruneList)
# Prune deformer set membership
if pruneList:
cmds.sets(pruneList, rm=deformerSet)
# Return prune list
return allPruneList
def clean(deformer, threshold=0.001):
'''
Clean specified deformer.
Prune weights under the given tolerance and prune membership.
@param deformer: The deformer to clean.
@type deformer: str
@param threshold: Weight value tolerance for prune operations.
@type threshold: float
'''
# Print Message
print('Cleaning deformer: '+deformer+'!')
# Check Deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Prune Weights
glTools.utils.deformer.pruneWeights(deformer, threshold=threshold)
# Prune Membership
glTools.utils.deformer.pruneMembershipByWeights(
deformer, threshold=threshold)
def checkMultipleOutputs(deformer, printResult=True):
'''
Check the specified deformer for multiple ouput connections from a single plug.
@param deformer: Deformer to check for multiple output connections
@type deformer: str
@param printResult: Print results to the script editor
@type printResult: bool
'''
# Check deformer
if not isDeformer(deformer):
raise Exception('Deformer "'+deformer+'" is not a valid deformer!')
# Get outputGeometry plug
outGeomPlug = glTools.utils.attribute.getAttrMPlug(
deformer+'.outputGeometry')
if not outGeomPlug.isArray():
raise Exception('Attribute "'+deformer +
'.outputGeometry" is not an array attribute!')
# Get existing indices
indexList = om.MIntArray()
numIndex = outGeomPlug.getExistingArrayAttributeIndices(indexList)
# Check output plugs
returnDict = {}
for i in range(numIndex):
plugConn = cmds.listConnections(
deformer+'.outputGeometry['+str(indexList[i])+']', s=False, d=True, p=True)
# Check multiple outputs
if len(plugConn) > 1:
# Append to return value
returnDict[deformer+'.outputGeometry[' +
str(indexList[i])+']'] = plugConn
# Print connection info
if printResult:
print('Deformer output "'+deformer+'.outputGeometry['+str(
indexList[i])+']" has '+str(len(plugConn))+' outgoing connections:')
for conn in plugConn:
print('\t- '+conn)
# Return result
return returnDict
| 33.45554 | 123 | 0.677257 |
ee74b61615725492239c5444cd5387bf60c2f49c | 804 | py | Python | util/save_image_worker.py | zigonk/CMPC-Refseg | 0d59c90e9968ed836c695976ff90081e1c24378a | [
"MIT"
] | null | null | null | util/save_image_worker.py | zigonk/CMPC-Refseg | 0d59c90e9968ed836c695976ff90081e1c24378a | [
"MIT"
] | null | null | null | util/save_image_worker.py | zigonk/CMPC-Refseg | 0d59c90e9968ed836c695976ff90081e1c24378a | [
"MIT"
] | null | null | null | import logging
import os
from queue import Queue
from threading import Thread
from time import time
import cv2 | 27.724138 | 62 | 0.609453 |
ee7534e127b9fb25131caade726542eb20c6bbe5 | 208 | py | Python | pesummary/utils/__init__.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2021-08-03T05:58:20.000Z | 2021-08-03T05:58:20.000Z | pesummary/utils/__init__.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2020-06-13T13:29:35.000Z | 2020-06-15T12:45:04.000Z | pesummary/utils/__init__.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 3 | 2021-07-08T08:31:28.000Z | 2022-03-31T14:08:58.000Z | # Licensed under an MIT style license -- see LICENSE.md
from .utils import (
gw_results_file, functions, history_dictionary, command_line_arguments
)
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
| 26 | 74 | 0.759615 |
ee777920db42ef90f8ce8a58fb13a346a19081f4 | 7,444 | py | Python | catalog/views.py | chancald/mask-ecommerce | 1907007e726f989b6d99546e1b03ad5891d65715 | [
"Apache-2.0"
] | null | null | null | catalog/views.py | chancald/mask-ecommerce | 1907007e726f989b6d99546e1b03ad5891d65715 | [
"Apache-2.0"
] | null | null | null | catalog/views.py | chancald/mask-ecommerce | 1907007e726f989b6d99546e1b03ad5891d65715 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.utils import timezone
from django.views.generic import ListView, DetailView, View
from .models import Item, Order, OrderItem, Address, Promo
from .forms import AddressForm, PromoForm
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
messages.success(request, f"{item.title} ya esta en el carrito")
return redirect('product', slug=slug)
else:
order.items.add(order_item)
order.save()
messages.success(request, f"{item.title} fue anadido al carrito")
return redirect('product', slug=slug)
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered=False, ordered_date=ordered_date)
order.items.add(order_item)
order.save()
messages.success(request, f"{item.title} fue anadido al carrito")
return redirect('product', slug=slug)
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
OrderItem.objects.filter(id=order_item.id).delete()
messages.warning(request, f"{item.title} fue eliminado del carrito")
return redirect('product', slug=slug)
else:
messages.warning(request, f"{item.title} no esta en el carrito")
return redirect('product', slug=slug)
else:
messages.warning(request, f"{item.title} no hay una orden activa")
return redirect('product', slug=slug)
def add_item_quantity(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_item.quantity += 1
order_item.save()
return redirect('order_summary')
def remove_item_quantity(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
order = order_qs[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
order.save()
messages.warning(request, f"{item.title} fue eliminado del carrito")
return redirect('order_summary')
def remove_from_cart_summary(request, slug):
    """Delete the item's cart entry from the order-summary page and redirect
    back to the summary.
    """
    item = get_object_or_404(Item, slug=slug)
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    # Fix: the original fetched ``order = order_qs[0]`` but never used it,
    # and that lookup raised IndexError when no active order existed.
    # The delete below only needs the OrderItem row, so the dead (and
    # crash-prone) queryset access is removed.
    OrderItem.objects.filter(id=order_item.id).delete()
    messages.warning(request, f"{item.title} el producto fue eliminado del carrito")
    return redirect('order_summary')
| 42.537143 | 171 | 0.623052 |
ee782ef725478d903e728b0f667018c3fa8766e7 | 20,700 | py | Python | source/scripts/common/Utils.py | hjimmy/glustermg | e43ad5f17b248fa543f0b5d4204baca3c8b18aab | [
"MulanPSL-1.0"
] | null | null | null | source/scripts/common/Utils.py | hjimmy/glustermg | e43ad5f17b248fa543f0b5d4204baca3c8b18aab | [
"MulanPSL-1.0"
] | null | null | null | source/scripts/common/Utils.py | hjimmy/glustermg | e43ad5f17b248fa543f0b5d4204baca3c8b18aab | [
"MulanPSL-1.0"
] | null | null | null | # Copyright (C) 2011 Gluster, Inc. <http://www.gluster.com>
# This file is part of Gluster Management Gateway (GlusterMG).
#
# GlusterMG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# GlusterMG is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
import os
import sys
# Make both this script's directory and its sibling ``common`` directory
# importable before pulling in project modules (Globals, XmlHandler).
p1 = os.path.abspath(os.path.dirname(sys.argv[0]))
p2 = "%s/common" % os.path.dirname(p1)
if not p1 in sys.path:
    sys.path.append(p1)
if not p2 in sys.path:
    sys.path.append(p2)
import re
import syslog
import subprocess
import time
import tempfile
import glob
import commands  # Python 2 only module; this file targets Python 2
import paramiko
import Globals
import XmlHandler
# Sentinel returned when running an external command fails outright.
RUN_COMMAND_ERROR = -1024
LOG_SYSLOG = 1
SYSLOG_REQUIRED = False
# Module-level logging state shared by the logging helpers defined elsewhere
# in this file.
LOG_FILE_NAME = None
LOG_FILE_OBJ = None
logOpened = False
# Non-interactive root SSH invocation using the GlusterMG key; list form for
# subprocess argv, string form for shell-style invocation.
sshCommandPrefix = "ssh -l root -q -i /opt/glustermg/keys/gluster.pem -o BatchMode=yes -o GSSAPIAuthentication=no -o PasswordAuthentication=no -o StrictHostKeyChecking=no".split()
sshCommandPrefixShell = "ssh -l root -q -i /opt/glustermg/keys/gluster.pem -o BatchMode=yes -o GSSAPIAuthentication=no -o PasswordAuthentication=no -o StrictHostKeyChecking=no"
# Backend path is version-specific; fall back to 2.4 when GMG_VERSION is
# unset in the environment.
try:
    commandPath = "/opt/glustermg/%s/backend" % os.environ['GMG_VERSION']
except KeyError, e:
    commandPath = "/opt/glustermg/2.4/backend"
# else:
#     return 2
##########added by bin.liu 2013-4-27
| 32.394366 | 204 | 0.608454 |
ee78cab8fb68e7a82e7fffa023da93ce159c8f3e | 1,243 | py | Python | mnd_utils/video.py | mnicolas94/python_utils | f9491fea3e73c94ef49b5bc844c39b2bc7be8318 | [
"MIT"
] | null | null | null | mnd_utils/video.py | mnicolas94/python_utils | f9491fea3e73c94ef49b5bc844c39b2bc7be8318 | [
"MIT"
] | null | null | null | mnd_utils/video.py | mnicolas94/python_utils | f9491fea3e73c94ef49b5bc844c39b2bc7be8318 | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
| 30.317073 | 105 | 0.687852 |
ee799216d33c9ed30924cce3dbebfa13f696710c | 7,220 | py | Python | taskq/consumer.py | ipsosante/django-taskq | 933893c51bf512983b1ca0fc0b8db523d37c9996 | [
"MIT"
] | null | null | null | taskq/consumer.py | ipsosante/django-taskq | 933893c51bf512983b1ca0fc0b8db523d37c9996 | [
"MIT"
] | 5 | 2018-11-22T13:42:10.000Z | 2019-09-16T13:00:41.000Z | taskq/consumer.py | ipsosante/django-taskq | 933893c51bf512983b1ca0fc0b8db523d37c9996 | [
"MIT"
] | null | null | null | import importlib
import logging
import threading
from time import sleep
import timeout_decorator
from django_pglocks import advisory_lock
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from .constants import TASKQ_DEFAULT_CONSUMER_SLEEP_RATE, TASKQ_DEFAULT_TASK_TIMEOUT
from .exceptions import Cancel, TaskLoadingError, TaskFatalError
from .models import Task
from .scheduler import Scheduler
from .task import Taskify
from .utils import task_from_scheduled_task, traceback_filter_taskq_frames, ordinal
logger = logging.getLogger('taskq')
| 34.380952 | 109 | 0.635457 |
ee7b13e3f8add887be12393c811c00fdb0fd0ddc | 14,786 | py | Python | async_message_bus_test.py | ifurusato/ros | 77b1361e78f68f00ba2d3e3db908bb5ce0f973f5 | [
"MIT"
] | 9 | 2020-10-12T08:49:55.000Z | 2021-07-23T14:20:05.000Z | async_message_bus_test.py | fanmuzhi/ros | 04534a35901341c4aaa9084bff3d46851795357d | [
"MIT"
] | 12 | 2020-07-22T19:08:58.000Z | 2022-02-03T03:17:03.000Z | async_message_bus_test.py | fanmuzhi/ros | 04534a35901341c4aaa9084bff3d46851795357d | [
"MIT"
] | 3 | 2020-07-19T20:43:19.000Z | 2022-03-02T09:15:51.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2021-02-24
# modified: 2021-02-24
#
# see: https://www.aeracode.org/2018/02/19/python-async-simplified/
import sys, time, asyncio, itertools, traceback
from abc import ABC, abstractmethod
from collections import deque as Deque
import uuid
import random
from colorama import init, Fore, Style
init()
from lib.event import Event
from lib.ticker import Ticker
from lib.message import Message
from lib.message_factory import MessageFactory
from lib.logger import Logger, Level
#from mock.ifs import MockIntegratedFrontSensor
# ..............................................................................
# ..............................................................................
# ..............................................................................
# ..............................................................................
# ..............................................................................
# ..............................................................................
# ..............................................................................
# ..............................................................................
# main .........................................................................
#_log = Logger('main', Level.INFO)
def main(argv):
    """Wire up the publisher, ticker and ten subscribers, run them on the
    asyncio event loop until the publisher completes, then report how many
    messages each subscriber still has queued.

    :param argv: command line arguments (currently unused beyond being passed in)
    """
    _log = Logger("main", Level.INFO)
    try:
        _log.info(Fore.BLUE + 'configuring objects...')
        _loop_freq_hz = 10
        _ticker = Ticker(_loop_freq_hz, Level.INFO)
        _message_factory = MessageFactory(Level.INFO)
        _message_bus = MessageBus()
#       _publisher = Publisher(_message_bus)
        _publisher = MyPublisher(_message_factory, _message_bus)
#       _publisher.enable()
        _publish = _publisher.publish(10)

        _log.info(Fore.BLUE + 'generating subscribers...')
        _subscribers = []
        _subscriptions = []
        for x in range(10):
            _subscriber = MySubscriber('s{}'.format(x), _ticker, _message_bus)
            _subscribers.append(_subscriber)
            _subscriptions.append(_subscriber.subscribe())
        _ticker.enable()

        loop = asyncio.get_event_loop()
        _log.info(Fore.BLUE + 'starting loop...')
        loop.run_until_complete(asyncio.gather(_publish, *_subscriptions))
        _log.info(Fore.BLUE + 'closing {} subscribers...'.format(len(_subscribers)))
        for subscriber in _subscribers:
            # Fix: the original printed _subscriber.print_queue_contents(),
            # i.e. the stale loop variable left over from the construction
            # loop above, so every line showed the LAST subscriber's queue.
            _log.info(Fore.BLUE + 'subscriber {} has {:d} messages remaining in queue: {}'.format(subscriber.name, subscriber.queue_length(), subscriber.print_queue_contents()))
        _log.info(Fore.BLUE + 'loop complete.')
    except KeyboardInterrupt:
        _log.info('caught Ctrl-C; exiting...')
    except Exception:
        _log.error('error processing message bus: {}'.format(traceback.format_exc()))
    finally:
        _log.info('exit.')
# call main ....................................................................
if __name__== "__main__":
main(sys.argv[1:])
#EOF
| 42.245714 | 178 | 0.540714 |
ee7ba2306ea22a03b64701fd0713ad3f2419cb98 | 2,113 | py | Python | terrain_gen.py | MrKren/TTA | 3a677337fbcca199a88c64248af89d0889b960dd | [
"MIT"
] | null | null | null | terrain_gen.py | MrKren/TTA | 3a677337fbcca199a88c64248af89d0889b960dd | [
"MIT"
] | null | null | null | terrain_gen.py | MrKren/TTA | 3a677337fbcca199a88c64248af89d0889b960dd | [
"MIT"
] | null | null | null | import pygame
import random
| 30.185714 | 77 | 0.546143 |
ee7bdfb8faa653258cb1a64cd8897b40dfd8e04b | 207 | py | Python | examples/automator/swatcher_quick_action.py | joshbduncan/swatcher | 91e459df75be4c50d38540b8cf49c6c4ed6a5764 | [
"MIT"
] | null | null | null | examples/automator/swatcher_quick_action.py | joshbduncan/swatcher | 91e459df75be4c50d38540b8cf49c6c4ed6a5764 | [
"MIT"
] | null | null | null | examples/automator/swatcher_quick_action.py | joshbduncan/swatcher | 91e459df75be4c50d38540b8cf49c6c4ed6a5764 | [
"MIT"
] | null | null | null | import sys
from swatcher import Swatcher
if __name__ == "__main__":
files = sys.argv[1:]
for file in files:
s = Swatcher(file)
s.export_ase_file()
s.export_palette_image()
| 17.25 | 32 | 0.628019 |
ee7ca142b0ca37407f34d60f1083590fe6f55203 | 1,179 | py | Python | setup.py | jjk01/muMap | fe879039d025d62b51a70a088f3b0b275e134d9b | [
"BSD-3-Clause"
] | 8 | 2021-11-23T10:44:02.000Z | 2022-02-23T20:56:05.000Z | setup.py | jjk01/muMap | fe879039d025d62b51a70a088f3b0b275e134d9b | [
"BSD-3-Clause"
] | null | null | null | setup.py | jjk01/muMap | fe879039d025d62b51a70a088f3b0b275e134d9b | [
"BSD-3-Clause"
] | 2 | 2021-12-05T16:52:15.000Z | 2022-03-31T22:15:09.000Z | #! /usr/bin/env python
from setuptools import setup
VERSION = "1.0"
AUTHOR = "James Klatzow, Virginie Uhlmann"
AUTHOR_EMAIL = "uhlmann@ebi.ac.uk"
setup(
    name="microMatch",
    version=VERSION,
    description="3D shape correspondence for microscopy data",
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    # Only the single ``mumatch`` package is shipped.
    packages=[
        "mumatch",
    ],
    classifiers=[
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved",
        "Programming Language :: C",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Scientific/Engineering",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        # Implicit string concatenation: the two literals join into the
        # single classifier "Programming Language :: Python :: Implementation :: CPython".
        "Programming Language :: Python :: " "Implementation :: CPython",
    ],
    url="https://github.com/uhlmanngroup/muMatch",
    python_requires=">=3.6",
)
| 29.475 | 73 | 0.605598 |
ee7d13ac1bfb50aa14e3d432688e96e955f612d9 | 1,615 | py | Python | scripts/python3-shell-job-example.py | pfeilbr/aws-glue-playground | 52648d527a03e32ae1cc6e2f9fcf418e0875021e | [
"MIT"
] | null | null | null | scripts/python3-shell-job-example.py | pfeilbr/aws-glue-playground | 52648d527a03e32ae1cc6e2f9fcf418e0875021e | [
"MIT"
] | null | null | null | scripts/python3-shell-job-example.py | pfeilbr/aws-glue-playground | 52648d527a03e32ae1cc6e2f9fcf418e0875021e | [
"MIT"
] | null | null | null | import datetime
import time
import boto3
import sys
import os
import importlib
# Echo the runtime context the job was launched with (useful in Glue logs).
print('sys.argv:\n{}\n\n'.format(sys.argv))
print('os.environ:\n{}\n\n'.format(os.environ))

# Only resolve Glue job arguments when running inside the AWS Glue
# environment — the awsglue module is not available locally.
if 'GLUE_INSTALLATION' in os.environ:
    aws_glue_utils = importlib.import_module('awsglue.utils')
    args = aws_glue_utils.getResolvedOptions(sys.argv,
                                             ['example_argument_0',
                                              'example_argument_1'])
    print('example_argument_0 is {}\n\n'.format(args['example_argument_0']))
    print('example_argument_1 is {}\n\n'.format(args['example_argument_1']))

# Build a timestamped scratch file name alongside this script.
ts = time.time()
timestamp_string = datetime.datetime.fromtimestamp(
    ts).strftime('%Y-%m-%d_%H.%M.%S')
s3 = boto3.client('s3')
bucket_name = 'aws-glue-playground-01'
bucket_directory = 'tmp'
print('__file__: {}'.format(__file__))
script_file_path = os.path.abspath(__file__)
print('script_file_path: {}'.format(script_file_path))
script_directory_path = os.path.dirname(script_file_path)
print('script_directory_path: {}'.format(script_directory_path))
local_file_path = os.path.abspath(
    '{}/{}-hello.txt'.format(script_directory_path, timestamp_string))
print('local_file_path: {}'.format(local_file_path))
local_file_name = os.path.basename(local_file_path)
print('local_file_name: {}'.format(local_file_name))

# Fix: write via a context manager so the handle is closed (and the data
# flushed to disk) before the upload. The original
# ``open(path, "w").write(...)`` left the file object's lifetime to the
# garbage collector.
with open(local_file_path, "w") as out_file:
    out_file.write('Hello, world!')

# Upload the scratch file to S3, then remove the local copy.
key = '{}/{}'.format(bucket_directory, local_file_name)
s3.upload_file(local_file_path, bucket_name, key)
os.remove(local_file_path)
| 33.645833 | 82 | 0.712693 |
ee7f1ffa3ae65649a2137010308390975957d2f4 | 7,570 | py | Python | magentoclient.py | smileinnovation/snips-magento-skill | c8fe2d1615fce688bcad9258560895a5798c03c2 | [
"Apache-2.0"
] | null | null | null | magentoclient.py | smileinnovation/snips-magento-skill | c8fe2d1615fce688bcad9258560895a5798c03c2 | [
"Apache-2.0"
] | null | null | null | magentoclient.py | smileinnovation/snips-magento-skill | c8fe2d1615fce688bcad9258560895a5798c03c2 | [
"Apache-2.0"
] | null | null | null | import requests
import time
CLIENT_TOKEN_URI = "rest/V1/integration/customer/token"
GET_CART_URI = "rest/default/V1/carts/mine"
GET_CART_ITEM_URI = "rest/default/V1/carts/mine/items"
ADD_TO_CART_URI = "rest/default/V1/carts/mine/items"
ME_URI = "rest/default/V1/customers/me"
DELETE_ITEM_URI = "rest/default/V1/carts/mine/items/{}"
### SHOULD NOT EXISTS... FOR DEMO PURPOSE ONLY
ADMIN_TOKEN_URI = "rest/V1/integration/admin/token"
ORDER_URI = "rest/default/V1/orders"
ORDER_SEARCH_CRITERIA="searchCriteria[filter_groups][0][filters][0][field]=customer_lastname" \
"&searchCriteria[filter_groups][0][filters][0][value]={}" \
"&searchCriteria[filter_groups][0][filters][0][condition_type]=eq" \
"&searchCriteria[sortOrders][0][field]=created_at"
# Magento API call wrapper : catch 401 and try to recover it by refreshing the auth token
| 36.926829 | 155 | 0.634346 |
ee7fcf375630b878ce593586dacd44dcf8efa3a5 | 108 | py | Python | spaghettiqueue/__main__.py | giorgioshine/SpaghettiQueue | 44944800a41f2fe041a52c6a4c1d06540ea3c834 | [
"MIT"
] | 9 | 2019-08-14T04:42:36.000Z | 2020-11-18T15:48:13.000Z | spaghettiqueue/__main__.py | Tominous/SpaghettiQueue | 44944800a41f2fe041a52c6a4c1d06540ea3c834 | [
"MIT"
] | 1 | 2019-08-15T18:03:17.000Z | 2019-08-15T18:52:06.000Z | spaghettiqueue/__main__.py | Tominous/SpaghettiQueue | 44944800a41f2fe041a52c6a4c1d06540ea3c834 | [
"MIT"
] | 1 | 2019-12-12T12:14:04.000Z | 2019-12-12T12:14:04.000Z | from spaghettiqueue.__init__ import main
main()
#Makes the code executable by doing python -m spaghettiqueue | 36 | 60 | 0.833333 |
ee810690b40aba06e4d511080b16348fc6e69b8a | 533 | py | Python | problem_3/problem_3.py | CaioTeixeira95/Euler | 90e98f4110b7e6dc7d36f53eea0b22cf455ac005 | [
"MIT"
] | null | null | null | problem_3/problem_3.py | CaioTeixeira95/Euler | 90e98f4110b7e6dc7d36f53eea0b22cf455ac005 | [
"MIT"
] | null | null | null | problem_3/problem_3.py | CaioTeixeira95/Euler | 90e98f4110b7e6dc7d36f53eea0b22cf455ac005 | [
"MIT"
] | null | null | null | import math
# A function to print all prime factors of
# a given number n
prime_factor(600851475143)
| 20.5 | 49 | 0.553471 |
ee811e9426fe3dcfed1e5b99abbfc02ac9fd2eea | 8,038 | py | Python | ppdet/modeling/architectures/centernet.py | ZeHuiGong/AFSM | 54af2f072071779789ba0baa4e4270a1403fd0dd | [
"Apache-2.0"
] | 27 | 2020-12-07T10:46:39.000Z | 2021-08-01T08:56:33.000Z | ppdet/modeling/architectures/centernet.py | ZeHuiGong/AFSM | 54af2f072071779789ba0baa4e4270a1403fd0dd | [
"Apache-2.0"
] | 4 | 2020-12-18T08:06:15.000Z | 2021-08-01T02:54:50.000Z | ppdet/modeling/architectures/centernet.py | ZeHuiGong/AFSM | 54af2f072071779789ba0baa4e4270a1403fd0dd | [
"Apache-2.0"
] | 4 | 2020-12-18T04:37:42.000Z | 2020-12-31T02:08:33.000Z | # AUTHOR: Zehui Gong
# DATE: 2020/6/16
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier, Constant
from ppdet.core.workspace import register
import numpy as np
from ppdet.utils.check import check_version
from .cornernet_squeeze import rescale_bboxes
from .input_helper import corner_multiscale_def
from .AdativeFeatureSelection import FeatureFusion, AdaptFeatureFusionV1
__all__ = ['CenterNet']
| 41.647668 | 116 | 0.58323 |
ee81731e37bb731eaceac3e8565f9dcaff9847fa | 55,219 | py | Python | layers.py | kiranscaria/keras_layers | 1934c4c7a13bfc0be40b224fe586d1c0ffa9f18d | [
"MIT"
] | null | null | null | layers.py | kiranscaria/keras_layers | 1934c4c7a13bfc0be40b224fe586d1c0ffa9f18d | [
"MIT"
] | null | null | null | layers.py | kiranscaria/keras_layers | 1934c4c7a13bfc0be40b224fe586d1c0ffa9f18d | [
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Layer, Lambda
from tensorflow.python.keras.layers import InputSpec
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras import initializers, regularizers, constraints, activations
from tensorflow.python.keras.utils import conv_utils
def Resize2D(size, method='bilinear'):
    """Build a Keras ``Lambda`` layer that spatially resizes its input.

    # Arguments
        size: target spatial size ``(rows, cols)``
        method: interpolation method ('bilinear', 'bicubic', 'nearest', ...)
    """
    def _resize(inputs):
        return tf.image.resize(inputs, size, method=method)

    return Lambda(_resize)
| 39.869314 | 146 | 0.559264 |
ee877e586d4bfd06ebea25a4cf5e0bb5c99e4dd0 | 1,488 | py | Python | test_update_resource_property.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | null | null | null | test_update_resource_property.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | 1 | 2015-04-04T01:38:51.000Z | 2015-04-04T01:38:51.000Z | tools/test_update_resource_property.py | mconlon17/vivo-1.5-improvement | 44d8335eb7bbe518374a53c0e1f9f39014023ee7 | [
"BSD-3-Clause"
] | null | null | null | """
test_update_resource_property.py -- Given a VIVO URI, a predicate, and two
URIs -- VIVO resource URI and the source URI, generate the add and subtract
RDF necessary to execute "five case logic" in updating VIVO with an
authoritative source URI.
Note. In common use, the source data is presented with a "key" value, not
a URI. The key value must be translated using a dictionary to a URI. For
example, a person might be referred to in source data via a UFID. The
UFID is translated to a URI using a UFID dictionary.
Version 0.1 MC 2013-12-27
-- Initial version.
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2013, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.1"
import vivotools as vt
from datetime import datetime
print datetime.now(),"Start"
# Each case maps a human-readable description to the pair
# [current VIVO value, authoritative source value], covering all five
# combinations of present/absent values ("five case logic").
cases = {
    "1. VIVO has A, Source Has B": ["A","B"],
    "2. VIVO has A and Source also has A": ["A","A"],
    "3. VIVO has A, source has no value": ["A",None],
    "4. VIVO has no value, Source has B": [None,"B"],
    "5. VIVO has no value and Source also has no value": [None,None]
    }
# Run each case through vivotools.update_resource_property and show the
# add/subtract RDF it generates.
for case in sorted(cases.keys()):
    print "\n",case,":"
    [vivo,source] = cases[case]
    [add,sub] = vt.update_resource_property("http://vivo.uri","http://pred.uri",
        vivo,source)
    print "    Add:"
    print add
    print "    Subtract:"
    print sub
print datetime.now(),"Finish"
| 33.818182 | 80 | 0.635753 |
ee88b24eca82ddcab181129272a9f62d15dd7605 | 36,064 | py | Python | external/pyvista/python/pyvista/spectra.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | 5 | 2019-04-11T13:35:24.000Z | 2019-11-14T06:12:51.000Z | external/pyvista/python/pyvista/spectra.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | null | null | null | external/pyvista/python/pyvista/spectra.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | 5 | 2018-09-20T22:07:43.000Z | 2021-01-15T07:13:38.000Z | import matplotlib
import matplotlib.pyplot as plt
import os
import pdb
import pickle
import copy
import scipy.signal
import scipy.interpolate
import numpy as np
from astropy.modeling import models, fitting
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.io import ascii, fits
from astropy.convolution import convolve, Box1DKernel, Box2DKernel
import pyvista
from pyvista import image
from pyvista import tv
from tools import plots
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/../../'
def get_wavecal(file) :
    """Load and return a pickled WaveCal object stored in ``file``."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def mash(hd, sp=None, bks=None):
    """Collapse a 2-D image into a 1-D spectrum by summing rows.

    Parameters:
        hd : image object exposing a 2-D ``.data`` array (only ``.data``
             is used here)
        sp : [start, end] row window to sum; defaults to all rows
        bks : optional list of [start, end] background row windows; the
              mean of each window's per-column median is subtracted

    Returns:
        1-D array of column sums (background-subtracted if ``bks`` given).
    """
    if sp is None:
        sp = [0, hd.data.shape[0]]
    # Fix: the original computed this sum twice on consecutive lines.
    obj = hd.data[sp[0]:sp[1]].sum(axis=0)
    if bks is not None:
        back = []
        for bk in bks:
            # Fix: the original referenced the undefined name ``data``
            # here (NameError); the background windows come from hd.data.
            back.append(np.median(hd.data[bk[0]:bk[1]], axis=0))
        obj -= np.mean(back, axis=0)
    return obj
def wavecal(hd,file=None,wref=None,disp=None,wid=[3],rad=5,snr=3,degree=2,wcal0=None,thresh=100,type='poly'):
    """Interactively derive a wavelength solution for a single 1D spectrum.

    The central 10 rows of ``hd.data`` are summed into a working spectrum,
    reference lines from ``file`` are centroided, and a WaveCal solution is
    fit iteratively while the user removes bad lines via console input.

    Parameters:
        hd : image object with 2-D ``.data`` and a ``.header``
        file : text file of reference wavelengths (first token per line;
               lines starting with '#' are skipped)
        wref : optional (wavelength, pixel) pair for the initial guess
        disp : dispersion guess; defaults to header card 'DISPDW'
        rad : half-width in pixels of the centroiding window
        degree : order of the fitted wavelength solution
        wcal0 : optional existing WaveCal, bootstrapped via cross-correlation
        thresh : minimum peak counts for a line to be centroided
        type : functional form passed to WaveCal (e.g. 'poly')
        wid, snr : unused here  # NOTE(review): kept for interface compatibility

    Returns:
        (wave, wcal) : wavelength for every pixel, and the WaveCal object.
    """
    # choose middle row +/- 5 rows and remove the smooth continuum
    sz=hd.data.shape
    spec=hd.data[int(sz[0]/2)-5:int(sz[0]/2)+5,:].sum(axis=0)
    spec=spec-scipy.signal.medfilt(spec,kernel_size=101)
    pix = np.arange(len(spec))
    fig,ax = plt.subplots(2,1,sharex=True,figsize=(14,6))
    ax[0].plot(spec)
    # get wavelength guess from input WaveCal if given, else use wref and dispersion, else header
    if wcal0 is not None :
        # cross-correlate against the reference spectrum to find the pixel shift
        lags=range(-300,300)
        fitpeak,shift = image.xcorr(wcal0.spectrum,spec,lags)
        wnew=copy.deepcopy(wcal0)
        wnew.pix0 = wcal0.pix0+shift.argmax()+lags[0]
        print('  Derived pixel shift from input wcal0: ',shift.argmax()+lags[0])
        wav=wnew.wave(pix)
    else :
        # get dispersion guess from header cards if not given in disp
        if disp is None: disp=hd.header['DISPDW']
        if wref is not None :
            w0=wref[0]
            pix0=wref[1]
            wav=w0+(pix-pix0)*disp
        else:
            w0=hd.header['DISPWC']
            pix0=sz[1]/2
            wav=w0+(pix-pix0)*disp
        ax[1].plot(wav,spec)
    # read the reference line list, keeping lines that fall on the detector.
    # Fix: use a context manager instead of the original open()/close() pair
    # so the file is closed even if parsing raises.
    lines=[]
    with open(file,'r') as f :
        for line in f :
            if line[0] != '#' :
                w=float(line.split()[0])
                name=line[10:].strip()
                lpix=abs(w-wav).argmin()
                if lpix > 1 and lpix < sz[1]-1 :
                    ax[0].text(lpix,0.,'{:7.1f}'.format(w),rotation='vertical',va='top',ha='center')
                    lines.append(w)
    lines=np.array(lines)
    # get centroid around expected lines (only peaks above thresh counts)
    cents=[]
    for line in lines :
        peak=abs(line-wav).argmin()
        if (peak > rad) and (peak < sz[1]-rad) and (spec[peak-rad:peak+rad].max() > thresh) :
            print(peak,spec[peak-rad:peak+rad].max())
            cents.append((spec[peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spec[peak-rad:peak+rad].sum())
    cents=np.array(cents)
    print('  cents:', cents)
    # match each centroid to the nearest reference wavelength
    waves=[]
    weight=[]
    print('  Centroid   W0   Wave')
    for cent in cents :
        w=wav[int(cent)]
        ax[0].plot([cent,cent],[0,10000],'k')
        print('  {:8.2f}{:8.2f}{:8.2f}'.format(cent, w, lines[np.abs(w-lines).argmin()]))
        waves.append(lines[np.abs(w-lines).argmin()])
        weight.append(1.)
    waves=np.array(waves)
    weight=np.array(weight)
    # set up new WaveCal object
    pix0 = int(sz[1]/2)
    wcal = WaveCal(order=degree,type=type,spectrum=spec,pix0=pix0)
    # iterate allowing for interactive removal of points (weight 0 == rejected)
    done = False
    ymax = ax[0].get_ylim()[1]
    while not done :
        gd=np.where(weight>0.)[0]
        bd=np.where(weight<=0.)[0]
        wcal.fit(cents[gd],waves[gd],weights=weight[gd])
        # plot residuals; good lines green, rejected lines red
        ax[1].cla()
        ax[1].plot(cents[gd],wcal.wave(cents[gd])-waves[gd],'go')
        if len(bd) > 0 : ax[1].plot(cents[bd],wcal.wave(cents[bd])-waves[bd],'ro')
        diff=wcal.wave(cents[gd])-waves[gd]
        ax[1].set_ylim(diff.min()-1,diff.max()+1)
        for i in range(len(cents)) :
            ax[1].text(cents[i],wcal.wave(cents[i])-waves[i],'{:2d}'.format(i),va='top',ha='center')
            if weight[i] > 0 :
                ax[0].plot([cents[i],cents[i]],[0,ymax],'g')
            else :
                ax[0].plot([cents[i],cents[i]],[0,ymax],'r')
        plt.draw()
        # get input from user on lines to remove
        for i in range(len(cents)) :
            print('  {:3d}{:8.2f}{:8.2f}{:8.2f}{:8.2f}{:8.2f}'.format(
                   i, cents[i], wcal.wave(cents[i]), waves[i], waves[i]-wcal.wave(cents[i]),weight[i]))
        print('  rms: {:8.2f} Anstroms'.format(diff.std()))
        i = input('enter ID of line to remove (-n for all lines<n, +n for all lines>n, return to continue): ')
        # Fix: the original used ``i is ''`` — an identity comparison with a
        # string literal that only works by CPython interning accident.
        if i == '' :
            done = True
        elif '+' in i :
            weight[int(i)+1:] = 0.
        elif '-' in i :
            weight[0:abs(int(i))] = 0.
        elif int(i) >= 0 :
            weight[int(i)] = 0.
        else :
            print('invalid input')
    plt.close()
    return wcal.wave(pix),wcal
def fluxcal(obs, wobs, file=None):
    """Compute a flux-calibration vector for an observed spectrum.

    Reads the standard-star table from ``file`` (columns col1=wavelength,
    col2=flux), interpolates it onto the observed wavelength grid ``wobs``,
    and returns the ratio standard/observed.
    """
    reference = ascii.read(file)
    standard = np.interp(wobs, reference['col1'], reference['col2'])
    return standard / obs
def trace(hd, apertures=None, pix0=1024):
    """Trace every aperture through the image.

    Parameters:
        hd : image object passed through to Trace.trace
        apertures : list of starting row numbers at pixel ``pix0``
        pix0 : reference column at which the aperture rows are given

    Returns:
        list of Trace objects, one per aperture (empty if none given).
    """
    all_traces = []
    # Fix: the default apertures=None crashed on len(None); treat it as
    # "no apertures" and return an empty list.
    if apertures is None:
        apertures = []
    # Idiom fix: enumerate instead of range(len(...)) + indexing.
    for i, start_row in enumerate(apertures):
        print('tracing aperture {:d}'.format(i), end='\r')
        tr = Trace()
        tr.trace(hd, pix0, start_row)
        all_traces.append(tr)
    return all_traces
def extract(hd, apertures):
    """Extract a 1-D spectrum from the image for every traced aperture.

    Returns a 2-D array of shape (len(apertures), image width) where row i
    holds the extraction from apertures[i].
    """
    n_cols = hd.data.shape[1]
    spec = np.zeros([len(apertures), n_cols])
    for index, aperture in enumerate(apertures):
        print('extracting aperture {:d}'.format(index), end='\r')
        spec[index] = aperture.extract(hd)
    return spec
| 41.357798 | 143 | 0.519188 |
ee89247ec2812106f50e8a446f7d4f2d9336cff8 | 1,075 | py | Python | tests/test_hydrated_dataclass.py | Jasha10/hydra-zen | c229ab37f9ab6be4d1c64cabb646df99d0308027 | [
"MIT"
] | 65 | 2021-06-10T00:02:57.000Z | 2022-03-26T12:31:54.000Z | tests/test_hydrated_dataclass.py | Jasha10/hydra-zen | c229ab37f9ab6be4d1c64cabb646df99d0308027 | [
"MIT"
] | 60 | 2021-06-08T14:10:17.000Z | 2022-03-31T20:51:41.000Z | tests/test_hydrated_dataclass.py | Jasha10/hydra-zen | c229ab37f9ab6be4d1c64cabb646df99d0308027 | [
"MIT"
] | 2 | 2021-07-14T21:06:15.000Z | 2021-09-11T20:16:02.000Z | # Copyright (c) 2022 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
from dataclasses import is_dataclass
import pytest
from hydra_zen import hydrated_dataclass, instantiate
| 21.5 | 62 | 0.686512 |
ee89624c51890a06113ce4a0dc71ea76761f9141 | 185 | py | Python | kattis/python/hello_world.py | PixPanz/VariousTomfoolery | a6e7a840dd42fdd1cfac4a01d659d5fb52e2278f | [
"Unlicense"
] | null | null | null | kattis/python/hello_world.py | PixPanz/VariousTomfoolery | a6e7a840dd42fdd1cfac4a01d659d5fb52e2278f | [
"Unlicense"
] | null | null | null | kattis/python/hello_world.py | PixPanz/VariousTomfoolery | a6e7a840dd42fdd1cfac4a01d659d5fb52e2278f | [
"Unlicense"
] | null | null | null | print("Hello World!")
#because why the hell not that's why. It was like a free
#point on Kattis. Actually got a compiler error on my first
#try because I was new to Python3 at the time. | 46.25 | 59 | 0.751351 |
ee8a8f31356455e4042f8f9fd8906eb324b18cec | 3,165 | py | Python | countries/models.py | Valuehorizon/valuehorizon-countries | 04398f518ef5977cf4ccd2c2bffd7955d6a6e095 | [
"MIT"
] | 3 | 2015-05-27T17:11:28.000Z | 2016-07-08T18:01:28.000Z | countries/models.py | Valuehorizon/valuehorizon-countries | 04398f518ef5977cf4ccd2c2bffd7955d6a6e095 | [
"MIT"
] | 5 | 2020-02-11T22:27:18.000Z | 2021-12-13T19:40:25.000Z | countries/models.py | Valuehorizon/valuehorizon-countries | 04398f518ef5977cf4ccd2c2bffd7955d6a6e095 | [
"MIT"
] | null | null | null | from django.db import models
from forex.models import Currency
| 31.969697 | 133 | 0.663507 |
ee8bfc979ac28197031d9f486d5e391436cd294c | 920 | py | Python | export.py | philtgun/mediaeval-emothemes-explorer | 647fe527b719a9be72265f2855d890823c70e8ab | [
"MIT"
] | 1 | 2021-11-25T08:08:33.000Z | 2021-11-25T08:08:33.000Z | export.py | philtgun/mediaeval-emothemes-explorer | 647fe527b719a9be72265f2855d890823c70e8ab | [
"MIT"
] | null | null | null | export.py | philtgun/mediaeval-emothemes-explorer | 647fe527b719a9be72265f2855d890823c70e8ab | [
"MIT"
] | null | null | null | import argparse
from pathlib import Path
import json
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', type=Path, help='Input JSON file, generated by process.py')
parser.add_argument('output', type=Path, help='Output file containing the figure')
args = parser.parse_args()
main(args.input, args.output)
| 32.857143 | 119 | 0.695652 |
ee8f0359e2f8322c643b0be99995a5abb3f922f2 | 3,436 | py | Python | tests/test_cascade_call.py | xyloon/ptrait | 88186f77feaf921c44633b1693fed1e124f99c76 | [
"MIT"
] | null | null | null | tests/test_cascade_call.py | xyloon/ptrait | 88186f77feaf921c44633b1693fed1e124f99c76 | [
"MIT"
] | 1 | 2019-04-20T08:22:04.000Z | 2019-04-20T08:22:04.000Z | tests/test_cascade_call.py | xyloon/ptrait | 88186f77feaf921c44633b1693fed1e124f99c76 | [
"MIT"
] | null | null | null | from ptrait import TraitExtends
import copy
from pytest_assertutil import assert_equal
def test_cascade_call_instanceA():
    # instance method A should report keyword a=1
    expected = ((), {'a': 1})
    assert_equal(expected, A().a_instancemethodA())
def test_cascade_call_instanceB():
    # instance method B should report keyword a=2
    expected = ((), {'a': 2})
    assert_equal(expected, A().a_instancemethodB())
def test_cascade_call_instanceC():
    # instance method C should report keyword a=3
    expected = ((), {'a': 3})
    assert_equal(expected, A().a_instancemethodC())
def test_cascade_call_staticmethodA():
    # static method A should report keyword a=1
    expected = ((), {'a': 1})
    assert_equal(expected, A.a_staticmethodA())
def test_cascade_call_staticmethodB():
    # static method B should report keyword a=2
    expected = ((), {'a': 2})
    assert_equal(expected, A.a_staticmethodB())
def test_cascade_call_staticmethodC():
    # static method C should report keyword a=3
    expected = ((), {'a': 3})
    assert_equal(expected, A.a_staticmethodC())
def test_cascade_call_classmethodA():
    # class method A should report keyword a=1
    expected = ((), {'a': 1})
    assert_equal(expected, A.a_classmethodA())
def test_cascade_call_classmethodB():
    # class method B should report keyword a=2
    expected = ((), {'a': 2})
    assert_equal(expected, A.a_classmethodB())
def test_cascade_call_classmethodC():
    # class method C should report keyword a=3
    expected = ((), {'a': 3})
    assert_equal(expected, A.a_classmethodC())
| 21.746835 | 49 | 0.568102 |
ee9001145ae40affeb5a0f87d550d8ea5a52ee78 | 1,526 | py | Python | data_processing/download_data.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | data_processing/download_data.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | data_processing/download_data.py | HusseinLezzaik/Stock-Market-Prediction | 03f6b835466ebee9d4ee5ad217c4ed5c57b60a30 | [
"MIT"
] | null | null | null | import yahoo_fin.stock_info as si
import pandas as pd
import os | 35.488372 | 89 | 0.539974 |
c988bda85797c24c33439e544dc67c890a456828 | 7,220 | py | Python | life_line_chart/GedcomParsing.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | [
"MIT"
] | null | null | null | life_line_chart/GedcomParsing.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | [
"MIT"
] | null | null | null | life_line_chart/GedcomParsing.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | [
"MIT"
] | null | null | null | import datetime
import re
import os
import logging
import json
# Three-letter GEDCOM month tokens; _months.index(tok) + 1 yields the
# calendar month number used elsewhere in this module.
_months = [
    "JAN",
    "FEB",
    "MAR",
    "APR",
    "MAY",
    "JUN",
    "JUL",
    "AUG",
    "SEP",
    "OCT",
    "NOV",
    "DEC"
]
# Date-precision qualifiers that may prefix a GEDCOM date value
# (about / calculated / estimated / after / before).
_precision = [
    'ABT',
    'CAL',
    'EST',
    'AFT',
    'BEF'
]
# Matches "<qualifier> <day> <MON> <year>"; everything except the 4-digit
# year is optional.
_date_expr = re.compile('(?:(' + '|'.join(_precision) + ') )?(?:(\\d+) )?(?:(' + '|'.join(_months) + ') )?(\\d{4})')
# Matches "BET <date> AND <date>" interval values.
_interval_expr = re.compile('(BET) (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4}) AND (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4})')
# Maximum day number per month. February is fixed at 29 so leap-day dates
# always pass — presumably intentional, as no year-aware validation is done.
_max_days = {
    1:31,
    2:29,
    3:31,
    4:30,
    5:31,
    6:30,
    7:31,
    8:31,
    9:30,
    10:31,
    11:30,
    12:31
}
def get_date_dict_from_tag(parent_item, tag_name):
"""
read the date from a gedcom tag
Args:
parent_item (dict): parent event node to output the result
tag_name (str): event type
"""
# TODO: Implement BET = Between
try:
if tag_name not in parent_item:
return
if 'DATE' not in parent_item[tag_name]:
return
comment = None
precision = ''
content = parent_item[tag_name]['DATE']['tag_data']
date_info = _date_expr.match(content)
if date_info is None:
date_info = _interval_expr.match(content)
if date_info.group(1) == 'EST':
comment = 'Estimated'
elif date_info.group(1) == 'ABT':
comment = 'About'
elif date_info.group(1) == 'CAL':
comment = 'Calculated'
elif date_info.group(1) == 'AFT':
comment = 'After'
elif date_info.group(1) == 'BEF':
comment = 'Before'
elif date_info.group(1) == 'BET':
comment = 'Between'
elif date_info.group(2) is None and date_info.group(3) is None and date_info.group(4) is not None:
comment = 'YearPrecision'
month_max_, day_max_ = 12, 31
month_min_, day_min_ = 1, 1
year_min, year_max = None, None
month_max, day_max = None, None
month_min, day_min = None, None
if date_info.group(1) == 'BET':
if date_info.group(7):
year_max = int(date_info.group(7))
if date_info.group(6):
month_max = _months.index(date_info.group(6)) + 1
if date_info.group(5):
day_max = int(date_info.group(5))
if date_info.group(4):
year_min = int(date_info.group(4))
if not year_max:
year_max = year_min
precision = 'y' + precision
if date_info.group(3):
month_min = _months.index(date_info.group(3)) + 1
if not month_max:
month_max = month_min
precision = 'm' + precision
if date_info.group(2):
day_min = int(date_info.group(2))
if not day_max:
day_max = day_min
precision = 'd' + precision
if date_info.group(1) == 'AFT':
year_max = year_min + 15
elif date_info.group(1) == 'BEF':
year_min = year_max - 15
if not month_max: month_max = month_max_
if not month_min: month_min = month_min_
if not day_max: day_max = day_max_
if not day_min: day_min = day_min_
day_max = min(_max_days[month_max], day_max)
date_min = datetime.datetime(year_min, month_min, day_min, 0, 0, 0, 0)
try:
date_max = datetime.datetime(year_max, month_max, day_max, 0, 0, 0, 0)
except ValueError as e:
if month_max==2:
date_max = datetime.datetime(year_max, month_max, day_max, 0, 0, 0, 0)
else:
raise
if tag_name in ['BURI', 'DEAT']:
# if unknown move to the end of the year
date = date_max
else:
# if unknown move to the beginning of the year
date = date_min
return {
'tag_name': tag_name,
'date': date,
'ordinal_value': date.toordinal(),
'ordinal_value_max': date_max.toordinal(),
'ordinal_value_min': date_min.toordinal(),
'comment': comment,
'precision' : precision
}
except:
pass
def estimate_marriage_date(family):
"""
If the marriage date is unknown, then estimate the date by assuming:
- the marriage took place before the first child was born
Args:
family (BaseFamily): family instance
"""
if family.marriage is None:
children_events = []
for child in family.children_individual_ids:
child_events = {}
_get_relevant_events(family._database_indi, child, child_events)
if child_events['birth_or_christening']:
children_events.append(child_events['birth_or_christening'])
# unsorted_marriages = [family._instances[('f',m)] for m in family._marriage_family_ids]
if len(children_events) > 0:
sorted_pairs = list(zip([(m['ordinal_value'], i) for i, m in enumerate(
children_events)], children_events))
sorted_pairs.sort()
family.marriage = sorted_pairs[0][1]
| 31.666667 | 138 | 0.570083 |
c98a4eea5dbbc32238c561ae29365092efd245e1 | 4,020 | py | Python | lib/HimalayanDownloader.py | oldfatcrab/Himalayan | 8b879036dd33c406b48306f560f9df85c989908b | [
"MIT"
] | null | null | null | lib/HimalayanDownloader.py | oldfatcrab/Himalayan | 8b879036dd33c406b48306f560f9df85c989908b | [
"MIT"
] | null | null | null | lib/HimalayanDownloader.py | oldfatcrab/Himalayan | 8b879036dd33c406b48306f560f9df85c989908b | [
"MIT"
] | null | null | null | from HTMLParser import HTMLParser
import json
from os import makedirs
from os.path import abspath, dirname, exists, join, normpath
import pycurl
import Queue
import re
import requests
import tempfile
import urllib2
| 39.029126 | 95 | 0.573632 |
c98aa4f7ed8a5b97ac39613aa73b2d33d7fcd243 | 4,962 | py | Python | 2016/python/day22.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | 2016/python/day22.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | 2016/python/day22.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | import re
import copy
import hashlib
#part1()
part2() | 26.677419 | 98 | 0.414349 |
c98ae72a1a05ec62a0743c4c2fe5567276c8577b | 64 | py | Python | c2nl/objects/__init__.py | kopf-yhs/ncscos | 8248aaad32d4d19c01d070bf0dfba7aab849ba1d | [
"MIT"
] | 131 | 2020-05-05T05:29:02.000Z | 2022-03-30T13:32:42.000Z | c2nl/objects/__init__.py | kopf-yhs/ncscos | 8248aaad32d4d19c01d070bf0dfba7aab849ba1d | [
"MIT"
] | 32 | 2020-04-17T22:58:21.000Z | 2022-03-22T22:28:58.000Z | c2nl/objects/__init__.py | kopf-yhs/ncscos | 8248aaad32d4d19c01d070bf0dfba7aab849ba1d | [
"MIT"
] | 53 | 2020-05-05T06:17:25.000Z | 2022-03-22T03:19:11.000Z | __author__ = 'wasi'
from .summary import *
from .code import *
| 12.8 | 22 | 0.703125 |
c98cba25f4d6645c123d4fc4d2170d2512dffa18 | 820 | py | Python | dl/state.py | eric-erki/Prometheus | def07745ebcbe08ebb2fbba124bd07873edc8c9c | [
"MIT"
] | null | null | null | dl/state.py | eric-erki/Prometheus | def07745ebcbe08ebb2fbba124bd07873edc8c9c | [
"MIT"
] | null | null | null | dl/state.py | eric-erki/Prometheus | def07745ebcbe08ebb2fbba124bd07873edc8c9c | [
"MIT"
] | null | null | null | from collections import defaultdict
from prometheus.utils.misc import FrozenClass
| 22.777778 | 75 | 0.570732 |
c991092ab3a5e6ba800ee09dfef81c31b1bd3d3c | 1,262 | py | Python | t2t_bert/pretrain_finetuning/test_green_sample.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/pretrain_finetuning/test_green_sample.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/pretrain_finetuning/test_green_sample.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | #-*- coding: utf-8 -*-
import requests
import numpy as np
import json
import concurrent.futures
import codecs
with codecs.open('./test_1.txt', 'r', 'utf-8') as frobj:
input1 = frobj.read().strip()
with codecs.open('./candidate_1.txt', 'r', 'utf-8') as frobj:
candidate1 = frobj.read().strip()
with codecs.open('./test_2.txt', 'r', 'utf-8') as frobj:
input1 = frobj.read().strip()
with codecs.open('./candidate_2.txt', 'r', 'utf-8') as frobj:
candidate1 = frobj.read().strip()
post_data_1 = {
"data":{
"query":input1,
"candidate":[candidate1]
}
}
session = create_http_session({})
resp = infer(post_data_1)
print(resp)
| 25.755102 | 107 | 0.698891 |
c992994f6028cb87c2268ffceabfca70874f4d48 | 194 | py | Python | src/superannotate/lib/app/exceptions.py | xalgorithm/superannotate-python-sdk | 8f28ff3fdfc7bbae41946c79847ad1f6cf8a9300 | [
"MIT"
] | 26 | 2020-09-25T06:25:06.000Z | 2022-01-30T16:44:07.000Z | src/superannotate/lib/app/exceptions.py | xalgorithm/superannotate-python-sdk | 8f28ff3fdfc7bbae41946c79847ad1f6cf8a9300 | [
"MIT"
] | 12 | 2020-12-21T19:59:48.000Z | 2022-01-21T10:32:07.000Z | src/superannotate/lib/app/exceptions.py | xalgorithm/superannotate-python-sdk | 8f28ff3fdfc7bbae41946c79847ad1f6cf8a9300 | [
"MIT"
] | 11 | 2020-09-17T13:39:19.000Z | 2022-03-02T18:12:29.000Z | from lib.core.exceptions import AppException
| 13.857143 | 44 | 0.659794 |
c993d47a6ac0a9f61ec7fa06b4e0b59229dbea51 | 527 | py | Python | tests/resources/greeting_resoource.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | tests/resources/greeting_resoource.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | tests/resources/greeting_resoource.py | NoeCruzMW/zpy-flask-msc | 9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a | [
"MIT"
] | null | null | null | from zpy.api.resource import ZResource, HTTP_METHODS
| 23.954545 | 71 | 0.601518 |
c9941d5b23d70a37ae7fb2286ed5ef0edd392a4d | 227 | py | Python | tinned_django/project_name/config/apps_config/pymorphy.py | futurecolors/tinned-django | 9603c1bb746c733e1248abddb0097642702070b7 | [
"MIT"
] | 1 | 2015-05-08T10:54:59.000Z | 2015-05-08T10:54:59.000Z | tinned_django/project_name/config/apps_config/pymorphy.py | futurecolors/tinned-django | 9603c1bb746c733e1248abddb0097642702070b7 | [
"MIT"
] | null | null | null | tinned_django/project_name/config/apps_config/pymorphy.py | futurecolors/tinned-django | 9603c1bb746c733e1248abddb0097642702070b7 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
| 18.916667 | 69 | 0.493392 |
c99498c0faf71a46ad1d7a4f4be4a7ad4fc54402 | 172 | py | Python | Coursera/separa_palavras.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | Coursera/separa_palavras.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | Coursera/separa_palavras.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | import re
def separa_palavras(frase):
'''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''
print('lista de palavras: ', frase.split())
| 28.666667 | 84 | 0.709302 |
c9954ca69e09619ef1665a4df64483f61ffee3c4 | 1,253 | py | Python | janggi/utils.py | sungho-cho/pyjanggi | cecaaa8f30fb2c2c319bab28197c786576cfe92d | [
"MIT"
] | 1 | 2020-09-23T09:40:59.000Z | 2020-09-23T09:40:59.000Z | janggi/utils.py | sungho-cho/pyjanggi | cecaaa8f30fb2c2c319bab28197c786576cfe92d | [
"MIT"
] | null | null | null | janggi/utils.py | sungho-cho/pyjanggi | cecaaa8f30fb2c2c319bab28197c786576cfe92d | [
"MIT"
] | null | null | null | import logging
import random
from .base.camp import Camp
from .base.formation import Formation
from .game.janggi_game import JanggiGame
from .game.game_log import GameLog
from .ui.game_player import GamePlayer
from .ui.replay_viewer import ReplayViewer
from .proto import log_pb2
logging.basicConfig()
logging.root.setLevel(logging.DEBUG)
def replay(filepath: str):
"""
Replay a game by parsing the log file at the given path.
Args:
filepath (str): Path of the proto-serialized log file.
"""
log_file = open(filepath, "rb")
log_proto = log_pb2.Log()
log_proto.ParseFromString(log_file.read())
game_log = GameLog.from_proto(log_proto)
game_log.generate_board_log()
replay_viewer = ReplayViewer(game_log)
replay_viewer.run()
def play(game: JanggiGame):
"""
Play a game by running GamePlayer.
Args:
game (JanggiGame): Pre-initialized game to play.
"""
player = GamePlayer(game)
player.run()
def generate_random_game():
"""Generate a random Janggi game."""
camp = Camp(random.choice([-1, 1]))
cho_formation = Formation(random.randint(1, 4))
han_formation = Formation(random.randint(1, 4))
return JanggiGame(camp, cho_formation, han_formation)
| 25.571429 | 62 | 0.710295 |
c998cff8c1a24f25ddb54c9444ec21d5f87c9ecc | 1,762 | py | Python | brainforest/s_create_inputs.py | binello7/swisssmartfarming | 40eef7b1726bc47d320ab12507479d836592138b | [
"MIT"
] | 2 | 2020-08-03T10:05:14.000Z | 2021-03-30T13:18:39.000Z | brainforest/s_create_inputs.py | binello7/swisssmartfarming | 40eef7b1726bc47d320ab12507479d836592138b | [
"MIT"
] | 6 | 2021-03-19T12:41:16.000Z | 2021-09-26T21:21:40.000Z | brainforest/s_create_inputs.py | binello7/swisssmartfarming | 40eef7b1726bc47d320ab12507479d836592138b | [
"MIT"
] | 3 | 2020-05-13T23:57:04.000Z | 2020-06-18T09:37:17.000Z | from data_interface import Dataset, Data_Interface
from utils import functions as ufunc
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import os
import rasterio as rio
import rasterio.mask as riom
import shapely
from IPython import embed
import sys
sys.path.append('/home/seba/Projects/swisssmartfarming')
rgb_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/rgb/'
'20200626_flight2_blackfly_rgb_transparent_mosaic_group1.tif')
ms_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/nir/'
'20200626_flight2_photonfocus_nir_transparent_reflectance_group1.tif')
masks_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
'trees.shp')
boundary_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
'boundary.shp')
dataset = rio.open(rgb_path)
shapefile = gpd.read_file(masks_path)
shapes = shapefile.geometry
# (img_mask, transf_mask) = riom.mask(dataset, shapes)
# img_mask = np.swapaxes(img_mask, 0, 2)
# plt.imshow(img_mask[:,:,0:3])
boundary = gpd.read_file(boundary_path)
tree_masks = gpd.read_file(masks_path)
dataset = Dataset(
name='gubler',
date='20200626',
rgb_path=rgb_path,
ms_path=ms_path,
mask_shapefile=tree_masks,
outer_shapefile=boundary,
rgb_bands_to_read=[0, 1, 2],
ms_bands_to_read=None,
)
dataset = [dataset]
di_train = Data_Interface(dataset, {'tree': 1, 'car': 2})
img, msk = di_train.get_pair()
# plt.imshow(msk)
save_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train'
di_train.save(save_path=save_path)
# x1003_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train/masks/x1003_y1009.png'
# x1003 = ufunc.read_img2array(x1003_path)
| 28.885246 | 95 | 0.746311 |
c99a5abd3c9530802736be82134167242a054f72 | 574 | py | Python | profiles_api/views.py | Vinutha2905/Python_RestAPI | 4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66 | [
"MIT"
] | null | null | null | profiles_api/views.py | Vinutha2905/Python_RestAPI | 4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66 | [
"MIT"
] | null | null | null | profiles_api/views.py | Vinutha2905/Python_RestAPI | 4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
| 31.888889 | 74 | 0.642857 |
c99e1e3eaa5ae563327d390c5f49ea33d97c4ae8 | 911 | py | Python | forumdb.py | fatih-iver/Intro-to-Relational-Databases | 28528132378436d6dd1f1bdec96d1e7e285b4e4d | [
"MIT"
] | null | null | null | forumdb.py | fatih-iver/Intro-to-Relational-Databases | 28528132378436d6dd1f1bdec96d1e7e285b4e4d | [
"MIT"
] | null | null | null | forumdb.py | fatih-iver/Intro-to-Relational-Databases | 28528132378436d6dd1f1bdec96d1e7e285b4e4d | [
"MIT"
] | null | null | null | # "Database code" for the DB Forum.
import psycopg2
import bleach
DNAME = "forum"
#POSTS = [("This is the first post.", datetime.datetime.now())]
def get_posts():
"""Return all posts from the 'database', most recent first."""
db = psycopg2.connect(database=DNAME)
c = db.cursor()
c.execute("select content, time from posts order by time desc")
rows = c.fetchall()
db.close()
return rows
#def get_posts():
#"""Return all posts from the 'database', most recent first."""
#return reversed(POSTS)
def add_post(content):
"""Add a post to the 'database' with the current timestamp."""
db = psycopg2.connect(database=DNAME)
c = db.cursor()
c.execute("INSERT INTO posts values (%s) ", (bleach.clean(content),))
db.commit()
db.close()
#def add_post(content):
#"""Add a post to the 'database' with the current timestamp."""
#POSTS.append((content, datetime.datetime.now()))
| 24.621622 | 71 | 0.675082 |
c9a08b6c821aa60ac0ac2219e490a38ed9d96387 | 19,553 | py | Python | src/apps/Door.py | sdunlap-afit/hilics | ae06113365817e4240fe894d3dfd784991c78102 | [
"Apache-2.0"
] | 2 | 2019-09-27T15:45:46.000Z | 2021-07-28T15:02:21.000Z | src/apps/Door.py | sdunlap-afit/hilics | ae06113365817e4240fe894d3dfd784991c78102 | [
"Apache-2.0"
] | 3 | 2020-09-25T13:40:56.000Z | 2020-11-03T20:38:32.000Z | src/apps/Door.py | sdunlap-afit/hilics | ae06113365817e4240fe894d3dfd784991c78102 | [
"Apache-2.0"
] | 2 | 2020-05-29T16:58:55.000Z | 2021-04-27T23:52:17.000Z | #!/usr/bin/env python3
#
# IP: HILICS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import Tkinter as tk
from Tkinter.font import Font
except ImportError:
import tkinter as tk
from tkinter.font import Font
from PIL import Image, ImageTk
import threading
import time
from sims.DoorSim import DoorSim
from widgets.AlarmCircle import AlarmCircle
| 28.012894 | 138 | 0.646499 |
c9a1e099a815ae4cb966de4d518a0c2e63b69ddd | 1,925 | py | Python | java/run.py | foxtrotzulu94/LanguageBenchmarkGame | 29c92c47c860d426409047a8408eaa52284a0cff | [
"MIT"
] | null | null | null | java/run.py | foxtrotzulu94/LanguageBenchmarkGame | 29c92c47c860d426409047a8408eaa52284a0cff | [
"MIT"
] | null | null | null | java/run.py | foxtrotzulu94/LanguageBenchmarkGame | 29c92c47c860d426409047a8408eaa52284a0cff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
output_name = './build/libs/java.jar'
#end run
#end run
#end run
if __name__=="__main__":
import sys, os
setup()
build()
if os.path.basename(sys.argv[0]) == os.path.basename(__file__):
run(sys.argv[1:])
# end main
| 34.375 | 117 | 0.61974 |
c9a2771f62caa857ef11e61807ee5881fe3bec39 | 353 | py | Python | schemas/tag.py | airflow-plugins/pardot_plugin | 163a33dd1eb0fd750821824a67f1a83e1c5b188e | [
"Apache-2.0"
] | null | null | null | schemas/tag.py | airflow-plugins/pardot_plugin | 163a33dd1eb0fd750821824a67f1a83e1c5b188e | [
"Apache-2.0"
] | null | null | null | schemas/tag.py | airflow-plugins/pardot_plugin | 163a33dd1eb0fd750821824a67f1a83e1c5b188e | [
"Apache-2.0"
] | 2 | 2019-03-26T14:15:40.000Z | 2021-03-24T11:33:55.000Z | """
Tags
http://developer.pardot.com/kb/api-version-4/tags/
http://developer.pardot.com/kb/object-field-references/#tag
"""
tag = [{'name': 'id',
'type': 'integer'},
{'name': 'name',
'type': 'varchar(512)'},
{'name': 'created_at',
'type': 'timestamp'},
{'name': 'updated_at',
'type': 'timestamp'}]
| 22.0625 | 59 | 0.529745 |
c9a32c37376f6fbf21ec0007633081e608e34031 | 125 | py | Python | packages/vic_kubuntu_14_10/vlc/__init__.py | warvariuc/mykde | 01c2f617d8556e279870d32247d07216a49a41da | [
"BSD-3-Clause"
] | 5 | 2016-01-31T18:32:45.000Z | 2019-12-22T07:16:22.000Z | packages/vic_kubuntu_14_10/vlc/__init__.py | warvariuc/mykde | 01c2f617d8556e279870d32247d07216a49a41da | [
"BSD-3-Clause"
] | null | null | null | packages/vic_kubuntu_14_10/vlc/__init__.py | warvariuc/mykde | 01c2f617d8556e279870d32247d07216a49a41da | [
"BSD-3-Clause"
] | 1 | 2019-06-17T16:02:36.000Z | 2019-06-17T16:02:36.000Z | import mykde
| 13.888889 | 36 | 0.64 |
c9a5727c7ae854b7f8c58b9738a3f08c161ea5b6 | 26 | py | Python | exercise-1_from-pull-to-push/jenny02221995.py | choznerol/c4lab-git-tutorial | 0b8d9a49002df3c669ddb3713574eb13c5b28cb8 | [
"MIT"
] | 1 | 2017-12-29T08:43:10.000Z | 2017-12-29T08:43:10.000Z | exercise-1_from-pull-to-push/jenny02221995.py | choznerol/c4lab-git-tutorial | 0b8d9a49002df3c669ddb3713574eb13c5b28cb8 | [
"MIT"
] | null | null | null | exercise-1_from-pull-to-push/jenny02221995.py | choznerol/c4lab-git-tutorial | 0b8d9a49002df3c669ddb3713574eb13c5b28cb8 | [
"MIT"
] | null | null | null | hello everyone,
fighting~
| 8.666667 | 15 | 0.807692 |