hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c66c55f72d0c193656a8d7f34187cd20b4099a6f | 2,458 | py | Python | pycondor/tools.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 6 | 2015-04-17T08:25:30.000Z | 2020-04-11T23:58:16.000Z | pycondor/tools.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 3 | 2015-12-22T07:40:02.000Z | 2019-01-21T15:07:00.000Z | pycondor/tools.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 6 | 2015-11-13T18:55:22.000Z | 2020-03-12T19:32:56.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Tools
http://en.wikipedia.org/wiki/Haversine_formula
ToDo: ToFix / ToTest
"""
import math
def waypoint_bearing(lat1, lon1, lat2, lon2):
    """Return the bearing in degrees from location 1 to location 2.

    All arguments are decimal-degree coordinates.  Returns 0.0 when the
    two locations coincide (both atan2 inputs are zero).

    NOTE(review): the original implementation adds 0.5 degrees before the
    final modulo -- presumably a rounding adjustment for a later integer
    truncation; it is preserved here to keep behaviour identical.  TODO
    confirm the intent of the +0.5.
    """
    phi1, lam1 = math.radians(lat1), math.radians(lon1)
    phi2, lam2 = math.radians(lat2), math.radians(lon2)
    delta = lam2 - lam1
    # Standard forward-azimuth components.
    y = math.sin(delta) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(delta)
    if x == 0 and y == 0:
        return 0.0
    return (360 + math.degrees(math.atan2(y, x)) + 0.5) % 360.0
def haversine_bearing(lat1, lon1, lat2, lon2):
    """Return the initial (forward-azimuth) bearing from point 1 to point 2.

    All arguments are decimal-degree coordinates; the result is in
    degrees, normalised to the range [0, 360).
    """
    # convert decimal degrees to radians
    lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
    dlon = lon2 - lon1
    b = math.atan2(math.sin(dlon) * math.cos(lat2),
                   math.cos(lat1) * math.sin(lat2)
                   - math.sin(lat1) * math.cos(lat2) * math.cos(dlon))  # bearing calc
    # atan2 yields a value in (-180, 180] degrees; shift into [0, 360).
    # The divmod() quotient computed previously was never used, so a
    # plain modulo is equivalent and clearer.
    return (math.degrees(b) + 360) % 360
def haversine_distance(lat1, lon1, lat2, lon2):
    """Return the great-circle distance in kilometres between two points.

    All arguments are decimal-degree coordinates.
    """
    earth_radius_km = 6371.0  # Radius of earth in kilometers. Use 3956 for miles
    # convert decimal degrees to radians
    phi1, lam1 = math.radians(lat1), math.radians(lon1)
    phi2, lam2 = math.radians(lat2), math.radians(lon2)
    # haversine formula
    half_dlat = (phi2 - phi1) / 2
    half_dlon = (lam2 - lam1) / 2
    a = math.sin(half_dlat) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlon) ** 2
    c = 2 * math.asin(math.sqrt(a))
    return c * earth_radius_km
# NOTE(review): main() is not defined anywhere in the code visible here;
# running this module as a script would raise NameError unless main() is
# provided elsewhere -- TODO confirm.
if __name__ == '__main__':
main()
| 25.340206 | 83 | 0.633849 |
c66f914aa66ae752fa396361357e16cd39293db5 | 10,951 | py | Python | courses/views.py | mdavoodi/konkourse-python | 50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee | [
"MIT"
] | 4 | 2015-06-23T22:17:50.000Z | 2019-01-17T21:32:02.000Z | courses/views.py | mdavoodi/konkourse-python | 50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee | [
"MIT"
] | null | null | null | courses/views.py | mdavoodi/konkourse-python | 50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, render_to_response
from django.template.context import RequestContext
from account.views import login
from models import Course
from website.views import index
from forms import CourseForm, CourseInitialForm
from account.util import createImage
from django.core.context_processors import csrf
from events.forms import EventForm
from events.models import Event
import datetime
from documents.forms import DocumentForm
from datetime import timedelta
from conversation.models import ConvoWall, ConversationPost
from documents.views import __upload_core
from documents.models import Document
from endless_pagination.decorators import page_template
from notification.views import notifyCreateEvent, notifyDocShareCourse
from page.models import Page
from django.http import HttpResponse
from django.utils import simplejson
| 37.892734 | 116 | 0.649621 |
c67157381752f709d6b39cd4632427d8936411ad | 2,701 | py | Python | rx/operators/observable/delaywithselector.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | rx/operators/observable/delaywithselector.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/operators/observable/delaywithselector.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | from rx.core import ObservableBase, AnonymousObservable, typing
from rx.disposables import CompositeDisposable, \
SingleAssignmentDisposable, SerialDisposable
def delay_with_selector(self, subscription_delay=None,
delay_duration_mapper=None) -> ObservableBase:
"""Time shifts the observable sequence based on a subscription delay
and a delay mapper function for each element.
# with mapper only
1 - res = source.delay_with_selector(lambda x: Scheduler.timer(5000))
# with delay and mapper
2 - res = source.delay_with_selector(Observable.timer(2000),
lambda x: Observable.timer(x))
subscription_delay -- [Optional] Sequence indicating the delay for the
subscription to the source.
delay_duration_mapper [Optional] Selector function to retrieve a
sequence indicating the delay for each given element.
Returns time-shifted sequence.
"""
source = self
sub_delay, mapper = None, None
if isinstance(subscription_delay, typing.Observable):
mapper = delay_duration_mapper
sub_delay = subscription_delay
else:
mapper = subscription_delay
return AnonymousObservable(subscribe)
| 32.154762 | 108 | 0.585339 |
c6715e41c59947802aabe44b258270730dfcbb52 | 719 | py | Python | w2/palindrome.py | connorw72/connorapcsptri3 | 2e885644ed2a8d478e5ce193f94b02ad03c6e6b3 | [
"MIT"
] | null | null | null | w2/palindrome.py | connorw72/connorapcsptri3 | 2e885644ed2a8d478e5ce193f94b02ad03c6e6b3 | [
"MIT"
] | 3 | 2022-03-14T21:10:05.000Z | 2022-03-28T21:11:17.000Z | w2/palindrome.py | connorw72/connorapcsptri3 | 2e885644ed2a8d478e5ce193f94b02ad03c6e6b3 | [
"MIT"
] | 2 | 2022-03-10T06:11:11.000Z | 2022-03-11T06:11:11.000Z | # Testing these to see if they are palindromes
test_cases = ["A man, a plan, a canal -- Panama", "racecar", "broncos"]
| 31.26087 | 81 | 0.585535 |
c672a5daf5acf1852874d76a788a6d4edc536ca3 | 3,890 | py | Python | sat-competition-2018/xof-state/sha3-xof.py | cipherboy/sat | 65cbcebf03ffdfd64d49359ebb1d654c73e2c720 | [
"MIT"
] | 1 | 2019-01-19T23:04:50.000Z | 2019-01-19T23:04:50.000Z | sat-competition-2018/xof-state/sha3-xof.py | cipherboy/sat | 65cbcebf03ffdfd64d49359ebb1d654c73e2c720 | [
"MIT"
] | null | null | null | sat-competition-2018/xof-state/sha3-xof.py | cipherboy/sat | 65cbcebf03ffdfd64d49359ebb1d654c73e2c720 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import hash_framework as hf
hf.config.model_dir = "/home/cipherboy/GitHub/sat/sat-competition-2018/models"
import time, sys, os, random
run = False
release = False
if '--run' in sys.argv:
run = True
if '--release' in sys.argv:
release = True
if '-h' in sys.argv or '--help' in sys.argv:
print(sys.argv[0] + " [--run] [--release] [--args file] [w r e s]")
print('---')
print("Generates models for benchmarking. Runs if specified, otherwise only creates models.")
print("--run - runs the resulting CNF file")
print("--release - deletes the intermediate stages after creation")
print("--args file - specify a file to load arguments from")
print("w - sha3 w")
print("r - sha3 rounds format (str only)")
print("e - effective margin (128/256/.../512: as if w=1600)")
print("s - steps to apply to base state (extract s*e*w/64 bits from the XOF)")
sys.exit(0)
if '--args' in sys.argv:
sha3_xof_recreate_file()
else:
sha3_xof_recreate_args()
| 28.814815 | 103 | 0.554756 |
c6739210f1e8d51ce9d34502997456a48bfc0ddd | 3,357 | py | Python | methinks/db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | methinks/db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | methinks/db.py | andreasgrv/methinks | 5c65fdb84e35b8082ee35963431a352e06f4af44 | [
"BSD-3-Clause"
] | null | null | null | import os
import datetime
import xxhash
import json
from flask_sqlalchemy import SQLAlchemy
from methinks.utils import str_to_date
from methinks.config import get_default_conf
db = SQLAlchemy()
def to_file(self, folderpath):
    """Write this record's text to ``folderpath/<self.filename>``."""
    destination = os.path.join(folderpath, self.filename)
    with open(destination, 'w') as handle:
        handle.write(self.text)
| 31.373832 | 89 | 0.593983 |
c673cb49bd602adacaeaaa1c827fbb7abab3bbf6 | 580 | py | Python | functional/decorator.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | 2 | 2018-01-20T03:38:58.000Z | 2019-07-21T11:33:24.000Z | functional/decorator.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | null | null | null | functional/decorator.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | null | null | null | import functools
now()
today()
print(today.__name__)
| 17.575758 | 58 | 0.568966 |
c6742b09c8b11bbe5babccf11451efdfb75310ee | 2,797 | py | Python | dense_estimation/points_estimation.py | zouzhenhong98/kitti-tools | 30b7d5c799ca2a44fe88522f6d46ad2a53c61d53 | [
"MIT"
] | 7 | 2020-01-03T13:05:36.000Z | 2021-08-03T07:51:43.000Z | dense_estimation/points_estimation.py | zouzhenhong98/kitti-tools | 30b7d5c799ca2a44fe88522f6d46ad2a53c61d53 | [
"MIT"
] | null | null | null | dense_estimation/points_estimation.py | zouzhenhong98/kitti-tools | 30b7d5c799ca2a44fe88522f6d46ad2a53c61d53 | [
"MIT"
] | 3 | 2020-07-07T03:35:06.000Z | 2021-07-21T11:40:38.000Z | '''
point clouds estimation: transfer sparse map to dense map,
work for both depth and reflectance.
'''
import sys
sys.path.append("..")
from utils import data_provider
from utils import velo_2_cam
import numpy as np
# fetch image and point clouds: coordinates and reflectance
# project points on the image plane
def completion(point_cloud_):
    """Sparse-to-dense completion of a projected point cloud.

    Not yet implemented -- placeholder kept so callers can import it.
    NOTE(review): intended behaviour inferred from the module docstring
    ("transfer sparse map to dense map, work for both depth and
    reflectance"); confirm before implementing.
    """
    pass
if __name__ == "__main__":
filename = "um_000000"
pc_path = "../data/bin/"+filename+".bin"
calib_path = "../data/calib/"+filename+".txt"
image_path = "../data/img/"+filename+".png"
print('using data ',filename,' for test')
img, lidar = rawData(pc_path_=pc_path, img_path_=image_path)
pixel = lidarPreprocess(point_cloud_=lidar,
calib_path_=calib_path, type_="r")
# add pixels to image
# velo_2_cam.add_pc_to_img(img_path=image_path, coor=pixel, saveto='./result/'+filename+'_composition.png') | 35.405063 | 111 | 0.598498 |
c6770cd7813960cae894c7947e2f76b45e5169f4 | 1,014 | py | Python | tests/run_compiler.py | yshrdbrn/ogle | 529337203b1bd3ec66c08f4ed153dba5fc8349a1 | [
"MIT"
] | null | null | null | tests/run_compiler.py | yshrdbrn/ogle | 529337203b1bd3ec66c08f4ed153dba5fc8349a1 | [
"MIT"
] | null | null | null | tests/run_compiler.py | yshrdbrn/ogle | 529337203b1bd3ec66c08f4ed153dba5fc8349a1 | [
"MIT"
] | null | null | null | from ogle.code_generator.code_generator import CodeGenerator
from ogle.lexer.lexer import Lexer
from ogle.parser.parser import Parser
from ogle.semantic_analyzer.semantic_analyzer import SemanticAnalyzer
| 36.214286 | 82 | 0.759369 |
c678c38909ca5f9f3348fe7d0e9471e1720d3bee | 817 | py | Python | graph/dfs_dict_attempt2.py | automoto/python-code-golf | 1a4e0b5984e64620637de9d80e82c6e89997f4af | [
"MIT"
] | null | null | null | graph/dfs_dict_attempt2.py | automoto/python-code-golf | 1a4e0b5984e64620637de9d80e82c6e89997f4af | [
"MIT"
] | null | null | null | graph/dfs_dict_attempt2.py | automoto/python-code-golf | 1a4e0b5984e64620637de9d80e82c6e89997f4af | [
"MIT"
] | null | null | null | # !depth first search !dfs !graph
# dict of nodes as the key and sets for the edges(children)
graph = {'A': set(['B', 'C', 'D']),
'B': set(['E', 'F']),
'C': set([]),
'D': set(['G', 'H']),
'E': set([]),
'F': set(['I', 'J']),
'G': set(['K']),
'H': set([]),
'I': set([]),
'J': set([]),
'K': set([])}
dfs(graph, 'A')
# PESUDOCODE
# create set of visited nodes
# create a searching stack with the starting node
# while the stack has nodes
# pop the current_node off of the stack
# add current node to visited
# add the connected nodes minus visitsed to the stack to search | 24.029412 | 63 | 0.597307 |
c678e771e656c40554793300ea72d58a952d58cd | 301 | py | Python | slack_invite/slack_invite/forms.py | sanchagrins/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | null | null | null | slack_invite/slack_invite/forms.py | sanchagrins/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | 13 | 2017-08-18T01:19:49.000Z | 2017-11-16T02:24:07.000Z | slack_invite/slack_invite/forms.py | umuc-cs/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | null | null | null | from django import forms
| 50.166667 | 143 | 0.724252 |
c67973e1c48ecff18bf6a4fc82b259940ef31d3c | 4,561 | py | Python | tools/fastq_pair_names/fastq_pair_names.py | Neato-Nick/pico_galaxy | 79666612a9ca2d335622bc282a4768bb43d91419 | [
"MIT"
] | 18 | 2015-06-09T13:57:09.000Z | 2022-01-14T21:05:54.000Z | tools/fastq_pair_names/fastq_pair_names.py | Neato-Nick/pico_galaxy | 79666612a9ca2d335622bc282a4768bb43d91419 | [
"MIT"
] | 34 | 2015-04-02T19:26:08.000Z | 2021-06-17T18:59:24.000Z | tools/fastq_pair_names/fastq_pair_names.py | Neato-Nick/pico_galaxy | 79666612a9ca2d335622bc282a4768bb43d91419 | [
"MIT"
] | 24 | 2015-02-25T13:40:19.000Z | 2021-09-08T20:40:40.000Z | #!/usr/bin/env python
"""Extract paired read names from FASTQ file(s).
The input file should be a valid FASTQ file(s), the output is two tabular
files - the paired read names (without suffixes), and unpaired read names
(including any unrecognised pair names).
Note that the FASTQ variant is unimportant (Sanger, Solexa, Illumina, or even
Color Space should all work equally well).
This script is copyright 2014-2017 by Peter Cock, The James Hutton Institute
(formerly SCRI), Scotland, UK. All rights reserved.
See accompanying text file for licence details (MIT license).
"""
from __future__ import print_function
import os
import re
import sys
if "-v" in sys.argv or "--version" in sys.argv:
print("0.0.5")
sys.exit(0)
from galaxy_utils.sequence.fastq import fastqReader
msg = """Expects at least 3 arguments:
- Pair names tabular output filename
- Non-pair names tabular output filename
- Input FASTQ input filename(s)
"""
if len(sys.argv) < 3:
sys.exit(msg)
output_pairs = sys.argv[1]
output_nonpairs = sys.argv[2]
input_fastq_filenames = sys.argv[3:]
# Cope with three widely used suffix naming conventions:
# Illumina: /1 or /2
# Forward/reversed: .f or .r
# Sanger, e.g. .p1k and .q1k
# See http://staden.sourceforge.net/manual/pregap4_unix_50.html
# Forward-read suffixes, anchored at the end of the read name:
# "/1", ".f", or "." + one of s/f/p + a digit + optional word chars.
re_f = re.compile(r"(/1|\.f|\.[sfp]\d\w*)$")
# Reverse-read suffixes: "/2", ".r", or "." + r/q + digit + word chars.
re_r = re.compile(r"(/2|\.r|\.[rq]\d\w*)$")
# Sanity self-checks on the suffix regexes, run once at import time.
# Note these use search() (suffix anchored by $), not match().
# assert re_f.match("demo/1")
assert re_f.search("demo.f")
assert re_f.search("demo.s1")
assert re_f.search("demo.f1k")
assert re_f.search("demo.p1")
assert re_f.search("demo.p1k")
assert re_f.search("demo.p1lk")
assert re_r.search("demo/2")
assert re_r.search("demo.r")
assert re_r.search("demo.q1")
assert re_r.search("demo.q1lk")
assert not re_r.search("demo/1")
assert not re_r.search("demo.f")
assert not re_r.search("demo.p")
assert not re_f.search("demo/2")
assert not re_f.search("demo.r")
assert not re_f.search("demo.q")
# Illumina 1.8+ style: pair membership is encoded as " 1:" or " 2:" in the
# description after the read name, so these match the whole title line.
re_illumina_f = re.compile(r"^@[a-zA-Z0-9_:-]+ 1:.*$")
re_illumina_r = re.compile(r"^@[a-zA-Z0-9_:-]+ 2:.*$")
assert re_illumina_f.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA")
assert re_illumina_r.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA")
assert not re_illumina_f.match(
    "@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA"
)
assert not re_illumina_r.match(
    "@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA"
)
count = 0
pairs = set() # Will this scale OK?
forward = 0
reverse = 0
neither = 0
out_pairs = open(output_pairs, "w")
out_nonpairs = open(output_nonpairs, "w")
for input_fastq in input_fastq_filenames:
if not os.path.isfile(input_fastq):
sys.exit("Missing input FASTQ file %r" % input_fastq)
in_handle = open(input_fastq)
# Don't care about the FASTQ type really...
for record in fastqReader(in_handle, "sanger"):
count += 1
name = record.identifier.split(None, 1)[0]
assert name[0] == "@", record.identifier # Quirk of the Galaxy parser
name = name[1:]
is_forward = False
suffix = re_f.search(name)
if suffix:
# ============
# Forward read
# ============
template = name[: suffix.start()]
is_forward = True
elif re_illumina_f.match(record.identifier):
template = name # No suffix
is_forward = True
if is_forward:
forward += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
is_reverse = False
suffix = re_r.search(name)
if suffix:
# ============
# Reverse read
# ============
template = name[: suffix.start()]
is_reverse = True
elif re_illumina_r.match(record.identifier):
template = name # No suffix
is_reverse = True
if is_reverse:
reverse += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
# ===========================
# Neither forward nor reverse
# ===========================
out_nonpairs.write(name + "\n")
neither += 1
in_handle.close()
out_pairs.close()
out_nonpairs.close()
print(
"%i reads (%i forward, %i reverse, %i neither), %i pairs"
% (count, forward, reverse, neither, len(pairs))
)
| 31.027211 | 86 | 0.611489 |
c6798f3695e83af119f05e4fdd4f14111d00889d | 2,903 | py | Python | code/05_speech_to_text/main_05_b_wake_word.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | 1 | 2021-09-08T09:21:16.000Z | 2021-09-08T09:21:16.000Z | code/05_speech_to_text/main_05_b_wake_word.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | null | null | null | code/05_speech_to_text/main_05_b_wake_word.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | 2 | 2022-02-06T09:54:40.000Z | 2022-03-01T07:52:51.000Z | from loguru import logger
import yaml
import time
import pyaudio
import struct
import os
import sys
from vosk import Model, SpkModel, KaldiRecognizer
import json
import text2numde
from TTS import Voice
import multiprocessing
CONFIG_FILE = "config.yml"
SAMPLE_RATE = 16000
FRAME_LENGTH = 512
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
va = VoiceAssistant()
logger.info("Anwendung wurde gestartet")
va.run() | 27.130841 | 86 | 0.709955 |
c67b22952d56027ba353d1b1a79d790cab33d849 | 4,781 | py | Python | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Dictionaries (2)
# In the last lesson we saw how to create dictionaries and how to access the different items in a dictionary by their key. We also saw how to add to and update the items in a dictionary using assignment, or the <code>dict.update()</code> method, and how to delete items using the <code>del</code> keyword.
#
# In this lesson we're going to continue looking at dictionaries. We're going to find out how to get a dictionary's length, access the keys and values from the dictionary, as well as how to access items inside nested dictionaries.
#
# This knowledge will come in really handy when we start building charts with Plotly, as we'll be passing the instructions for our chart to Plotly using nested dictionaries.
#
# ### Getting a dictionary's length
#
# You can get the number of items in a dictionary by using the <code>len()</code> function in the same way that we could get the length of a string or a list. This counts the number of key/value pairs in the dictionary:
# In[1]:
testScores1 = {'Alice' : 100, 'Bob' : 75, 'Ian' : 25, 'Susan' : 60}
len(testScores1)
# ### Getting a list of keys from a dictionary
#
# We can get a list of keys in a dictionary by using the <code>dict.keys()</code> method:
# In[2]:
print(testScores1.keys())
# This returns an object that looks a bit like a list, but doesn't behave like one. You can't slice it for example:
# In[3]:
var1 = testScores1.keys()
var1[0]
# You can however turn it into a list by using the <code>list()</code> function, which behaves like the <code>str()</code> or <code>int()</code> functions we learnt about previously. Converting the <code>dict_keys</code> object into a list lets us index and slice it:
# In[4]:
var2 = list(testScores1.keys())
var2[0]
# ### Getting a list of values in a dictionary
#
# In the same way that we got the dictionary's keys using <code>dict.keys()</code>, we can also get the dictionary's values using the <code>dict.values()</code> function:
# In[5]:
print(testScores1.values())
# Once again, we can't index or slice this object without turning it into a list:
# In[6]:
var3 = testScores1.values()
var3[0]
# In[7]:
var4 = list(testScores1.values())
var4[0]
# ### Getting the keys and values from a dictionary
#
# As well as getting the keys and values from a dictionary separately, we can also get them together using the <code>dict.items()</code> method. This is especially useful for looping through the items in a dictionary, and we'll look at this in more depth in the loops lesson.
#
# <code>dict.items()</code> returns a collection of tuples. A tuple is very similar to a list, in that you can index the items in it, however you cannot change their values.
# In[8]:
print(testScores1.items())
# Once again, we need to convert this <code>dict_items</code> object into a list to be able to index and slice it:
# In[9]:
var5 = list(testScores1.items())
var5[0]
# We can then select the items individually inside the tuple as if we were selecting items in a nested list:
# In[10]:
var5[0][0]
# ### Nested Dictionaries
#
# Just as we can create nested lists, we can also create nested dictionaries. Here I've created a dictionary which holds the test scores for the students. I've initially populated it with the scores for the first test:
# In[11]:
studentGrades = {'test1' : testScores1}
print(studentGrades)
# Now I can add the scores for the second test:
# In[12]:
testScores2 = {'Ian': 32, 'Susan': 71, 'Bob': 63, 'Alice': 99}
studentGrades.update({'test2' : testScores2})
print(studentGrades)
# We can access the scores for the first test:
# In[13]:
print(studentGrades['test1'])
# We can access the scores for a particular student for the first test in the same way. First of all, we access the <code>'test1'</code> dictionary, then within that we access the value we want by passing the corresponding key. Here I'm getting Ian's score for test 1:
# In[14]:
print(studentGrades['test1']['Ian'])
# ### What have we learnt this lesson?
# In this lesson we've seen how to get the length of a dictionary - the number of items in it. We've also seen how to get a list of the keys and values in the dictionary by using the <code>dict.keys()</code> and <code>dict.values()</code> functions in conjunction with the <code>list()</code> function, as well as how to get a list of tuples of the keys and values using <code>dict.items()</code>. We also learnt that a tuple is very similar to a list, but that you cannot change the items inside.
#
# Finally, we learnt how to create nested dictionaries and access the items inside them.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| 32.972414 | 497 | 0.72328 |
c67c053ab03a0bdd40b9436db19002f5b98e01a7 | 7,851 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | rezarajan/sdc-capstone | a1612fc8acd544e892d8d98142ffc161edf9989a | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | rezarajan/sdc-capstone | a1612fc8acd544e892d8d98142ffc161edf9989a | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | rezarajan/sdc-capstone | a1612fc8acd544e892d8d98142ffc161edf9989a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import numpy as np
from scipy.spatial import KDTree
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 41.760638 | 136 | 0.659279 |
c67c49682485de62598f7200a4f39c46aba3d865 | 1,353 | py | Python | apps/hosts/migrations/0001_initial.py | yhgnice/toolsvb | 35f9d27ee2439d134cab160a7cf930ea13a31d26 | [
"Apache-2.0"
] | null | null | null | apps/hosts/migrations/0001_initial.py | yhgnice/toolsvb | 35f9d27ee2439d134cab160a7cf930ea13a31d26 | [
"Apache-2.0"
] | null | null | null | apps/hosts/migrations/0001_initial.py | yhgnice/toolsvb | 35f9d27ee2439d134cab160a7cf930ea13a31d26 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-09 10:31
from __future__ import unicode_literals
from django.db import migrations, models
| 39.794118 | 120 | 0.602365 |
c67cc3624a702cafd7e7246abe8b88132e111d61 | 53 | py | Python | modules/__init__.py | richardHaw/nagare | 4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | [
"MIT"
] | null | null | null | modules/__init__.py | richardHaw/nagare | 4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | [
"MIT"
] | null | null | null | modules/__init__.py | richardHaw/nagare | 4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | [
"MIT"
] | null | null | null | # this file is needed for python2, delete for python3 | 53 | 53 | 0.792453 |
c67cf2280718f5ed4b61e267d9cdf0637f3ec6f1 | 3,603 | py | Python | src/originexample/pipelines/handle_ggo_received.py | project-origin/example-backend | 13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | [
"MIT"
] | null | null | null | src/originexample/pipelines/handle_ggo_received.py | project-origin/example-backend | 13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | [
"MIT"
] | 1 | 2021-02-10T02:22:51.000Z | 2021-02-10T02:22:51.000Z | src/originexample/pipelines/handle_ggo_received.py | project-origin/example-backend | 13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | [
"MIT"
] | null | null | null | """
TODO write this
"""
import marshmallow_dataclass as md
from sqlalchemy import orm
from originexample import logger
from originexample.db import inject_session
from originexample.tasks import celery_app, lock
from originexample.auth import User, UserQuery
from originexample.consuming import (
GgoConsumerController,
ggo_is_available,
)
from originexample.services.account import (
Ggo,
AccountService,
AccountServiceError,
)
# Settings
RETRY_DELAY = 10
MAX_RETRIES = (24 * 60 * 60) / RETRY_DELAY
LOCK_TIMEOUT = 60 * 2
# Services / controllers
controller = GgoConsumerController()
account_service = AccountService()
# JSON schemas
ggo_schema = md.class_schema(Ggo)()
def start_handle_ggo_received_pipeline(ggo, user):
"""
:param Ggo ggo:
:param User user:
"""
handle_ggo_received \
.s(
subject=user.sub,
ggo_json=ggo_schema.dump(ggo),
address=ggo.address,
) \
.apply_async()
# def get_lock_key(subject, begin):
# """
# :param str subject:
# :param datetime.datetime begin:
# :rtype: str
# """
# return '%s-%s' % (subject, begin.strftime('%Y-%m-%d-%H-%M'))
| 27.503817 | 93 | 0.64141 |
c680e030b9ebdb230ac43be3ba2a8e742227cf78 | 113 | py | Python | tests/test_spliceai_wrapper.py | RipollJ/spliceai-wrapper | 29adcc7c93fa45bec953e9df078151653f419a73 | [
"MIT"
] | 2 | 2020-05-27T12:55:22.000Z | 2020-05-30T12:59:49.000Z | tests/test_spliceai_wrapper.py | RipollJ/spliceai-wrapper | 29adcc7c93fa45bec953e9df078151653f419a73 | [
"MIT"
] | 3 | 2019-10-08T03:38:55.000Z | 2021-11-15T17:51:09.000Z | tests/test_spliceai_wrapper.py | RipollJ/spliceai-wrapper | 29adcc7c93fa45bec953e9df078151653f419a73 | [
"MIT"
] | 2 | 2020-05-19T12:48:57.000Z | 2020-05-20T05:44:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `spliceai_wrapper` package."""
import pytest # noqa
| 18.833333 | 43 | 0.637168 |
c68186a925cc94d28ae976073b01ff32e11600e6 | 313 | py | Python | src/pythonboilerplate/app.py | favgeris/pythonBoilerplate | c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | [
"MIT"
] | null | null | null | src/pythonboilerplate/app.py | favgeris/pythonBoilerplate | c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | [
"MIT"
] | null | null | null | src/pythonboilerplate/app.py | favgeris/pythonBoilerplate | c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import pathlib
from typing import Union
logger = logging.getLogger(__name__)
def run(somearg) -> int:
"""Run app"""
try:
print(f'Some exciting argument: {somearg}')
except RuntimeError as ex:
logger.error(ex)
return 1
return 0
| 15.65 | 51 | 0.658147 |
c681980ffe6cfcb7e6776781d202be19d967c86b | 4,656 | py | Python | hard-gists/81635e6cbc933b7e8862/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/81635e6cbc933b7e8862/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/81635e6cbc933b7e8862/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | import os
import sys
import time
import atomac
import subprocess
if len(sys.argv) < 2:
print "Usage: bouncer.py <path_to_logic_project> (<path_to_logic_project>)"
os.exit(1)
bundleId = 'com.apple.logic10'
for project in sys.argv[1:]:
projectName = project.split('/')[-1].replace('.logicx', '')
filename = projectName + ".wav"
print "Opening %s..." % project
# Open a project file
subprocess.call(['open', project])
print "Activating Logic Pro X..."
logic = atomac.getAppRefByBundleId(bundleId)
logic.activate()
print "Waiting for project '%s' to open..." % projectName
while len(filter(lambda x: projectName in x.AXTitle, logic.windows())) == 0:
time.sleep(0.1)
# Wait for the window to load
time.sleep(1)
print "Triggering bounce operation..."
logic.activate()
logic.sendGlobalKeyWithModifiers('b', [atomac.AXKeyCodeConstants.COMMAND])
print "Waiting for bounce window..."
bounce_window = None
while not bounce_window:
bounce_window = filter(lambda x: ('Output 1-2' in x.AXTitle) or
('Bounce' in x.AXTitle),
logic.windows())
time.sleep(0.1)
bounce_window = bounce_window[0]
print "Selecting output formats..."
qualityScrollArea = bounce_window.findFirst(AXRole='AXScrollArea')
qualityTable = qualityScrollArea.findFirst(AXRole='AXTable')
for row in qualityTable.findAll(AXRole='AXRow'):
rowName = row.findFirst(AXRole='AXTextField').AXValue
checkbox = row.findFirst(AXRole='AXCheckBox')
if rowName == 'PCM':
if checkbox.AXValue is 0:
print "Selected %s output format." % rowName
checkbox.Press()
else:
print "%s output format selected." % rowName
elif checkbox.AXValue is 1:
print "Deselected %s output format." % rowName
checkbox.Press()
print "Pressing Bounce button..."
bounce_button = bounce_window.findFirst(AXRole="AXButton",
AXTitle="Bounce")
if not bounce_button:
bounce_button = bounce_window.findFirst(
AXRole="AXButton",
AXTitle="OK"
)
bounce_button.Press()
bounce_window = None
# bounce_window is now gone and we have a modal dialog about saving
print "Waiting for save window..."
save_window = None
while not save_window:
save_window = filter(lambda x: ('Output 1-2' in x.AXTitle) or
('Bounce' in x.AXTitle),
logic.windows())
time.sleep(0.1)
save_window = save_window[0]
print "Entering filename..."
filenameBox = save_window.findFirst(AXRole="AXTextField")
filenameBox.AXValue = filename
print "Pressing 'Bounce' on the save window..."
bounce_button = save_window.findFirst(AXRole="AXButton", AXTitle="Bounce")
bounce_button.Press()
# Check to see if we got a "this file already exists" dialog
if len(save_window.sheets()) > 0:
print "Allowing overwriting of existing file..."
overwrite_sheet = save_window.sheets()[0]
overwrite_sheet.findFirst(AXRole="AXButton",
AXTitle=u"Replace").Press()
print "Bouncing '%s'..." % projectName
# All UI calls will block now, because Logic blocks the UI thread while bouncing
while len(logic.windows()) > 1:
time.sleep(0.1)
print "Waiting for Logic to regain its senses..."
time.sleep(2)
# Done - should be saved now.
# Close the window with command-option-W
logic.activate()
time.sleep(1)
print "Closing project '%s'..." % projectName
logic.sendGlobalKeyWithModifiers('w', [
atomac.AXKeyCodeConstants.COMMAND, atomac.AXKeyCodeConstants.OPTION
])
print "Waiting for the 'do you want to save changes' window..."
save_window = None
attempts = 0
while not save_window and attempts < 20:
save_window = filter(lambda x: '' == x.AXTitle, logic.windows())
time.sleep(0.1)
attempts += 1
if save_window:
print "Saying 'No, I don't want to save changes'..."
save_window = save_window[0]
logic.activate()
# Click the "Don't Save" button
filter(lambda x: 'Don' in x.AXTitle, save_window.findAll(AXRole="AXButton"))[0].Press()
print "Waiting for all Logic windows to close..."
while len(logic.windows()) > 0:
time.sleep(0.5)
print "Terminating Logic."
atomac.terminateAppByBundleId(bundleId)
| 30.631579 | 95 | 0.618986 |
c6832c12d1f11f0fd4b7b74f990fd950eb68d5c6 | 2,506 | py | Python | functions/formatString.py | Steve-Xyh/AutoAoxiang | a8f1abed0f17b967456b1fa539c0aae79dac1d01 | [
"WTFPL"
] | 7 | 2020-02-17T08:12:14.000Z | 2021-12-29T09:41:35.000Z | functions/formatString.py | Steve-Xyh/AutoAoxiang | a8f1abed0f17b967456b1fa539c0aae79dac1d01 | [
"WTFPL"
] | null | null | null | functions/formatString.py | Steve-Xyh/AutoAoxiang | a8f1abed0f17b967456b1fa539c0aae79dac1d01 | [
"WTFPL"
] | 1 | 2020-07-24T07:16:14.000Z | 2020-07-24T07:16:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import colorama
colorama.init(autoreset=True)
logData = {
'': 'Location',
'': '',
'': '',
'': '',
'': '37.2',
'': '',
'': '',
}
def log_line(dic: dict, color: bool = True) -> str:
    """Render a log dict as a timestamped, one-field-per-line string.

    Field names are padded with ideographic spaces (chr(12288)) so CJK
    text lines up, mirroring log_cn below.

    Parameters:
        dic: mapping of field name -> value; a None value renders as
            blank padding (same convention as log_cn / log_en).
        color: when True, wrap the timestamp and values in ANSI colors
            via setColor().

    Returns:
        The formatted string; every field line ends with a newline.
    """
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    time_info = setColor(now, color='greenFore') if color else now
    res = '[' + time_info + '] '
    for key in dic:
        flg = dic[key] is not None
        res += str(key).ljust(12, chr(12288))
        if flg:
            val_info = setColor(dic[key], color='yellowFore') if color else dic[key]
        else:
            # Bug fix: setColor() is no longer called on None values (it
            # concatenates strings and would raise TypeError for None).
            val_info = ''
        # Bug fix: the original conditional-expression precedence applied the
        # padding and newline only in the None branch, so non-None values ran
        # together on one unpadded line. Pad and terminate every field, as
        # log_cn/log_en do.
        res += str(val_info).ljust(20, chr(12288)) + '\n'
    return res
def log_cn(dic: dict) -> str:
    """Render a log dict as a timestamped block framed by dashed lines.

    Field names and values are padded with ideographic spaces
    (chr(12288)) so CJK (Chinese) text lines up in fixed-width output.

    :param dic: log dict (e.g. {name: value}); None values render blank
    :return: the framed multi-line string (no trailing newline)
    """
    formLen = 40
    res = '-' * formLen + '\n'
    res += '[' + setColor(datetime.datetime.now().strftime(
        '%Y-%m-%d %H:%M:%S'), color='greenFore') + ']\n'
    for key in dic:
        flg = dic[key] is not None  # None marks a field with no value
        res += str(key).ljust(12, chr(12288))
        res += (setColor(dic[key], color='yellowFore')
                if flg else '').ljust(20, chr(12288)) + '\n'
    res += '-' * formLen
    return res
def log_en(dic):
    """Render a log dict as a timestamped block framed by dashed lines,
    using plain ASCII-space padding (English layout counterpart of log_cn).

    :param dic: log dict (e.g. {name: value}); None values render blank
    :return: the framed multi-line string (no trailing newline)
    """
    frame = '-' * 40
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    parts = [frame + '\n']
    parts.append('[' + setColor(stamp, color='greenFore') + ']\n')
    for name in dic:
        value = dic[name]
        shown = setColor(value, color='yellowFore') if value is not None else ''
        parts.append(str(name).ljust(20) + shown.ljust(20) + '\n')
    parts.append(frame)
    return ''.join(parts)
def setColor(string: str, color: str) -> str:
    """Wrap *string* in bright ANSI color escape sequences.

    :param string: text to colorize; must be a str, since it is
        concatenated directly with the escape sequences
    :param color: one of 'redFore', 'redBack', 'greenFore', 'greenBack',
        'yellowFore'; any other value raises KeyError
    :return: the colorized string, with styling reset at the end
    """
    # Map the friendly color names onto colorama foreground/background pairs.
    convertColor = {
        'redFore': colorama.Fore.RED + colorama.Back.RESET,
        'redBack': colorama.Fore.WHITE + colorama.Back.RED,
        'greenFore': colorama.Fore.GREEN + colorama.Back.RESET,
        'greenBack': colorama.Fore.BLACK + colorama.Back.GREEN,
        'yellowFore': colorama.Fore.YELLOW + colorama.Back.RESET,
    }
    return colorama.Style.BRIGHT + convertColor[color] + string + colorama.Style.RESET_ALL
if __name__ == "__main__":
a = 'This is red.'
b = setColor(a, 'redFore')
print(b)
print(log_cn(logData))
| 25.571429 | 111 | 0.541899 |
c6843679e999329dca1a8986c704607c2cb84a96 | 433 | py | Python | 2 - Automation tools with IP hiding techniques/checkValidJson.py | Phong940253/facebook-data-extraction | fa64680dcff900db4d852af06ff792ccf4d5be33 | [
"MIT"
] | null | null | null | 2 - Automation tools with IP hiding techniques/checkValidJson.py | Phong940253/facebook-data-extraction | fa64680dcff900db4d852af06ff792ccf4d5be33 | [
"MIT"
] | null | null | null | 2 - Automation tools with IP hiding techniques/checkValidJson.py | Phong940253/facebook-data-extraction | fa64680dcff900db4d852af06ff792ccf4d5be33 | [
"MIT"
] | null | null | null | import json
import glob
# Collect every group-level (two directories deep) and page-level (one
# directory deep) post file under rawData/.
groupPost = glob.glob("rawData/*/*/*.json")
pagePost = glob.glob("rawData/*/*.json")
groupPagePost = groupPost + pagePost
# Print the path of every file whose contents fail JSON validation.
for postFile in groupPagePost:
    with open(postFile, "r", encoding="utf-8") as f:
        # NOTE(review): is_json is not defined in this file as shown —
        # presumably it wraps json.load in a try/except and returns a
        # bool; confirm where it comes from.
        valid = is_json(f)
        if not valid:
            print(postFile)
c684d80b73e9aa13f3fe72698614ec87757d94ac | 805 | py | Python | Observed_Game_Parra/consent/models.py | danielfParra/Lying_Online_Parra2022 | 10e8ef6248f684f63e9dea1314ef57f197e48773 | [
"CC0-1.0"
] | null | null | null | Observed_Game_Parra/consent/models.py | danielfParra/Lying_Online_Parra2022 | 10e8ef6248f684f63e9dea1314ef57f197e48773 | [
"CC0-1.0"
] | null | null | null | Observed_Game_Parra/consent/models.py | danielfParra/Lying_Online_Parra2022 | 10e8ef6248f684f63e9dea1314ef57f197e48773 | [
"CC0-1.0"
] | null | null | null | from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = 'Daniel Parra'
doc = """
Consent
"""
| 18.72093 | 88 | 0.649689 |
c685b0ee814971262f2ee615d82d87aa09cf8cef | 448 | py | Python | tests/compute/utils.py | joshcarty/dgl | 4464b9734c1061bd84325a54883c5046031def37 | [
"Apache-2.0"
] | 4 | 2018-12-25T14:59:08.000Z | 2021-07-02T12:36:40.000Z | tests/compute/utils.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | [
"Apache-2.0"
] | 6 | 2018-12-13T15:22:08.000Z | 2021-04-22T02:40:27.000Z | tests/compute/utils.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | [
"Apache-2.0"
] | 4 | 2020-12-26T10:39:36.000Z | 2020-12-26T12:38:52.000Z | import pytest
import backend as F
# pytest decorator that parametrizes a test over the index dtypes supported
# by the active backend context.
if F._default_context_str == 'cpu':
    parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
else:
    # only test int32 on GPU because many graph operators are not supported for int64.
    # NOTE(review): despite the comment above, this branch is identical to the
    # CPU branch and still includes int64 — confirm whether int64 should be
    # dropped here.
    parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
c685c691386312383505490cedb37b0394b57c94 | 1,069 | py | Python | apps/controllerx/cx_helper.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 19 | 2019-11-21T19:51:40.000Z | 2020-01-14T09:24:33.000Z | apps/controllerx/cx_helper.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 11 | 2019-11-20T16:43:35.000Z | 2020-01-17T16:23:06.000Z | apps/controllerx/cx_helper.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 5 | 2019-12-20T21:31:07.000Z | 2020-01-06T18:49:52.000Z | import importlib
import os
import pkgutil
from typing import Any, List, Type
| 31.441176 | 88 | 0.658559 |
c686c1dded95c4fb11f50e8f958330e48395c1cb | 304 | py | Python | 34.PySimpleGUI.py | sarincr/GUI-With-Tkinter-using-Python | 3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | [
"MIT"
] | null | null | null | 34.PySimpleGUI.py | sarincr/GUI-With-Tkinter-using-Python | 3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | [
"MIT"
] | null | null | null | 34.PySimpleGUI.py | sarincr/GUI-With-Tkinter-using-Python | 3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | [
"MIT"
] | null | null | null | import PySimpleGUI as PySG
lay = [ [PySG.Text("What's your name?")],
[PySG.Input()],
[PySG.Button('Ok')] ]
wd = PySG.Window('Python Simple GUI', lay)
event, values = wd.read()
print('Hello', values[0])
wd.close()
| 21.714286 | 48 | 0.457237 |
c687f6be8dfca9ca6885acd96b6da1567a4b50ba | 270 | py | Python | slbo/policies/uniform_policy.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 27 | 2020-06-17T11:40:17.000Z | 2021-11-16T07:39:33.000Z | slbo/policies/uniform_policy.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 3 | 2020-06-19T07:01:48.000Z | 2020-06-19T07:14:57.000Z | slbo/policies/uniform_policy.py | LinZichuan/AdMRL | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | [
"MIT"
] | 5 | 2020-11-19T01:11:24.000Z | 2021-12-24T09:03:56.000Z | import numpy as np
from . import BasePolicy
| 24.545455 | 81 | 0.685185 |
c688fe0af58ac798c7af0c9f68af25aff660071c | 5,304 | py | Python | models/ScrabbleGAN.py | iambhuvi/ScrabbleGAN | 30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | [
"MIT"
] | 9 | 2021-02-02T06:31:32.000Z | 2021-11-03T11:19:58.000Z | models/ScrabbleGAN.py | iambhuvi/ScrabbleGAN | 30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | [
"MIT"
] | 1 | 2021-12-01T12:13:14.000Z | 2021-12-01T12:13:14.000Z | models/ScrabbleGAN.py | iambhuvi/ScrabbleGAN | 30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | [
"MIT"
] | 6 | 2021-02-02T06:31:49.000Z | 2022-01-21T14:33:43.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from models.model_utils import BigGAN as BGAN
from utils.data_utils import *
import pandas as pd
def create_model(config, char_map):
    """Build a ScrabbleGAN model and move it to the configured device.

    :param config: configuration object; config.device selects the target
        device passed to model.to()
    :param char_map: character map forwarded to the ScrabbleGAN constructor
    :return: the ScrabbleGAN instance, already moved to config.device
    """
    model = ScrabbleGAN(config, char_map)
    model.to(config.device)
    return model
| 37.617021 | 112 | 0.572587 |
c689b60ebca7bfda5e5401b93bdc1651fc7b24be | 2,745 | py | Python | jobbing/controllers/providers_controller.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | jobbing/controllers/providers_controller.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | 1 | 2021-06-10T03:34:07.000Z | 2021-06-10T03:34:07.000Z | jobbing/controllers/providers_controller.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | 1 | 2022-02-14T15:51:01.000Z | 2022-02-14T15:51:01.000Z | from flask import abort
from jobbing.models.user_profile import UserProfile # noqa: E501
from jobbing.models.service import Service # noqa: E501
from jobbing.DBModels import Profile as DBProfile
from jobbing.DBModels import Service as DBService
from jobbing.login import token_required
| 34.746835 | 82 | 0.647723 |
c68a6d8f407663035b0e8aaa5e7a9d1c6021d7ca | 11,082 | py | Python | app/tests/evaluation_tests/test_views.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | [
"Apache-2.0"
] | null | null | null | app/tests/evaluation_tests/test_views.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | [
"Apache-2.0"
] | null | null | null | app/tests/evaluation_tests/test_views.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from datetime import timedelta
import factory
import pytest
from django.db.models import signals
from django.utils import timezone
from tests.factories import (
EvaluationFactory,
MethodFactory,
SubmissionFactory,
)
from tests.utils import (
get_view_for_user,
validate_admin_only_view,
validate_admin_or_participant_view,
validate_open_view,
)
def submission_and_evaluation(*, challenge, creator):
    """Create a submission by *creator* on *challenge*, plus its evaluation."""
    submission = SubmissionFactory(challenge=challenge, creator=creator)
    evaluation = EvaluationFactory(submission=submission)
    return submission, evaluation
def submissions_and_evaluations(two_challenge_sets):
    """
    Create (e)valuations and (s)ubmissions for each (p)articipant and
    (c)hallenge.

    Naming scheme: p_s1 = participant's submission 1 in challenge 1;
    p12_s1_c2 = the shared participant12's submission 1 in challenge 2;
    each e_* field is the evaluation of the matching submission.
    """
    SubmissionsAndEvaluations = namedtuple(
        "SubmissionsAndEvaluations",
        [
            "p_s1",
            "p_s2",
            "p1_s1",
            "p12_s1_c1",
            "p12_s1_c2",
            "e_p_s1",
            "e_p_s2",
            "e_p1_s1",
            "e_p12_s1_c1",
            "e_p12_s1_c2",
        ],
    )
    # participant 0, submission 1, challenge 1, etc
    p_s1, e_p_s1 = submission_and_evaluation(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        creator=two_challenge_sets.challenge_set_1.participant,
    )
    p_s2, e_p_s2 = submission_and_evaluation(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        creator=two_challenge_sets.challenge_set_1.participant,
    )
    p1_s1, e_p1_s1 = submission_and_evaluation(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        creator=two_challenge_sets.challenge_set_1.participant1,
    )
    # participant12, submission 1 to each challenge
    p12_s1_c1, e_p12_s1_c1 = submission_and_evaluation(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        creator=two_challenge_sets.participant12,
    )
    p12_s1_c2, e_p12_s1_c2 = submission_and_evaluation(
        challenge=two_challenge_sets.challenge_set_2.challenge,
        creator=two_challenge_sets.participant12,
    )
    return SubmissionsAndEvaluations(
        p_s1,
        p_s2,
        p1_s1,
        p12_s1_c1,
        p12_s1_c2,
        e_p_s1,
        e_p_s2,
        e_p1_s1,
        e_p12_s1_c1,
        e_p12_s1_c2,
    )
# TODO: test that private results cannot be seen
| 33.889908 | 78 | 0.721891 |
c68b2764c5975cf7d7cb7f41a4acfa6e9c0a27aa | 2,496 | py | Python | Misc/convm.py | Dengjianping/AlgorithmsPractice | 612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf | [
"MIT"
] | null | null | null | Misc/convm.py | Dengjianping/AlgorithmsPractice | 612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf | [
"MIT"
] | null | null | null | Misc/convm.py | Dengjianping/AlgorithmsPractice | 612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf | [
"MIT"
] | null | null | null | a=[[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]]
b=[[1,2,3,4],[3,4,5,5]]
print convArray(a[0], b[0],'full')
print convArray(a[0], b[0],'same')
print convArray(a[0], b[0],'valid')
print convMatrix(a, b,'full')
print convMatrix(a, b,'same')
print convMatrix(a, b,'valid') | 35.15493 | 84 | 0.405849 |
c68b2c92d58355bdae49241aa6bb6793ce19665b | 1,170 | py | Python | com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py | systemincloud/sic-examples | b82d5d672f515b1deb5ddb35c5a93c003e03c030 | [
"Apache-2.0"
] | null | null | null | com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py | systemincloud/sic-examples | b82d5d672f515b1deb5ddb35c5a93c003e03c030 | [
"Apache-2.0"
] | 15 | 2015-01-08T20:28:19.000Z | 2016-07-20T07:19:15.000Z | com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py | systemincloud/sic-examples | b82d5d672f515b1deb5ddb35c5a93c003e03c030 | [
"Apache-2.0"
] | null | null | null | from sicpythontask.PythonTaskInfo import PythonTaskInfo
from sicpythontask.PythonTask import PythonTask
from sicpythontask.InputPort import InputPort
from sicpythontask.OutputPort import OutputPort
from sicpythontask.data.Int32 import Int32
from sicpythontask.data.Float32 import Float32
from sicpythontask.data.Float64 import Float64
from sicpythontask.data.Text import Text
| 39 | 92 | 0.718803 |
c68c3919e177e8d1de7b30c2a650b62b74c47975 | 6,811 | py | Python | bin/extract_bcs.py | dmaticzka/bctools | e4733b1f59a151f8158a8173a3cde48a5d119bc2 | [
"Apache-2.0"
] | null | null | null | bin/extract_bcs.py | dmaticzka/bctools | e4733b1f59a151f8158a8173a3cde48a5d119bc2 | [
"Apache-2.0"
] | 3 | 2016-04-24T14:26:17.000Z | 2017-04-28T15:17:20.000Z | bin/extract_bcs.py | dmaticzka/bctools | e4733b1f59a151f8158a8173a3cde48a5d119bc2 | [
"Apache-2.0"
] | 2 | 2016-05-06T03:57:25.000Z | 2018-11-06T10:57:32.000Z | #!/usr/bin/env python
import argparse
import logging
import re
from sys import stdout
from Bio.SeqIO.QualityIO import FastqGeneralIterator
# avoid ugly python IOError when stdout output is piped into another program
# and then truncated (such as piping to head)
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
tool_description = """
Exract barcodes from a FASTQ file according to a user-specified pattern. Starting from the 5'-end, positions marked by X will be moved into a separate FASTQ file. Positions marked bv N will be kept.
By default output is written to stdout.
Example usage:
- remove barcode nucleotides at positions 1-3 and 6-7 from FASTQ; write modified
FASTQ entries to output.fastq and barcode nucleotides to barcodes.fa:
fastq_extract_barcodes.py barcoded_input.fastq XXXNNXX --out output.fastq --bcs barcodes.fastq
"""
# parse command line arguments
parser = argparse.ArgumentParser(description=tool_description,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"infile",
help="Path to fastq file.")
parser.add_argument(
"pattern",
help="Pattern of barcode nucleotides starting at 5'-end. X positions will be moved to the header, N positions will be kept.")
# optional arguments
parser.add_argument(
"-o", "--outfile",
help="Write results to this file.")
parser.add_argument(
"-b", "--bcs",
dest="out_bc_fasta",
help="Write barcodes to this file in FASTQ format.")
parser.add_argument(
"--fasta-barcodes",
dest="save_bcs_as_fa",
action="store_true",
help="Save extracted barcodes in FASTA format.")
parser.add_argument(
"-a", "--add-bc-to-fastq",
dest="add_to_head",
help="Append extracted barcodes to the FASTQ headers.",
action="store_true")
parser.add_argument(
"-v", "--verbose",
help="Be verbose.",
action="store_true")
parser.add_argument(
"-d", "--debug",
help="Print lots of debugging information",
action="store_true")
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
elif args.verbose:
logging.basicConfig(level=logging.INFO, format="%(filename)s - %(levelname)s - %(message)s")
else:
logging.basicConfig(format="%(filename)s - %(levelname)s - %(message)s")
logging.info("Parsed arguments:")
logging.info(" infile: '{}'".format(args.infile))
logging.info(" pattern: '{}'".format(args.pattern))
if args.outfile:
logging.info(" outfile: enabled writing to file")
logging.info(" outfile: '{}'".format(args.outfile))
if args.out_bc_fasta:
logging.info(" bcs: enabled writing barcodes to fastq file")
logging.info(" bcs: {}".format(args.out_bc_fasta))
if args.save_bcs_as_fa:
logging.info(" fasta-barcodes: write barcodes in fasta format instead of fastq")
logging.info("")
# check if supplied pattern is valid
valid_pattern = re.compile("^[XN]+$")
pattern_match = valid_pattern.match(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' is not valid.".format(args.pattern))
# check if at least one barcode position is included in the pattern
has_bcpos_pattern = re.compile("X")
pattern_match = has_bcpos_pattern.search(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' does not contain a barcode position 'X'.".format(args.pattern))
logging.info("Barcode pattern analysis:")
# get X positions of pattern string
barcode_nt_pattern = re.compile("X+")
barcode_positions = []
for m in re.finditer(barcode_nt_pattern, args.pattern):
logging.info(' found barcode positions in pattern: %02d-%02d: %s' % (m.start(), m.end(), m.group(0)))
barcode_positions.append((m.start(), m.end()))
logging.info(" barcode positions: {}".format(barcode_positions))
# get last position of a barcode nt in the pattern
# reads must be long enough for all
min_readlen = barcode_positions[-1][-1]
logging.info(" last position of a barcode nt in pattern: {}".format(min_readlen))
logging.info("")
# get coordinates of nucleotides to keep
# the tail after the last barcode nt is handled separately
seq_positions = []
last_seq_start = 0
for bcstart, bcstop in barcode_positions:
seq_positions.append((last_seq_start, bcstart))
last_seq_start = bcstop
logging.info(" sequence positions: {}".format(seq_positions))
logging.info(" start of sequence tail: {}".format(last_seq_start))
# Output FASTQ stream: the requested --out file, or stdout by default.
samout = (open(args.outfile, "w") if args.outfile is not None else stdout)
if args.out_bc_fasta is not None:
    faout = open(args.out_bc_fasta, "w")
# NOTE(review): the input handle below is never closed explicitly; a
# 'with open(...)' would be tidier, though the OS reclaims it at exit.
for header, seq, qual in FastqGeneralIterator(open(args.infile)):
    # skip reads that are too short to extract the full requested barcode
    if len(seq) < min_readlen:
        logging.warning("skipping read '{}', is too short to extract the full requested barcode".format(header))
        logging.debug("seq: {}".format(seq))
        logging.debug("len(seq): {}".format(len(seq)))
        continue
    # extract barcode nucleotides
    barcode_list = []
    barcode_qual_list = []
    for bcstart, bcstop in barcode_positions:
        barcode_list.append(seq[bcstart:bcstop])
        barcode_qual_list.append(qual[bcstart:bcstop])
    barcode = "".join(barcode_list)
    barcode_quals = "".join(barcode_qual_list)
    logging.debug("extracted barcode: {}".format(barcode))
    # create new sequence and quality string without barcode nucleotides
    new_seq_list = []
    new_qual_list = []
    for seqstart, seqstop in seq_positions:
        new_seq_list.append(seq[seqstart:seqstop])
        new_qual_list.append(qual[seqstart:seqstop])
    # keep everything after the last barcode nucleotide as well
    new_seq_list.append(seq[last_seq_start:])
    new_qual_list.append(qual[last_seq_start:])
    new_seq = "".join(new_seq_list)
    new_qual = "".join(new_qual_list)
    # check if at least one nucleotide is left. having none would break fastq
    if len(new_seq) == 0:
        logging.warning("skipping read '{}', no sequence remains after barcode extraction".format(header))
        logging.debug("seq: {}".format(seq))
        logging.debug("len(seq): {}".format(len(seq)))
        continue
    # write barcode nucleotides into header
    if args.add_to_head:
        annotated_header = " ".join([header, barcode])
    else:
        annotated_header = header
    samout.write("@%s\n%s\n+\n%s\n" % (annotated_header, new_seq, new_qual))
    # write barcode to fasta if requested
    if args.out_bc_fasta is not None:
        if args.save_bcs_as_fa:
            faout.write(">{}\n{}\n".format(header, barcode))
        else:
            faout.write("@{}\n{}\n+\n{}\n".format(header, barcode, barcode_quals))
# close files
# NOTE(review): when no --out file was given, samout IS stdout and is closed
# here — harmless at the end of a script, but worth confirming if this code
# is ever imported rather than run standalone.
samout.close()
if args.out_bc_fasta is not None:
    faout.close()
| 39.143678 | 198 | 0.707238 |
c68e39b0e1053cfb768407c21209e2d2583bacc2 | 1,226 | py | Python | main.py | pranavbaburaj/sh | dc0da9e10e7935310ae40d350c1897fcd65bce8f | [
"MIT"
] | 4 | 2021-01-30T12:25:21.000Z | 2022-03-13T07:23:19.000Z | main.py | pranavbaburaj/sh | dc0da9e10e7935310ae40d350c1897fcd65bce8f | [
"MIT"
] | 3 | 2021-02-26T13:11:17.000Z | 2021-06-04T17:26:05.000Z | main.py | pranavbaburaj/sh | dc0da9e10e7935310ae40d350c1897fcd65bce8f | [
"MIT"
] | 1 | 2021-02-08T10:18:29.000Z | 2021-02-08T10:18:29.000Z | import pyfiglet as figlet
import click as click
from project import Project, ApplicationRunner
# The application package manager
# get
from package import PackageManager
# print out the application name
# call the project class
# and create a new project
# call teh run class
# and run the specified project
# call the package manager
# and install packages
if __name__ == "__main__":
index() | 24.52 | 69 | 0.693312 |
c68e5add13f063fbcdf222fe47050eb0edc40e67 | 1,970 | py | Python | src/container/cni/cni/test/kube_cni/test_kube_params.py | Dmitry-Eremeev/contrail-controller | 1238bcff697981662225ec5a15bc4d3d2237ae93 | [
"Apache-2.0"
] | null | null | null | src/container/cni/cni/test/kube_cni/test_kube_params.py | Dmitry-Eremeev/contrail-controller | 1238bcff697981662225ec5a15bc4d3d2237ae93 | [
"Apache-2.0"
] | 2 | 2018-12-04T02:20:52.000Z | 2018-12-22T06:16:30.000Z | src/container/cni/cni/test/kube_cni/test_kube_params.py | Dmitry-Eremeev/contrail-controller | 1238bcff697981662225ec5a15bc4d3d2237ae93 | [
"Apache-2.0"
] | 1 | 2018-12-04T02:07:47.000Z | 2018-12-04T02:07:47.000Z | import sys
import mock
import unittest
import os
import types
from mock import patch, Mock
# Stub out the third-party 'docker' package BEFORE importing the module under
# test: registering the Mock in sys.modules makes kube_params's own
# 'import docker' resolve to this stub, so the tests run without docker
# installed.
docker = Mock()
docker.client = Mock()
sys.modules['docker'] = docker
from cni.kube_cni import kube_params
c68e8c8615c536021a7d96bf97849a89f15fbe86 | 672 | py | Python | build/lib/ripda/__init__.py | isakruas/ripda | a85e04be6f2d019a294a284e16b55b533cd32c33 | [
"MIT"
] | 3 | 2021-06-05T13:05:49.000Z | 2021-06-08T12:01:16.000Z | build/lib/ripda/__init__.py | isakruas/ripda | a85e04be6f2d019a294a284e16b55b533cd32c33 | [
"MIT"
] | null | null | null | build/lib/ripda/__init__.py | isakruas/ripda | a85e04be6f2d019a294a284e16b55b533cd32c33 | [
"MIT"
] | 1 | 2021-06-05T13:06:15.000Z | 2021-06-05T13:06:15.000Z | import os
from pathlib import Path
from .settings import default
import logging
# First-run bootstrap: ensure ~/ripda/blocks/ exists and create a default
# config the first time the package is imported. Any failure is logged and
# swallowed so that importing the package never raises.
try:
    home = Path.home() / 'ripda'
    # Remember whether this is a fresh install before creating anything.
    first_run = not home.is_dir()
    # Creates both ~/ripda/ and ~/ripda/blocks/ in one idempotent call,
    # replacing the original chain of isdir checks, os.mkdir calls and
    # dead 'pass' statements.
    (home / 'blocks').mkdir(parents=True, exist_ok=True)
    # Original behavior: default() runs on a fresh install, or whenever
    # config.ini is missing from an existing install.
    if first_run or not (home / 'config.ini').is_file():
        default()
except Exception as e:
    logging.exception(e)
__all__ = [
'block',
'blockchain',
'miner',
'node',
'transaction',
'wallet'
]
| 21.677419 | 70 | 0.549107 |
c6904f6da38987f613861eec004342d5edfec9c2 | 1,339 | py | Python | src/21.py | peter-hunt/project-euler-solution | ce5be80043e892e3a95604bd5ebec9dc88c7c037 | [
"MIT"
] | null | null | null | src/21.py | peter-hunt/project-euler-solution | ce5be80043e892e3a95604bd5ebec9dc88c7c037 | [
"MIT"
] | null | null | null | src/21.py | peter-hunt/project-euler-solution | ce5be80043e892e3a95604bd5ebec9dc88c7c037 | [
"MIT"
] | null | null | null | """
Amicable numbers
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and
142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import floor, sqrt
limit = 10_000
# 31626
print(initial_func(limit))
# print(improved_func(limit))
| 21.596774 | 79 | 0.551158 |
c691c92322330bef3cb93860c43c284649dcb60d | 120 | py | Python | cronicl/tracers/__init__.py | joocer/cronicl | 5ab215554939699683752cb7b8549756edff9ea5 | [
"Apache-2.0"
] | null | null | null | cronicl/tracers/__init__.py | joocer/cronicl | 5ab215554939699683752cb7b8549756edff9ea5 | [
"Apache-2.0"
] | 73 | 2020-10-05T21:00:48.000Z | 2020-11-16T23:29:41.000Z | cronicl/tracers/__init__.py | joocer/cronicl | 5ab215554939699683752cb7b8549756edff9ea5 | [
"Apache-2.0"
] | null | null | null | from .file_tracer import FileTracer
from .null_tracer import NullTracer
from .base_tracer import BaseTracer, get_tracer
| 30 | 47 | 0.858333 |
c6929965a631981dae06929029921384cdc65b4d | 2,156 | py | Python | test.py | adrianlazar-personal/py-jwt-validator | 1d586129a1279f90b4b326aa29f40b9302004e43 | [
"MIT"
] | 6 | 2020-05-28T20:22:23.000Z | 2021-09-21T06:26:52.000Z | test.py | adrianlazar-personal/py-jwt-validator | 1d586129a1279f90b4b326aa29f40b9302004e43 | [
"MIT"
] | 4 | 2020-11-09T23:12:38.000Z | 2021-03-03T16:39:59.000Z | test.py | adrianlazar-personal/py-jwt-validator | 1d586129a1279f90b4b326aa29f40b9302004e43 | [
"MIT"
] | 1 | 2020-12-07T15:00:35.000Z | 2020-12-07T15:00:35.000Z | from py_jwt_validator import PyJwtValidator, PyJwtException
import requests
jwt = 'eyJraWQiOiIyMjIiLCJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdF9oYXNoIjoic2pvdjVKajlXLXdkblBZUDd3djZ0USIsInN1YiI6Imh0dHBzOi8vbG9naW4uc2FsZXNmb3JjZS5jb20vaWQvMDBEMXQwMDAwMDBEVUo2RUFPLzAwNTF0MDAwMDAwRHlhUEFBUyIsInpvbmVpbmZvIjoiRXVyb3BlL0R1YmxpbiIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJhZGRyZXNzIjp7ImNvdW50cnkiOiJSTyJ9LCJwcm9maWxlIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQubXkuc2FsZXNmb3JjZS5jb20vMDA1MXQwMDAwMDBEeWFQQUFTIiwiaXNzIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQubXkuc2FsZXNmb3JjZS5jb20iLCJwaG9uZV9udW1iZXJfdmVyaWZpZWQiOnRydWUsInByZWZlcnJlZF91c2VybmFtZSI6ImFkcmlhbnNhbGVzZm9yY2VAbWFpbGluYXRvci5jb20iLCJnaXZlbl9uYW1lIjoiQWRyaWFuIiwibG9jYWxlIjoiZW5fSUVfRVVSTyIsIm5vbmNlIjoibm9uY2UiLCJwaWN0dXJlIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQtLWMuZXUxNi5jb250ZW50LmZvcmNlLmNvbS9wcm9maWxlcGhvdG8vMDA1L0YiLCJhdWQiOiIzTVZHOWZUTG1KNjBwSjVKRF9GLndaTE1TZXJsRm03VmxCUWZPNWJhNHRSbDVrLmFPenhiTUVEN3g1ZTF6M2pwUmU2M1ZQOTNCbEp4eU5QUG9oWkcyLGh0dHA6Ly9sb2NhbGhvc3Q6NTUwMCIsInVwZGF0ZWRfYXQiOiIyMDE5LTA1LTAzVDE1OjQ4OjUyWiIsIm5pY2tuYW1lIjoiYWRyaWFuc2FsZXNmb3JjZSIsIm5hbWUiOiJBZHJpYW4gTGF6YXIiLCJwaG9uZV9udW1iZXIiOiIrNDAgMDcyNTUxMTg4NCIsImV4cCI6MTU4MDQwMTMxNiwiaWF0IjoxNTgwNDAxMTk2LCJmYW1pbHlfbmFtZSI6IkxhemFyIiwiZW1haWwiOiJhZHJpYW5zYWxlc2ZvcmNlQG1haWxpbmF0b3IuY29tIn0.QrEyD4qt1ZzT1-1ncdCqYxpGNsne8E22jwnHCvn3ygId1ZcA3305Mso2WfNASyMAyFWFcyc_sQmc67RZKFuMk0pdflkCwLl6JJdL9IKZo8qjcUmWdalAdpxU61F-NyUSa7IE6eh5y-Dm_qtrhxMXrqen9ugwf1MIiBm2VwgdaQFymEa8jKojfljOivHnEafX0D91NFLAFZPebPnMQp9YE-UR0n49lGT4x68avkqGXaRRVtxBCP_r5swOvqW9OL2Sa3kvSwUlp62Edf2Rxke6REnaWpYZs3rbGlQAzIsVAbansZBXv0dGJU8z2EFOmi7bKThjscqP-VmtASl1TJVrgWVBoRE9EyT10AUpGEuAAfTjGEtNOAq_u0UcFZc9quphy4cSJ2y66-KNwvD73y0Vl9KoeyJPc6Mrnu7yCVXTgsateaUwVn3dx2Cw0Jf3azUO-G5RfnQTpdE7huwofXUyh_WmaYVQ997lcXiVdhndZmSVDPEB9t05-qHCC5hafmnQqMpBvV-eI-OKhMVxwhdjzZnwmrALj-2Z9ApqfKsHxTy27RtIfNKPTijOAW8L6YwI909J__F7_tcPHOtEmusmg-CvU5qPUeq8D3pPC_IdzZBD-3GmavzzVeEjN1ucuo6aIIcvmsjQzeR4r_ZvWWdjx0gOHiGEraO2uETGiA3zesk'
try:
print(PyJwtValidator(jwt, auto_verify=False, check_expiry=False).verify(True))
except PyJwtException as e:
print(f"Exception caught. Error: {e}") | 269.5 | 1,920 | 0.967532 |
c6934a03c692a0936dbdefc83d05a9252b05f1c4 | 6,795 | py | Python | files/area.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | 2 | 2021-05-26T19:14:16.000Z | 2021-05-27T21:14:24.000Z | files/area.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | null | null | null | files/area.py | joaovpassos/USP-Programs | 09ddb8aed238df1f1a2e80afdc202ac4538daf41 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#------------------------------------------------------------------
# Constantes que voc pode utilizar nesse exerccio
# Em notao cientfica 1.0e-6 o o mesmo qoe 0.000001 (10 elevado a -6)
EPSILON = 1.0e-6
#------------------------------------------------------------------
# O import abaixo permite que o programa utilize todas as funes do mdulo math,
# como por exemplo, math.exp e math.sin.
import math
#------------------------------------------------------------------
def main():
'''() -> None
Modifique essa funo, escrevendo outros testes.
'''
# escolha a funo que desejar e atribuia a f_x
f_x = math.cos
# f_x = math.sin
# f_x = math.exp # etc, para integrao com outras funes.
# f_x = identidade # identidade() definidas mais adiante
# f_x = circunferencia # circunferencia() definida mais adiante
# f_x = exp # exp() definida mais adiante
print("Incio dos testes.")
# Testes da f_x
nome = f_x.__name__ # nome da f_x usada
print(f"A funo f_x usada nos testes {nome}()")
print(f"Valor de f_x(0.0)= {f_x( 0.0 )}")
print(f"Valor de f_x(0.5)= {f_x( 0.5 )}")
print(f"Valor de f_x(1.0)= {f_x( 1.0 )}")
# testes da funo rea_por_retangulos
print()
print("rea por retngulos:")
a, b = 0, 1 # intervalo [a,b]
k = 1 # nmero de retngulos
n = 3 # nmero de iteraes
i = 0
while i < n:
print(f"teste {i+1}: para {k} retngulos no intervalo [{a}, {b}]:")
print(f" rea aproximada = {area_por_retangulos(f_x, a, b, k):g}")
k *= 10
i += 1
# testes da funo rea_aproximada
print()
print("rea aproximada:")
a, b = 0, 1 # intervalo
k, area = area_aproximada(f_x, a, b) # nmero de retngulos e aproximao
print(f"teste 1: para eps = {EPSILON:g} e intervalo [{a}, {b}]:")
print(f" com {k} retngulo a rea aproximadamente = {area:g}")
eps = 1e-6 # erro relativo aceitvel
i = 1
n = 4
while i < n:
eps *= 10 # aumenta o erro relativo aceitvel
k, area = area_aproximada(f_x, a, b, eps)
print(f"teste {i+1}: para eps = {eps:g} e intervalo [{a}, {b}]:")
print(f" com {k} retngulos a rea aproximadamente = {area:g}")
i += 1
print("Fim dos testes.")
#------------------------------------------------------------------
# FUNO AUXILIAR PARA TESTE: funo f(x)=x
def identidade( x ):
''' (float) -> float
RECEBE um valor x.
RETORNA o valor recebido.
EXEMPLOS:
In [6]: identidade(3.14)
Out[6]: 3.14
In [7]: identidade(1)
Out[7]: 1
In [8]: identidade(-3)
Out[8]: -3
'''
return x
#------------------------------------------------------------------
# FUNO AUXILIAR PARA TESTE: funo f(x)=sqrt(1 - x*x)
def circunferencia( x ):
''' (float) -> float
RECEBE um valor x.
RETORNA um valor y >= 0 tal que (x,y) um ponto na circunferncia de raio 1 e centro (0,0).
PR-CONDIO: a funo supe que x um valor tal que -1 <= x <= 1.
EXEMPLOS:
In [9]: circunferencia(-1)
Out[9]: 0.0
In [10]: circunferencia(0)
Out[10]: 1.0
In [11]: circunferencia(1)
Out[11]: 0.0
'''
y = math.sqrt( 1 - x*x )
return y
#------------------------------------------------------------------
# FUNO AUXILIAR PARA TESTE: funo f(x) = e^x
def exp( x ):
''' (float) -> float
RECEBE um valor x.
RETORNA (uma aproximao de) exp(x).
EXEMPLOS:
In [12]: exp(1)
Out[12]: 2.718281828459045
In [13]: exp(0)
Out[13]: 1.0
In [14]: exp(-1)
Out[14]: 0.36787944117144233
'''
y = math.exp( x )
return y # return math.exp( x )
#------------------------------------------------------------------
#
def erro_rel(y, x):
''' (float, float) -> float
RECEBE dois nmeros x e y.
RETORNA o erro relativo entre eles.
EXEMPLOS:
In [1]: erro_rel(0, 0)
Out [1]: 0.0
In [2]: erro_rel(0.01, 0)
Out [2]: 1.0
In [3]: erro_rel(1.01, 1.0)
Out [3]: 0.01
'''
if x == 0 and y == 0:
return 0.0
elif x == 0:
return 1.0
erro = (y-x)/x
if erro < 0:
return -erro
return erro
#------------------------------------------------------------------
def area_por_retangulos(f, a, b, k):
'''(function, float, float, int) -> float
RECEBE uma funo f, dois nmeros a e b e um inteiro k.
RETORNA uma aproximao da rea sob a funo f no intervalo [a,b]
usando k retngulos.
PR-CONDIO: a funo supe que a funo f continua no intervalo [a,b] e que
f(x) >= 0 para todo x, a <= x <= b.
EXEMPLOS:
In [15]area_por_retangulos(identidade, 0, 1, 1)
Out[15]: 0.5
In [16]:area_por_retangulos(circunferencia, -1, 0, 1)
Out[16]: 0.8660254037844386
'''
# escreva a sua soluo a seguir
# remova ou modifique a linha abaixo como desejar
base = (b-a)/k
i = 0
x_meio = ((b-a)/(2*k)) + a
soma = 0
while i < k:
area = f(x_meio)*base
x_meio += base
i += 1
soma += area
return soma
#------------------------------------------------------------------
def area_aproximada(f, a, b, eps=EPSILON):
'''(function, float, float, float) -> int, float
RECEBE uma funo f, dois nmeros a, b, eps.
RETORNA um inteiro k e uma aproximao da rea sob a funo f no intervalo [a,b]
usando k retngulo.
O valor de k deve ser a __menor potncia__ de 2 tal que o erro relativo
da aproximao retornada seja menor que eps.
Assim, os possveis valores de k so 1, 2, 4, 8, 16, 32, 64, ...
PR-CONDIO: a funo supe que a funo f continua no intervalo [a,b] e que
f(x) >= 0 para todo x, a <= x <= b.
EXEMPLOS:
In [22]: area_aproximada(identidade, 1, 2)
Out[22]: (2, 1.5)
In [23]: area_aproximada(exp, 1, 2, 16)
Out[23]: (2, 4.6224728167337865)
'''
# escreva o corpo da funo
# remova ou modifique a linha abaixo como desejar
k = 1
sub = eps + 1
while sub >= eps:
sub = erro_rel(area_por_retangulos(f,a,b,k*2),area_por_retangulos(f,a,b,k))
k *= 2
return k, area_por_retangulos(f,a,b,k) # para retornar um int e um float
# basta separ-los por vrgula
#######################################################
### FIM ###
#######################################################
#
# NO MODIFIQUE AS LINHAS ABAIXO
#
# Esse if serve para executar a funo main() apenas quando
# este o mdulo a partir do qual a execuo foi iniciada.
if __name__ == '__main__':
main()
| 31.901408 | 98 | 0.512288 |
c693df3548964a87b3411b88e56a453a7a597f59 | 4,421 | py | Python | gribmagic/unity/download/engine.py | earthobservations/gribmagic | 59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e | [
"MIT"
] | 9 | 2020-12-18T13:26:45.000Z | 2022-03-03T16:46:33.000Z | gribmagic/unity/download/engine.py | earthobservations/gribmagic | 59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e | [
"MIT"
] | 12 | 2020-12-19T18:32:51.000Z | 2021-10-30T17:48:35.000Z | gribmagic/unity/download/engine.py | earthobservations/gribmagic | 59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e | [
"MIT"
] | 2 | 2020-12-19T08:02:03.000Z | 2021-10-30T16:01:02.000Z | """
Handle download of NWP data from remote servers.
"""
import logging
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, List
import requests
from gribmagic.unity.configuration.constants import (
KEY_COMPRESSION,
KEY_LOCAL_FILE_PATHS,
KEY_REMOTE_FILE_PATHS,
)
from gribmagic.unity.configuration.model import WeatherModelSettings
from gribmagic.unity.download.decoder import (
decode_bunzip,
decode_identity,
decode_tarfile,
)
from gribmagic.unity.enumerations import WeatherModel
from gribmagic.unity.model import DownloadItem
session = requests.Session()
logger = logging.getLogger(__name__)
DEFAULT_NUMBER_OF_PARALLEL_PROCESSES = 4
def run_download(
weather_model: WeatherModel,
model_file_lists: Dict[str, List[str]],
parallel_download: bool = False,
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
Download weather forecasts data.
"""
model = WeatherModelSettings(weather_model)
if model.info[KEY_COMPRESSION] == "tar":
return __download_tar_file(
weather_model,
model_file_lists[KEY_REMOTE_FILE_PATHS][0],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
if parallel_download:
download_specifications = [
DownloadItem(model=weather_model, local_file=local_file_path, remote_url=remote_file)
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
]
return __download_parallel(download_specifications, n_processes)
else:
results = []
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
):
item = DownloadItem(
model=weather_model, local_file=local_file_path, remote_url=remote_file
)
results.append(__download(item))
return results
def __download(item: DownloadItem) -> None:
"""
base download function to manage single file download
Args:
download_specification: Tuple with
- WeatherModel
- local_file_path
- remote_file_path
Returns:
Stores a file in temporary directory
"""
model = WeatherModelSettings(item.model)
# Compute source URL and target file.
url = item.remote_url
target_file = Path(item.local_file)
if target_file.exists():
logger.info(f"Skipping existing file {target_file}")
return target_file
logger.info(f"Downloading {url} to {target_file}")
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
if not target_file.parent.is_dir():
target_file.parent.mkdir(exist_ok=True)
if model.info[KEY_COMPRESSION] == "bz2":
decode_bunzip(response.raw, target_file)
else:
decode_identity(response.raw, target_file)
return target_file
def __download_parallel(
download_specifications: List[DownloadItem],
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
Script to run download in parallel
Args:
download_specifications: List of Tuple with
- WeatherModel
- local_file_path
- remote_file_path
n_processes: Number of parallel processes used for download
Returns:
None
"""
with ThreadPoolExecutor(max_workers=n_processes) as executor:
results = executor.map(__download, download_specifications)
executor.shutdown(wait=True)
return results
def __download_tar_file(
weather_model: WeatherModel, url: str, local_file_list: List[Path]
) -> None:
"""
Downloads a weather forecast package with one tar archive
Args:
weather_model:
remote_file:
local_file_list:
Returns:
"""
model = WeatherModelSettings(weather_model)
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
return decode_tarfile(response.raw, local_file_list)
| 27.459627 | 97 | 0.680615 |
c696cbe9a74a6a3f3db61104f5e94acb0ded96e3 | 2,195 | py | Python | tests/main.py | Antojitos/guacamole | 50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152 | [
"MIT"
] | 3 | 2015-10-30T13:09:13.000Z | 2021-02-17T19:12:37.000Z | tests/main.py | amessinger/guacamole | 50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152 | [
"MIT"
] | 5 | 2015-10-30T12:53:05.000Z | 2015-12-14T15:20:04.000Z | tests/main.py | Antojitos/guacamole | 50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152 | [
"MIT"
] | 1 | 2015-10-28T08:44:48.000Z | 2015-10-28T08:44:48.000Z | import sys
import os
import shutil
import filecmp
import json
import unittest
# Path hack. http://stackoverflow.com/questions/6323860/sibling-package-imports
sys.path.insert(0, os.path.abspath('../guacamole'))
import guacamole
if __name__ == '__main__':
unittest.main() | 32.279412 | 89 | 0.626424 |
c696f39c84a65ece0fb68103ccf754b71fcc536c | 1,249 | py | Python | scripts/devicereload/reload_cisco_device.py | chrisbalmer/netauto-helper-scripts | 1855085f899fa1cfbf86d6515330e0a2b002ec6a | [
"MIT"
] | null | null | null | scripts/devicereload/reload_cisco_device.py | chrisbalmer/netauto-helper-scripts | 1855085f899fa1cfbf86d6515330e0a2b002ec6a | [
"MIT"
] | null | null | null | scripts/devicereload/reload_cisco_device.py | chrisbalmer/netauto-helper-scripts | 1855085f899fa1cfbf86d6515330e0a2b002ec6a | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
import yaml
import paramiko
import base64
import time
import keychain
import re
import console
console.clear()
# Load options
with open('devices.yaml', 'r') as file:
device_list = yaml.load(file)
hostname = device_list['device1']['host']
public_key_string = device_list['device1']['public_key']
username = device_list['device1']['username']
public_key = paramiko.RSAKey(data=base64.b64decode(public_key_string))
# Prep the SSH connection
ssh = paramiko.SSHClient()
shell = connect_to_device(ssh)
print send_command(shell, 'reload\n')
logout(shell)
print '\n\nComplete!'
console.hud_alert('Complete!',duration=2) | 22.303571 | 70 | 0.72458 |
c697934e43005813bbf25f5936b378004c77b6ac | 324 | py | Python | settings.py | musahibrahimali/flasket-api | d212cb84817dee90e9a53015b2811468a4db75ff | [
"MIT"
] | 7 | 2018-02-23T17:41:04.000Z | 2022-03-09T12:20:56.000Z | settings.py | musahibrahimali/flasket-api | d212cb84817dee90e9a53015b2811468a4db75ff | [
"MIT"
] | null | null | null | settings.py | musahibrahimali/flasket-api | d212cb84817dee90e9a53015b2811468a4db75ff | [
"MIT"
] | 1 | 2021-06-02T17:23:45.000Z | 2021-06-02T17:23:45.000Z | # Flask settings
FLASK_DEBUG = True # Do not use debug mode in production
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# Flask-Restplus settings
SWAGGER_UI_DOC_EXPANSION = 'list'
RESTPLUS_VALIDATE = True
RESTPLUS_MASK_SWAGGER = False
ERROR_404_HELP = False
| 23.142857 | 57 | 0.805556 |
c69cba0e213110d86560b0464617fbc29e061f5e | 1,409 | py | Python | parkings/management/commands/create_init_user.py | PICTEC/pgs | c5e8fd78d411937ce60e733316d4d425410153bc | [
"MIT"
] | 1 | 2021-03-26T05:49:08.000Z | 2021-03-26T05:49:08.000Z | parkings/management/commands/create_init_user.py | PICTEC/PGS | 813721b3bdbaf173d68cb81b3dc0886e542b9a4e | [
"MIT"
] | null | null | null | parkings/management/commands/create_init_user.py | PICTEC/PGS | 813721b3bdbaf173d68cb81b3dc0886e542b9a4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Create superuser and monitoring group
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from parkings.models import Monitor
from parkings.models import EnforcementDomain
MODELS = ['operator', 'parking area', 'parking check', 'parking terminal', 'parking', 'region', 'payment zone']
PERMISSIONS = ['view']
| 35.225 | 111 | 0.698368 |
c69d613e92541912c5d1aa1169340677fbcf4a96 | 5,437 | py | Python | mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py | mlpiper/mlpiper | 0fd2b6773f970c831038db47bf4920ada21a5f51 | [
"Apache-2.0"
] | 7 | 2019-04-08T02:31:55.000Z | 2021-11-15T14:40:49.000Z | mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py | mlpiper/mlpiper | 0fd2b6773f970c831038db47bf4920ada21a5f51 | [
"Apache-2.0"
] | 31 | 2019-02-22T22:23:26.000Z | 2021-08-02T17:17:06.000Z | mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py | mlpiper/mlpiper | 0fd2b6773f970c831038db47bf4920ada21a5f51 | [
"Apache-2.0"
] | 8 | 2019-03-15T23:46:08.000Z | 2020-02-06T09:16:02.000Z | import numpy as np
from parallelm.mlops.mlops_exception import MLOpsStatisticsException
from parallelm.mlops.stats.graph import Graph
from parallelm.mlops.stats.multi_line_graph import MultiLineGraph
from parallelm.mlops.stats.single_value import SingleValue
from parallelm.mlops.stats.table import Table
from parallelm.mlops.stats_category import StatCategory
| 41.823077 | 161 | 0.609343 |
c69e524f9b42fcb4896f83fcc4785aff222562d4 | 303 | py | Python | terrascript/data/davidji99/split.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/data/davidji99/split.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/data/davidji99/split.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/data/davidji99/split.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:27:33 UTC)
import terrascript
__all__ = [
"split_traffic_type",
"split_workspace",
]
| 16.833333 | 73 | 0.745875 |
c69f9f22e1976429e68cc587ef9c41a2baa5fb93 | 85 | py | Python | nginx/apps.py | rockychen-dpaw/it-assets | 92ec23c6a413c5c45bb3d96981d6af68535d225c | [
"Apache-2.0"
] | 4 | 2018-11-16T13:49:49.000Z | 2021-08-19T05:16:50.000Z | nginx/apps.py | rockychen-dpaw/it-assets | 92ec23c6a413c5c45bb3d96981d6af68535d225c | [
"Apache-2.0"
] | 10 | 2018-07-06T09:34:56.000Z | 2022-01-28T06:09:05.000Z | nginx/apps.py | rockychen-dpaw/it-assets | 92ec23c6a413c5c45bb3d96981d6af68535d225c | [
"Apache-2.0"
] | 9 | 2018-05-05T23:29:10.000Z | 2020-06-26T02:29:17.000Z | from django.apps import AppConfig
| 14.166667 | 33 | 0.741176 |
c69faa343bb44ce7636a902fb8fc9cfe5f9f2c0d | 3,511 | py | Python | tools/telemetry/telemetry/unittest/output_formatter.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2019-01-16T03:57:39.000Z | 2019-01-16T03:57:39.000Z | tools/telemetry/telemetry/unittest/output_formatter.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | tools/telemetry/telemetry/unittest/output_formatter.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.unittest import options_for_unittests
| 26.801527 | 80 | 0.727428 |
c69fe4b03acf538832512321d83a32c7f8cc326f | 480 | py | Python | awsflow/lambdas/demo.py | algorithmia-algorithms/awsflow | 927698c27e57377dbe8094c71d5b0c36548b0937 | [
"MIT"
] | 12 | 2019-04-06T14:59:29.000Z | 2020-04-14T21:02:23.000Z | awsflow/lambdas/demo.py | vaquarkhan/awsflow | 59f9001972aec2bac60a97d174b97f96689360ce | [
"MIT"
] | null | null | null | awsflow/lambdas/demo.py | vaquarkhan/awsflow | 59f9001972aec2bac60a97d174b97f96689360ce | [
"MIT"
] | 3 | 2019-07-30T17:11:14.000Z | 2020-02-17T20:39:25.000Z | from awsflow.tools.emr import logging
from awsflow.version import __version__
def hello_world(event, context):
"""
Test function, does nothing
:param event: AWS lambdas function event
:param context: AWS lambdas function context
:return:
"""
message = 'event={} context={}'.format(event, context)
logging.info('Hello World! Message is {}'.format(message))
return {
'parameters': message,
'awsflow-version': __version__
}
| 25.263158 | 62 | 0.666667 |
c6a0b2e6f13cc83e001ace2dc43eeb51890ba31f | 1,074 | py | Python | weather/tools.py | yulinliu101/DeepTP | bc4f9adad6dda6c32e58026dda7863e0cb2a6072 | [
"MIT"
] | 46 | 2018-09-23T02:08:02.000Z | 2022-03-19T15:56:15.000Z | weather/tools.py | yulinliu101/DeepTP | bc4f9adad6dda6c32e58026dda7863e0cb2a6072 | [
"MIT"
] | 6 | 2018-12-02T09:04:56.000Z | 2021-09-30T12:14:53.000Z | weather/tools.py | yulinliu101/DeepTP | bc4f9adad6dda6c32e58026dda7863e0cb2a6072 | [
"MIT"
] | 27 | 2018-11-19T18:17:07.000Z | 2021-08-28T17:07:11.000Z | '''
Module's author : Jarry Gabriel
Date : June, July 2016
Some Algorithms was made by : Malivai Luce, Helene Piquet
This module handle different tools
'''
from pyproj import Proj, Geod
import numpy as np
# Projections
wgs84=Proj("+init=EPSG:4326")
epsg3857=Proj("+init=EPSG:3857")
g=Geod(ellps='WGS84')
# Returns pressure from altitude (ft)
# Returns the closest lvl from levels with altitude (atl)
# def proxy(val, lvl1, lvl2):
# if (abs(val - lvl1) < abs(val - lvl2)):
# return lvl1
# else:
# return lvl2
# p = press(alt)
# levels = sorted(lvls.keys())
# if p < levels[0]:
# return levels[0]
# else:
# for i, el in enumerate(levels[1:]):
# if p < el:
# return proxy(p, levels[i-1], el)
# return levels[-1] | 25.571429 | 58 | 0.57635 |
c6a0ccd39f3cb516016d54f1a50913914e43bf5d | 1,315 | py | Python | src/database/report.py | moevm/nosql1h19-report-stats | ab1dc80858df2d8b44489dc7ca900371b1fcc80f | [
"MIT"
] | null | null | null | src/database/report.py | moevm/nosql1h19-report-stats | ab1dc80858df2d8b44489dc7ca900371b1fcc80f | [
"MIT"
] | null | null | null | src/database/report.py | moevm/nosql1h19-report-stats | ab1dc80858df2d8b44489dc7ca900371b1fcc80f | [
"MIT"
] | null | null | null | from docx import Document
| 32.073171 | 75 | 0.579468 |
c6a371ecbe5a163fba368a97852b226ecc2b76c6 | 19,724 | py | Python | transmission/PFM_v24.py | zarppy/MUREIL_2014 | 25ba16554ce8f614b9337e0fffce75da3fa259a4 | [
"MIT"
] | null | null | null | transmission/PFM_v24.py | zarppy/MUREIL_2014 | 25ba16554ce8f614b9337e0fffce75da3fa259a4 | [
"MIT"
] | null | null | null | transmission/PFM_v24.py | zarppy/MUREIL_2014 | 25ba16554ce8f614b9337e0fffce75da3fa259a4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import math
| 45.657407 | 97 | 0.523575 |
c6a5691106c675b51a0898624e8d7f4af7a6316d | 11,893 | py | Python | ecl/tests/unit/compute/v2/test_server.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | 5 | 2017-04-07T06:23:04.000Z | 2019-11-19T00:52:34.000Z | ecl/tests/unit/compute/v2/test_server.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | 16 | 2018-09-12T11:14:40.000Z | 2021-04-19T09:02:44.000Z | ecl/tests/unit/compute/v2/test_server.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | 14 | 2017-05-11T14:26:26.000Z | 2021-07-14T14:00:06.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from ecl.compute.v2 import server
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'accessIPv4': '1',
'accessIPv6': '2',
'addresses': {'region': '3'},
'created': '2015-03-09T12:14:57.233772',
'flavorRef': '5',
'flavor': {'id': 'FLAVOR_ID', 'links': {}},
'hostId': '6',
'id': IDENTIFIER,
'imageRef': '8',
'image': {'id': 'IMAGE_ID', 'links': {}},
'links': '9',
'metadata': {'key': '10'},
'name': '11',
'progress': 12,
'tenant_id': '13',
'status': '14',
'updated': '2015-03-09T12:15:57.233772',
'user_id': '16',
'key_name': '17',
'OS-DCF:diskConfig': '18',
'OS-EXT-AZ:availability_zone': '19',
'OS-EXT-STS:power_state': '20',
'OS-EXT-STS:task_state': '21',
'OS-EXT-STS:vm_state': '22',
'os-extended-volumes:volumes_attached': '23',
'OS-SRV-USG:launched_at': '2015-03-09T12:15:57.233772',
'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772',
'security_groups': '26',
'adminPass': '27',
'personality': '28',
'block_device_mapping_v2': {'key': '29'},
'os:scheduler_hints': {'key': '30'},
'user_data': '31'
}
| 38.739414 | 79 | 0.600858 |
c6a5791901b1fc6361134fdaba0ad7eda0768c85 | 1,577 | py | Python | packages/diana/diana/connect/utils/orth_fiq.py | derekmerck/diana-star | 78aa7badb27677a1f5c83d744852f659e2541567 | [
"MIT"
] | null | null | null | packages/diana/diana/connect/utils/orth_fiq.py | derekmerck/diana-star | 78aa7badb27677a1f5c83d744852f659e2541567 | [
"MIT"
] | null | null | null | packages/diana/diana/connect/utils/orth_fiq.py | derekmerck/diana-star | 78aa7badb27677a1f5c83d744852f659e2541567 | [
"MIT"
] | null | null | null | # import logging
# from pprint import pformat
from diana.utils.dicom import DicomLevel
def find_item_query(item):
"""
Have some information about the dixel, want to find the STUID, SERUID, INSTUID
Returns a _list_ of dictionaries with matches, retrieves any if "retrieve" flag
"""
q = {}
keys = {}
# All levels have these
keys[DicomLevel.STUDIES] = ['PatientID',
'PatientName',
'PatientBirthDate',
'PatientSex',
'StudyInstanceUID',
'StudyDate',
'StudyTime',
'AccessionNumber']
# Series level has these
keys[DicomLevel.SERIES] = keys[DicomLevel.STUDIES] + \
['SeriesInstanceUID',
'SeriesDescription',
'ProtocolName',
'SeriesNumber',
'NumberOfSeriesRelatedInstances',
'Modality']
# For instance level, use the minimum
keys[DicomLevel.INSTANCES] = ['SOPInstanceUID', 'SeriesInstanceUID']
for k in keys[item.level]:
q = add_key(q, k, item)
if item.level == DicomLevel.STUDIES and item.meta.get('Modality'):
q['ModalitiesInStudy'] = item.meta.get('Modality')
# logging.debug(pformat(q))
query = {'Level': str(item.level),
'Query': q}
return query | 30.326923 | 83 | 0.521877 |
c6a59cf0b7d0aebf7e2f62e142e7553ec2e18c60 | 32,332 | py | Python | IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import os
import unittest
import numpy as np
from obspy import Trace, UTCDateTime, read
from obspy.io.ascii.core import (_determine_dtype, _is_slist, _is_tspair,
_read_slist, _read_tspair, _write_slist,
_write_tspair)
from obspy.core.util import NamedTemporaryFile
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 45.346424 | 79 | 0.578034 |
c6ac1de06c12088cfd7b5e0c3570e7c36efacf0e | 68 | py | Python | libMap/__init__.py | ChrisSJard/PythonGUI-WinApplication1 | 54f658e7d345a63d09bff683a635d01d57856e6e | [
"Apache-2.0"
] | null | null | null | libMap/__init__.py | ChrisSJard/PythonGUI-WinApplication1 | 54f658e7d345a63d09bff683a635d01d57856e6e | [
"Apache-2.0"
] | null | null | null | libMap/__init__.py | ChrisSJard/PythonGUI-WinApplication1 | 54f658e7d345a63d09bff683a635d01d57856e6e | [
"Apache-2.0"
] | null | null | null | '''
MAP v-SCREEN gargle test - Shimadzu 8020 H
Date: 30/11/2020
''' | 17 | 43 | 0.661765 |
c6ac4f68c9c3e35b48eadd2793c372b95b8f9ebd | 1,793 | py | Python | zinnia/converters.py | emencia/django-blog-xinnia | ab19b55477ce7003b6f0712f8bd12af3501c4829 | [
"BSD-3-Clause"
] | null | null | null | zinnia/converters.py | emencia/django-blog-xinnia | ab19b55477ce7003b6f0712f8bd12af3501c4829 | [
"BSD-3-Clause"
] | null | null | null | zinnia/converters.py | emencia/django-blog-xinnia | ab19b55477ce7003b6f0712f8bd12af3501c4829 | [
"BSD-3-Clause"
] | 1 | 2021-06-17T14:02:21.000Z | 2021-06-17T14:02:21.000Z | """URL converters for the Zinnia project"""
| 20.146067 | 78 | 0.611824 |
c6acd732e85ef3e6872505baf917d917ef7c0ec1 | 8,045 | py | Python | nisse/routes/slack/command_handlers/report_command_handler.py | nexocodecom/nisse.io | 58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb | [
"MIT"
] | null | null | null | nisse/routes/slack/command_handlers/report_command_handler.py | nexocodecom/nisse.io | 58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb | [
"MIT"
] | 42 | 2018-07-20T14:15:48.000Z | 2019-09-26T05:44:21.000Z | nisse/routes/slack/command_handlers/report_command_handler.py | nexocodecom/nisse.io | 58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb | [
"MIT"
] | null | null | null | import logging
import os
import uuid
from typing import List
from flask import current_app
from flask.config import Config
from flask_injector import inject
from slackclient import SlackClient
from werkzeug.utils import secure_filename
from nisse.models.DTO import PrintParametersDto
from nisse.models.slack.common import ActionType
from nisse.models.slack.common import LabelSelectOption
from nisse.models.slack.dialog import Element, Dialog
from nisse.models.slack.message import Attachment, Message, Action, TextSelectOption
from nisse.models.slack.payload import ReportGenerateFormPayload
from nisse.routes.slack.command_handlers.slack_command_handler import SlackCommandHandler
from nisse.services.project_service import ProjectService
from nisse.services.reminder_service import ReminderService
from nisse.services.report_service import ReportService
from nisse.services.user_service import UserService
from nisse.services.xlsx_document_service import XlsxDocumentService
from nisse.utils import string_helper
from nisse.utils.date_helper import TimeRanges
from nisse.utils.date_helper import get_start_end_date
from nisse.utils.validation_helper import list_find
| 43.252688 | 121 | 0.640895 |
c6acd7e0d4951d5c3034a6f821df7b9a82c0e2f9 | 369 | py | Python | days/day01/part1.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | days/day01/part1.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | days/day01/part1.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | from helpers import inputs
| 28.384615 | 63 | 0.588076 |
c6ad38af41dda6b3a428b74b7d6a179478b67cda | 583 | py | Python | packages/syft/src/syft/core/node/common/node_table/setup.py | callezenwaka/PySyft | 2545c302441cfe727ec095c4f9aa136bff02be32 | [
"Apache-1.1"
] | 2 | 2022-02-18T03:48:27.000Z | 2022-03-05T06:13:57.000Z | packages/syft/src/syft/core/node/common/node_table/setup.py | callezenwaka/PySyft | 2545c302441cfe727ec095c4f9aa136bff02be32 | [
"Apache-1.1"
] | 3 | 2021-11-17T15:34:03.000Z | 2021-12-08T14:39:10.000Z | packages/syft/src/syft/core/node/common/node_table/setup.py | callezenwaka/PySyft | 2545c302441cfe727ec095c4f9aa136bff02be32 | [
"Apache-1.1"
] | 1 | 2021-08-19T12:23:01.000Z | 2021-08-19T12:23:01.000Z | # third party
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
# relative
from . import Base
| 25.347826 | 73 | 0.716981 |
c6afea6ff7fcfa0cc419b40bc7e78312c3c4768e | 1,983 | py | Python | third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from .composition_parts import Component
from .composition_parts import Identifier
from .make_copy import make_copy
| 34.189655 | 73 | 0.616742 |
c6b2844ec8d83bfcefb1163893e3fea8102bf2bc | 1,554 | py | Python | 04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py | python-demo-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 2 | 2019-08-23T06:05:55.000Z | 2019-08-26T03:56:07.000Z | 04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | null | null | null | 04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py | python-lang-codes/basics | 2a151bbff4b528cefd52978829c632fd087c8f20 | [
"DOC"
] | 4 | 2020-10-01T07:16:07.000Z | 2021-07-17T07:55:08.000Z | # HEAD
# Classes - Magic Methods - Normal Numeric Magic Methods
# DESCRIPTION
# Describes the magic methods of classes
# add, sub, mul, floordiv, div, truediv, mod,
# divmod, pow, lshift, rshift, and, or, xor
# RESOURCES
#
# https://rszalski.github.io/magicmethods/
# Normal arithmetic operators
# Now, we cover the typical binary operators (and a function or two): +, -, * and the like. These are, for the most part, pretty self-explanatory.
# __add__(self, other)
# Implements addition.
# __sub__(self, other)
# Implements subtraction.
# __mul__(self, other)
# Implements multiplication.
# __floordiv__(self, other)
# Implements integer division using the // operator.
# __div__(self, other)
# Implements division using the / operator.
# __truediv__(self, other)
# Implements true division. Note that this only works when from __future__ import division is in effect.
# __mod__(self, other)
# Implements modulo using the % operator.
# __divmod__(self, other)
# Implements behavior for long division using the divmod() built in function.
# __pow__
# Implements behavior for exponents using the ** operator.
# __lshift__(self, other)
# Implements left bitwise shift using the << operator.
# __rshift__(self, other)
# Implements right bitwise shift using the >> operator.
# __and__(self, other)
# Implements bitwise and using the & operator.
# __or__(self, other)
# Implements bitwise or using the | operator.
# __xor__(self, other)
# Implements bitwise xor using the ^ operator.
| 34.533333 | 146 | 0.714286 |
c6b2c3233e24382da55f5267e87bb737b994481e | 11,384 | py | Python | abcdeep/argsutils.py | Conchylicultor/AbcDeep | 6fcfc03a1a516ccd760201bb004098e6f6fe0e7e | [
"Apache-2.0"
] | 1 | 2017-09-10T14:13:39.000Z | 2017-09-10T14:13:39.000Z | abcdeep/argsutils.py | Conchylicultor/AbcDeep | 6fcfc03a1a516ccd760201bb004098e6f6fe0e7e | [
"Apache-2.0"
] | null | null | null | abcdeep/argsutils.py | Conchylicultor/AbcDeep | 6fcfc03a1a516ccd760201bb004098e6f6fe0e7e | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Extention of argparse to automatically save/restore command line arguments
The other classes can easily add new arguments to the default ones
"""
import sys
import ast # For literal eval
import collections
import argparse
import abcdeep.otherutils as otherutils
from abcdeep.otherutils import cprint, TermMsg
# TODO: Create new class allowing to add subprograms (with .add_subparsers). Should
# allow to define a default subparser to use if no subprogram is defined. This
# can be done using the dest argument of add_subparsers and test it after the
# parse_args call (pb is that default help won't be printed)
| 45.903226 | 236 | 0.634048 |
c6b65a93ba4bfc063204ecefff708893dd868984 | 265 | py | Python | listas/gabarito/lista4CT/exe-1.py | yujinishioka/computacional-thinking-python | 38abfc00d94c45cc5a7d4303e57cb8f0cab4272a | [
"MIT"
] | 1 | 2022-03-08T21:54:49.000Z | 2022-03-08T21:54:49.000Z | listas/gabarito/lista4CT/exe-1.py | yujinishioka/computacional-thinking-python | 38abfc00d94c45cc5a7d4303e57cb8f0cab4272a | [
"MIT"
] | null | null | null | listas/gabarito/lista4CT/exe-1.py | yujinishioka/computacional-thinking-python | 38abfc00d94c45cc5a7d4303e57cb8f0cab4272a | [
"MIT"
] | null | null | null | soma = 0
print("0 para parar")
numero = int(input("Digite numero: "))
while numero != 0:
if numero %2 == 0:
soma += numero
if numero == 0:
break
print("0 para parar")
numero = int(input("Digite numero: "))
print("O total ", soma) | 18.928571 | 42 | 0.562264 |
c6b714242593972ca83e47dd6c36c7d8b16188e4 | 41,780 | py | Python | venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py | choonho/plugin-prometheus-mon-webhook | afa7d65d12715fd0480fb4f92a9c62da2d6128e0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/power_scheduler/v1/schedule_rule.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/power_scheduler/v1/schedule_rule.proto',
package='spaceone.api.power_scheduler.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3spaceone/api/power_scheduler/v1/schedule_rule.proto\x12\x1fspaceone.api.power_scheduler.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"\xd1\x02\n\x19\x43reateScheduleRuleRequest\x12\x13\n\x0bschedule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12<\n\trule_type\x18\x04 \x01(\x0e\x32).spaceone.api.power_scheduler.v1.RuleType\x12\x33\n\x04rule\x18\x05 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12\x10\n\x08priority\x18\x06 \x01(\x05\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07user_id\x18\x17 \x01(\t\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"\xf5\x01\n\x19UpdateScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12\x33\n\x04rule\x18\x04 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"B\n\x13ScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"S\n\x16GetScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"\xf8\x01\n\x11ScheduleRuleQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x18\n\x10schedule_rule_id\x18\x02 \x01(\t\x12\x13\n\x0bschedule_id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08priority\x18\x05 \x01(\x05\x12\x41\n\x05state\x18\x06 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x11\n\tdomain_id\x18\t \x01(\t\">\n\x04Rule\x12\r\n\x03\x64\x61y\x18\x01 \x01(\tH\x00\x12\x0e\n\x04\x64\x61te\x18\x02 
\x01(\tH\x00\x12\r\n\x05times\x18\x03 \x03(\x05\x42\x08\n\x06\x66ormat\"\x85\x03\n\x08RuleInfo\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12<\n\trule_type\x18\x04 \x01(\x0e\x32).spaceone.api.power_scheduler.v1.RuleType\x12\x33\n\x04rule\x18\x05 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12\x10\n\x08priority\x18\x06 \x01(\x05\x12\x13\n\x0bschedule_id\x18\x07 \x01(\t\x12%\n\x04tags\x18\x0c \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nproject_id\x18\x15 \x01(\t\x12\x11\n\tdomain_id\x18\x16 \x01(\t\x12\x12\n\ncreated_by\x18\x17 \x01(\t\x12\x12\n\ncreated_at\x18\x1f \x01(\t\"\\\n\tRulesInfo\x12:\n\x07results\x18\x01 \x03(\x0b\x32).spaceone.api.power_scheduler.v1.RuleInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"`\n\x15ScheduleRuleStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 
\x01(\t*B\n\x11ScheduleRuleState\x12\x13\n\x0fRULE_STATE_NONE\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\x0b\n\x07STOPPED\x10\x02*7\n\x08RuleType\x12\x12\n\x0eRULE_TYPE_NONE\x10\x00\x12\x0b\n\x07ROUTINE\x10\x01\x12\n\n\x06TICKET\x10\x02\x32\xea\x07\n\x0cScheduleRule\x12\x9b\x01\n\x06\x63reate\x12:.spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"*\x82\xd3\xe4\x93\x02$\"\"/power-scheduler/v1/schedule-rules\x12\xad\x01\n\x06update\x12:.spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"<\x82\xd3\xe4\x93\x02\x36\x1a\x34/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\x94\x01\n\x06\x64\x65lete\x12\x34.spaceone.api.power_scheduler.v1.ScheduleRuleRequest\x1a\x16.google.protobuf.Empty\"<\x82\xd3\xe4\x93\x02\x36*4/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\xa7\x01\n\x03get\x12\x37.spaceone.api.power_scheduler.v1.GetScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"<\x82\xd3\xe4\x93\x02\x36\x12\x34/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\xbf\x01\n\x04list\x12\x32.spaceone.api.power_scheduler.v1.ScheduleRuleQuery\x1a*.spaceone.api.power_scheduler.v1.RulesInfo\"W\x82\xd3\xe4\x93\x02Q\x12\"/power-scheduler/v1/schedule-rulesZ+\")/power-scheduler/v1/schedule-rules/search\x12\x88\x01\n\x04stat\x12\x36.spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery\x1a\x17.google.protobuf.Struct\"/\x82\xd3\xe4\x93\x02)\"\'/power-scheduler/v1/schedule-rules/statb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
_SCHEDULERULESTATE = _descriptor.EnumDescriptor(
name='ScheduleRuleState',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RULE_STATE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1851,
serialized_end=1917,
)
_sym_db.RegisterEnumDescriptor(_SCHEDULERULESTATE)
ScheduleRuleState = enum_type_wrapper.EnumTypeWrapper(_SCHEDULERULESTATE)
_RULETYPE = _descriptor.EnumDescriptor(
name='RuleType',
full_name='spaceone.api.power_scheduler.v1.RuleType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RULE_TYPE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ROUTINE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TICKET', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1919,
serialized_end=1974,
)
_sym_db.RegisterEnumDescriptor(_RULETYPE)
RuleType = enum_type_wrapper.EnumTypeWrapper(_RULETYPE)
RULE_STATE_NONE = 0
RUNNING = 1
STOPPED = 2
RULE_TYPE_NONE = 0
ROUTINE = 1
TICKET = 2
_CREATESCHEDULERULEREQUEST = _descriptor.Descriptor(
name='CreateScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.schedule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_type', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.rule_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.rule', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.priority', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.tags', index=6,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.user_id', index=7,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.domain_id', index=8,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=549,
)
_UPDATESCHEDULERULEREQUEST = _descriptor.Descriptor(
name='UpdateScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.rule', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.tags', index=4,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.domain_id', index=5,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=797,
)
_SCHEDULERULEREQUEST = _descriptor.Descriptor(
name='ScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=799,
serialized_end=865,
)
_GETSCHEDULERULEREQUEST = _descriptor.Descriptor(
name='GetScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=867,
serialized_end=950,
)
_SCHEDULERULEQUERY = _descriptor.Descriptor(
name='ScheduleRuleQuery',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.schedule_rule_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.schedule_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.priority', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.state', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.project_id', index=6,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.domain_id', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=953,
serialized_end=1201,
)
_RULE = _descriptor.Descriptor(
name='Rule',
full_name='spaceone.api.power_scheduler.v1.Rule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='day', full_name='spaceone.api.power_scheduler.v1.Rule.day', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='date', full_name='spaceone.api.power_scheduler.v1.Rule.date', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='times', full_name='spaceone.api.power_scheduler.v1.Rule.times', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='format', full_name='spaceone.api.power_scheduler.v1.Rule.format',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1203,
serialized_end=1265,
)
_RULEINFO = _descriptor.Descriptor(
name='RuleInfo',
full_name='spaceone.api.power_scheduler.v1.RuleInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.RuleInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.RuleInfo.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_type', full_name='spaceone.api.power_scheduler.v1.RuleInfo.rule_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.RuleInfo.rule', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.RuleInfo.priority', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.schedule_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.RuleInfo.tags', index=7,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.project_id', index=8,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.domain_id', index=9,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_by', full_name='spaceone.api.power_scheduler.v1.RuleInfo.created_by', index=10,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.power_scheduler.v1.RuleInfo.created_at', index=11,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1268,
serialized_end=1657,
)
# Auto-generated protobuf descriptor for the `RulesInfo` message: a list
# response holding repeated `results` (label=3 = repeated, type=11 = message;
# patched to _RULEINFO further below) and an int32 `total_count` (type=5).
# Emitted by protoc from schedule_rule.proto — do not edit by hand.
_RULESINFO = _descriptor.Descriptor(
  name='RulesInfo',
  full_name='spaceone.api.power_scheduler.v1.RulesInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='results', full_name='spaceone.api.power_scheduler.v1.RulesInfo.results', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='total_count', full_name='spaceone.api.power_scheduler.v1.RulesInfo.total_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside the serialized FileDescriptorProto.
  serialized_start=1659,
  serialized_end=1751,
)
# Auto-generated protobuf descriptor for `ScheduleRuleStatQuery`: a statistics
# request carrying a `query` message (patched to _STATISTICSQUERY below) and a
# string `domain_id` (type=9 = string). Emitted by protoc — do not edit by hand.
_SCHEDULERULESTATQUERY = _descriptor.Descriptor(
  name='ScheduleRuleStatQuery',
  full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='query', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery.query', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery.domain_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1753,
  serialized_end=1849,
)
# --- Late binding of cross-references (generated code) ----------------------
# protoc constructs the field descriptors first and only afterwards patches in
# the enum/message types they refer to, because those objects may not exist
# yet while the descriptors above are being built.
_CREATESCHEDULERULEREQUEST.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_CREATESCHEDULERULEREQUEST.fields_by_name['rule_type'].enum_type = _RULETYPE
_CREATESCHEDULERULEREQUEST.fields_by_name['rule'].message_type = _RULE
_CREATESCHEDULERULEREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATESCHEDULERULEREQUEST.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_UPDATESCHEDULERULEREQUEST.fields_by_name['rule'].message_type = _RULE
_UPDATESCHEDULERULEREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_SCHEDULERULEQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_SCHEDULERULEQUERY.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
# Rule.day and Rule.date are mutually exclusive members of the `format` oneof.
_RULE.oneofs_by_name['format'].fields.append(
  _RULE.fields_by_name['day'])
_RULE.fields_by_name['day'].containing_oneof = _RULE.oneofs_by_name['format']
_RULE.oneofs_by_name['format'].fields.append(
  _RULE.fields_by_name['date'])
_RULE.fields_by_name['date'].containing_oneof = _RULE.oneofs_by_name['format']
_RULEINFO.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_RULEINFO.fields_by_name['rule_type'].enum_type = _RULETYPE
_RULEINFO.fields_by_name['rule'].message_type = _RULE
_RULEINFO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_RULESINFO.fields_by_name['results'].message_type = _RULEINFO
_SCHEDULERULESTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
# Register every top-level message/enum on the file descriptor, then publish
# the file descriptor in the default symbol database.
DESCRIPTOR.message_types_by_name['CreateScheduleRuleRequest'] = _CREATESCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['UpdateScheduleRuleRequest'] = _UPDATESCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['ScheduleRuleRequest'] = _SCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['GetScheduleRuleRequest'] = _GETSCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['ScheduleRuleQuery'] = _SCHEDULERULEQUERY
DESCRIPTOR.message_types_by_name['Rule'] = _RULE
DESCRIPTOR.message_types_by_name['RuleInfo'] = _RULEINFO
DESCRIPTOR.message_types_by_name['RulesInfo'] = _RULESINFO
DESCRIPTOR.message_types_by_name['ScheduleRuleStatQuery'] = _SCHEDULERULESTATQUERY
DESCRIPTOR.enum_types_by_name['ScheduleRuleState'] = _SCHEDULERULESTATE
DESCRIPTOR.enum_types_by_name['RuleType'] = _RULETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes (generated code) -------------------------------
# GeneratedProtocolMessageType turns each descriptor into a real Python class
# deriving from _message.Message; RegisterMessage makes the class resolvable
# through the default symbol database.
CreateScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('CreateScheduleRuleRequest', (_message.Message,), {
  'DESCRIPTOR' : _CREATESCHEDULERULEREQUEST,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest)
  })
_sym_db.RegisterMessage(CreateScheduleRuleRequest)
UpdateScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('UpdateScheduleRuleRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATESCHEDULERULEREQUEST,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest)
  })
_sym_db.RegisterMessage(UpdateScheduleRuleRequest)
ScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('ScheduleRuleRequest', (_message.Message,), {
  'DESCRIPTOR' : _SCHEDULERULEREQUEST,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleRequest)
  })
_sym_db.RegisterMessage(ScheduleRuleRequest)
GetScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('GetScheduleRuleRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETSCHEDULERULEREQUEST,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.GetScheduleRuleRequest)
  })
_sym_db.RegisterMessage(GetScheduleRuleRequest)
ScheduleRuleQuery = _reflection.GeneratedProtocolMessageType('ScheduleRuleQuery', (_message.Message,), {
  'DESCRIPTOR' : _SCHEDULERULEQUERY,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleQuery)
  })
_sym_db.RegisterMessage(ScheduleRuleQuery)
Rule = _reflection.GeneratedProtocolMessageType('Rule', (_message.Message,), {
  'DESCRIPTOR' : _RULE,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.Rule)
  })
_sym_db.RegisterMessage(Rule)
RuleInfo = _reflection.GeneratedProtocolMessageType('RuleInfo', (_message.Message,), {
  'DESCRIPTOR' : _RULEINFO,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.RuleInfo)
  })
_sym_db.RegisterMessage(RuleInfo)
RulesInfo = _reflection.GeneratedProtocolMessageType('RulesInfo', (_message.Message,), {
  'DESCRIPTOR' : _RULESINFO,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.RulesInfo)
  })
_sym_db.RegisterMessage(RulesInfo)
ScheduleRuleStatQuery = _reflection.GeneratedProtocolMessageType('ScheduleRuleStatQuery', (_message.Message,), {
  'DESCRIPTOR' : _SCHEDULERULESTATQUERY,
  '__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
  # @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery)
  })
_sym_db.RegisterMessage(ScheduleRuleStatQuery)
# --- gRPC service descriptor (generated code) --------------------------------
# Describes the six RPCs of the ScheduleRule service
# (create / update / delete / get / list / stat). The serialized_options bytes
# are the wire-encoded google.api.http annotations that map each RPC onto a
# REST route — opaque generated data; do not edit by hand.
_SCHEDULERULE = _descriptor.ServiceDescriptor(
  name='ScheduleRule',
  full_name='spaceone.api.power_scheduler.v1.ScheduleRule',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1977,
  serialized_end=2979,
  methods=[
  _descriptor.MethodDescriptor(
    name='create',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.create',
    index=0,
    containing_service=None,
    input_type=_CREATESCHEDULERULEREQUEST,
    output_type=_RULEINFO,
    serialized_options=b'\202\323\344\223\002$\"\"/power-scheduler/v1/schedule-rules',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='update',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.update',
    index=1,
    containing_service=None,
    input_type=_UPDATESCHEDULERULEREQUEST,
    output_type=_RULEINFO,
    serialized_options=b'\202\323\344\223\0026\0324/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='delete',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.delete',
    index=2,
    containing_service=None,
    input_type=_SCHEDULERULEREQUEST,
    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
    serialized_options=b'\202\323\344\223\0026*4/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='get',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.get',
    index=3,
    containing_service=None,
    input_type=_GETSCHEDULERULEREQUEST,
    output_type=_RULEINFO,
    serialized_options=b'\202\323\344\223\0026\0224/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='list',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.list',
    index=4,
    containing_service=None,
    input_type=_SCHEDULERULEQUERY,
    output_type=_RULESINFO,
    serialized_options=b'\202\323\344\223\002Q\022\"/power-scheduler/v1/schedule-rulesZ+\")/power-scheduler/v1/schedule-rules/search',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='stat',
    full_name='spaceone.api.power_scheduler.v1.ScheduleRule.stat',
    index=5,
    containing_service=None,
    input_type=_SCHEDULERULESTATQUERY,
    output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
    serialized_options=b'\202\323\344\223\002)\"\'/power-scheduler/v1/schedule-rules/stat',
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_SCHEDULERULE)
# Expose the service on the file descriptor, mirroring the message registry above.
DESCRIPTOR.services_by_name['ScheduleRule'] = _SCHEDULERULE
# @@protoc_insertion_point(module_scope)
| 50.035928 | 4,517 | 0.769196 |
c6b71b4c3e09976fe7726eb682b74cdf5af82966 | 990 | py | Python | django_boost/admin/sites.py | toshiki-tosshi/django-boost | 2431b743af2d976571d491ae232a5cb03c760b7e | [
"MIT"
] | null | null | null | django_boost/admin/sites.py | toshiki-tosshi/django-boost | 2431b743af2d976571d491ae232a5cb03c760b7e | [
"MIT"
] | null | null | null | django_boost/admin/sites.py | toshiki-tosshi/django-boost | 2431b743af2d976571d491ae232a5cb03c760b7e | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.db.models import Model
__all__ = ["register_all"]
def register_all(models, admin_class=admin.ModelAdmin):
    """
    Easily register Models to Django admin site.
    ::
        from yourapp import models
        from django_boost.admin.sites import register_all
        register_all(models)
    Register all models defined in `models.py` in Django admin site.
    Custom admin classes are also available.
    ::
        from your_app import models
        from your_app import admin
        from django_boost.admin.sites import register_all
        register_all(models, admin_class=admin.CustomAdmin)
    """
    for attr_name in dir(models):
        candidate = getattr(models, attr_name, None)
        # Only concrete Django model classes are eligible for registration.
        if not isinstance(candidate, type):
            continue
        if not issubclass(candidate, Model) or candidate._meta.abstract:
            continue
        try:
            admin.site.register(candidate, admin_class)
        except admin.sites.AlreadyRegistered:
            # The project may have registered this model itself; leave it as-is.
            pass
| 26.052632 | 70 | 0.651515 |
c6b79f701bcc0df19eeeaf217d68d4ce14a63d1a | 251 | py | Python | bot.py | White-ZacK/HLavalink | 917a2a5abf3df2b2fbdff93709b9eb9e47c033aa | [
"MIT"
] | null | null | null | bot.py | White-ZacK/HLavalink | 917a2a5abf3df2b2fbdff93709b9eb9e47c033aa | [
"MIT"
] | null | null | null | bot.py | White-ZacK/HLavalink | 917a2a5abf3df2b2fbdff93709b9eb9e47c033aa | [
"MIT"
] | null | null | null | import discord
import os
from discord.ext import commands

# Command-style Discord bot: commands are invoked with a leading ">".
bot = commands.Bot(command_prefix=">")

# The bot token is read from the environment so it never lives in source
# control. os.environ.get returns None when TOKEN is unset, in which case
# bot.run(None) will fail at login time.
TOKEN = os.environ.get('TOKEN')

bot.run(TOKEN)  # blocking: starts the event loop and connects to Discord
| 17.928571 | 38 | 0.7251 |
c6b7b42e398b5ad8a87b392745a1b79c63f44e1e | 1,612 | py | Python | sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | from pathlib import Path
import pytest
import yaml
from azure.ai.ml._schema._deployment.batch.batch_deployment import BatchDeploymentSchema
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, BatchDeploymentOutputAction
from azure.ai.ml.entities._util import load_from_dict
from azure.ai.ml.entities import BatchDeployment
def load_batch_deployment_entity_from_yaml(path: str, context=None) -> BatchDeployment:
    """Load a batch deployment YAML file into a BatchDeployment entity.

    Args:
        path: Filesystem path of the deployment YAML document.
        context: Optional schema context dict. The YAML file's parent directory
            is injected under BASE_PATH_CONTEXT_KEY before deserialization.
            BUG FIX: the original default was ``context={}`` — a single shared
            dict that this function mutates via ``context.update``, so state
            leaked between calls that relied on the default. A fresh dict is
            now created per call when no context is supplied.

    Returns:
        BatchDeployment: entity deserialized through BatchDeploymentSchema.
    """
    if context is None:
        context = {}
    with open(path, "r") as f:
        cfg = yaml.safe_load(f)
    context.update({BASE_PATH_CONTEXT_KEY: Path(path).parent})
    deployment = load_from_dict(BatchDeploymentSchema, cfg, context)
    return deployment
| 46.057143 | 99 | 0.782258 |
c6b8d428a66aa6d3e2e0df39f78679dd2657686d | 105 | py | Python | Programs/functions/returnFunction.py | LuciKritZ/python | ed5500f5aad3cb15354ca5ebf71748029fc6ae77 | [
"MIT"
] | null | null | null | Programs/functions/returnFunction.py | LuciKritZ/python | ed5500f5aad3cb15354ca5ebf71748029fc6ae77 | [
"MIT"
] | null | null | null | Programs/functions/returnFunction.py | LuciKritZ/python | ed5500f5aad3cb15354ca5ebf71748029fc6ae77 | [
"MIT"
] | null | null | null |
fun = display()  # NOTE(review): `display` is not defined in this fragment — presumably a function factory returning a callable, defined above in the full file; confirm before running.
print(fun()) | 15 | 22 | 0.590476 |
c6badd66c9c53436c0cfcf31174d258e7727a76d | 795 | py | Python | test.py | Roulbac/GanSeg | 78f354da5d724b93ead3ac6c2b15ae18d3ac0aea | [
"MIT"
] | 20 | 2019-04-13T07:07:49.000Z | 2022-02-23T03:10:40.000Z | test.py | Roulbac/GanSeg | 78f354da5d724b93ead3ac6c2b15ae18d3ac0aea | [
"MIT"
] | null | null | null | test.py | Roulbac/GanSeg | 78f354da5d724b93ead3ac6c2b15ae18d3ac0aea | [
"MIT"
] | 4 | 2019-04-13T13:50:39.000Z | 2020-11-08T03:50:54.000Z | from options.test_parser import TestParser
from models import create_model, get_model_parsing_modifier
from datasets import create_dataset, get_dataset_parsing_modifier
# Build the test-time CLI parser and discover which model/dataset were selected.
parser = TestParser()
model_name = parser.get_model_name()
dataset_name = parser.get_dataset_name()
print('Model name: {}'.format(model_name))
print('Dataset name: {}'.format(dataset_name))

# Let the chosen model and dataset contribute their own test-time options
# to the parser before the final parse (is_train=False selects test options).
model_parser_modifier = get_model_parsing_modifier(model_name)
model_parser_modifier(parser, is_train=False)
dataset_parser_modifier = get_dataset_parsing_modifier(dataset_name)
dataset_parser_modifier(parser, is_train=False)
opts, _ = parser.parse_options()

# NOTE(review): the returned string is unused; this call appears to be kept
# for its verbose=True side effect (printing the options) — confirm before removing.
opts_str = parser.make_opts_string(opts, verbose=True)

model = create_model(opts)
dataset = create_dataset(opts)
if opts.eval:
    model.set_eval()  # presumably switches the model to evaluation mode — TODO confirm
model.test(dataset)
| 27.413793 | 68 | 0.820126 |
c6bbabf1d22c4d30fab6e968dbe23f93d2189af5 | 67 | py | Python | codes/course1/demo3_3.py | BigShuang/big-shuang-python-introductory-course | c4fd1343c4c539567180072c749b68bda7c28075 | [
"MIT"
] | null | null | null | codes/course1/demo3_3.py | BigShuang/big-shuang-python-introductory-course | c4fd1343c4c539567180072c749b68bda7c28075 | [
"MIT"
] | null | null | null | codes/course1/demo3_3.py | BigShuang/big-shuang-python-introductory-course | c4fd1343c4c539567180072c749b68bda7c28075 | [
"MIT"
# Print the first eleven powers of two (2^0 .. 2^10), one per line.
exponent = 0
while exponent < 11:
    print("2^%s = %s" % (exponent, 2 ** exponent))
    exponent += 1
| 16.75 | 31 | 0.38806 |
c6bbf866443aff7a6fcd220b4ae5ee2ac61f6a5c | 353 | py | Python | 2018-12-31.py | shangpf1/python_study | 6730519ce7b5cf4612e1c778ae5876cfbb748a4f | [
"MIT"
] | null | null | null | 2018-12-31.py | shangpf1/python_study | 6730519ce7b5cf4612e1c778ae5876cfbb748a4f | [
"MIT"
] | null | null | null | 2018-12-31.py | shangpf1/python_study | 6730519ce7b5cf4612e1c778ae5876cfbb748a4f | [
"MIT"
] | null | null | null | #
from selenium import webdriver
from os import path

# Launch a Chrome session (requires chromedriver to be available on PATH).
driver = webdriver.Chrome()

# Resolve the screenshot path next to this script.
# BUG FIX: the original passed the *string literal* '__file__', whose dirname
# is always '' — the screenshot silently landed in the current working
# directory instead of the script's directory.
d = path.dirname(__file__)
index = path.join(d, 'index.png')

driver.get("https://www.baidu.com/")
# Maximize the browser window.
driver.maximize_window()
# Capture a screenshot of the current page.
driver.save_screenshot(index)
# Navigate back in the browser history...
driver.back()
# ...and forward again.
driver.forward()
# Reload the current page.
driver.refresh()
driver.quit() | 12.172414 | 36 | 0.716714 |
c6bcdd4e1b6e9560584746d256ad5769eed1114e | 4,016 | py | Python | flask_webapi/exceptions.py | viniciuschiele/flask-webapi | 4901c0b78fc61b8db18c211c5858b84901d0f4ab | [
"MIT"
] | null | null | null | flask_webapi/exceptions.py | viniciuschiele/flask-webapi | 4901c0b78fc61b8db18c211c5858b84901d0f4ab | [
"MIT"
] | null | null | null | flask_webapi/exceptions.py | viniciuschiele/flask-webapi | 4901c0b78fc61b8db18c211c5858b84901d0f4ab | [
"MIT"
] | null | null | null | """
Handles exceptions raised by Flask WebAPI.
"""
from . import status
| 30.656489 | 108 | 0.586404 |
c6bec2b7b19f2adc7fd34bc6ce05b27edb1743ba | 5,133 | py | Python | plugins/module_utils/fortiwebcloud/request.py | fortinet/fortiwebcloud-ansible | 4a6a2b139b88d6428494ca87d570a0a09988b15d | [
"MIT"
] | 5 | 2021-01-09T23:09:22.000Z | 2022-01-22T12:34:25.000Z | plugins/module_utils/fortiwebcloud/request.py | fortinet/fortiwebcloud-ansible | 4a6a2b139b88d6428494ca87d570a0a09988b15d | [
"MIT"
] | 2 | 2021-01-19T03:46:53.000Z | 2021-06-28T15:19:24.000Z | plugins/module_utils/fortiwebcloud/request.py | fortinet/fortiwebcloud-ansible | 4a6a2b139b88d6428494ca87d570a0a09988b15d | [
"MIT"
] | 2 | 2021-09-17T11:13:31.000Z | 2021-11-30T10:53:49.000Z | #!/usr/bin/python
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2020 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import json
import time
import threading
import urllib.parse
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
from ansible_collections.fortinet.fortiwebcloud.plugins.module_utils.fortiwebcloud.settings import (API_VER, DOMAIN)
# Global FWB REST connection session
| 36.664286 | 116 | 0.643678 |
c6bf070a0e1401995e4a06960552d64f43d04d96 | 497 | py | Python | tests/test_account.py | thangduong/lendingclub2 | b16552807b69b81804369fd1a9058fa8f89ce1ef | [
"MIT"
] | null | null | null | tests/test_account.py | thangduong/lendingclub2 | b16552807b69b81804369fd1a9058fa8f89ce1ef | [
"MIT"
] | null | null | null | tests/test_account.py | thangduong/lendingclub2 | b16552807b69b81804369fd1a9058fa8f89ce1ef | [
"MIT"
] | null | null | null | # Filename: test_account.py
"""
Test the lendingclub2.accountmodule
"""
# PyTest
import pytest
# lendingclub2
from lendingclub2.account import InvestorAccount
from lendingclub2.error import LCError
| 20.708333 | 62 | 0.702213 |
c6c065b4b597b12187960a9bbab5ef9b81fb5b2a | 3,318 | py | Python | src/webserver.py | sadjadeb/qrCode_ticket | 36e5762e6fcb77315385922bb4568f2e0b67888c | [
"MIT"
] | 10 | 2021-12-25T16:58:45.000Z | 2022-03-21T02:25:10.000Z | src/webserver.py | sadjadeb/qrCode_ticket | 36e5762e6fcb77315385922bb4568f2e0b67888c | [
"MIT"
] | 2 | 2021-12-31T10:48:57.000Z | 2022-01-01T12:05:02.000Z | src/webserver.py | sadjadeb/qrCode_ticket | 36e5762e6fcb77315385922bb4568f2e0b67888c | [
"MIT"
] | null | null | null | from config import *
from excel_handler import get_users_from_excel
from fastapi import FastAPI, HTTPException, status, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from typing import Optional
import pathlib
import uvicorn
app = FastAPI()

# <repo root>/templates — this module sits one directory below the repo root,
# hence the two .parent hops.
templates_path = pathlib.Path(__file__).parent.resolve().parent.resolve() / 'templates'
templates = Jinja2Templates(directory=templates_path)
app.mount("/static", StaticFiles(directory=templates_path / 'static'), name="static")

# NOTE(review): wildcard origins combined with allow_credentials=True is the
# most permissive CORS configuration possible — confirm it is intended here.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level state loaded once at import time.
users = get_users_from_excel(OUTPUT_FILE_PATH)  # users parsed from the Excel file at OUTPUT_FILE_PATH
users_entrance = {}  # presumably maps users to entrance status — TODO confirm against the route handlers


def run_server():
    """Serve the FastAPI app with uvicorn on 127.0.0.1:8000 (blocking)."""
    uvicorn.run(app, host="127.0.0.1", port=8000)
| 34.926316 | 98 | 0.61965 |
c6c0d208582ba1aed39ac80bef8ef9d7e28b0eae | 337 | py | Python | main/test/test.py | gitter-badger/grow-controller-Rpi | 0107251af85a4dc23b61b8be66fe49d597fd776b | [
"Unlicense"
] | 3 | 2017-03-21T22:35:01.000Z | 2021-08-19T03:16:39.000Z | main/test/test.py | gitter-badger/grow-controller-Rpi | 0107251af85a4dc23b61b8be66fe49d597fd776b | [
"Unlicense"
] | null | null | null | main/test/test.py | gitter-badger/grow-controller-Rpi | 0107251af85a4dc23b61b8be66fe49d597fd776b | [
"Unlicense"
] | null | null | null |
###PiPlate buttons
print('sudo crontab /home/pi/grow-controller-Rpi/main/ref/crontab.cron')
'''
while True:
time.sleep(0.5) # without this time.sleep, 23% cpu usage. with 3%
if lcd.is_pressed(LCD.UP):
GPIO.output(pin1, GPIO.LOW) # on
if lcd.is_pressed(LCD.DOWN):
GPIO.output(pin1, GPIO.HIGH) # off
''' | 22.466667 | 72 | 0.64095 |
c6c2c18414f129c6c748df8b453b0adccb5dbf36 | 2,010 | py | Python | generate.py | xphip/feather | 18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e | [
"MIT"
] | null | null | null | generate.py | xphip/feather | 18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e | [
"MIT"
] | null | null | null | generate.py | xphip/feather | 18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
import os
import subprocess
import sys
# # OLD
# def SVGMinify():
# icons_dir = './icons'
# icons_min_dir = './icons-min'
# total_icons = 0
# for icon in os.listdir(icons_dir):
# [name, ext] = icon.split('.')
# icon_min = "{}/{}.min.{}".format(icons_min_dir, name, ext)
# i = "{}/{}.{}".format(icons_dir, name, ext)
# if os.path.exists(icon_min) == 0:
# subprocess.call(['svgo', '-q', i, '-o', icon_min])
# total_icons += 1
# if total_icons > 0:
# print 'Total icons minified: ' + str(total_icons)
if __name__ == "__main__":
if len(sys.argv) > 1:
if sys.argv[1] == "css":
CSSMaker()
print 'Done!'
elif sys.argv[1] == "cssuri":
CSSUriMaker()
print 'Done!'
else:
Usage()
| 21.612903 | 101 | 0.593035 |
c6c3cf7f18578ef4fee0cf3ceb347dcb151e1993 | 3,827 | py | Python | Lib/corpuscrawler/crawl_pl.py | cash/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 95 | 2019-06-13T23:34:21.000Z | 2022-03-12T05:22:49.000Z | Lib/corpuscrawler/crawl_pl.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 31 | 2019-06-02T18:56:53.000Z | 2021-08-10T20:16:02.000Z | Lib/corpuscrawler/crawl_pl.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 35 | 2019-06-18T08:26:24.000Z | 2022-01-11T13:59:40.000Z | # coding: utf-8
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
from corpuscrawler.util import (
crawl_deutsche_welle, crawl_udhr, extract, cleantext, clean_paragraphs, urlpath
)
| 40.284211 | 92 | 0.629736 |
c6c6489454579f788af2d644a9acb3a3264844fe | 1,245 | py | Python | froide/publicbody/search_indexes.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | froide/publicbody/search_indexes.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | froide/publicbody/search_indexes.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | from django.conf import settings
from haystack import indexes
from celery_haystack.indexes import CelerySearchIndex
from .models import PublicBody
# Value of 'public_body_boosts' from settings.FROIDE_CONFIG (empty dict when absent).
PUBLIC_BODY_BOOSTS = settings.FROIDE_CONFIG.get("public_body_boosts", {})
| 37.727273 | 81 | 0.727711 |
c6c9aa4e57c89e6f69fa55d265d499cc88ae995f | 1,519 | py | Python | 4_factory/factory_method/dependent_pizza_store.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/factory_method/dependent_pizza_store.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/factory_method/dependent_pizza_store.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | from chicago_style_clam_pizza import ChicagoStyleClamPizza
from chicago_style_cheese_pizza import ChicagoStyleCheesePizza
from chicago_style_pepperoni_pizza import ChicagoStylePepperoniPizza
from chicago_style_veggie_pizza import ChicagoStyleVeggiePizza
from ny_style_clam_pizza import NYStyleClamPizza
from ny_style_cheese_pizza import NYStyleCheesePizza
from ny_style_pepperoni_pizza import NYStylePepperoniPizza
from ny_style_veggie_pizza import NYStyleVeggiePizza
| 35.325581 | 68 | 0.631336 |
c6cac8b3c9901ec09333ce8b40056a0c6f21d27c | 459 | py | Python | tests/performance/cte-arm/tests/rf_mnist.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 36 | 2018-10-22T19:21:14.000Z | 2022-03-22T12:10:01.000Z | tests/performance/cte-arm/tests/rf_mnist.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 329 | 2018-11-22T18:04:57.000Z | 2022-03-18T01:26:55.000Z | tests/performance/cte-arm/tests/rf_mnist.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | [
"Apache-2.0"
] | 21 | 2019-01-10T11:46:39.000Z | 2022-03-17T12:59:45.000Z | import performance
import dislib as ds
from dislib.classification import RandomForestClassifier
if __name__ == "__main__":
main()
| 24.157895 | 73 | 0.723312 |
c6d0b39109db93442e531726d432358337458672 | 2,275 | py | Python | pwn/shellcode/misc/exit.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | 1 | 2016-08-29T03:38:42.000Z | 2016-08-29T03:38:42.000Z | pwn/shellcode/misc/exit.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | null | null | null | pwn/shellcode/misc/exit.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | null | null | null | from pwn.internal.shellcode_helper import *
from ..misc.pushstr import pushstr
| 29.545455 | 78 | 0.465934 |
c6d1c365ef9f848b0908e928e06218bc28eb4a5c | 1,037 | py | Python | backend/bundle/tests/seeker_tests/samples/indentation.py | fossabot/Graphery | 61f23b2ad4ad0fa5dff643047597f9bb6cae35a2 | [
"MIT"
] | 5 | 2020-08-26T00:15:01.000Z | 2021-01-11T17:24:51.000Z | backend/bundle/tests/seeker_tests/samples/indentation.py | fossabot/Graphery | 61f23b2ad4ad0fa5dff643047597f9bb6cae35a2 | [
"MIT"
] | 69 | 2020-08-02T23:45:44.000Z | 2021-04-17T03:04:32.000Z | backend/bundle/tests/seeker_tests/samples/indentation.py | fossabot/Graphery | 61f23b2ad4ad0fa5dff643047597f9bb6cae35a2 | [
"MIT"
] | 4 | 2020-09-10T05:40:49.000Z | 2020-12-20T11:44:16.000Z | from bundle import seeker
# Verbatim expected trace output for the tracer under test (a PySnooper-style
# call/line/return log with timing lines). This is a byte-for-byte test
# fixture — do not reformat, re-indent, or "fix" its contents.
expected_output = '''
Source path:... Whatever
call 5 def main():
line 6 f2()
call 9 def f2():
line 10 f3()
Source path:... Whatever
call 18 def f4():
line 19 f5()
call 22 def f5():
line 23 pass
return 23 pass
Return value:.. None
return 19 f5()
Return value:.. None
Elapsed time: 00:00:00.000134
return 10 f3()
Return value:.. None
return 6 f2()
Return value:.. None
Elapsed time: 00:00:00.000885
'''
| 21.604167 | 52 | 0.387657 |
c6d2c43f2fbd2525762b6b965846526e85874c64 | 1,451 | py | Python | qiskit_experiments/framework/matplotlib.py | QuantumHardware/qiskit-experiments | c09cf35bb922419354955abe8d536a97a9ea286b | [
"Apache-2.0"
] | 72 | 2021-02-24T19:28:51.000Z | 2022-03-27T02:56:59.000Z | qiskit_experiments/framework/matplotlib.py | dongcc/qiskit-experiments | 894dcf41ac69ace9e6a0a3c4800d4b6994ac3b5a | [
"Apache-2.0"
] | 509 | 2021-03-04T13:46:00.000Z | 2022-03-31T18:09:16.000Z | qiskit_experiments/framework/matplotlib.py | dongcc/qiskit-experiments | 894dcf41ac69ace9e6a0a3c4800d4b6994ac3b5a | [
"Apache-2.0"
] | 70 | 2021-02-24T19:21:39.000Z | 2022-03-05T04:00:12.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Matplotlib helper functions
"""
from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG
default_figure_canvas = FigureCanvasSVG # pylint: disable=invalid-name
"""Matplotlib canvas to use when rendering a figure. This needs to be a
canvas for a `non-interactive backend
<https://matplotlib.org/stable/tutorials/introductory/usage.html#the-builtin-backends>`_.
The default is `FigureCanvasSVG`."""
def get_non_gui_ax():
    """Create a matplotlib axes that is safe to use from a child thread.

    Matplotlib GUI backends may only be driven from the main thread, so any
    analysis/plotting running in a worker thread needs a figure that is not
    attached to a GUI event loop.  This builds a fresh ``Figure``, binds the
    non-interactive ``default_figure_canvas`` to it, and hands back an axes
    from that figure.

    Returns:
        matplotlib.axes.Axes: An axes belonging to a GUI-free figure.
    """
    fig = Figure()
    default_figure_canvas(fig)  # constructing the canvas attaches it to the figure
    return fig.subplots()
| 36.275 | 89 | 0.751206 |
c6d4b9bc3a7c3d3b66374d69e6147ebd024b69ea | 14,117 | py | Python | effect_tools.py | rsjones94/hurricane_analysis | b619526dcf40ea83e9ae3ba92f3a1d28fce25776 | [
"MIT"
] | null | null | null | effect_tools.py | rsjones94/hurricane_analysis | b619526dcf40ea83e9ae3ba92f3a1d28fce25776 | [
"MIT"
] | null | null | null | effect_tools.py | rsjones94/hurricane_analysis | b619526dcf40ea83e9ae3ba92f3a1d28fce25776 | [
"MIT"
] | null | null | null | import os
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from read import clean_read
from detrend import *
def get_effect(data, param, mean, stddev, start_index, lag=3, effect_type=1,
               returning_gap=0, dropthrough=(0, 0), forcing=(None, None),
               max_effect=365, max_dropout=5):
    """
    For a given parameter, find how long the time series takes to return to
    normalcy after a storm perturbation.

    Normalcy is the band [mean - stddev, mean + stddev] computed over the
    pre-effect window.

    Args:
        data: a DataFrame of gauge data
        param: the column in data to use
        mean: the mean value of the pre-effect window
        stddev: the standard deviation of the pre-effect window
        start_index: the index of the storm perturbation
        lag: number of days allowed for an effect to begin. Minimum is 1.
        effect_type: the INITIAL expected effect of the perturbation; 1 for a
            positive excursion, -1 for a negative one
        returning_gap: number of days an excursion may reverse its returning
            trend before the series is considered to actually be returning
        dropthrough: (count, days) — count is the number of dropthroughs
            allowed, days is how long the series has to cross the band. A
            dropthrough is when the series exits the normal band on one side
            and quickly re-exits on the opposite side.
        forcing: (days, points) — if a returning trend reverses for more than
            `days`, a return is forced by fitting a line to the last `points`
            samples and extrapolating it back to the band (see forced_return).
            (None, None) disables forcing.
        max_effect: maximum number of days an effect may continue past its
            onset before being terminated with type 'max_effect'
        max_dropout: number of consecutive days of missing signal before
            mandatory termination with type 'dropout'

    Returns:
        [[effect_start, effect_end], [days_above, days_below, days_between,
        termination_type, forcing_start, forcing_slope]].
        effect_start/effect_end are None when no effect was detected.
        termination_type is one of 'natural', 'forced', 'max_effect',
        'dropout', 'forcing error' or None (no effect). forcing_start and
        forcing_slope are only set when the return was forced.
    """
    returner = [[None, None], [0, 0, 0, 'natural', None, None]]
    force_completion = forcing[0]  # days of trend reversal tolerated before forcing
    force_history = forcing[1]  # points used in the forcing fit line
    dropthrough = [dropthrough[0], dropthrough[1]]  # mutable copy; [0] is decremented
    comp_dict = {1: greater, -1: lesser}
    exes = np.array(data.index)
    orig = np.array(data[param])  # raw signal; NaNs kept so dropouts are detectable
    whys = np.array(pd.Series(orig).interpolate(limit_direction='both'))  # gap-filled
    low = mean - stddev
    high = mean + stddev
    normalcy = (low, high)
    if effect_type == 1:
        comp_ind = 1
        comp_val = normalcy[comp_ind]  # high
    elif effect_type == -1:
        comp_ind = 0
        comp_val = normalcy[comp_ind]  # low
    else:
        raise Exception('effect_type must be 1 or -1')
    # Phase 1: search the lag window for the effect onset, i.e. the first day
    # the (interpolated) series leaves the band in the expected direction.
    effect_begun = False
    i = start_index - 1
    while lag > 0:
        lag -= 1
        i += 1
        val = whys[i]
        if comp_dict[effect_type](val, comp_val):
            effect_begun = True
            returner[0][0] = i
            break
    if not effect_begun:
        returner[1][3] = None  # no effect within the allowed lag
        return returner
    # Phase 2: walk forward day by day, tallying where the series sits
    # relative to the band, until the effect terminates.
    i -= 1  # re-enter the loop on the onset day itself
    is_returning = False
    has_real_val = False
    nan_count = 0
    ret_gap_count = 0
    while True:
        i += 1
        # BUGFIX: the original condition was `i > (i + max_effect)`, which is
        # always False, so max_effect could never fire. Terminate relative to
        # the effect onset instead.
        if i > (returner[0][0] + max_effect):
            returner[1][3] = 'max_effect'
            break
        if np.isnan(orig[i]):
            nan_count += 1
            if nan_count > max_dropout:  # too many consecutive missing days
                returner[1][3] = 'dropout'
                i -= nan_count - 1  # rewind to the first missing day
                break
        else:
            has_real_val = True
            nan_count = 0
        last_val = whys[i - 1]
        val = whys[i]
        # True when today's value moved back toward pre-perturbation levels.
        towards_pre = comp_dict[effect_type](last_val, val)
        if towards_pre and not is_returning:
            # The series may have started returning; require more than
            # returning_gap consecutive returning days, or a value already
            # back past the band edge, before committing.
            ret_gap_count += 1
            if ret_gap_count > returning_gap or comp_dict[effect_type](comp_val, val):
                is_returning = True
                ret_gap_count = 0
        elif not is_returning:
            ret_gap_count = 0
        if is_returning:
            if comp_dict[effect_type](comp_val, val):  # re-entered/passed the band
                if dropthrough[0] == 0:  # no dropthroughs left: effect is over
                    break
                else:
                    if within(val, normalcy):
                        # Inside the band: does the series punch through to the
                        # opposite side fast enough to count as a dropthrough?
                        does_drop_through, ind = drops_through(whys, i, normalcy, dropthrough[1])
                        if does_drop_through:  # fast-forward to the exit point
                            days_to_drop = ind - i
                            returner[1][2] += days_to_drop - 1
                            i = ind - 1
                        else:  # settled inside the band: effect is over
                            break
                    # Now track the excursion on the opposite side of the band.
                    dropthrough[0] -= 1
                    effect_type = -effect_type
                    comp_ind ^= 1  # bit flip from 0 to 1 and vice versa
                    comp_val = normalcy[comp_ind]
                    is_returning = False
            elif force_completion and comp_dict[effect_type](val, last_val):
                # The returning trend reversed (moving away from normalcy again).
                dn = days_to_return(whys, i - 1, func=comp_dict[-effect_type], max_nan=max_dropout)
                if dn <= force_completion:  # resumes returning soon enough
                    if last_val > high:
                        returner[1][0] += (dn - 2)
                    if last_val < low:
                        returner[1][1] += (dn - 2)
                    i += (dn - 2)
                else:  # force completion via linear extrapolation
                    try:
                        ind, days_to_force, slope = forced_return(exes, whys, i - 1, normalcy, history=force_history)
                        returner[1][3] = 'forced'
                        returner[1][4] = i - 1
                        returner[1][5] = slope
                        to_add = days_to_force - 1
                        if last_val > high:
                            returner[1][0] += to_add
                        if last_val < low:
                            returner[1][1] += to_add
                        i = ind
                    except ValueError:  # no usable fit line could be found
                        returner[1][3] = 'forcing error'
                        i -= 1
                    break
        # Tally today's value against the normalcy band.
        if val > high:
            returner[1][0] += 1
        elif val < low:
            returner[1][1] += 1
        else:
            returner[1][2] += 1
    returner[0][1] = i
    if not has_real_val:  # effect window contained no real measurements
        returner = [[None, None], [0, 0, 0, 'dropout', None, None]]
    if returner[0][0] == returner[0][1]:
        # Happens when a dropout coincides with an apparent effect created by
        # interpolation at the storm start.
        returner = [[None, None], [0, 0, 0, 'natural', None, None]]
    return returner
def forced_return(exes, whys, i, window, history=3):
    """
    Find where a fitted trend line re-enters the normalcy window.

    Fits a least-squares line through the last ``history`` points ending at
    index ``i`` and extrapolates it forward until it crosses back into the
    window, returning where and when that happens.

    Args:
        exes: x vals (assumed evenly spaced; extrapolated past the end)
        whys: y vals
        i: index where the forced return begins
        window: (low, high) bounds of the normalcy window
        history: number of trailing points to include in the best fit

    Returns:
        tuple (index_of_return, days_to_return, slope)

    Raises:
        ValueError: if no trend line pointing back toward the window can be
            fit, even after shrinking ``history`` down to 2 points.
    """
    # Shrink the fit window until the fitted slope actually points back toward
    # the normalcy window (otherwise extrapolation would never return).
    while True:
        x = exes[(i - history + 1):(i + 1)]
        y = whys[(i - history + 1):(i + 1)]
        m, b = np.polyfit(x, y, 1)
        if whys[i] > window[1] and m >= 0:
            history -= 1
        elif whys[i] < window[0] and m <= 0:
            history -= 1
        elif np.isclose(m, 0):
            history -= 1
        else:
            break
        if history == 1:
            raise ValueError('Forced return impossible')
    # BUGFIX: the original called an undefined lin_func() here (NameError on
    # every forced return). Evaluate the fitted line explicitly instead;
    # x positions past the end of the series are extrapolated assuming the
    # spacing of the last two samples.
    step = exes[-1] - exes[-2] if len(exes) > 1 else 1

    def _line_at(index):
        # y value of the fitted line at the x-coordinate of `index`
        x_at = exes[index] if index < len(exes) else exes[-1] + (index - len(exes) + 1) * step
        return m * x_at + b

    if whys[i] > window[1]:
        crossed = lesser
        comp = window[1]
    elif whys[i] < window[0]:
        crossed = greater
        comp = window[0]
    else:
        # BUGFIX: the original built this Exception without raising it, which
        # then crashed with an unrelated NameError on `func`.
        raise Exception('Whoah. something weird with forced_return()')
    val = whys[i]
    n = 0
    while not crossed(val, comp):
        i += 1
        n += 1
        val = _line_at(i)
    return i, n, m
def days_to_return(exes, i, func, max_nan=0):
    """
    Count the days until a series crosses back past its value at index ``i``.

    Walks forward from ``i`` until ``func(exes[j], exes[i])`` is True, i.e.
    until the series drops below (func=lesser) or rises above (func=greater)
    the starting value. Running off the end of the series also terminates the
    walk.

    Args:
        exes: series of values
        i: index to start at
        func: a two-argument comparison, e.g. lesser or greater from this module
        max_nan: maximum allowable NaNs before giving up.
            NOTE(review): NaNs are counted cumulatively, not consecutively,
            because the counter is never reset on a real value — confirm
            whether that is intended before changing it.

    Returns:
        int: number of days stepped forward (includes the terminating day)
    """
    # (The original had a dead `if func is lesser: pass / elif ...: pass`
    # debug stub here; removed.)
    initial = exes[i]
    nas = 0  # NaNs seen so far
    n = 0  # days stepped forward
    try:
        while nas <= max_nan:
            i += 1
            n += 1
            val = exes[i]
            if np.isnan(val):
                nas += 1
            elif func(val, initial):
                break  # crossed back past the starting value
    except IndexError:
        pass  # ran off the end of the series: return days walked so far
    return n
def drops_through(exes, i, window, allowed):
    """
    Test whether the series punches through the normalcy window fast enough.

    Starting at index ``i`` (which may lie inside the window), walk backwards
    to the most recent out-of-window value, then walk forwards and check
    whether the series reaches the *opposite* side of the window within
    ``allowed`` steps.

    Args:
        exes: the x data
        i: the index being checked
        window: (low, high) bounds of the window
        allowed: number of days allowed to pass through the window

    Returns:
        (True, exit_index) if it drops through in time, otherwise (False, -1)
    """
    # Back up to the last value that sat outside the window.
    while within(exes[i], window):
        i -= 1
    # A drop-through must cross the bound on the opposite side.
    if exes[i] > window[1]:
        crossed, bound = lesser, window[0]
    elif exes[i] < window[0]:
        crossed, bound = greater, window[1]
    else:
        raise Exception('Whoah. something weird with drop_through()')
    for _ in range(allowed):
        i += 1
        if crossed(exes[i], bound):
            return True, i
    return False, -1
###############
'''
choice_param = 'Discharge Detrend'
choice_gauge = '02218565'
# 04249000
# 015765185
# 0209303205
results_folder = r'E:\hurricane\results'
data_folder = r'E:\hurricane\station_data\modified'
data = clean_read(os.path.join(data_folder,choice_gauge+'.csv'))
result_df = pd.read_csv(os.path.join(results_folder,choice_param+'.csv'), dtype={'Gauge':str})
for index,line in result_df.iterrows():
if np.isnan(line['Pre-effect Window']):
continue
gauge = line['Gauge']
start = line['Storm Index']
mean = line['Pre-effect Mean']
stddev = line['Pre-effect Stddev']
if gauge == choice_gauge:
break
low = mean - stddev
high = mean + stddev
(es, ee), stats = get_effect(data, choice_param, mean, stddev, start, lag=3, effect_type=1,
returning_gap=1, dropthrough=[1,2], forcing=(3,4), max_effect=365, max_dropout=5)
plt.figure()
plt.plot(data.index,data[choice_param])
plt.axvline(start, color='red')
plt.axhline(high, color='orange')
plt.axhline(low, color='orange')
if stats[3] is not None:
plt.axvline(es, color='green', linestyle='dashed')
plt.axvline(ee, color='blue')
if stats[3] == 'forced':
x1 = stats[4]
x2 = ee
y1 = data[choice_param][stats[4]]
y2 = y1 + (x2-x1)*stats[5]
fx = [x1,x2]
fy = [y1,y2]
plt.plot(fx,fy,color='black', linestyle='dashed')
plt.xlim(start-28,start+28)
plt.title(f'Above: {stats[0]}, Below: {stats[1]}, Between: {stats[2]} \n'
f'Termination Type: {stats[3]}')
plt.show()
'''
| 32.602771 | 120 | 0.551463 |
c6d681ac44ef1494d6073c997560935007da32f3 | 131 | py | Python | lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py | maksym-taranukhin/lightning-transformers | aa7202657973b5b65c3c36eb745621043859ebc4 | [
"Apache-2.0"
] | 451 | 2021-04-21T15:53:59.000Z | 2022-03-29T10:39:45.000Z | lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py | mathemusician/lightning-transformers | b2ef06113433e6a178ce4d3c9df7ede8064e247f | [
"Apache-2.0"
] | 92 | 2021-04-21T18:42:58.000Z | 2022-03-30T05:29:54.000Z | lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py | mathemusician/lightning-transformers | b2ef06113433e6a178ce4d3c9df7ede8064e247f | [
"Apache-2.0"
] | 51 | 2021-04-22T05:35:28.000Z | 2022-03-17T13:08:12.000Z | from lightning_transformers.task.nlp.multiple_choice.datasets.swag.data import ( # noqa: F401
SwagMultipleChoiceDataModule,
)
| 32.75 | 94 | 0.80916 |
c6d6b79b9b74cb519b433548531f1d028f0803ab | 871 | py | Python | warningshot.py | DeadpoolPancakes/nerf-sentry | 0f9cccd78e66f4020f1960871fd35c328a697086 | [
"MIT"
] | null | null | null | warningshot.py | DeadpoolPancakes/nerf-sentry | 0f9cccd78e66f4020f1960871fd35c328a697086 | [
"MIT"
] | null | null | null | warningshot.py | DeadpoolPancakes/nerf-sentry | 0f9cccd78e66f4020f1960871fd35c328a697086 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
from time import sleep
# Fire a single warning shot: spin up Motor2 (the "firing motor" per the
# comments below — presumably the dart accelerator; confirm against wiring),
# briefly drive Motor1 (the firing mechanism/pusher), then disable both.
GPIO.setmode(GPIO.BCM)  # refer to pins by Broadcom (BCM) numbers
# BCM pin assignments: each motor has an enable pin plus an A/B direction pair.
Motor1Enable = 5
Motor1B = 24
Motor1A = 27
Motor2Enable = 17
Motor2B = 6
Motor2A = 22
#single shot script used as a warning shot
# Set up defined GPIO pins
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1Enable,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2Enable,GPIO.OUT)
# Turn the firing motor on
# A high / B low selects one rotation direction; Enable powers the motor.
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2Enable,GPIO.HIGH)
# warm it up for half a second
sleep(0.5)
#turn on firing mechanism
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.HIGH)
# Stop the motor
# Both motors run together for half a second before being disabled.
sleep(0.5)
GPIO.output(Motor2Enable,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.LOW)
# Always end this script by cleaning the GPIO
GPIO.cleanup()
c6d814a8e68b9da379529a21009897f7697124d2 | 1,979 | py | Python | ampadb_index/parse_md.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | null | null | null | ampadb_index/parse_md.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | 28 | 2016-10-21T16:04:56.000Z | 2018-11-10T20:55:40.000Z | ampadb_index/parse_md.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | 2 | 2016-10-22T19:24:45.000Z | 2017-02-11T10:49:02.000Z | import html
import markdown
import bleach
import lxml.html
from lxml.html import builder as E
TAGS = [
'p', 'img', 'em', 'strong', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ol', 'ul',
'li', 'br', 'hr', 'a', 'img', 'blockquote', 'b', 'i', 'u', 's', 'pre',
'code', 'table', 'thead', 'tr', 'th', 'tbody', 'td'
]
ATTRS = {
'ol': ['start'],
'a': ['href', 'title', 'rel'],
'img': ['src', 'title', 'alt'],
'th': ['align'],
'td': ['align']
}
STYLES = []
| 29.984848 | 79 | 0.560889 |
c6d82f38c4cf48f9145d5b9fdd0bf2b8b2b2ea04 | 552 | py | Python | removeModule.py | ahmedwab/MMM-ModularHandlet.py | 8bdc59730507333d280f2120849c5881dac7b1ad | [
"MIT"
] | 1 | 2022-01-16T20:21:15.000Z | 2022-01-16T20:21:15.000Z | removeModule.py | ahmedwab/MMM-ModuleHandler | 8bdc59730507333d280f2120849c5881dac7b1ad | [
"MIT"
] | null | null | null | removeModule.py | ahmedwab/MMM-ModuleHandler | 8bdc59730507333d280f2120849c5881dac7b1ad | [
"MIT"
] | null | null | null | import subprocess
import os
| 27.6 | 76 | 0.565217 |
c6d82fc284eef62f6b254b22655051352ba00a72 | 532 | py | Python | src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py | robertpardillo/Funnel | f45e419f55e085bbb95e17c47b4c94a7c625ba9b | [
"MIT"
] | 1 | 2021-05-18T16:10:49.000Z | 2021-05-18T16:10:49.000Z | src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py | robertpardillo/Funnel | f45e419f55e085bbb95e17c47b4c94a7c625ba9b | [
"MIT"
] | null | null | null | src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py | robertpardillo/Funnel | f45e419f55e085bbb95e17c47b4c94a7c625ba9b | [
"MIT"
] | null | null | null |
from ...abstractObjects.hybridShapes.line import LinePtPt
| 40.923077 | 82 | 0.781955 |
c6d92303c364567cf9a1dd4b401fd0429cd92a45 | 195 | py | Python | Latest/venv/Lib/site-packages/pyface/resource/__init__.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/pyface/resource/__init__.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/pyface/resource/__init__.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
""" Support for managing resources such as images and sounds.
Part of the TraitsGUI project of the Enthought Tool Suite.
"""
| 32.5 | 62 | 0.733333 |
c6d9810ee3519ae415fa0512f84807c328a50106 | 1,223 | py | Python | Lab Activity 6.py | Jeralph-Red/OOP-58001 | 4e38f9a0a58098a121a61e640a53e9568bf529b0 | [
"Apache-2.0"
] | null | null | null | Lab Activity 6.py | Jeralph-Red/OOP-58001 | 4e38f9a0a58098a121a61e640a53e9568bf529b0 | [
"Apache-2.0"
] | null | null | null | Lab Activity 6.py | Jeralph-Red/OOP-58001 | 4e38f9a0a58098a121a61e640a53e9568bf529b0 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
window=Tk()
mywin=SemGrade(window)
window.title('Semestral Grade Calculator')
window.geometry("400x300+10+10")
window.mainloop() | 31.358974 | 88 | 0.567457 |
c6da86aae41063146c3bc7bd5c1f243c9c0368e2 | 1,853 | py | Python | parse_wfd.py | ajsimon1/Cazar | 6831dbdb63764ad2159eaad45fe2b6cfc7edd553 | [
"MIT"
] | null | null | null | parse_wfd.py | ajsimon1/Cazar | 6831dbdb63764ad2159eaad45fe2b6cfc7edd553 | [
"MIT"
] | null | null | null | parse_wfd.py | ajsimon1/Cazar | 6831dbdb63764ad2159eaad45fe2b6cfc7edd553 | [
"MIT"
] | null | null | null | import os
import sys
import pandas as pd
from xml.etree import ElementTree as et
cwd = os.getcwd()
filepath = 'C:\\Users\\asimon\\Desktop\\Practice-' \
'Training\\p21_template_out3.xml'
if __name__ == '__main__':
df = parse_wfd_xml(filepath)
writer = pd.ExcelWriter('wfd_output.xlsx')
df.to_excel(writer, 'Sheet1')
writer.save()
| 39.425532 | 75 | 0.447922 |
c6db094a77c778676e8dbdbc124941b532991717 | 1,598 | py | Python | setup.py | yonglehou/memsql-loader | 5e7bb5787991aa990889c4e709f63a3529544268 | [
"MIT"
] | 1 | 2021-05-10T03:37:26.000Z | 2021-05-10T03:37:26.000Z | setup.py | yonglehou/memsql-loader | 5e7bb5787991aa990889c4e709f63a3529544268 | [
"MIT"
] | null | null | null | setup.py | yonglehou/memsql-loader | 5e7bb5787991aa990889c4e709f63a3529544268 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
# get version
from memsql_loader import __version__
# Build/packaging configuration for the memsql-loader distribution.
setup(
    name='memsql-loader',
    version=__version__,  # single-sourced from the memsql_loader package (imported above)
    author='MemSQL',
    author_email='support@memsql.com',
    url='https://github.com/memsql/memsql-loader',
    download_url='https://github.com/memsql/memsql-loader/releases/latest',
    license='LICENSE.txt',
    description='MemSQL Loader helps you run complex ETL workflows against MemSQL',
    # Requires README.md to exist next to this file at build time.
    long_description=open('README.md').read(),
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
    ],
    platforms=[ "Linux", "Mac OS X" ],
    # Installs the `memsql-loader` console command, pointing at main().
    entry_points={
        'console_scripts': [
            'memsql-loader = memsql_loader.main:main'
        ]
    },
    # Explicit package list (no find_packages): update when adding subpackages.
    packages=[
        'memsql_loader',
        'memsql_loader.api',
        'memsql_loader.cli',
        'memsql_loader.db',
        'memsql_loader.execution',
        'memsql_loader.loader_db',
        'memsql_loader.util',
        'memsql_loader.util.apsw_sql_step_queue',
        'memsql_loader.vendor',
        'memsql_loader.vendor.glob2',
    ],
    zip_safe=False,  # do not install as a zipped egg
    # Exact version pins for the runtime dependencies.
    install_requires=[
        'memsql==2.14.4',
        'wraptor==0.6.0',
        'clark==0.1.0',
        'voluptuous==0.8.5',
        'boto==2.28.0',
        'pycurl==7.19.3.1',
        'prettytable==0.7.2',
        'pywebhdfs==0.2.4',
        'requests==2.5.1',
    ],
    # NOTE(review): requests is pinned to 2.5.1 above but 2.2.1 here —
    # confirm which pin is intended.
    tests_require=[
        'docker-py==0.3.1',
        'pytest==2.5.2',
        'pytest-xdist==1.10',
        'pexpect==3.3',
        'requests==2.2.1',
    ],
)
| 27.084746 | 83 | 0.574468 |
c6dbe3048a8498d4b259596610f445fd78aa7173 | 17,022 | py | Python | p20191120_wada.py | tmseegoslo/wada | 1f0163ccc0e0815ae7586291712f8920b00cf7ba | [
"Apache-2.0"
] | null | null | null | p20191120_wada.py | tmseegoslo/wada | 1f0163ccc0e0815ae7586291712f8920b00cf7ba | [
"Apache-2.0"
] | null | null | null | p20191120_wada.py | tmseegoslo/wada | 1f0163ccc0e0815ae7586291712f8920b00cf7ba | [
"Apache-2.0"
] | null | null | null | #MNE tutorial
#Import modules
import os
import numpy as np
import mne
import re
import complexity_entropy as ce
#Import specific smodules for filtering
from numpy.fft import fft, fftfreq
from scipy import signal
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import matplotlib.pyplot as plt
### PUT ALL PARAMETERS HERE ###
### ### ### ### ### ### ### ###
### PUT FUNCTIONS HERE OR BETTER, IN SEPARATE FILE ###
### ### ### ### ### ### ### ### ### ### ### ### ### ###
#Path(s) to data #UPDATE TO READ ALL SUBFOLDERS IN A FOLDER
data_folder = 'Y:\Data\Wada Data Swiss\Visit_JFS_BJE\Originals'
data_raw_file = os.path.join(data_folder,
'wadatest_14_06_19.edf')
### LOOP OVER ALL SUBJECTS FOR PREPROCESSING ###
### consider putting pre-processing ###
#Read data
raw = mne.io.read_raw_edf(data_raw_file, misc=['ECG EKG-REF'],
stim_channel='Event EVENT-REF', preload=True)
#Convenience function to trim channel names
#Trim channel names
raw.rename_channels(ch_rename)
#Print overall and detailed info about raw dataset
print(raw)
print(raw.info)
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
raw.set_montage(montage)
#Plot sensor locations
#raw.plot_sensors(show_names=True)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='Slow EEG')
#Plot raw EEG traces. Mark onset of slow EEG
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Mark onset of slow EEG')
#Crop data around the newly inserted marker
seg_length = 300 #seconds
times_slow = [a['onset'] for a in raw.annotations if 'Slow' in a['description']]
tmin = times_slow[1]-seg_length
tmax = times_slow[1]+seg_length
raw = raw.crop(tmin=tmin,tmax=tmax)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='BAD_segments')
#Plot raw EEG traces. Reject obviously bad channels and mark bad segments
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Reject obviously bad channels and bad segments')
# Making and inserting events for epoching data
epoch_length = 10.0 # sec
overlap = 9.0 # sec
event_id = 1
t_min = 0.0
events = mne.make_fixed_length_events(raw, id=event_id, start=t_min,
stop=None, duration=epoch_length,
first_samp=True, overlap=overlap)
raw.add_events(events, stim_channel='EVENT', replace=False)
# Check that events are in the right place
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Check position of events', events=events)
# Read epochs
rawepochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=t_min,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
rawepochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Plot power spectrum
rawepochs.plot_psd(fmax=180)
#Filter the data from 1-100 Hz using the default options
#NOTE: Usually you should apply high-pass and low-pass filter separately, but
#this is done 'behind the scenes' in this case
epochs = rawepochs.copy().filter(1, 80, picks='eeg', filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
method='fir', phase='zero', fir_window='hamming',
fir_design='firwin')
#Plot power spectra
epochs.plot_psd(fmax=180)
#Plot epoched EEG traces. Reject obviously bad channels and mark bad segments
epochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=3e-4, misc=1e-3, stim=100),
title='Reject obviously bad channels and bad segments')
#Set up and fit the ICA
ica = mne.preprocessing.ICA(method = 'infomax', fit_params=dict(extended=True),
random_state=0, max_iter=1000)
ica.fit(epochs, picks='eeg')
#Quick look at components
ica.plot_components(inst=epochs, plot_std=True,
picks='eeg',
psd_args=dict(fmax=85))
#Plot time course of ICs
ica.plot_sources(epochs)
# =============================================================================
# #Check components one by one and mark bad ones
# n_comps = ica.get_components().shape[1]
# is_brain = [True for i in range(0,n_comps)]
# print('Press a keyboard key for brain, and a mouse button for non-brain')
# for i in range(0,n_comps) :
# ica.plot_properties(prep, picks=i, psd_args=dict(fmin=0, fmax=110))
# is_brain[i] = plt.waitforbuttonpress()
# plt.close()
# idx_bad = [i for i, x in enumerate(is_brain) if not(x)]
# ica.exclude = idx_bad
# =============================================================================
ica.apply(epochs)
#Plot cleaned data
epochs.plot(scalings=dict(eeg=3e-4, misc=1e-3, stim=1),n_epochs=5)
#Compare power spectra
epochs.plot_psd(fmax=90)
#Set bipolar (double banana) reference
anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
'Fz', 'Cz',
'T6', 'T5',
'T4', 'T3']
cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
'Cz', 'Pz',
'A2', 'A1',
'T2', 'T1']
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
epochs.set_montage(montage)
epochs_bipolar = mne.set_bipolar_reference(epochs, anodes, cathodes,
drop_refs=False)
#Print info for bipolar (double banana) reference raw data
print(prep_bi)
print(prep_bi.info['ch_names'])
#WARNING: Plotting of sensor locations does not work, set locations first
#Plot sensor locations for bipolar (double banana) reference raw data
#raw_bi.plot_sensors(show_names=True)
# =============================================================================
# order=np.array([0, 2, 4, 6, 21, 8, 22, 23, 10, 12,
# 14, 15,
# 1, 3, 5, 7, 18, 9, 19, 20, 11, 13,
# 16, 17])
# =============================================================================
ch_names = ['T3-T1', 'T5-A1', 'Fp1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'Fp1-F3',
'F3-C3', 'C3-P3', 'P3-O1', 'Fz-Cz', 'Cz-Pz', 'Fp2-F4', 'F4-C4',
'C4-P4', 'P4-O2', 'Fp2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'T4-T2',
'T6-A2', 'EKG', 'EVENT']
# =============================================================================
# ch_names = ['T1-A1','F7-A1','T3-A1','T5-A1','Fp1-A1','F3-A1','C3-A1','P3-A1','O1-A1',
# 'Fz-Cz','Pz-Cz',
# 'O2-A2','P4-A2','C4-A2','F4-A2','Fp2-A2','T6-A2','T4-A2','F8-A2','T2-A2',
# 'EKG','EVENT']
# =============================================================================
prep_bi.reorder_channels(ch_names)
#Plot re-referenced data (bipolar double banana reference)
prep_bi.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Compare power spectra
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
ax.set_xlim(0, 110)
ax.set_ylim(-70, 50)
#raw.plot_psd(fmax=110, ax=ax)
prep_bi.plot_psd(fmax=110, ax=ax)
prep_short = prep_bi.copy()
# =============================================================================
# # Filter again
# prep_short = prep_short.filter(1, 80, picks='eeg', filter_length='auto',
# l_trans_bandwidth='auto', h_trans_bandwidth='auto',
# method='fir', phase='zero', fir_window='hamming',
# fir_design='firwin')
# #Compare power spectra
# fig = plt.figure()
# ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
# ax.set_xlim(0, 100)
# ax.set_ylim(-70, 50)
# prep_short.plot_psd(fmax=100, ax=ax)
# =============================================================================
#prep_short = prep_short.crop(tmin=3840,tmax=4740)
#Plot cropped data
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Get start of infusion.
#WARNING: Hard coded index + not equal to start of slowing of EEG
#time_ipsi_slow = prep_short.annotations[0]['onset']-prep_short._first_time
time_ipsi_slow = prep_short.annotations[1]['onset']-prep_short._first_time #!!! Horrible hack! Manually inserted annotation
epoch_length = 16
time_first_event = time_ipsi_slow - epoch_length*(time_ipsi_slow//epoch_length)
events = mne.make_fixed_length_events(prep_short, id=1, start=time_first_event,
stop=None, duration=epoch_length,
first_samp=True, overlap=0.0)
prep_short.add_events(events, stim_channel='EVENT', replace=False)
#Plot data with added events
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False, events=events)
# Read epochs
epochs = mne.Epochs(prep_short, events=events, event_id=1, tmin=0.0,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
epochs.plot(n_epochs=3, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Get the 3D matrix of epoched EEG-data
data = epochs.get_data(picks='eeg')
idx_left = [2,3,4,5,6,7,8,9] #[3,4,7,8] #[2,3,4,5,7,8]
idx_right = [12,13,14,15,16,17,18,19] #[13,14,17,18] #[13,14,16,17,18,19]
idx_all = idx_left+idx_right #[3,4,7,8,13,14,17,18]
#Calculate Lempel-Ziv complexity
LZC = np.zeros(data.shape[0])
LZCcontra = np.zeros(data.shape[0])
LZCipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
LZC[i] = ce.LZc(np.transpose(data[i,idx_all,:]))
LZCcontra[i] = ce.LZc(np.transpose(data[i,idx_left,:]))
LZCipsi[i] = ce.LZc(np.transpose(data[i,idx_right,:]))
#Plot LZC vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
#plt.plot(range(1,data.shape[0]+1), LZC/LZC[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), LZC/LZC[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidtae',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('LZC/LZC_baseline')
plt.legend(('tLZCcontra', 'tLZCipsi'))
plt.title('Lempel-Ziv complexity - 16s epochs - 8 bipolar channels - 1-30 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate amplitude coalition entropy (ACE) per epoch: all channels, and each
#hemisphere separately. Unlike ce.LZc above, ce.ACE is called on the
#(channels x samples) slice directly, without a transpose.
ACE = np.zeros(data.shape[0])
ACEcontra = np.zeros(data.shape[0])
ACEipsi = np.zeros(data.shape[0])
for i in range(data.shape[0]):
    ACE[i] = ce.ACE(data[i, idx_all, :])
    ACEcontra[i] = ce.ACE(data[i, idx_left, :])
    ACEipsi[i] = ce.ACE(data[i, idx_right, :])
#Plot ACE vs epoch number, normalised to the mean of baseline epochs 50-59.
#(The all-channel ACE trace is computed above but, as before, not plotted.)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
plt.step(range(1, data.shape[0] + 1), ACEcontra / ACEcontra[50:60].mean(), where='mid')
plt.step(range(1, data.shape[0] + 1), ACEipsi / ACEipsi[50:60].mean(), where='mid')
ylim = ax.get_ylim()
#Vertical marker at the epoch boundary where drug administration starts
plt.plot([59.5, 59.5], ylim, 'k:')
plt.text(59.5, ylim[1] + 0.02 * (ylim[1] - ylim[0]),
         'Start Etomidate',  #fixed typo "Etomidtae"
         horizontalalignment='center')
plt.plot([50, 113], [1, 1], 'k:')  #unity line: no change from baseline
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('ACE/ACE_baseline')
plt.legend(('ACEcontra', 'ACEipsi'))
#NOTE(review): title says 1-35 Hz while the LZC plot says 1-30 Hz - confirm
#which filter band was actually applied to `data`.
plt.title('Amplitude coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate synchrony coalition entropy (SCE) per epoch: all channels, and each
#hemisphere separately. Like ce.ACE, ce.SCE takes the (channels x samples)
#slice directly, without a transpose.
SCE = np.zeros(data.shape[0])
SCEcontra = np.zeros(data.shape[0])
SCEipsi = np.zeros(data.shape[0])
for i in range(data.shape[0]):
    SCE[i] = ce.SCE(data[i, idx_all, :])
    SCEcontra[i] = ce.SCE(data[i, idx_left, :])
    SCEipsi[i] = ce.SCE(data[i, idx_right, :])
#Plot SCE vs epoch number, normalised to the mean of baseline epochs 50-59.
#(The all-channel SCE trace is computed above but, as before, not plotted.)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
plt.step(range(1, data.shape[0] + 1), SCEcontra / SCEcontra[50:60].mean(), where='mid')
plt.step(range(1, data.shape[0] + 1), SCEipsi / SCEipsi[50:60].mean(), where='mid')
ylim = ax.get_ylim()
#Vertical marker at the epoch boundary where drug administration starts
plt.plot([59.5, 59.5], ylim, 'k:')
plt.text(59.5, ylim[1] + 0.02 * (ylim[1] - ylim[0]),
         'Start Etomidate',  #fixed typo "Etomidtae"
         horizontalalignment='center')
plt.plot([50, 113], [1, 1], 'k:')  #unity line: no change from baseline
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('SCE/SCE_baseline')
plt.legend(('SCEcontra', 'SCEipsi'))
plt.title('Synchrony coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
## POSSIBLY USEFUL ##
# =============================================================================
# #Resample if needed (Warning: looking at PSD there seems to be some passband-ripples?)
# prep = raw.copy().resample(64)
#
# #Compare power spectra
# raw.plot_psd(fmax=32)
# prep.plot_psd(fmax=32)
#
# #Compare EEG traces
# raw.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# prep.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# =============================================================================
# =============================================================================
# #Construct and visualize FIR filter (recommended over IIR for most applications)
# sfreq = 1000.
# f_p = 40.
# flim = (1.0, sfreq / 2.0) # limits for plotting
# nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
# freq = [0, f_p, f_p, nyq]
# gain = [1, 1, 0, 0]
#
# third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1.0 / 3.]
# ax = plt.subplots(1, figsize=third_height)[1]
# plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
# =============================================================================
## GRAVEYARD ##
# =============================================================================
# stim_data = np.zeros((1, len(prep_short.times)))
# info = mne.create_info(['STI'], raw.info['sfreq'], ['stim'])
# stim_raw = mne.io.RawArray(stim_data, info)
# raw.add_channels([stim_raw], force_update_info=True)
#
# =============================================================================
# =============================================================================
# #Set bipolar (double banana) reference
# anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
# 'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
# 'Fz', 'Cz',
# 'T6', 'T5',
# 'T4', 'T3']
# cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
# 'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
# 'Cz', 'Pz',
# 'A2', 'A1',
# 'T2', 'T1']
# raw_bi = mne.set_bipolar_reference(raw, anodes, cathodes)
# #Print info for bipolar (double banana) reference raw data
# print(raw_bi)
# print(raw_bi.info)
# #WARNING: Plotting of sensor locations does not work, set locations first
# #Plot sensor locations for bipolar (double banana) reference raw data
# #raw_bi.plot_sensors(show_names=True)
# =============================================================================
| 39.311778 | 124 | 0.584714 |
c6dc529d66bad976f5633ed5b6e53c5c1922f83f | 1,790 | py | Python | classifiers.py | mavroudisv/Mahalanobis-Classifier | 9029b2d84215afd02d8ccdbe3be7ea875b83deb6 | [
"MIT"
] | 1 | 2021-01-12T19:12:06.000Z | 2021-01-12T19:12:06.000Z | classifiers.py | mavroudisv/Mahalanobis-Classifier | 9029b2d84215afd02d8ccdbe3be7ea875b83deb6 | [
"MIT"
] | null | null | null | classifiers.py | mavroudisv/Mahalanobis-Classifier | 9029b2d84215afd02d8ccdbe3be7ea875b83deb6 | [
"MIT"
] | null | null | null | import numpy as np
import scipy as sp
| 37.291667 | 118 | 0.6 |
c6dcf725bd23764de094f21a2a52e9e26e955427 | 1,982 | py | Python | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | 2 | 2020-02-06T17:30:41.000Z | 2020-08-04T10:35:46.000Z | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | null | null | null | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | null | null | null | import os
import shutil
import json
import time
import cv2
import numpy as np
import PIL
if __name__ == '__main__' :
erode_all(True) | 29.58209 | 88 | 0.639758 |