hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7bc111fc110f0ab3862581da0b6b979e7a706d1e | 3,234 | py | Python | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
] | 1 | 2020-05-12T12:31:51.000Z | 2020-05-12T12:31:51.000Z | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
] | null | null | null | drowsiness_detector.py | zhww-107/drowsiness_detector | 855995e1da36ffc0ec1fda7df8ea1aafc35c416d | [
"BSD-2-Clause"
] | null | null | null | from imutils import face_utils
from scipy.spatial import distance
import cv2
import dlib
import imutils
import pygame
import time
# Initializing the alert sound
pygame.mixer.init()
alert_sound = pygame.mixer.Sound("alert_sound.wav")
default_volume = 0.2
# Eye-Aspect-Ratio data
EAR_threshhold = 0.17 # One valid frame is counted when EAR is lower than this value
frame_count = 0 # Number of frames when EAR is lower than EAR_threshhold
EAR_total_frame = 25 # Having frame_count larger than this value is considered drowsiness
# Play the alarm in a given volume
# Given an eye landmark, compute its eye_aspect_ratio
# Initialize the face detector and Facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Access the camera
cap = cv2.VideoCapture(0)
# Main loop for drowsiness detection
while True:
# Read the camera input, resize it, and concert it to grayscale frame
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in grayscale frame
bounds = detector(raw,0)
for bound in bounds:
# Predict facial landmarks for each detected face
shape = predictor(raw,bound)
# Convert the facial lanmarks into a 1-D numpy array (x, y)
shape = face_utils.shape_to_np(shape)
# Left and right eyes' indexes for facial landmarks
left_eye = shape[42:48]
right_eye = shape[36:42]
# The main EAR is the average of left and right eye's EAR
left_EAR = eye_aspect_ratio(left_eye)
right_EAR = eye_aspect_ratio(right_eye)
EAR = (left_EAR + right_EAR) / 2
# Draw the facial landmarks for left eye
for (x, y) in left_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Draw the facial landmarks for right eye
for (x, y) in right_eye:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
# Alarm when drowsiness is detected
if EAR < EAR_threshhold:
frame_count += 1
# Volume increases gradually
if frame_count >= EAR_total_frame:
alert(0.2 + (frame_count - 25) * 0.2)
time.sleep(3)
else:
frame_count = 0
# Display informations
cv2.putText(frame, "Frame: {:.0f}".format(frame_count), (30, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Eye-Aspect-Ratio: {:.2f}".format(EAR), (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Press Q to exit.", (410, 320),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Display the frame
cv2.imshow("Drowsiness_Detector", frame)
# Provide a way to exit the program -- pressing "Q"
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows() | 31.096154 | 89 | 0.649969 |
7bc160c90d8d420f5bacbdb3fbe421c84e36aaf4 | 11,809 | py | Python | trunk-tap.py | schreiberstein/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
] | 15 | 2017-10-22T15:08:58.000Z | 2022-01-03T22:21:12.000Z | trunk-tap.py | ideechaniz/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
] | 2 | 2018-04-04T18:52:54.000Z | 2019-02-20T10:16:13.000Z | trunk-tap.py | ideechaniz/trunk-tap.py | aacf32816e2a558e31ebc431edf84e23ef22146d | [
"MIT"
] | 6 | 2017-10-23T03:03:16.000Z | 2021-07-03T16:28:29.000Z | #!/usr/bin/env python3
# < trunk-tap.py >
# Version 1.0 < 20171022 >
# Copyright 2017: Alexander Schreiber < schreiberstein[at]gmail.com >
# https://github.com/schreiberstein/trunk-tap.py
# MIT License:
# ============
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# See: https://opensource.org/licenses/MIT
# Introduction:
# =============
# trunk-tap.py is a Linux command line utility to connects a set of 802.1Q VLANs to a TINC VPN/OpenVPN TAP-interface and is designed to be invoked by ifup/ifdown scripts after starting or stopping a VPN connection.
# Dependencies (on Debian): python3, iproute2, bridge-utils, vlan (including kernel module '8021q' in /etc/modules)
# It reads the filenames from the content of a folder containing files corresponding to the VLAN ID (e.g. '100', '105', ...), then creates VLAN interfaces on a local Ethernet adapter used as "trunk port" (e.g. 'eth1.100', 'eth1.105', ...).
# The script then proceeds to generate bridge interfaces for every VLAN ID. (e.g. "trunk0.100", "trunk0.105", ...) and attaches the respective Ethernet VLAN interfaces to the bridge. (e.g. 'trunk0.105 <-> eth1.105', ...)
# After that, the local infrastructure is ready to be attached to the VPN layer 2 tunnel.
# This is achieved by enabling the TAP interface ("up"), creating VLAN interfaces on the TAP adapter (e.g. 'tap0.100', 'tap0.105', ...) and attaching them to the respective bridge.
# Illustration:
# =============
# (TINC VPN / OpenVPN)
# -------- SITE 1 ------- -------- SITE 2 -------
# eth1.100 <-> trunk0.100 <--\ ################ /--> trunk0.100 <-> eth1.100
# eth1.105 <-> trunk0.105 <--->> ---TAP-TUNNEL--- <<---> trunk0.105 <-> eth1.105
# eth1.110 <-> trunk0.110 <--/ ################ \--> trunk0.110 <-> eth1.110
# Hint: Interface names (ethernet adapter, bridge name, ...) do not neccesarily have to be identical among sites.
# --------------------------------------------------------------------------------------------------------------- #
# Code:
# =====
# Import required Python3 modules
import os, sys, subprocess
from pathlib import Path
# Create VLAN-interfaces on trunk interface (e.g. 'eth1.100', 'eth1.105', ...)
# Function to remove VLAN interfaces from trunk interface
# Function to create main bridge (no VLAN ID - May be used to attach a VLAN/network to provide network to devices without VLAN support (VLAN0 - untagged))
# Function to remove bridge
# Creates bridges to be used for VLAN bridging (e.g. 'trunk0.100', 'trunk0.105', ..) - illustration: eth1.105 <-> Bridge: trunk0.105 <-> tap0.105
# Function to remove VLAN interfaces from the bridge
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
# Create VLAN-interfaces on tap interface
# Function to bridge the VLANs of the physical interface with the VLANs of the bridge
# Function to enable ("up") the tap interface
# Function to disable ("down") the tap interface
# Function to remove VLAN interfaces from tap interface
# Function to remove members attached by the tap_bridge() function
# Function to remove members attached by the bridge() function
# ------------------------
# Note: Order of execution
# ------------------------
# Start:
# ------
# trunk_vlan_add()
# bridge_add()
# bridge_vlan_add()
# bridge()
# tap_if_up()
# tap_vlan_add()
# tap_bridge()
# Stop:
# -----
# tap_unbridge()
# tap_vlan_del()
# tap_if_down()
# unbridge()
# bridge_vlan_del()
# bridge_del()
# trunk_vlan_del()
# Start function - Used to execute all other functions
# Stop function - reverses the actions performed by start()
# # # # # # # # #
# Main function #
# # # # # # # # #
# Only run main if the script is explicitly executed (e.g. './trunktap.py')
if __name__ == "__main__":
main()
| 41.146341 | 260 | 0.655348 |
7bc353399a2502106befa0365666e5d586522d04 | 4,404 | py | Python | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
] | null | null | null | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
] | null | null | null | tests/common/mock_cgroup_commands.py | rbgithuub/WALinuxAgent | c0462f33bb5e3a33430fe3d172676d85cefa6227 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import contextlib
import os
import re
import subprocess
from azurelinuxagent.common.utils import fileutil
from tests.tools import patch, data_dir
#
# Default values for the mocked commands.
#
# The output comes from an Ubuntu 18 system
#
_default_commands = [
(r"systemctl --version",
'''systemd 237
+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid
'''),
(r"mount -t cgroup",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
'''),
(r"mount -t cgroup2",
'''cgroup on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime)
'''),
(r"systemctl show walinuxagent\.service --property CPUAccounting",
'''CPUAccounting=no
'''),
(r"systemctl show walinuxagent\.service --property MemoryAccounting",
'''MemoryAccounting=no
'''),
(r"systemd-run --unit=([^\s]+) --scope ([^\s]+)",
'''
Running scope as unit: TEST_UNIT.scope
Thu 28 May 2020 07:25:55 AM PDT
'''),
]
_default_files = (
(r"/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'proc_self_cgroup')),
(r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'proc_pid_cgroup')),
(r"/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'sys_fs_cgroup_unified_cgroup.controllers')),
)
| 38.631579 | 178 | 0.711172 |
7bc78e4dfebfc4162a535f0855d380aa68aa6df8 | 1,474 | py | Python | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
] | 1 | 2021-11-29T03:30:49.000Z | 2021-11-29T03:30:49.000Z | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
] | 1 | 2021-11-29T15:28:09.000Z | 2021-11-29T15:28:09.000Z | main.py | saiamphora/XOR-NEATpy | 091b6d6fc3b662491c8216227f5305841521e0ed | [
"Unlicense"
] | null | null | null | from __future__ import print_function
import os
import neat
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [(0.0,),(1.0,),(1.0,),(0.0,)]
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path) | 33.5 | 82 | 0.651967 |
7bc7b39f24b5e8a02751dc33b721dc3411814fe9 | 16,866 | py | Python | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
] | 5 | 2017-10-02T06:01:01.000Z | 2022-03-08T05:51:51.000Z | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
] | null | null | null | iBlock.py | RussianOtter/iBlock | e0db1b94fd2d8ed9538ad42df1a706cc782bb2f3 | [
"MIT"
] | null | null | null | """
_ _____ _ _
|_| __ | |___ ___| |_
| | __ -| | . | _| '_|
|_|_____|_|___|___|_,_|
iBlock is a machine learning video game!
This game is played on a 8x6 board (48 spaces) and the goal is to fill up the enemy's column with your pieces! Once that happens the game will reset and log all the data for the AI's to observe! In the first few games the AI will take random moves and attempt winning. Once one of the AI's win, the information on how they one gets processed and they try to attempt it again using that information!
Rather then focusing on attacking, these AI naturally plays offensively! You will see them defend their base while at the same time try to attack the enemy!
The AI also doesn't know which spaces it must fill to win so as it plays it must learn on it's own (this also allows for the creation of custom maps).
iBlock has multiple different game options for how to set up the way the AI will play! New gamemodes coming soon!
Copyright (c) SavSec 2017
Copyright (c) SavSec iBlock 2017
Format:
Encoding: UTF-8
Tab: 2
System:
Python 2.7
Modules: sys, time, random
License:
MIT License
Developer:
@Russian_Otter - Instagram
"""
import sys, random, time, argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--intelligence",help="Activates dynamic machine learning mode for both players",action="store_true")
parser.add_argument("-r", "--random",help="Activates random machine learning mode for both players",action="store_true")
parser.add_argument("-p", "--pvai",help="Activates Player vs AI mode",action="store_true")
parser.add_argument("-R", "--Reset",help="Activates reset mode for both players",action="store_true")
parser.add_argument("-sm", "--show-moves",help="Shows the last move for each turn",action="store_true")
parser.add_argument("-d", "--display",help="Set to False to disable table display",default=True)
parser.add_argument("-pg", "--progress",help="Displays progress graphs", action="store_true")
parser.add_argument("-t", "--time",help="Turn rate for each player",default=0.05)
parser.add_argument("-q", "--quick",help="Plays a 1 match game", action="store_true")
parser.add_argument("-H", "--Hide",help="Hides help",action="store_true")
args = parser.parse_args()
if args.pvai:
human_mode = True
else:
human_mode = False
if args.Reset:
fresh_start1,fresh_start0 = True,True
else:
fresh_start0,fresh_start1 = False,False
if args.show_moves:
show_move = True
else:
show_move = False
if args.progress:
progress_graphing = True
else:
progress_graphing = False
display = args.display
mtime = float(args.time)
if show_move:
from time import gmtime, strftime
if progress_graphing:
"""
import matplotlib.pyplot as plt
import numpy as np
Still in progress
"""
pass
global last_move
last_move = ["41"]
table = {
"1":".", "2":".", "3":".", "4":".", "5":".", "6":".", "7":".", "8":"0",
"9":".", "10":".", "11":".", "12":".", "13":".", "14":".", "15":".", "16":".",
"17":".", "18":".", "19":".", "20":".", "21":".", "22":".", "23":".", "24":".",
"25":".", "26":".", "27":".", "28":".", "29":".", "30":".", "31":".", "32":".",
"33":".", "34":".", "35":".", "36":".", "37":".", "38":".", "39":".", "40":".",
"41":"1", "42":".", "43":".", "44":".", "45":".", "46":".", "47":".", "48":"."
}
# up left = -9
# up down = +-8
# right left = +-1
# down right = +9
# up right = -7
# down left = +7
def reset_knowldge():
"""
Reseting knowldge wipes all past game history and updates it with random winning moves.
"""
print "Reseting Knowldge..."
time.sleep(1)
if not fresh_start0 or not fresh_start1:
print "You must change values: \"fresh_start0\" and \"fresh_start1\" to True before reseting."
print "Be sure to change those values back to False while not in reset mode."
time.sleep(3)
if mtime > 0.0009 or display == True:
print "Consider Temporarily Changing You Game Settings For Reset:"
print "-Speed should be less than 0.0009"
print "-Display should be turned off"
time.sleep(3)
try:
iblock(False,False)
except:
pass
print "Reset Complete!"
time.sleep(1)
def random_ai_mode():
"""
Random AI mode disables the learning ability of the program which causes it to make random moves.
(Personally this is more entiretaining than Intelligence Mode)
"""
print "Starting Random AI Mode..."
if mtime < 0.05:
print "Consider changing the frame rate to more than 0.05 while in random mode"
time.sleep(3)
if display == False:
print "Consider changing display to True inorder to view the game in random mode"
time.sleep(3)
time.sleep(1)
try:
iblock(False,False)
except:
print "Game Paused"
def intelligent_1v1():
"""
This is a 1 match mode to quickly see who wins a fast fight
"""
print "Starting Intelligent 1v1..."
if mtime < 0.005:
print "Consider changing the frame rate to more than 0.005 while in intelligence mode"
time.sleep(3)
if display == False:
print "Consider changing display to True inorder to view the game in intelligence mode"
time.sleep(3)
time.sleep(1)
try:
iblock(True,True)
except:
print "Game Paused"
def human_vs_iblock():
"""
You'll probably loose...
"""
# Coming Soon #
if not args.Hide:
print """
_ _____ _ _
|_| __ | |___ ___| |_
| | __ -| | . | _| '_|
|_|_____|_|___|___|_,_|
"""
parser.print_help()
print
print "Available Game Modes/Options:"
print "-Random Mode"
print "-Intelligence Mode"
print "-1 Match Intelligence Mode"
print "-Reset Mode"
print "-Human vs Player Mode"
print "\n(Enter the function name for the gamemode you want in the python terminal or set your arguments to choose your gamemode)\n"
print "Set arguments to \"-H\" to disable this message."
time.sleep(0.5)
if len(sys.argv) > 1:
if args.intelligence:
intelligence_mode()
sys.exit()
if args.random:
random_ai_mode()
sys.exit()
if args.Reset:
print "Stop the program once both player's fitness is at your desired stat"
reset_knowldge()
sys.exit()
if args.quick:
intelligent_1v1()
sys.exit()
if human_mode:
try:
iblockgo()
except:
print "Game Paused/Stopped"
| 27.115756 | 398 | 0.600024 |
7bc9519279bbaea50bce0ecf16967333a0bd62b5 | 319 | py | Python | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
] | 5 | 2020-12-05T14:00:39.000Z | 2021-12-02T11:44:54.000Z | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
] | 11 | 2021-03-15T17:51:43.000Z | 2021-11-24T13:24:39.000Z | Autre/Internet.py | Yaya-Cout/Python | 500a2bc18cbb0b9bf1470943def8fd8e8e76d36d | [
"Unlicense"
] | 1 | 2021-01-02T14:15:10.000Z | 2021-01-02T14:15:10.000Z |
if __name__ == "__main__":
main()
| 19.9375 | 70 | 0.567398 |
7bc96e1706c4c4494a902bdb9aa51a33d9269620 | 6,502 | py | Python | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | older/rc-qradar-search/query_runner/components/ariel_query.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | """Action Module circuits component to update incidents from QRadar Ariel queries"""
import logging
from datetime import datetime
import time
import copy
import json
from string import Template
from pkg_resources import Requirement, resource_filename
import resilient_circuits.template_functions as template_functions
from query_runner.lib.query_action import QueryRunner
from query_runner.lib.qradar_rest_client import QRadarClient
from query_runner.lib.misc import SearchTimeout, SearchFailure
try:
basestring
except NameError:
basestring = str
LOG = logging.getLogger(__name__)
CONFIG_DATA_SECTION = 'ariel'
def config_section_data():
"""sample config data for use in app.config"""
section_config_fn = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/app.config.qradar")
query_dir = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/queries_ariel")
with open(section_config_fn, 'r') as section_config_file:
section_config = Template(section_config_file.read())
return section_config.safe_substitute(directory=query_dir)
#############################
# Functions for running Query
#############################
def _wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval):
""" Poll QRadar until search execution finishes """
start_time = time.time()
search_status = qradar_client.get_search_status(search_id)
if not search_status:
# Sometimes it takes a little while to be able to query a search id
time.sleep(4)
search_status = qradar_client.get_search_status(search_id)
while search_status.get("status", "") in ("WAIT", "EXECUTE", "SORTING"):
if timeout != 0:
if time.time() - start_time > timeout:
raise SearchTimeout(search_id, search_status.get("status", ""))
time.sleep(polling_interval)
search_status = qradar_client.get_search_status(search_id)
if search_status.get("status", "") != "COMPLETED":
LOG.error(search_status)
raise SearchFailure(search_id, search_status.get("status", ""))
# end _wait_for_query_to_complete
def _get_query_results(search_id, qradar_client, item_range):
""" Get results from a complete QRadar query """
if item_range:
headers = {"Range": item_range}
else:
headers = None
url = "ariel/searches/{0}/results".format(search_id, headers=headers)
response = qradar_client.get(url)
LOG.debug(response)
# Replace "NULL" with ""
response = remove_nulls(response)
return response
# end _get_query_results
def remove_nulls(d):
""" recursively replace 'NULL' with '' in dictionary """
if isinstance(d, basestring):
if d == u'NULL':
return u''
else:
return d
new = {}
LOG.debug("d={d} ".format(d=d))
LOG.debug("type of d is {t}".format(t=type(d)))
for k, v in d.items():
if isinstance(v, dict):
v = remove_nulls(v)
elif isinstance(v, list):
v = [remove_nulls(v1) for v1 in v]
elif isinstance(v, basestring) and v == u'NULL':
v = u''
new[k] = v
LOG.info("Returning: {n}".format(n=new))
return new
def run_search(options, query_definition, event_message):
""" Run Ariel search and return result """
# Read the options and construct a QRadar client
qradar_url = options.get("qradar_url", "")
qradar_token = options.get("qradar_service_token", "")
timeout = int(options.get("query_timeout", 600))
polling_interval = int(options.get("polling_interval", 5))
if not all((qradar_url, qradar_token, timeout, polling_interval)):
LOG.error("Configuration file missing required values!")
raise Exception("Missing Configuration Values")
verify = options.get("qradar_verify", "")
if verify[:1].lower() in ("0", "f", "n"):
verify = False
else:
verify = True
qradar_client = QRadarClient(qradar_url, qradar_token, verify=verify)
error = None
response = None
try:
params = {'query_expression': query_definition.query}
url = "ariel/searches"
response = qradar_client.post(url, params=params)
LOG.debug(response)
search_id = response.get('search_id', '')
if not search_id:
error = "Query Failed: " + response.get("message", "No Error Message Found")
else:
LOG.info("Queued Search %s", search_id)
_wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval)
# Query Execution Finished, Get Results
response = _get_query_results(search_id, qradar_client, query_definition.range)
except Exception as exc:
if not query_definition.onerror:
raise
LOG.error(exc)
error = u"{}".format(exc)
if error:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
mapdata.update({"error": error})
error_template = json.dumps({"events": [query_definition.onerror]}, indent=2)
error_rendered = template_functions.render_json(error_template, mapdata)
response = error_rendered
if not response or len(response["events"]) == 0:
LOG.warn("No data returned from query")
if query_definition.default:
mapdata = copy.deepcopy(event_message)
mapdata.update(query_definition.vars)
mapdata.update({"query": query_definition.query})
default_template = json.dumps({"events": [query_definition.default]}, indent=2)
default_rendered = template_functions.render_json(default_template, mapdata)
response = default_rendered
return response
# end run_search
| 36.324022 | 113 | 0.669948 |
7bcaa605df103e994b12588df4d84741fe74b87f | 2,371 | py | Python | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | first/sendmail-practice.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from email import encoders
from email.header import Header
from email.mime.multipart import MIMEBase, MIMEMultipart
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
#
#
from_addr = input('From')
password = input('Password')
#
to_addr = input('To')
#
smtp_server = input('SMTP server')
#
#
msg = MIMEText('Hello,send by Python...', 'plain', 'utf-8')
# HTML
msg = MIMEText('<html><body><h1>Hello</h1>' +
'<p>send by <a href="http://www.python.org">Python</a>...</p>' +
'</body></html>', 'html', 'utf-8')
#
msg['From'] = _format_addr('Python<%s>' % from_addr)
#
msg['To'] = _format_addr('<%s>' % to_addr)
#
msg['Subject'] = Header('SMTP...', 'utf-8').encode()
#
msg = MIMEMultipart()
msg = MIMEMultipart('alternative')
msg['From'] = _format_addr('Python<%s>' % from_addr)
msg['To'] = _format_addr('<%s>' % to_addr)
msg['Subject'] = Header('SMTP', 'utf-8').encode()
# MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
msg.attach(MIMEText('<html><body><h1>Hello</h1>' +
'<p><img src="cid:0"></p>' +
'</body></html>', 'html', 'utf-8'))
with open('/Users/doc88/Desktop/banner.png', 'rb') as f:
# MIME
mime = MIMEBase('image', 'jpeg', filename='banner.png')
#
mime.add_header('Content-Disposition', 'attachment', filename='banner.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
#
mime.set_payload(f.read())
# Base64
encoders.encode_base64(mime)
# MIMEMultipart:
msg.attach(mime)
try:
#
#
server = smtplib.SMTP_SSL(smtp_server, 465)
# SMTP
server.set_debuglevel(1)
#
server.login(from_addr, password)
#
#
server.sendmail(from_addr, [to_addr], msg.as_string())
#
server.quit()
print('Success!')
except smtplib.SMTPException as e:
print('Fail,%s' % e) | 28.22619 | 79 | 0.634753 |
7bcea7388e12344b8c218c07128ff9fb1cd5ed79 | 1,519 | py | Python | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | yat-master/pymodule/common_sql/plain_parser/reader.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | #!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
| 27.125 | 84 | 0.623436 |
7bcfdbc346740098cdd0e1ea01a84bd850dcb6f3 | 2,895 | py | Python | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
] | null | null | null | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
] | 2 | 2020-06-05T17:29:54.000Z | 2021-06-10T18:58:13.000Z | web/admin.py | dschien/greendoors-web | 26a10e909e6447f1709d27e58340f08372ce8f26 | [
"MIT"
] | null | null | null |
__author__ = 'schien'
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.admin import BooleanFieldListFilter
from api.models import Scan, Measure, InstalledMeasure, MeasureCategory, App, MessageThread, RedirectUrl, TrackableURL, Click, UserProfile, Favourite, \
LoggerMessage
from api.models import Device, House, Note, HomeOwnerProfile, Message
admin.site.register(RedirectUrl, RedirectUrlAdmin)
admin.site.register(TrackableURL)
admin.site.register(Click)
admin.site.register(House, HouseAdmin)
admin.site.register(Message, MessagesAdmin)
admin.site.register(MessageThread)
admin.site.register(Device, CreatedDateAdmin)
admin.site.register(Scan, CreatedDateAdmin)
admin.site.register(Note, CreatedDateAdmin)
# Define a new User admin
# bristol
admin.site.register(Measure)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(HomeOwnerProfile)
admin.site.register(InstalledMeasure)
admin.site.register(MeasureCategory)
admin.site.register(App)
admin.site.register(LoggerMessage)
# frome
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin) | 24.327731 | 152 | 0.760622 |
c8730231294cec0e238e9725d099edb7ac1ec02d | 7,359 | py | Python | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | null | null | null | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | null | null | null | compecon/basisSpline.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | 1 | 2021-06-01T03:47:35.000Z | 2021-06-01T03:47:35.000Z | import numpy as np
from scipy.sparse import csc_matrix, diags, tril
from .basis import Basis
__author__ = 'Randall'
# TODO: complete this class
# todo: compare performance of csr_matrix and csc_matrix to deal with sparse interpolation operators
# fixme: interpolation is 25 slower than in matlab when 2 dimensions!! 2x slower with only one
| 36.430693 | 119 | 0.512298 |
c873b44db1fbe52cb97100b99eb41550c409cc9f | 2,279 | py | Python | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 4 | 2019-01-11T03:41:28.000Z | 2019-09-12T06:57:17.000Z | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | null | null | null | vendors/rez-2.23.1-py2.7/rez/backport/shutilwhich.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 2 | 2019-01-10T05:00:18.000Z | 2020-02-15T16:32:56.000Z | import os
import os.path
import sys
# Modified version from Python-3.3. 'env' environ dict override has been added.
def which(cmd, mode=os.F_OK | os.X_OK, env=None):
    """ Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `env` defaults to os.environ,
    if not supplied.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        # Restored from the Python 3.3 shutil.which() this backport is based
        # on: the helper was missing here, so which() raised NameError.
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # Short circuit. If we're given a full path which matches the mode
    # and it exists, we're done here.
    if _access_check(cmd, mode):
        return cmd

    if env is None:
        env = os.environ
    path = env.get("PATH", os.defpath).split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if not os.curdir in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        default_pathext = \
            '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC'
        pathext = env.get("PATHEXT", default_pathext).split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
        # If it does match, only test that one, otherwise we have to try
        # others.
        files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        dir = os.path.normcase(dir)
        if not dir in seen:
            seen.add(dir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
| 36.174603 | 79 | 0.617376 |
c876f748ac3b92bbe9dd6ace6cf8630a36ac3d08 | 6,469 | py | Python | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
] | 2 | 2019-02-13T11:30:08.000Z | 2021-02-14T04:20:44.000Z | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
] | null | null | null | src/symbol_table.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | [
"MIT"
] | null | null | null | """Defines the classes SymbolTable and SymbolTableNode"""
import sys
from numpy import ones
| 34.227513 | 83 | 0.534549 |
c879174dc589e41a31be3771fbf140871339c500 | 151 | py | Python | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
] | null | null | null | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
] | 2 | 2022-02-23T12:03:32.000Z | 2022-02-23T14:27:29.000Z | setup.py | Will-Robin/NorthNet | 343238afbefd02b7255ef6013cbfb0e801bc2b3b | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, version
# Minimal package metadata; the importable code lives in the NorthNet/ package.
setup(
    name="NorthNet",
    version="0.0",
    author="William E. Robinson",
    packages = ["NorthNet"],
)
| 16.777778 | 37 | 0.635762 |
c87b5c6d8dff26ac4e6274273976c58563c8553b | 13,380 | py | Python | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
] | null | null | null | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
] | null | null | null | clustering/runner.py | kburnik/naps-clustering | 8ceaad61e7f1c3d76ad9e7c7491b705b936a6f19 | [
"MIT"
] | null | null | null | """Class with high-level methods for processing NAPS and NAPS BE datasets."""
from config import DATA_NAPS_BE_ALL
from lib import partition_naps
from lib import plot
from lib import plot_clusters
from lib import plot_clusters_with_probability
from lib import plot_setup
from lib import read_naps
from lib import read_naps_be
from lib import reindex_partitions
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy
import sklearn
| 34.307692 | 78 | 0.65568 |
c87d1cba2782a99d03e9fe56c04a83d537ce2a1a | 2,936 | py | Python | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_medium/1618. Maximum Font to Fit a Sentence in a Screen.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
1618. Maximum Font to Fit a Sentence in a Screen
Medium
You are given a string text. We want to display text on a screen of width w and height h. You can choose any font size from array fonts, which contains the available font sizes in ascending order.
You can use the FontInfo interface to get the width and height of any character at any available font size.
The FontInfo interface is defined as such:
interface FontInfo {
// Returns the width of character ch on the screen using font size fontSize.
// O(1) per call
public int getWidth(int fontSize, char ch);
// Returns the height of any character on the screen using font size fontSize.
// O(1) per call
public int getHeight(int fontSize);
}
The calculated width of text for some fontSize is the sum of every getWidth(fontSize, text[i]) call for each 0 <= i < text.length (0-indexed). The calculated height of text for some fontSize is getHeight(fontSize). Note that text is displayed on a single line.
It is guaranteed that FontInfo will return the same value if you call getHeight or getWidth with the same parameters.
It is also guaranteed that for any font size fontSize and any character ch:
getHeight(fontSize) <= getHeight(fontSize+1)
getWidth(fontSize, ch) <= getWidth(fontSize+1, ch)
Return the maximum font size you can use to display text on the screen. If text cannot fit on the display with any font size, return -1.
Example 1:
Input: text = "helloworld", w = 80, h = 20, fonts = [6,8,10,12,14,16,18,24,36]
Output: 6
Example 2:
Input: text = "leetcode", w = 1000, h = 50, fonts = [1,2,4]
Output: 4
Example 3:
Input: text = "easyquestion", w = 100, h = 100, fonts = [10,15,20,25]
Output: -1
Constraints:
1 <= text.length <= 50000
text contains only lowercase English letters.
1 <= w <= 10^7
1 <= h <= 10^4
1 <= fonts.length <= 10^5
1 <= fonts[i] <= 10^5
fonts is sorted in ascending order and does not contain duplicates.
"""
# """
# This is FontInfo's API interface.
# You should not implement it, or speculate about its implementation
# """
#class FontInfo(object):
# Return the width of char ch when fontSize is used.
# def getWidth(self, fontSize, ch):
# """
# :type fontSize: int
# :type ch: char
# :rtype int
# """
#
# def getHeight(self, fontSize):
# """
# :type fontSize: int
# :rtype int
# """ | 32.622222 | 260 | 0.642711 |
c880853878e1cff80cb76bcab65d294bfff7d0f4 | 6,407 | py | Python | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-01-14T06:37:42.000Z | 2022-01-14T06:37:42.000Z | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | climateeconomics/sos_wrapping/sos_wrapping_dice/tempchange/tempchange_discipline.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from climateeconomics.core.core_dice.tempchange_model import TempChange
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
| 33.025773 | 129 | 0.605119 |
c8813251417f083ef4764a6d0d80104c34d5a26a | 56,368 | py | Python | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
] | null | null | null | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
] | null | null | null | pymkm/pymkm_app.py | Guibod/pymkm | 58ac805c8072979f3059c7faafc264386ae98141 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
The PyMKM example app.
"""
__author__ = "Andreas Ehrlund"
__version__ = "2.0.4"
__license__ = "MIT"
import os
import csv
import json
import shelve
import logging
import logging.handlers
import pprint
import uuid
import sys
from datetime import datetime
import micromenu
import progressbar
import requests
import tabulate as tb
from pkg_resources import parse_version
from .pymkm_helper import PyMkmHelper
from .pymkmapi import PyMkmApi, CardmarketError
| 38.319511 | 168 | 0.482064 |
c8829aec3d5b9877236b2115916c5ca2a14ab73b | 333 | py | Python | Datasets/Terrain/us_ned_physio_diversity.py | monocilindro/qgis-earthengine-examples | 82aea8926d34ed3f4ad4a4a345ddbd225819d28f | [
"MIT"
] | 646 | 2019-12-03T06:09:03.000Z | 2022-03-28T03:37:08.000Z | Datasets/Terrain/us_ned_physio_diversity.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 10 | 2019-12-30T03:42:44.000Z | 2021-05-22T07:34:07.000Z | Datasets/Terrain/us_ned_physio_diversity.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 219 | 2019-12-06T02:20:53.000Z | 2022-03-30T15:14:27.000Z | import ee
from ee_plugin import Map
# Physiographic diversity layer from the CSP ERGo US dataset; band 'b1'
# holds the diversity value.
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
physiographicDiversity = dataset.select('b1')
# Display over the full 0-1 value range
physiographicDiversityVis = {
    'min': 0.0,
    'max': 1.0,
}
# Center on the central US at zoom level 7, then add the layer
Map.setCenter(-94.625, 39.825, 7)
Map.addLayer(
    physiographicDiversity, physiographicDiversityVis,
    'Physiographic Diversity')
| 23.785714 | 54 | 0.738739 |
c88407b58490b10ee7b7b9dec303ca0721d6f4c4 | 281 | py | Python | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
] | null | null | null | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
] | 7 | 2020-02-12T01:22:46.000Z | 2021-06-10T18:43:01.000Z | timesheet/forms.py | pincoin/windmill | fe373e5ca27c775a926e9a5538931f9394196d90 | [
"MIT"
] | null | null | null | from django import forms
from . import models
| 20.071429 | 62 | 0.701068 |
c8845f1c14219b145ec8b7fa1bba57f5b2418dfb | 497 | py | Python | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
] | null | null | null | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
] | null | null | null | bin/base64util.py | SnowleopardXI/stash | a14f016e5b568095af8d1e78addedc562e3cde70 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import base64


def encode_b64(text):
    """Base64-encode *text* (UTF-8); the result carries a trailing newline,
    matching base64.encodebytes."""
    return base64.encodebytes(text.encode("utf-8")).decode("utf-8")


def decode_b64(data):
    """Decode a base64 string back to UTF-8 text."""
    return base64.decodebytes(data.encode("utf-8")).decode("utf-8")


def main():
    """Interactive entry point: ask which direction to convert, then prompt."""
    print('Choose your choice:')
    menu = '''
1:Encode string to base64
2:Decode base64 to string
'''
    # int(input(...)) replaces the original int(eval(input(...))):
    # eval() on raw user input can execute arbitrary code.
    choice = int(input(menu))
    if choice == 1:
        print('Type string to be encoded:')
        print(encode_b64(input()))
    if choice == 2:
        print('Type string to be decoded:')
        print(decode_b64(input()))


# Guard so importing this module (e.g. in tests) does not start the prompt.
if __name__ == '__main__':
    main()
| 24.85 | 63 | 0.593561 |
c884c97e5f0b08128955897f09554f008fe34589 | 9,781 | py | Python | Code/nebulae/lib/sc/sc.py | CarlColglazier/QB_Nebulae_V2 | 3326fa1c672ba0845b28cb55847bea0c9b8e9a05 | [
"MIT"
] | 8 | 2020-05-14T20:18:29.000Z | 2021-08-08T15:18:28.000Z | Code/nebulae/lib/sc/sc.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
] | null | null | null | Code/nebulae/lib/sc/sc.py | alex-thibodeau/QB_Nebulae_V2 | 34bcf341ea8eddaa9f9ce2e7c2d2438e00e50f54 | [
"MIT"
] | null | null | null | import time, os, sys
import scsynth, scosc
server = 0 # reference to app's sc server process; assigned by start()
sndLoader = 0 # scsynth.Loader instance managing sound buffers; set by start()
synthon = 0 # did we start the scsynth process? set to 1 by start(startscsynth=1)
##workingpath = os.getcwd() # must be set to the right path in case something special is need
sndpath = os.path.join( os.getcwd() , 'sounds' ) # default folder used by loadSnd()
synthdefpath = os.path.join( os.getcwd() , 'synthdefs' )
def start( exedir='', port=57110, inputs=2, outputs=2, samplerate=44100, verbose=0,
           spew=0, startscsynth=0 ) :
    """ starts scsynth process. interfaces scsynth module.
    Inits the OSC communication and classes that handle it

    exedir -- folder containing the scsynth executable ('' = per-platform default)
    port -- UDP port for the scsynth server
    inputs/outputs -- audio channel counts passed to scsynth
    samplerate -- audio sample rate in Hz
    verbose/spew -- logging flags forwarded to scsynth
    startscsynth -- when true, also spawn the scsynth server process here
    """
    global server, sndLoader # because they are init in this func
    exe = 'scsynth'
    # if none is set take workingdir as exedir on mac and windows
    if sys.platform == 'win32' :
        exe += '.exe' # add extension
        if exedir == '' : exedir = 'C:\Program Files\SuperCollider'
    elif os.uname()[0] == 'Linux' :
        if exedir == '' : exedir = '/usr/bin'
        if not os.path.isfile(os.path.join(exedir, exe)): # in case it is in /usr/bin/local
            print 'Error : /usr/bin/scsynth does not exist. Trying to find scsnth in /usr/local/bin...'
            exedir = '/usr/local/bin'
    elif sys.platform == 'darwin':
        if exedir == '' : exedir = '/Applications/SuperCollider'
    print "trying to run scsynth from :", exedir
    # Open the client-side OSC connection; the commented options only apply
    # to the server process, which is spawned separately below.
    server = scsynth.start(
        #exe = exe,
        #exedir = exedir,
        port = port,
        #inputs = inputs,
        #outputs = outputs,
        #samplerate = samplerate,
        verbose = verbose,
        spew = spew,
        )
    if startscsynth : # starts scsynth server process
        global synthon
        synthon = 1
        server.instance = scsynth.startServer(
            exe = exe,
            exedir = exedir,
            port = port,
            inputs = inputs,
            outputs = outputs,
            samplerate = samplerate,
            verbose = verbose,
            #spew = spew,
            )
        time.sleep(1) # wait to start up
    sndLoader = scsynth.Loader(server) # manages sound files
def register(address, fun):
    """Attach *fun* as the callback invoked for OSC messages at *address*."""
    server.listener.register(address, fun)
# sound buffer related utilities.
def loadSnd(filename, wait=False):
    """ load sound buffer from current sound folder (sc.sndpath) and return buffer's id

    sends back /b_info labeled OSC message. The arguments to /b_info are as
    follows:
     int - buffer number
     int - number of frames
     int - number of channels
    """
    # Resolve against the module-level sound folder and delegate.
    return loadSndAbs(os.path.join(sndpath, filename), wait)
def unloadSnd(buf_id):
    """Free the server-side sound buffer identified by *buf_id*."""
    sndLoader.unload(buf_id, wait=False)
def loadSndAbs(path, wait=False):
    """ same as loadSnd but takes absolute path to snd file

    Returns the buffer id on success; prints a warning and returns 0 when
    the file does not exist.
    """
    if os.path.isfile(path):
        return sndLoader.load(path, wait, b_query=True)
    else:
        # print() call form parses under both Python 2 and Python 3;
        # the original bare "print" statement is a SyntaxError on Python 3.
        print("file %s does NOT exist" % path)
        return 0
# classes
| 37.190114 | 120 | 0.600552 |
c884d28504ed798c203413f680ec73fe70726669 | 357 | py | Python | test/test_api/test_routes/test_about.py | MRmlik12/biblioteczka | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
] | null | null | null | test/test_api/test_routes/test_about.py | MRmlik12/biblioteczka | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
] | 3 | 2021-07-29T08:34:09.000Z | 2021-07-29T10:12:34.000Z | test/test_api/test_routes/test_about.py | MRmlik12/catana | 3fcde24cd42d0155c3a20585d20ac0d0a7989101 | [
"MIT"
] | null | null | null | import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from starlette.status import HTTP_200_OK
pytestmark = pytest.mark.asyncio
| 29.75 | 83 | 0.809524 |
c88551ac723dd08106aa9434592b74d5d60bf757 | 2,614 | py | Python | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
] | null | null | null | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
] | 12 | 2018-08-26T14:10:18.000Z | 2021-04-15T21:48:58.000Z | linefinder/job_scripts/linefinder_sightlines.py | zhafen/linefinder | 0f4f36a83246f1b833d0c281e635d86be3d1eb95 | [
"MIT"
] | 1 | 2021-05-19T16:45:21.000Z | 2021-05-19T16:45:21.000Z | import linefinder.linefinder as linefinder
import linefinder.config as linefinder_config
import linefinder.utils.file_management as file_management
########################################################################
sim_name = 'm12i'
'''The simulation to run tracking on.'''
tag = '{}_sightline'.format( sim_name )
'''Identifying tag used as part of the filenames.
E.g. the IDs file will have the format `ids_{}.hdf5.format( tag )`.
'''
# Tracking Parameters
tracker_kwargs = {
    # What particle types to track. Typically just stars and gas.
    'p_types': [ 0, 4,],
    # What snapshots to compile the particle tracks for.
    'snum_start': 1,
    'snum_end': 600,
    'snum_step': 1,
}
# Resolves simulation/halo data directories for the chosen simulation.
file_manager = file_management.FileManager()
sampler_kwargs = {
    'ignore_duplicates': True,
    'p_types': [ 0, 4 ],
    'snapshot_kwargs': {
        'sdir': file_manager.get_sim_dir( sim_name ),
        'halo_data_dir': file_manager.get_halo_dir( sim_name ),
        'main_halo_id': linefinder_config.MAIN_MT_HALO_ID[sim_name],
        'ahf_index': 600,
        'length_scale_used': 'R_vir',
    }
}
visualization_kwargs = {
    'install_firefly': True,
    'export_to_firefly_kwargs': {
        'firefly_dir': '/work/03057/zhafen/firefly_repos/sightline',
        # Particle classifications exposed in the Firefly UI, paired
        # one-to-one with classification_ui_labels below.
        'classifications': [
            'is_in_CGM',
            'is_CGM_IGM_accretion',
            'is_CGM_wind',
            'is_CGM_satellite_wind',
            'is_CGM_satellite_ISM',
        ],
        'classification_ui_labels': [ 'All', 'IGMAcc', 'Wind', 'SatWind', 'Sat' ],
        'tracked_properties': [
            'logT',
            'logZ',
            'logDen',
            'vr_div_v_cool',
            'logvr_div_v_cool_offset',
        ],
        'tracked_filter_flags': [ True, ] * 5,
        'tracked_colormap_flags': [ True, ] * 5,
        # presumably the snapshot number exported to Firefly -- TODO confirm
        'snum': 465,
    },
}
# This is the actual function that runs linefinder.
# In general you don't need to touch this function but if you want to,
# for example, turn off one of the steps because you're rerunning and you
# already did that step, you can do so below.
linefinder.run_linefinder_jug(
    sim_name = sim_name,
    tag = tag,
    galdef = '_galdefv3',
    # The galdef is a set of parameters used for the galaxy linking and
    # classification steps. Don't touch this unless you know what you're doing.
    tracker_kwargs = tracker_kwargs,
    sampler_kwargs = sampler_kwargs,
    visualization_kwargs = visualization_kwargs,
    run_id_selecting = False,
    run_id_sampling = False,
    run_tracking = False,
    run_galaxy_linking = False,
    run_classifying = False,
)
| 30.045977 | 82 | 0.630451 |
c8864bea2e2f25d967c38986aef9fb5517d5143b | 285 | py | Python | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
] | 1 | 2020-06-23T02:18:39.000Z | 2020-06-23T02:18:39.000Z | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
] | null | null | null | SwordToOffer/SwordToOffer-PythonSolution/47_Sum_Solution.py | dingchaofan/AlgorithmSolution | 46198e3f0dbda867e7b75f0d0e52be5f0181238a | [
"MIT"
] | 1 | 2021-01-11T12:07:03.000Z | 2021-01-11T12:07:03.000Z | # 47. 1+2+3+...+n
# 1+2+3+...+nforwhileifelseswitchcaseA?B:C
# -*- coding:utf-8 -*- | 21.923077 | 73 | 0.540351 |
c8870211f55a315e2890fcb0bc548ae67550546d | 137 | py | Python | apps/users/urls.py | akundev/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
] | null | null | null | apps/users/urls.py | akundev/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
] | 3 | 2021-03-30T14:21:08.000Z | 2021-07-07T03:04:26.000Z | apps/users/urls.py | almazkun/akundotdev | 98b47925b948c920789c5acebad86944162bf53a | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from .views import AboutTemplateView
urlpatterns = [path("", AboutTemplateView.as_view(), name="about")]
| 19.571429 | 67 | 0.759124 |
c8879bded50ae8fbfe4e76e5d099e8ada2d7784b | 2,969 | py | Python | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
] | null | null | null | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
] | null | null | null | fedireads/broadcast.py | thricedotted/fedireads | a1fbba1ba31e569489378176b0894a0a8907c14c | [
"CC0-1.0"
] | 1 | 2021-01-30T22:38:20.000Z | 2021-01-30T22:38:20.000Z | ''' send out activitypub messages '''
from base64 import b64encode
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from datetime import datetime
import json
import requests
from fedireads import incoming
from fedireads.settings import DOMAIN
def get_recipients(user, post_privacy, direct_recipients=None):
    ''' deduplicated list of recipient inboxes

    :param user: the sending user (its followers are consulted unless the
        post is direct)
    :param post_privacy: one of 'direct', 'public', 'followers'
    :param direct_recipients: explicit inbox urls to include
    :return: a fresh list of inbox urls (the caller's list is never mutated)
    '''
    # Copy so the += below can never mutate the caller's list in place
    # (the original aliased direct_recipients directly).
    recipients = list(direct_recipients) if direct_recipients else []

    if post_privacy == 'direct':
        # all we care about is direct_recipients, not followers
        return recipients

    # load all the followers of the user who is sending the message
    followers = user.followers.all()

    if post_privacy == 'public':
        # post to public shared inboxes
        shared_inboxes = set(u.shared_inbox for u in followers)
        recipients += list(shared_inboxes)
        # TODO: not every user has a shared inbox
        # TODO: direct to anyone who's mentioned

    if post_privacy == 'followers':
        # don't send it to the shared inboxes
        inboxes = set(u.inbox for u in followers)
        recipients += list(inboxes)

    return recipients
def broadcast(sender, activity, recipients):
    ''' send out an event

    Delivers *activity* to every inbox in *recipients*, collecting
    per-recipient HTTP failures instead of aborting on the first one.
    '''
    failed = []
    for inbox in recipients:
        try:
            sign_and_send(sender, activity, inbox)
        except requests.exceptions.HTTPError as err:
            # TODO: maybe keep track of users who cause errors
            failed.append({
                'error': err,
                'recipient': inbox,
                'activity': activity,
            })
    return failed
def sign_and_send(sender, activity, destination):
    ''' Sign *activity* with the sender's RSA key (HTTP Signatures style)
    and POST it to the *destination* inbox. Raises requests.HTTPError on a
    non-2xx response. '''
    # TODO: handle http[s] with regex
    inbox_fragment = sender.inbox.replace('https://%s' % DOMAIN, '')
    now = datetime.utcnow().isoformat()
    # Pseudo-headers that form the HTTP Signature signing string
    signature_headers = [
        '(request-target): post %s' % inbox_fragment,
        'host: https://%s' % DOMAIN,
        'date: %s' % now
    ]
    message_to_sign = '\n'.join(signature_headers)
    # TODO: raise an error if the user doesn't have a private key
    # rsa-sha256: PKCS#1 v1.5 signature over the SHA-256 of the signing string
    signer = pkcs1_15.new(RSA.import_key(sender.private_key))
    signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8')))
    signature = {
        'keyId': '%s#main-key' % sender.actor,
        'algorithm': 'rsa-sha256',
        'headers': '(request-target) host date',
        'signature': b64encode(signed_message).decode('utf8'),
    }
    signature = ','.join('%s="%s"' % (k, v) for (k, v) in signature.items())
    # NOTE(review): Date is an ISO timestamp and Host includes the scheme;
    # HTTP expects an RFC 7231 date and a bare hostname -- remote servers
    # validating the signature may reject this. TODO confirm.
    response = requests.post(
        destination,
        data=json.dumps(activity),
        headers={
            'Date': now,
            'Signature': signature,
            'Host': 'https://%s' % DOMAIN,
            'Content-Type': 'application/activity+json; charset=utf-8',
        },
    )
    if not response.ok:
        response.raise_for_status()
    # successful responses are handed to the incoming handler
    incoming.handle_response(response)
| 32.988889 | 76 | 0.630852 |
c887c627a5de312187bb987f26d6bea4c3b72084 | 733 | py | Python | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
] | null | null | null | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
] | null | null | null | polls/views.py | druss16/danslist | ad06f8fa8df5936db7a60e9820f0c89a77f8879a | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from .models import Question
# Create your views here.
| 29.32 | 68 | 0.777626 |
c889096998408750f88d5b4c179ee06539614ee4 | 48,562 | py | Python | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
] | null | null | null | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
] | null | null | null | hawc_hal/HAL.py | torresramiro350/hawc_hal | 048536df22bdfa3ace2925e60d802beb76775849 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
from builtins import str
from builtins import range
from astropy.utils.misc import isiterable
from past.utils import old_div
import copy
import collections
import numpy as np
import healpy as hp
import astropy.units as u
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import poisson
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve_fft as convolve
from astropy.coordinates import Angle
from threeML.plugin_prototype import PluginPrototype
from threeML.utils.statistics.gammaln import logfactorial
from threeML.parallel import parallel_client
from threeML.io.logging import setup_logger
log = setup_logger(__name__)
log.propagate = False
from tqdm.auto import tqdm
from astromodels import Parameter
from hawc_hal.maptree import map_tree_factory
from hawc_hal.maptree.map_tree import MapTree
from hawc_hal.maptree.data_analysis_bin import DataAnalysisBin
from hawc_hal.response import hawc_response_factory
from hawc_hal.convolved_source import ConvolvedPointSource, \
ConvolvedExtendedSource3D, ConvolvedExtendedSource2D, ConvolvedSourcesContainer
from hawc_hal.healpix_handling import FlatSkyToHealpixTransform
from hawc_hal.healpix_handling import SparseHealpix
from hawc_hal.healpix_handling import get_gnomonic_projection
from hawc_hal.psf_fast import PSFConvolutor
from hawc_hal.log_likelihood import log_likelihood
from hawc_hal.util import ra_to_longitude
def get_saturated_model_likelihood(self):
"""
Returns the likelihood for the saturated model (i.e. a model exactly equal to observation - background).
:return:
"""
return sum(self._saturated_model_like_per_maptree.values())
def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=None):
"""
Set the active analysis bins to use during the analysis. It can be used in two ways:
- Specifying a range: if the response and the maptree allows it, you can specify a minimum id and a maximum id
number. This only works if the analysis bins are numerical, like in the normal fHit analysis. For example:
> set_active_measurement(bin_id_min=1, bin_id_max=9)
- Specifying a list of bins as strings. This is more powerful, as allows to select any bins, even
non-contiguous bins. For example:
> set_active_measurement(bin_list=[list])
:param bin_id_min: minimum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_id_max: maximum bin (only works for fHit analysis. For the others, use bin_list)
:param bin_list: a list of analysis bins to use
:return: None
"""
# Check for legal input
if bin_id_min is not None:
assert bin_id_max is not None, (
"If you provide a minimum bin, you also need to provide a maximum bin."
)
# Make sure they are integers
bin_id_min = int(bin_id_min)
bin_id_max = int(bin_id_max)
self._active_planes = []
for this_bin in range(bin_id_min, bin_id_max + 1):
this_bin = str(this_bin)
if this_bin not in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
else:
assert bin_id_max is None, (
"If you provie a maximum bin, you also need to provide a minimum bin."
)
assert bin_list is not None
self._active_planes = []
for this_bin in bin_list:
if not this_bin in self._all_planes:
raise ValueError(f"Bin {this_bin} is not contained in this maptree.")
self._active_planes.append(this_bin)
if self._likelihood_model:
self.set_model( self._likelihood_model )
def display(self, verbose=False):
"""
Prints summary of the current object content.
"""
log.info("Region of Interest: ")
log.info("-------------------")
self._roi.display()
log.info("")
log.info("Flat sky projection: ")
log.info("--------------------")
log.info(
f"Width x height {self._flat_sky_projection.npix_width} x {self._flat_sky_projection.npix_height} px"
)
#log.info("Width x height: %s x %s px" % (self._flat_sky_projection.npix_width,
# self._flat_sky_projection.npix_height))
log.info(f"Pixel sizes: {self._flat_sky_projection.pixel_size} deg")
#log.info("Pixel sizes: %s deg" % self._flat_sky_projection.pixel_size)
log.info("")
log.info("Response: ")
log.info("---------")
self._response.display(verbose)
log.info("")
log.info("Map Tree: ")
log.info("----------")
self._maptree.display()
log.info("")
#log.info("Active energy/nHit planes ({}):".format(len(self._active_planes)))
log.info(f"Active energy/nHit planes ({len(self._active_planes)}):")
log.info("-------------------------------")
log.info(self._active_planes)
def set_model(self, likelihood_model_instance):
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
self._likelihood_model = likelihood_model_instance
# Reset
self._convolved_point_sources.reset()
self._convolved_ext_sources.reset()
# For each point source in the model, build the convolution class
for source in list(self._likelihood_model.point_sources.values()):
this_convolved_point_source = ConvolvedPointSource(source, self._response, self._flat_sky_projection)
self._convolved_point_sources.append(this_convolved_point_source)
# Samewise for extended sources
ext_sources = list(self._likelihood_model.extended_sources.values())
# NOTE: ext_sources evaluate to False if empty
if ext_sources:
# We will need to convolve
self._setup_psf_convolutors()
for source in ext_sources:
if source.spatial_shape.n_dim == 2:
this_convolved_ext_source = ConvolvedExtendedSource2D(source,
self._response,
self._flat_sky_projection)
else:
this_convolved_ext_source = ConvolvedExtendedSource3D(source,
self._response,
self._flat_sky_projection)
self._convolved_ext_sources.append(this_convolved_ext_source)
def get_excess_background(self, ra, dec, radius):
    """
    Calculate per-analysis-bin area, excess (data - background), model and
    background counts within ``radius`` degrees of (ra, dec).

    :param ra: R.A. of the center (degrees)
    :param dec: declination of the center (degrees)
    :param radius: radial distance away from the center (degrees)
    :returns: tuple of numpy.ndarrays (areas, excess, model, background), one
        entry per active analysis plane; this information is used in the
        get_radial_profile function.
    """
    radius_radians = np.deg2rad(radius)

    n_planes = len(self._active_planes)
    background = np.zeros(n_planes, dtype=float)
    model = np.zeros_like(background)
    signal = np.zeros_like(background)
    area = np.zeros_like(background)

    n_point_sources = self._likelihood_model.get_number_of_point_sources()
    n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

    # Healpix longitude convention (-180..180) differs from R.A. (0..360);
    # convert once before querying the disc.
    longitude = ra_to_longitude(ra)
    latitude = dec
    center = hp.ang2vec(longitude, latitude, lonlat=True)

    for i, energy_id in enumerate(self._active_planes):
        data_analysis_bin = self._maptree[energy_id]
        this_nside = data_analysis_bin.observation_map.nside

        pixels_at_radius = hp.query_disc(
            this_nside,
            center,
            radius_radians,
            inclusive=False,
        )

        # Disk area = pixel area (steradian) times number of enclosed pixels.
        area[i] = hp.nside2pixarea(this_nside) * pixels_at_radius.shape[0]

        # Indexes of the disk pixels within the ROI's active-pixel list.
        # NOTE(review): searchsorted returns insertion points, so pixels that
        # are *not* in the active list would map onto a neighboring entry;
        # this assumes the query disc lies inside the ROI -- TODO confirm.
        bin_active_pixel_indexes = np.searchsorted(self._active_pixels[energy_id], pixels_at_radius)

        # Obtain the data, background, and expected excess in this radial bin.
        data = data_analysis_bin.observation_map.as_partial()
        bkg = data_analysis_bin.background_map.as_partial()
        mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial()

        # Fancy indexing replaces the original per-element list comprehensions
        # (identical result, evaluated at C speed).
        this_data_tot = data[bin_active_pixel_indexes].sum()
        this_bkg_tot = bkg[bin_active_pixel_indexes].sum()
        this_model_tot = mdl[bin_active_pixel_indexes].sum()

        background[i] = this_bkg_tot
        model[i] = this_model_tot
        signal[i] = this_data_tot - this_bkg_tot

    return area, signal, model, background
def get_radial_profile(
    self,
    ra,
    dec,
    active_planes=None,
    max_radius=3.0,
    n_radial_bins=30,
    model_to_subtract=None,
    subtract_model_from_model=False,
):
    """
    Calculates radial profiles of data - background & model.

    :param ra: R.A. of origin for radial profile.
    :param dec: Declination of origin of radial profile.
    :param active_planes: List of analysis over which to average; if None, use HAWC default (bins 1-9).
    :param: max_radius: Radius up to which the radial profile is evaluated;
    for the disk to calculate the gamma/hadron weights (Default: 3.0).
    :param n_radial_bins: Number of bins for the radial profile (Default: 30).
    :param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
    :param subtract_model_from_model: If True and model_to_subtract is not None,
    subtract model from model too (Default: False).
    :return: np.arrays with the radii, model profile, data profile, data uncertainty, and
    list of analysis bins used.
    """
    # default is to use all active bins
    if active_planes is None:
        active_planes = self._active_planes

    # Make sure we use bins with data.
    # good_planes is a boolean mask over self._active_planes, used below to
    # select the matching columns of the (radius x plane) arrays.
    good_planes = [plane_id in active_planes for plane_id in self._active_planes]
    plane_ids = set(active_planes) & set(self._active_planes)

    # Radial bin width and bin-center radii.
    delta_r = 1.0*max_radius/n_radial_bins
    radii = np.array([delta_r*(r + 0.5) for r in range(0, n_radial_bins)])

    # Get area of all pixels in a given circle
    # The area of each ring is then given by the difference between two
    # subsequent circe areas.
    # NOTE(review): get_excess_background is evaluated once per quantity per
    # radius (4 full passes per ring) although a single call returns all four
    # arrays; this could be reduced to one pass -- TODO consider.
    area = np.array(
        [self.get_excess_background(ra, dec, r + 0.5*delta_r)[0] for r in radii ]
    )
    temp = area[1:] - area[:-1]
    area[1:] = temp

    # model
    # convert 'top hat' excess into 'ring' excesses.
    model = np.array(
        [self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
    )
    temp = model[1:] - model[:-1]
    model[1:] = temp

    # signals
    signal = np.array(
        [self.get_excess_background(ra, dec, r + 0.5*delta_r)[1] for r in radii]
    )
    temp = signal[1:] - signal[:-1]
    signal[1:] = temp

    # backgrounds
    bkg = np.array(
        [self.get_excess_background(ra, dec, r + 0.5*delta_r)[3] for r in radii]
    )
    temp = bkg[1:] - bkg[:-1]
    bkg[1:] = temp

    counts = signal + bkg

    if model_to_subtract is not None:
        # Temporarily swap in the model to subtract, evaluate its ring
        # expectations, then restore the original model afterwards.
        this_model = copy.deepcopy(self._likelihood_model)
        self.set_model(model_to_subtract)

        model_subtract = np.array(
            [self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii]
        )
        temp = model_subtract[1:] - model_subtract[:-1]
        model_subtract[1:] = temp

        signal -= model_subtract
        if subtract_model_from_model:
            model -= model_subtract

        self.set_model(this_model)

    # NOTE: weights are calculated as expected number of gamma-rays/number of background counts.
    # here, use max_radius to evaluate the number of gamma-rays/bkg counts.
    # The weights do not depend on the radius, but fill a matrix anyway so
    # there's no confusion when multiplying them to the data later.
    # Weight is normalized (sum of weights over the bins = 1).
    total_excess = np.array(
        self.get_excess_background(ra, dec, max_radius)[1]
    )[good_planes]
    total_model = np.array(
        self.get_excess_background(ra, dec, max_radius)[2]
    )[good_planes]
    total_bkg = np.array(
        self.get_excess_background(ra, dec, max_radius)[3]
    )[good_planes]
    w = np.divide(total_model, total_bkg)
    weight = np.array([w/np.sum(w) for r in radii])

    # restrict profiles to the user-specified analysis bins
    area = area[:, good_planes]
    signal = signal[:, good_planes]
    model = model[:, good_planes]
    counts = counts[:, good_planes]
    bkg = bkg[:, good_planes]

    # average over the analysis bins
    excess_data = np.average(signal/area, weights=weight, axis=1)
    excess_error = np.sqrt(np.sum(counts*weight*weight/(area*area), axis=1))
    excess_model = np.average(model/area, weights=weight, axis=1)

    return radii, excess_model, excess_data, excess_error, sorted(plane_ids)
def plot_radial_profile(
    self,
    ra,
    dec,
    active_planes=None,
    max_radius=3.0,
    n_radial_bins=30,
    model_to_subtract=None,
    subtract_model_from_model=False
):
    """
    Plots radial profiles of data - background & model.

    :param ra: R.A. of origin for radial profile.
    :param dec: Declination of origin of radial profile.
    :param active_planes: List of analysis bins over which to average;
    if None, use HAWC default (bins 1-9).
    :param max_radius: Radius up to which the radial profile is evaluated; also
    used as the radius for the disk to calculate the gamma/hadron weights. Default: 3.0
    :param n_radial_bins: Number of bins for the radial profile (Default: 30).
    :param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None).
    :param subtract_model_from_model: If True and model_to_subtract is not None, subtract from model too (Default: False).
    :return: plot of data - background vs model radial profiles.
    """
    (
        radii,
        excess_model,
        excess_data,
        excess_error,
        plane_ids,
    ) = self.get_radial_profile(
        ra,
        dec,
        active_planes,
        max_radius,
        n_radial_bins,
        model_to_subtract,
        subtract_model_from_model,
    )

    fig, ax = plt.subplots(figsize=(10, 8))

    # Data points: background-subtracted excess with count uncertainties.
    plt.errorbar(
        radii,
        excess_data,
        yerr=excess_error,
        capsize=0,
        color="black",
        label="Excess (data-bkg)",
        fmt=".",
    )

    plt.plot(radii, excess_model, color="red", label="Model")

    plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1)
    plt.axhline(0, color="deepskyblue", linestyle="--")

    x_limits = [0, max_radius]
    plt.xlim(x_limits)

    plt.ylabel(r"Apparent Radial Excess [sr$^{-1}$]")
    # Raw f-string: '\c' in '\circ' would be an invalid escape sequence in a
    # normal string literal (same rendered label, no DeprecationWarning).
    plt.xlabel(
        rf"Distance from source at ({ra:0.2f} $^{{\circ}}$, {dec:0.2f} $^{{\circ}}$)"
    )

    if len(plane_ids) == 1:
        title = f"Radial Profile, bin {plane_ids[0]}"
    else:
        # Wrap long bin lists so the title stays inside the figure width.
        # BUGFIX: the wrapped title used to be immediately overwritten by
        # 'title = tmptitle', which made the wrapping dead code.
        tmptitle = f"Radial Profile, bins \n{plane_ids}"
        width = 70
        title = "\n".join(
            tmptitle[i:i + width] for i in range(0, len(tmptitle), width)
        )

    plt.title(title)
    ax.grid(True)

    try:
        plt.tight_layout()
    except Exception:
        # Best effort: tight_layout can fail for some backends/figure states.
        pass

    return fig
def display_spectrum(self):
    """
    Make a plot of the current spectrum and its residuals (integrated over space)

    :return: a matplotlib.Figure
    """
    n_point_sources = self._likelihood_model.get_number_of_point_sources()
    n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

    # Per-plane totals: observed counts, model+background, model-only,
    # background-subtracted counts, and asymmetric count uncertainties.
    total_counts = np.zeros(len(self._active_planes), dtype=float)
    total_model = np.zeros_like(total_counts)
    model_only = np.zeros_like(total_counts)
    net_counts = np.zeros_like(total_counts)
    yerr_low = np.zeros_like(total_counts)
    yerr_high = np.zeros_like(total_counts)

    for i, energy_id in enumerate(self._active_planes):
        data_analysis_bin = self._maptree[energy_id]

        this_model_map_hpx = self._get_expectation(data_analysis_bin, energy_id, n_point_sources, n_ext_sources)

        this_model_tot = np.sum(this_model_map_hpx)
        this_data_tot = np.sum(data_analysis_bin.observation_map.as_partial())
        this_bkg_tot = np.sum(data_analysis_bin.background_map.as_partial())

        total_counts[i] = this_data_tot
        net_counts[i] = this_data_tot - this_bkg_tot
        model_only[i] = this_model_tot

        this_wh_model = this_model_tot + this_bkg_tot
        total_model[i] = this_wh_model

        if this_data_tot >= 50.0:
            # Gaussian limit
            # Under the null hypothesis the data are distributed as a Gaussian with mu = model
            # and sigma = sqrt(model)
            # NOTE: since we neglect the background uncertainty, the background is part of the
            # model
            yerr_low[i] = np.sqrt(this_data_tot)
            yerr_high[i] = np.sqrt(this_data_tot)
        else:
            # Low-counts
            # Under the null hypothesis the data are distributed as a Poisson distribution with
            # mean = model, plot the 68% confidence interval (quantile=[0.16,1-0.16]).
            # NOTE: since we neglect the background uncertainty, the background is part of the
            # model
            quantile = 0.16
            mean = this_wh_model
            y_low = poisson.isf(1-quantile, mu=mean)
            y_high = poisson.isf(quantile, mu=mean)
            yerr_low[i] = mean-y_low
            yerr_high[i] = y_high-mean

    # Significance-style residuals: (data - model) / sqrt(model), with the
    # count errors propagated the same way (old_div = py2/3-safe division).
    residuals = old_div((total_counts - total_model), np.sqrt(total_model))
    residuals_err = [old_div(yerr_high, np.sqrt(total_model)),
                     old_div(yerr_low, np.sqrt(total_model))]

    yerr = [yerr_high, yerr_low]

    return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err)
def get_log_like(self):
    """
    Return the value of the log-likelihood with the current values for the
    parameters
    """
    n_point_sources = self._likelihood_model.get_number_of_point_sources()
    n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

    # Make sure that no source has been added since we filled the cache
    assert (n_point_sources == self._convolved_point_sources.n_sources_in_cache and
            n_ext_sources == self._convolved_ext_sources.n_sources_in_cache), (
        "The number of sources has changed. Please re-assign the model to the plugin."
    )

    # This will hold the total log-likelihood
    total_log_like = 0

    for bin_id in self._active_planes:
        data_analysis_bin = self._maptree[bin_id]

        this_model_map_hpx = self._get_expectation(data_analysis_bin, bin_id, n_point_sources, n_ext_sources)

        # Now compare with observation
        # The first (and only) nuisance parameter is a global background
        # renormalization applied to every analysis bin.
        bkg_renorm = list(self._nuisance_parameters.values())[0].value

        obs = data_analysis_bin.observation_map.as_partial()  # type: np.array
        bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm  # type: np.array

        this_pseudo_log_like = log_likelihood(obs,
                                              bkg,
                                              this_model_map_hpx)

        # Subtract the precomputed, parameter-independent terms (log factorials
        # and the saturated-model likelihood) so the result behaves like a
        # likelihood-ratio statistic.
        total_log_like += this_pseudo_log_like - self._log_factorials[bin_id] \
                          - self._saturated_model_like_per_maptree[bin_id]

    return total_log_like
def write(self, response_file_name, map_tree_file_name):
    """
    Persist this dataset to disk in HDF format.

    :param response_file_name: filename for the detector response
    :param map_tree_file_name: filename for the map tree
    :return: None
    """
    # Serialize the two components: map tree first, then the response.
    self._maptree.write(map_tree_file_name)
    self._response.write(response_file_name)
def get_simulated_dataset(self, name):
    """
    Return a simulation of this dataset using the current model with current parameters.

    :param name: new name for the new plugin instance
    :return: a HAL instance
    """
    # First get expectation under the current model and store them, if we didn't do it yet
    if self._clone is None:
        n_point_sources = self._likelihood_model.get_number_of_point_sources()
        n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

        # Expected total counts (model + background) per analysis bin; bins
        # that are not active are stored as None and skipped below.
        expectations = collections.OrderedDict()

        for bin_id in self._maptree:
            data_analysis_bin = self._maptree[bin_id]
            if bin_id not in self._active_planes:
                expectations[bin_id] = None
            else:
                expectations[bin_id] = self._get_expectation(data_analysis_bin, bin_id,
                                                             n_point_sources, n_ext_sources) + \
                                       data_analysis_bin.background_map.as_partial()

        if parallel_client.is_parallel_computation_active():
            # Do not clone, as the parallel environment already makes clones
            clone = self
        else:
            clone = copy.deepcopy(self)

        # Cache the clone and the expectations so repeated simulations with
        # unchanged parameters reuse them instead of recomputing.
        self._clone = (clone, expectations)

    # Substitute the observation and background for each data analysis bin
    for bin_id in self._clone[0]._maptree:
        data_analysis_bin = self._clone[0]._maptree[bin_id]

        if bin_id not in self._active_planes:
            continue
        else:
            # Active plane. Generate new data
            expectation = self._clone[1][bin_id]

            # Poisson-fluctuate the expected counts to get a simulated observation.
            new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten()

            # Substitute data
            data_analysis_bin.observation_map.set_new_values(new_data)

    # Now change name and return
    self._clone[0]._name = name

    # Adjust the name of the nuisance parameter
    old_name = list(self._clone[0]._nuisance_parameters.keys())[0]
    new_name = old_name.replace(self.name, name)
    self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop(old_name)

    # Recompute biases
    self._clone[0]._compute_likelihood_biases()

    return self._clone[0]
def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False):
    """
    Make a figure containing 4 maps for each active analysis bins with respectively model, data,
    background and residuals. The model, data and residual maps are smoothed, the background
    map is not.

    :param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel, for all but
    background maps
    :param display_colorbar: whether or not to display the colorbar in the residuals
    :return: a matplotlib.Figure
    """
    n_point_sources = self._likelihood_model.get_number_of_point_sources()
    n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

    # This is the resolution (i.e., the size of one pixel) of the image
    resolution = 3.0  # arcmin

    # The image is going to cover the diameter plus 20% padding
    xsize = self._get_optimal_xsize(resolution)

    n_active_planes = len(self._active_planes)
    n_columns = 4

    # One row per active plane, four columns: model, excess, background, residuals.
    fig, subs = plt.subplots(n_active_planes, n_columns,
                             figsize=(2.7 * n_columns, n_active_planes * 2), squeeze=False)

    prog_bar = tqdm(total = len(self._active_planes), desc="Smoothing planes")

    # Placeholders for the four AxesImage handles of the current row
    # (needed when attaching colorbars below).
    images = ['None'] * n_columns

    for i, plane_id in enumerate(self._active_planes):
        data_analysis_bin = self._maptree[plane_id]
        # Get the center of the projection for this plane
        this_ra, this_dec = self._roi.ra_dec_center
        # Make a full healpix map for a second
        whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense()

        # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
        longitude = ra_to_longitude(this_ra)

        # Declination is already between -90 and 90
        latitude = this_dec

        # Background and excess maps
        bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True)

        # Make all the projections: model, excess, background, residuals
        proj_model = self._represent_healpix_map(fig, whole_map,
                                                 longitude, latitude,
                                                 xsize, resolution, smoothing_kernel_sigma)
        # Here we removed the background otherwise nothing is visible
        # Get background (which is in a way "part of the model" since the uncertainties are neglected)
        proj_data = self._represent_healpix_map(fig, bkg_subtracted,
                                                longitude, latitude,
                                                xsize, resolution, smoothing_kernel_sigma)
        # No smoothing for this one (because a goal is to check it is smooth).
        proj_bkg = self._represent_healpix_map(fig, background_map,
                                               longitude, latitude,
                                               xsize, resolution, None)
        proj_residuals = proj_data - proj_model

        # Common color scale range for model and excess maps
        vmin = min(np.nanmin(proj_model), np.nanmin(proj_data))
        vmax = max(np.nanmax(proj_model), np.nanmax(proj_data))

        # Plot model
        images[0] = subs[i][0].imshow(proj_model, origin='lower', vmin=vmin, vmax=vmax)
        subs[i][0].set_title('model, bin {}'.format(data_analysis_bin.name))

        # Plot data map
        images[1] = subs[i][1].imshow(proj_data, origin='lower', vmin=vmin, vmax=vmax)
        subs[i][1].set_title('excess, bin {}'.format(data_analysis_bin.name))

        # Plot background map.
        images[2] = subs[i][2].imshow(proj_bkg, origin='lower')
        subs[i][2].set_title('background, bin {}'.format(data_analysis_bin.name))

        # Now residuals
        images[3] = subs[i][3].imshow(proj_residuals, origin='lower')
        subs[i][3].set_title('residuals, bin {}'.format(data_analysis_bin.name))

        # Remove numbers from axis
        for j in range(n_columns):
            subs[i][j].axis('off')

        if display_colorbar:
            for j, image in enumerate(images):
                plt.colorbar(image, ax=subs[i][j])

        prog_bar.update(1)

    fig.set_tight_layout(True)

    return fig
def display_stacked_image(self, smoothing_kernel_sigma=0.5):
    """
    Display a map with all active analysis bins stacked together.

    :param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel to apply
    :return: a matplotlib.Figure instance
    """
    # This is the resolution (i.e., the size of one pixel) of the image in arcmin
    resolution = 3.0

    # The image is going to cover the diameter plus 20% padding
    xsize = self._get_optimal_xsize(resolution)

    active_planes_bins = [self._maptree[x] for x in self._active_planes]

    # Get the center of the projection for this plane
    this_ra, this_dec = self._roi.ra_dec_center

    # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. We need to fix that:
    longitude = ra_to_longitude(this_ra)

    # Declination is already between -90 and 90
    latitude = this_dec

    # Running sum of background-subtracted maps over all active planes.
    total = None

    for i, data_analysis_bin in enumerate(active_planes_bins):
        # Plot data
        background_map = data_analysis_bin.background_map.as_dense()
        this_data = data_analysis_bin.observation_map.as_dense() - background_map
        idx = np.isnan(this_data)
        # this_data[idx] = hp.UNSEEN

        if i == 0:
            total = this_data
        else:
            # Sum only when there is no UNSEEN, so that the UNSEEN pixels will stay UNSEEN
            total[~idx] += this_data[~idx]

    # Graticule spacing: 1/15 of the ROI diameter, in degrees.
    delta_coord = (self._roi.data_radius.to("deg").value * 2.0) / 15.0

    fig, sub = plt.subplots(1, 1)

    proj = self._represent_healpix_map(fig, total, longitude, latitude, xsize, resolution, smoothing_kernel_sigma)

    cax = sub.imshow(proj, origin='lower')
    fig.colorbar(cax)
    sub.axis('off')

    hp.graticule(delta_coord, delta_coord)

    return fig
def inner_fit(self):
    """
    Profile-likelihood hook: with every parameter of the LikelihoodModel held
    fixed, this method is supposed to minimize the logLike over the nuisance
    parameters belonging only to this detector. No such internal minimization
    is performed here, so the current logLike value is simply returned.
    """
    return self.get_log_like()
def get_number_of_data_points(self):
    """
    Return the number of active bins across all active analysis bins

    :return: number of active bins
    """
    # Each map-tree entry contributes as many data points as its partial
    # (ROI-restricted) observation map has pixels.
    return sum(
        self._maptree[bin_id].observation_map.as_partial().shape[0]
        for bin_id in self._maptree
    )
def _get_model_map(self, plane_id, n_pt_src, n_ext_src):
"""
This function returns a model map for a particular bin
"""
if plane_id not in self._active_planes:
raise ValueError(
f"{plane_id} not a plane in the current model"
)
model_map = SparseHealpix(self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src),
self._active_pixels[plane_id],
self._maptree[plane_id].observation_map.nside)
return model_map
def _get_excess(self, data_analysis_bin, all_maps=True):
"""
This function returns the excess counts for a particular bin
if all_maps=True, also returns the data and background maps
"""
data_map = data_analysis_bin.observation_map.as_dense()
bkg_map = data_analysis_bin.background_map.as_dense()
excess = data_map - bkg_map
if all_maps:
return excess, data_map, bkg_map
return excess
def _write_a_map(self, file_name, which, fluctuate=False, return_map=False):
    """
    This writes either a model map or a residual map, depending on which one is preferred

    :param file_name: output file name for the new map tree
    :param which: either 'model' or 'residual'
    :param fluctuate: if True, Poisson-fluctuate the model expectation first
    :param return_map: if True, also return the new map tree (testing only)
    """
    which = which.lower()
    assert which in ['model', 'residual']

    n_pt = self._likelihood_model.get_number_of_point_sources()
    n_ext = self._likelihood_model.get_number_of_extended_sources()

    map_analysis_bins = collections.OrderedDict()

    if fluctuate:
        poisson_set = self.get_simulated_dataset("model map")

    for plane_id in self._active_planes:
        data_analysis_bin = self._maptree[plane_id]

        bkg = data_analysis_bin.background_map
        obs = data_analysis_bin.observation_map

        if fluctuate:
            model_excess = poisson_set._maptree[plane_id].observation_map \
                           - poisson_set._maptree[plane_id].background_map
        else:
            model_excess = self._get_model_map(plane_id, n_pt, n_ext)

        # NOTE(review): 'bkg += model_excess' may modify the map tree's
        # background map in place if the map type implements in-place
        # addition -- TODO confirm.
        if which == 'residual':
            bkg += model_excess

        if which == 'model':
            obs = model_excess + bkg

        this_bin = DataAnalysisBin(plane_id,
                                   observation_hpx_map=obs,
                                   background_hpx_map=bkg,
                                   active_pixels_ids=self._active_pixels[plane_id],
                                   n_transits=data_analysis_bin.n_transits,
                                   scheme='RING')

        map_analysis_bins[plane_id] = this_bin

    # save the file
    new_map_tree = MapTree(map_analysis_bins, self._roi)
    new_map_tree.write(file_name)

    if return_map:
        return new_map_tree
def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False):
    """
    Write the model map to a file.
    The interface is based off of HAWCLike for consistency.

    :param file_name: output file name
    :param poisson_fluctuate: if True, Poisson-fluctuate the model before writing
    :param test_return_map: if True, also return the map tree (testing only)
    """
    if test_return_map:
        log.warning("test_return_map=True should only be used for testing purposes!")

    # Delegate to the shared writer with the 'model' flavor.
    return self._write_a_map(file_name, 'model', poisson_fluctuate, test_return_map)
def write_residual_map(self, file_name, test_return_map=False):
    """
    Write the residual map to a file.
    The interface is based off of HAWCLike for consistency.

    :param file_name: output file name
    :param test_return_map: if True, also return the map tree (testing only)
    """
    if test_return_map:
        log.warning("test_return_map=True should only be used for testing purposes!")

    # Delegate to the shared writer with the 'residual' flavor (never fluctuated).
    return self._write_a_map(file_name, 'residual', False, test_return_map)
| 38.20771 | 126 | 0.609942 |
c88aff50b9e6ce0d5c309be594a03b1f208a90db | 15,227 | py | Python | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
] | null | null | null | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
] | null | null | null | sshcustodian/sshcustodian.py | jkglasbrenner/sshcustodian | 870d1088f27e1528e27f94f55f2efad7dad32d5d | [
"MIT"
] | null | null | null | # File: sshcustodian/sshcustodian.py
# -*- coding: utf-8 -*-
# Python 2/3 Compatibility
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from six.moves import filterfalse
"""
This module creates a subclass of the main Custodian class in the Custodian
project (github.com/materialsproject/custodian), which is a wrapper that
manages jobs running on computing clusters. The Custodian module is part of The
Materials Project (materialsproject.org/).
This subclass adds the functionality to copy the temporary directory created
via monty to the scratch partitions on slave compute nodes, provided that the
cluster's filesystem is configured in this way. The implementation invokes a
subprocess to utilize the ssh executable installed on the cluster, so it is not
particularly elegant or platform independent, nor is this solution likely to be
general to all clusters. This is why this modification has not been submitted
as a pull request to the main Custodian project.
"""
# Import modules
import logging
import subprocess
import sys
import datetime
import time
import os
import re
from itertools import islice, groupby
from socket import gethostname
from monty.tempfile import ScratchDir
from monty.shutil import gzip_dir
from monty.json import MontyEncoder
from monty.serialization import dumpfn
from custodian.custodian import Custodian
from custodian.custodian import CustodianError
# Module-level logger
logger = logging.getLogger(__name__)
| 43.505714 | 80 | 0.568858 |
c88ca1454e3c43e792033b4722a580761e424d90 | 17,217 | py | Python | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
] | 165 | 2015-01-12T09:09:19.000Z | 2022-03-14T11:26:23.000Z | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
] | 35 | 2015-01-07T14:57:24.000Z | 2022-03-24T17:43:28.000Z | sherlock/__init__.py | akudelka/sherlock | 9e85f36c01e0cb1d495283f024423bc60c3f7a4e | [
"MIT"
] | 38 | 2015-03-11T09:10:05.000Z | 2022-01-17T11:29:38.000Z | '''
Sherlock: Distributed Locks with a choice of backend
====================================================
:mod:`sherlock` is a library that provides easy-to-use distributed inter-process
locks and also allows you to choose a backend of your choice for lock
synchronization.
|Build Status| |Coverage Status|
.. |Build Status| image:: https://travis-ci.org/vaidik/sherlock.png
:target: https://travis-ci.org/vaidik/sherlock/
.. |Coverage Status| image:: https://coveralls.io/repos/vaidik/incoming/badge.png
:target: https://coveralls.io/r/vaidik/incoming
Overview
--------
When you are working with resources which are accessed by multiple services or
distributed services, more than often you need some kind of locking mechanism
to make it possible to access some resources at a time.
Distributed Locks or Mutexes can help you with this. :mod:`sherlock` provides
the exact same facility, with some extra goodies. It provides an easy-to-use API
that resembles standard library's `threading.Lock` semantics.
Apart from this, :mod:`sherlock` gives you the flexibility of using a backend of
your choice for managing locks.
:mod:`sherlock` also makes it simple for you to extend :mod:`sherlock` to use
backends that are not supported.
Features
++++++++
* API similar to standard library's `threading.Lock`.
* Support for With statement, to cleanly acquire and release locks.
* Backend agnostic: supports `Redis`_, `Memcached`_ and `Etcd`_ as choice of
backends.
* Extendable: can be easily extended to work with any other of backend of
choice by extending base lock class. Read :ref:`extending`.
.. _Redis: http://redis.io
.. _Memcached: http://memcached.org
.. _Etcd: http://github.com/coreos/etcd
Supported Backends and Client Libraries
+++++++++++++++++++++++++++++++++++++++
Following client libraries are supported for every supported backend:
* Redis: `redis-py`_
* Memcached: `pylibmc`_
* Etcd: `python-etcd`_
.. _redis-py: http://github.com
.. _pylibmc: http://github.com
.. _python-etcd: https://github.com/jplana/python-etcd
As of now, only the above mentioned libraries are supported. Although
:mod:`sherlock` takes custom client objects so that you can easily provide
settings that you want to use for that backend store, but :mod:`sherlock` also
checks if the provided client object is an instance of the supported clients
and accepts client objects which pass this check, even if the APIs are the
same. :mod:`sherlock` might get rid of this issue later, if need be and if
there is a demand for that.
Installation
------------
Installation is simple.
.. code:: bash
pip install sherlock
.. note:: :mod:`sherlock` will install all the client libraries for all the
supported backends.
Basic Usage
-----------
:mod:`sherlock` is simple to use as at the API and semantics level, it tries to
conform to standard library's :mod:`threading.Lock` APIs.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend,
# never expire locks and retry acquiring an acquired lock after an
# interval of 0.1 second.
sherlock.configure(backend=sherlock.backends.REDIS,
expire=None,
retry_interval=0.1)
# Note: configuring sherlock to use a backend does not limit you
# another backend at the same time. You can import backend specific locks
# like RedisLock, MCLock and EtcdLock and use them just the same way you
# use a generic lock (see below). In fact, the generic Lock provided by
# sherlock is just a proxy that uses these specific locks under the hood.
# acquire a lock called my_lock
lock = Lock('my_lock')
# acquire a blocking lock
lock.acquire()
# check if the lock has been acquired or not
lock.locked() == True
# release the lock
lock.release()
Support for ``with`` statement
++++++++++++++++++++++++++++++
.. code-block:: python
# using with statement
with Lock('my_lock'):
# do something constructive with your locked resource here
pass
Blocking and Non-blocking API
+++++++++++++++++++++++++++++
.. code-block:: python
# acquire non-blocking lock
lock1 = Lock('my_lock')
lock2 = Lock('my_lock')
# successfully acquire lock1
lock1.acquire()
# try to acquire lock in a non-blocking way
lock2.acquire(False) == False # returns False
# try to acquire lock in a blocking way
lock2.acquire() # blocks until lock is acquired to timeout happens
Using two backends at the same time
+++++++++++++++++++++++++++++++++++
Configuring :mod:`sherlock` to use a backend does not limit you from using
another backend at the same time. You can import backend specific locks like
RedisLock, MCLock and EtcdLock and use them just the same way you use a generic
lock (see below). In fact, the generic Lock provided by :mod:`sherlock` is just
a proxy that uses these specific locks under the hood.
.. code-block:: python
import sherlock
from sherlock import Lock
# Configure :mod:`sherlock`'s locks to use Redis as the backend
sherlock.configure(backend=sherlock.backends.REDIS)
# Acquire a lock called my_lock, this lock uses Redis
lock = Lock('my_lock')
# Now acquire locks in Memcached
from sherlock import MCLock
mclock = MCLock('my_mc_lock')
mclock.acquire()
Tests
-----
To run all the tests (including integration), you have to make sure that all
the databases are running. Make sure all the services are running:
.. code:: bash
# memcached
memcached
# redis-server
redis-server
# etcd (etcd is probably not available as package, here is the simplest way
# to run it).
wget https://github.com/coreos/etcd/releases/download/<version>/etcd-<version>-<platform>.tar.gz
tar -zxvf etcd-<version>-<platform>.gz
./etcd-<version>-<platform>/etcd
Run tests like so:
.. code:: bash
python setup.py test
Documentation
-------------
Available `here`_.
.. _here: http://sher-lock.readthedocs.org
Roadmap
-------
* Support for `Zookeeper`_ as backend.
* Support for `Gevent`_, `Multithreading`_ and `Multiprocessing`_.
.. _Zookeeper: http://zookeeper.apache.org/
.. _Gevent: http://www.gevent.org/
.. _Multithreading: http://docs.python.org/2/library/multithreading.html
.. _Multiprocessing: http://docs.python.org/2/library/multiprocessing.html
License
-------
See `LICENSE`_.
**In short**: This is an open-source project and exists in the public domain
for anyone to modify and use it. Just be nice and attribute the credits
wherever you can. :)
.. _LICENSE: http://github.com/vaidik/sherlock/blob/master/LICENSE.rst
Distributed Locking in Other Languages
--------------------------------------
* NodeJS - https://github.com/thedeveloper/warlock
'''
import etcd
import pylibmc
import redis
def configure(**kwargs):
'''
Set basic global configuration for :mod:`sherlock`.
:param backend: global choice of backend. This backend will be used
for managing locks by :class:`sherlock.Lock` class
objects.
:param client: global client object to use to connect with backend
store. This client object will be used to connect to the
backend store by :class:`sherlock.Lock` class instances.
The client object must be a valid object of the client
library. If the backend has been configured using the
`backend` parameter, the custom client object must belong
to the same library that is supported for that backend.
If the backend has not been set, then the custom client
object must be an instance of a valid supported client.
In that case, :mod:`sherlock` will set the backend by
introspecting the type of provided client object.
:param str namespace: provide global namespace
:param float expire: provide global expiration time. If expicitly set to
`None`, lock will not expire.
:param float timeout: provide global timeout period
:param float retry_interval: provide global retry interval
Basic Usage:
>>> import sherlock
>>> from sherlock import Lock
>>>
>>> # Configure sherlock to use Redis as the backend and the timeout for
>>> # acquiring locks equal to 20 seconds.
>>> sherlock.configure(timeout=20, backend=sherlock.backends.REDIS)
>>>
>>> import redis
>>> redis_client = redis.StrictRedis(host='X.X.X.X', port=6379, db=1)
>>> sherlock.configure(client=redis_client)
'''
_configuration.update(**kwargs)
# Create a backends singleton
backends = _Backends()
# Create a configuration singleton
_configuration = _Configuration()
# Import important Lock classes
from . import lock
from .lock import *
| 34.228628 | 100 | 0.606319 |
c88d252547df6d3f79fae0aefc72512a6ebb61d4 | 7,199 | py | Python | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
] | 46 | 2020-04-13T07:54:49.000Z | 2022-03-01T06:29:15.000Z | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
] | 2 | 2020-07-27T15:11:09.000Z | 2021-04-04T10:58:03.000Z | misc.py | ChristophReich1996/Semantic_Pyramid_for_Image_Generation | 00e6e7787a5d90b9c09f50a5d7039cb9b5cd4509 | [
"MIT"
] | 5 | 2020-06-22T01:56:30.000Z | 2021-12-22T04:34:49.000Z | from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from skimage.draw import random_shapes
import os
import json
def get_masks_for_training(
mask_shapes: List[Tuple] =
[(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)],
device: str = 'cpu', add_batch_size: bool = False,
p_random_mask: float = 0.3) -> List[torch.Tensor]:
'''
Method returns random masks similar to 3.2. of the paper
:param mask_shapes: (List[Tuple]) Shapes of the features generated by the vgg16 model
:param device: (str) Device to store tensor masks
:param add_batch_size: (bool) If true a batch size is added to each mask
:param p_random_mask: (float) Probability that a random mask is generated else no mask is utilized
:return: (List[torch.Tensor]) Generated masks for each feature tensor
'''
# Select layer where no masking is used. Every output from the deeper layers get mapped out. Every higher layer gets
# masked by a random shape
selected_stage = random.choice(list(range(len(mask_shapes))) + [0, 1])
# Make masks
masks = []
# Apply spatial varying masks
spatial_varying_masks = (np.random.rand() < p_random_mask) \
and (selected_stage < (len(mask_shapes) - 1)) \
and (selected_stage > 0)
# Init random mask
if spatial_varying_masks:
random_mask = random_shapes(tuple(reversed(mask_shapes))[selected_stage + 1][1:],
min_shapes=1,
max_shapes=4,
min_size=min(8, tuple(reversed(mask_shapes))[selected_stage + 1][1] // 2),
allow_overlap=True)[0][:, :, 0]
# Random mask to torch tensor
random_mask = torch.tensor(random_mask, dtype=torch.float32, device=device)[None, :, :]
# Change range of mask to [0, 1]
random_mask = (random_mask == 255.0).float()
# Loop over all shapes
for index, mask_shape in enumerate(reversed(mask_shapes)):
# Case if spatial varying masks are applied after selected stage
if spatial_varying_masks:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
elif index < selected_stage:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(F.interpolate(random_mask[None], size=mask_shape[1:], mode='nearest')[0])
# Case if only one stage is selected
else:
if index == selected_stage:
masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device))
else:
masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device))
# Add batch size dimension
if add_batch_size:
for index in range(len(masks)):
masks[index] = masks[index].unsqueeze(dim=0)
# Reverse order of masks to match the features of the vgg16 model
masks.reverse()
return masks
def normalize_0_1_batch(input: torch.tensor) -> torch.tensor:
'''
Normalize a given tensor to a range of [-1, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None]))
def normalize_m1_1_batch(input: torch.tensor) -> torch.tensor:
'''
Normalize a given tensor to a range of [-1, 1]
:param input: (Torch tensor) Input tensor
:return: (Torch tensor) Normalized output tensor
'''
input_flatten = input.view(input.shape[0], -1)
return 2 * ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / (
torch.max(input_flatten, dim=1)[0][:, None, None, None] -
torch.min(input_flatten, dim=1)[0][:, None, None, None])) - 1
| 44.99375 | 120 | 0.607862 |
c88f24e0c4f56b49a1514bbc5fcfcc00efd5e15c | 4,204 | py | Python | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 4 | 2022-03-05T20:51:38.000Z | 2022-03-15T17:10:22.000Z | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | null | null | null | EasyMCDM/models/Irmo.py | qanastek/EasyMCDM | 7fa2e2dfe9397834ca9f50211ea2717a16785394 | [
"MIT"
] | 1 | 2022-03-08T13:45:22.000Z | 2022-03-08T13:45:22.000Z | import math
from typing import Dict, List, Tuple, Union
from EasyMCDM.models.MCDM import MCDM
# Instant-Runoff Multicriteria Optimization (IRMO)
| 37.873874 | 218 | 0.562559 |
c8919966f9b0c8cb69e17d80a649cb9b3d0b7138 | 2,046 | py | Python | ramp/estimators/r.py | kvh/ramp | 8618ce673e49b95f40c9659319c3cb72281dacac | [
"MIT"
] | 214 | 2015-01-01T07:42:25.000Z | 2022-03-08T08:57:49.000Z | ramp/estimators/r.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
] | 8 | 2020-05-19T20:15:40.000Z | 2020-05-19T20:15:41.000Z | ramp/estimators/r.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
] | 87 | 2015-01-13T19:25:15.000Z | 2021-05-16T10:40:05.000Z | import numpy as np
from rpy2.robjects import FloatVector
from rpy2.robjects.packages import importr
from rpy2 import robjects
stats = importr('stats')
base = importr('base')
| 29.652174 | 74 | 0.610948 |
c89234777cdd2b2357d8a397dcec12fefab43a56 | 1,138 | py | Python | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
] | null | null | null | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
] | 1 | 2022-03-02T11:49:02.000Z | 2022-03-02T11:49:02.000Z | tests/decorators/test_timer.py | ShaneMicro/azure-functions-python-library | f56564effbf291a27e1bd5751a38484af387bb7f | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from azure.functions.decorators.constants import TIMER_TRIGGER
from azure.functions.decorators.core import BindingDirection, DataType
from azure.functions.decorators.timer import TimerTrigger
| 37.933333 | 70 | 0.598418 |
c895e6b35498811fbcaa8204ceff2eff7744a4b3 | 8,368 | py | Python | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
] | null | null | null | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
] | null | null | null | src/client.py | Da3dalu2/SimpleNetworkSimulator | 447bc099b35720ab8d6e8a9703bb2354f1f01cae | [
"MIT"
] | null | null | null | import socket
import threading
import time
from threading import Thread
import utilities as utils
import error_handling as check
BUFFER_SIZE = 1024
BROADCAST_MAC = "FF:FF:FF:FF:FF:FF"
| 32.30888 | 80 | 0.603848 |
c8962401f6f771809773c10b2765a3a3a3c92f1b | 2,569 | py | Python | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
] | 1 | 2022-01-26T18:51:29.000Z | 2022-01-26T18:51:29.000Z | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
] | null | null | null | great_expectations/rule_based_profiler/types/builder.py | afeld/great_expectations | ca2dc1f8951c727040d680b543aee91753c2c862 | [
"Apache-2.0"
] | 1 | 2021-11-29T07:37:28.000Z | 2021-11-29T07:37:28.000Z | import json
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.types import SerializableDictDot, safe_deep_copy
from great_expectations.util import deep_filter_properties_iterable
| 42.114754 | 119 | 0.69093 |
c8963aa9c2fd19d072617ac3bc9699a61aa29633 | 449 | py | Python | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
] | null | null | null | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
] | null | null | null | Day_3_Boolean_Logic_Conditionals/Day3_ex1_RL.py | lenovreg/Python_TietoEvry_Feb2022 | 1e37f524c1b78bb9752500261a953b812fc697db | [
"MIT"
] | null | null | null | # #1. Health check
# # Ask user for their temperature.
# # If the user enters below 35, then output "not too cold"
# # If 35 to 37 (inclusive), output "all right"
# # If the temperature over 37, then output "possible fever"
#
user_temp = float(input('What is your temperature?'))
if user_temp < 35:
print('not too cold?')
elif user_temp >= 35 and user_temp <= 37:
print('all right')
else: # temperature over 37
print('possible fever')
| 32.071429 | 62 | 0.679287 |
c896cf21816f76cd01ad1bacb6b82f675af14297 | 12,510 | py | Python | services/core-api/tests/now_submissions/resources/test_application_resource.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | 25 | 2018-07-09T19:04:37.000Z | 2022-03-15T17:27:10.000Z | services/core-api/tests/now_submissions/resources/test_application_resource.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | 983 | 2018-04-25T20:08:07.000Z | 2022-03-31T21:45:20.000Z | services/core-api/tests/now_submissions/resources/test_application_resource.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | 58 | 2018-05-15T22:35:50.000Z | 2021-11-29T19:40:52.000Z | import json
from tests.factories import (NOWSubmissionFactory, MineFactory, NOWClientFactory,
NOWApplicationIdentityFactory)
| 55.110132 | 97 | 0.681455 |
c89c4416cb922696e6077b691fa44b4a364a4846 | 447 | py | Python | output/models/nist_data/list_pkg/non_positive_integer/schema_instance/nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/non_positive_integer/schema_instance/nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/non_positive_integer/schema_instance/nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.list_pkg.non_positive_integer.schema_instance.nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd.nistschema_sv_iv_list_non_positive_integer_enumeration_2 import (
NistschemaSvIvListNonPositiveIntegerEnumeration2,
NistschemaSvIvListNonPositiveIntegerEnumeration2Type,
)
__all__ = [
"NistschemaSvIvListNonPositiveIntegerEnumeration2",
"NistschemaSvIvListNonPositiveIntegerEnumeration2Type",
]
| 44.7 | 201 | 0.888143 |
c89d84cb20f102af7452f0c152beca85a101d946 | 386 | py | Python | cache-basic.py | kurapikats/python-basics | 7b81e5e8de44186b573b74f05c78b56894df0ed7 | [
"MIT"
] | null | null | null | cache-basic.py | kurapikats/python-basics | 7b81e5e8de44186b573b74f05c78b56894df0ed7 | [
"MIT"
] | null | null | null | cache-basic.py | kurapikats/python-basics | 7b81e5e8de44186b573b74f05c78b56894df0ed7 | [
"MIT"
] | null | null | null | import time
cache = {}
print(cache_compute(1, 2))
print(cache_compute(3, 5))
print(cache_compute(3, 5))
print(cache_compute(6, 7))
print(cache_compute(1, 2))
| 14.846154 | 30 | 0.585492 |
c8a19d3ee1214101499b5145f53a93867a82f056 | 675 | py | Python | dl/src/CookieManager.py | PatrykCholewa/PI_Stored | 4ff4d72fe56281b76ddf7b759c19aabbce3c9899 | [
"MIT"
] | null | null | null | dl/src/CookieManager.py | PatrykCholewa/PI_Stored | 4ff4d72fe56281b76ddf7b759c19aabbce3c9899 | [
"MIT"
] | null | null | null | dl/src/CookieManager.py | PatrykCholewa/PI_Stored | 4ff4d72fe56281b76ddf7b759c19aabbce3c9899 | [
"MIT"
] | null | null | null | from datetime import datetime
import jwt
from src import ConfigManager
secret = ConfigManager.get_config("DL_COOKIE_SECRET_KEY")
secure = ConfigManager.get_config("APP_SECURE")
| 23.275862 | 58 | 0.708148 |
c8a2956bd7fb979e05d6c1af9814b3f364a7b696 | 2,403 | py | Python | printing/Spooler.py | mrlinqu/intsa_term_client | 596335da6dbdf7eb543b1dcf2c33bcc222aa3321 | [
"MIT"
] | null | null | null | printing/Spooler.py | mrlinqu/intsa_term_client | 596335da6dbdf7eb543b1dcf2c33bcc222aa3321 | [
"MIT"
] | 1 | 2020-11-07T12:44:56.000Z | 2020-11-07T12:46:52.000Z | printing/Spooler.py | mrlinqu/intsa_term_client | 596335da6dbdf7eb543b1dcf2c33bcc222aa3321 | [
"MIT"
] | null | null | null | # Copyright 2020 by Roman Khuramshin <mr.linqu@gmail.com>.
# All rights reserved.
# This file is part of the Intsa Term Client - X2Go terminal client for Windows,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
import logging
import threading
import os
import time
import win32print
from .Handler import Handler | 28.607143 | 168 | 0.61881 |
c8a3493cfeb4dfbb80acc4a2be0aae2d1cb8c74f | 1,264 | py | Python | mytests/test_SimpleCalc.py | KishoreParihar/DemoPythonTest | f9dadbf6cfcd4e6877e31ca65851882f73234307 | [
"MIT"
] | null | null | null | mytests/test_SimpleCalc.py | KishoreParihar/DemoPythonTest | f9dadbf6cfcd4e6877e31ca65851882f73234307 | [
"MIT"
] | null | null | null | mytests/test_SimpleCalc.py | KishoreParihar/DemoPythonTest | f9dadbf6cfcd4e6877e31ca65851882f73234307 | [
"MIT"
] | null | null | null | import unittest
import sys
sys.path.append(".")
sys.path.insert(0, '..\\')
from calculator.simplecalculator import Calculator
if __name__ == '__main__':
unittest.main()
| 23.849057 | 58 | 0.606013 |
c8a47ee8db41845109ebaa2bf272e65a01b66623 | 2,683 | py | Python | argos/countdown.9s.py | solettitiger/countdown | c5df89c7d67984171de08508ef4433ea9d6fbbd1 | [
"MIT"
] | null | null | null | argos/countdown.9s.py | solettitiger/countdown | c5df89c7d67984171de08508ef4433ea9d6fbbd1 | [
"MIT"
] | null | null | null | argos/countdown.9s.py | solettitiger/countdown | c5df89c7d67984171de08508ef4433ea9d6fbbd1 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import sys
import subprocess
import os
from playsound import playsound
# ******************************************************************
# Definitionen
# ******************************************************************
filename = 'countdown.txt'
audiofile = 'ringing.mp3'
settimer = 'add.py'
stoptimer = 'stop.py'
overlay = 'overlay.py'
title = ""
zeit = ""
command = ""
path = ""
diff = 0
# ******************************************************************
# Funktionen
# ******************************************************************
# ******************************************************************
# Main
# ******************************************************************
if __name__ == "__main__":
main()
| 26.83 | 151 | 0.561685 |
c8a59080304794abe4b7a5451fd69be502c0aee2 | 1,392 | py | Python | restapi/v1/serializers.py | asntech/jaspar | ae86731e8f197d6830e6d778835f218d4eb1b9e8 | [
"BSD-3-Clause"
] | 3 | 2017-11-20T23:03:20.000Z | 2020-02-15T19:32:23.000Z | restapi/v1/serializers.py | asntech/jaspar | ae86731e8f197d6830e6d778835f218d4eb1b9e8 | [
"BSD-3-Clause"
] | 3 | 2019-12-12T09:26:55.000Z | 2021-06-10T19:24:19.000Z | restapi/v1/serializers.py | asntech/jaspar | ae86731e8f197d6830e6d778835f218d4eb1b9e8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
## Author: Aziz Khan
## License: GPL v3
## Copyright 2017 Aziz Khan <azez.khan__AT__gmail.com>
from rest_framework import serializers
from portal.models import Matrix, MatrixAnnotation
from django.http import HttpRequest
| 29 | 90 | 0.733477 |
c8a8f855a2d0fbd314903aae2f023f9e8c19884d | 5,043 | py | Python | multimodal_models/StackGAN_V2_PyTorch/models.py | kumayu0108/model-zoo | 4285779f6ff51fa1efb0625d67b428e90c343c0c | [
"MIT"
] | 43 | 2020-05-16T21:05:34.000Z | 2022-02-08T11:33:29.000Z | multimodal_models/StackGAN_V2_PyTorch/models.py | kumayu0108/model-zoo | 4285779f6ff51fa1efb0625d67b428e90c343c0c | [
"MIT"
] | 52 | 2020-05-14T16:18:08.000Z | 2021-11-02T19:13:47.000Z | multimodal_models/StackGAN_V2_PyTorch/models.py | kumayu0108/model-zoo | 4285779f6ff51fa1efb0625d67b428e90c343c0c | [
"MIT"
] | 69 | 2020-05-14T13:39:23.000Z | 2021-07-30T00:51:27.000Z | import torch
import torch.nn as nn
from generator_model import G1, G2
from helper_functions.Blocks import downBlock, Block3x3_leakRelu
from helper_functions.ret_image import Interpolate, condAugmentation
from helper_functions.initial_weights import weights_init
from helper_functions.losses import KLloss, custom_loss
from helper_functions.Blocks import upScale, normalBlock, Residual
import helper_functions.config as cfg | 39.093023 | 78 | 0.615507 |
c8a9475637b6493e4ff65f91b1c3dca0e1d6f885 | 382 | py | Python | utils/agro_utils.py | TiagoMarta/data_fusion_Vineyard-Segmentation | de54e149d36027bb314b5890ea4a1e71ba472d17 | [
"Unlicense",
"MIT"
] | 3 | 2021-08-04T08:03:50.000Z | 2022-03-25T11:22:09.000Z | utils/agro_utils.py | TiagoMarta/data_fusion_Vineyard-Segmentation | de54e149d36027bb314b5890ea4a1e71ba472d17 | [
"Unlicense",
"MIT"
] | null | null | null | utils/agro_utils.py | TiagoMarta/data_fusion_Vineyard-Segmentation | de54e149d36027bb314b5890ea4a1e71ba472d17 | [
"Unlicense",
"MIT"
] | null | null | null | import numpy as np
def NDVI(nir,red):
'''
# https://eos.com/make-an-analysis/ndvi/
Inputs: nxm numpy arrays
NIR reflection in the near-infrared spectrum
RED reflection in the red range of the spectrum
'''
num = nir-red
dom = nir+red
ndvi = np.divide(num,dom)
ndvi[np.isnan(ndvi)]=0 # Clean array with nan
return(ndvi) | 25.466667 | 57 | 0.609948 |
c8a98f7aadc1b3bec71524384698aed558c36091 | 3,805 | py | Python | generator/api/routes.py | horvathandris/phenoflow | d0109f3702bc180954051170a56e017af52636fb | [
"MIT"
] | null | null | null | generator/api/routes.py | horvathandris/phenoflow | d0109f3702bc180954051170a56e017af52636fb | [
"MIT"
] | null | null | null | generator/api/routes.py | horvathandris/phenoflow | d0109f3702bc180954051170a56e017af52636fb | [
"MIT"
] | null | null | null | from starlette.applications import Starlette
from starlette.responses import JSONResponse
from api import workflow
import oyaml as yaml
app = Starlette(debug=True)
| 57.651515 | 252 | 0.70276 |
c8adae8d9f3f33704f82f32bb3e323260ea0ba97 | 29,151 | py | Python | tccli/services/tsf/v20180326/help.py | zyh911/tencentcloud-cli | dfc5dbd660d4c60d265921c4edc630091478fc41 | [
"Apache-2.0"
] | null | null | null | tccli/services/tsf/v20180326/help.py | zyh911/tencentcloud-cli | dfc5dbd660d4c60d265921c4edc630091478fc41 | [
"Apache-2.0"
] | null | null | null | tccli/services/tsf/v20180326/help.py | zyh911/tencentcloud-cli | dfc5dbd660d4c60d265921c4edc630091478fc41 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
DESC = "tsf-2018-03-26"
INFO = {
"DeletePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeSimpleGroups": {
"params": [
{
"name": "GroupIdList",
"desc": "ID"
},
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "Limit",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "SearchWord",
"desc": ""
},
{
"name": "AppMicroServiceType",
"desc": "Mservice mesh, P M"
}
],
"desc": ""
},
"CreateGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "GroupName",
"desc": ""
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "GroupDesc",
"desc": ""
}
],
"desc": ""
},
"CreateCluster": {
"params": [
{
"name": "ClusterName",
"desc": ""
},
{
"name": "ClusterType",
"desc": ""
},
{
"name": "VpcId",
"desc": "ID"
},
{
"name": "ClusterCIDR",
"desc": "IPCIDR"
},
{
"name": "ClusterDesc",
"desc": ""
},
{
"name": "TsfRegionId",
"desc": "TSF"
},
{
"name": "TsfZoneId",
"desc": "TSF"
},
{
"name": "SubnetId",
"desc": "ID"
}
],
"desc": ""
},
"DescribePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "IDID"
},
{
"name": "SearchWord",
"desc": "ID"
},
{
"name": "OrderBy",
"desc": "\"UploadTime\""
},
{
"name": "OrderType",
"desc": "0/1"
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"ModifyContainerReplicas": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "InstanceNum",
"desc": ""
}
],
"desc": ""
},
"DescribeConfigSummary": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "SearchWord",
"desc": ""
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20"
}
],
"desc": ""
},
"DeployContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "Server",
"desc": "server"
},
{
"name": "TagName",
"desc": ",v1"
},
{
"name": "InstanceNum",
"desc": ""
},
{
"name": "Reponame",
"desc": "/tsf/nginx"
},
{
"name": "CpuLimit",
"desc": " CPU K8S limit request 2 "
},
{
"name": "MemLimit",
"desc": " MiB K8S limit request 2 "
},
{
"name": "JvmOpts",
"desc": "jvm"
},
{
"name": "CpuRequest",
"desc": " CPU K8S request"
},
{
"name": "MemRequest",
"desc": " MiB K8S request"
},
{
"name": "DoNotStart",
"desc": ""
},
{
"name": "RepoName",
"desc": "/tsf/nginx"
},
{
"name": "UpdateType",
"desc": "0: 1:"
},
{
"name": "UpdateIvl",
"desc": ""
}
],
"desc": ""
},
"AddClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "InstanceIdList",
"desc": "ID"
},
{
"name": "OsName",
"desc": ""
},
{
"name": "ImageId",
"desc": "ID"
},
{
"name": "Password",
"desc": ""
},
{
"name": "KeyId",
"desc": ""
},
{
"name": "SgId",
"desc": ""
},
{
"name": "InstanceImportMode",
"desc": "RTSFMagent"
}
],
"desc": "TSF"
},
"DescribePodInstances": {
"params": [
{
"name": "GroupId",
"desc": "groupId"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20 1~50"
}
],
"desc": ""
},
"DescribeServerlessGroups": {
"params": [
{
"name": "SearchWord",
"desc": "groupName"
},
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "OrderBy",
"desc": " createTimeid name createTime"
},
{
"name": "OrderType",
"desc": "101"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20 1~50"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
}
],
"desc": "Serverless"
},
"CreateNamespace": {
"params": [
{
"name": "NamespaceName",
"desc": ""
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "NamespaceDesc",
"desc": ""
},
{
"name": "NamespaceResourceType",
"desc": "(DEF)"
},
{
"name": "NamespaceType",
"desc": "(DEFGLOBAL)"
},
{
"name": "NamespaceId",
"desc": "ID"
}
],
"desc": ""
},
"DeleteApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
}
],
"desc": ""
},
"DeleteMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "ID"
}
],
"desc": ""
},
"StartGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"DeleteNamespace": {
"params": [
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeGroupInstances": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "SearchWord",
"desc": ""
},
{
"name": "OrderBy",
"desc": ""
},
{
"name": "OrderType",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"DeleteConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
}
],
"desc": ""
},
"DescribePublicConfigSummary": {
"params": [
{
"name": "SearchWord",
"desc": ""
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20"
}
],
"desc": ""
},
"DeletePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "PkgIds",
"desc": "ID"
}
],
"desc": "\n10001000UpperDeleteLimit"
},
"RevocationPublicConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "ID"
}
],
"desc": ""
},
"DescribePublicConfigs": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20"
},
{
"name": "ConfigIdList",
"desc": "ID"
},
{
"name": "ConfigName",
"desc": ""
},
{
"name": "ConfigVersion",
"desc": ""
}
],
"desc": ""
},
"DescribeSimpleClusters": {
"params": [
{
"name": "ClusterIdList",
"desc": "ID"
},
{
"name": "ClusterType",
"desc": ""
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20 1~50"
},
{
"name": "SearchWord",
"desc": "idname"
}
],
"desc": ""
},
"CreateServerlessGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "GroupName",
"desc": "1~60"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
}
],
"desc": "Serverless"
},
"DescribeConfigs": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "ConfigId",
"desc": "ID"
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
},
{
"name": "ConfigIdList",
"desc": "ID"
},
{
"name": "ConfigName",
"desc": ""
},
{
"name": "ConfigVersion",
"desc": ""
}
],
"desc": ""
},
"DescribeConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeMicroservices": {
"params": [
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "SearchWord",
"desc": ""
},
{
"name": "OrderBy",
"desc": ""
},
{
"name": "OrderType",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"StartContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"RemoveInstances": {
"params": [
{
"name": "ClusterId",
"desc": " ID"
},
{
"name": "InstanceIdList",
"desc": " ID "
}
],
"desc": " TSF "
},
"ExpandGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "InstanceIdList",
"desc": "ID"
}
],
"desc": ""
},
"DeleteGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeContainerGroupDetail": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": " "
},
"DeleteContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"RollbackConfig": {
"params": [
{
"name": "ConfigReleaseLogId",
"desc": "ID"
},
{
"name": "ReleaseDesc",
"desc": ""
}
],
"desc": ""
},
"ModifyMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": " ID"
},
{
"name": "MicroserviceDesc",
"desc": ""
}
],
"desc": ""
},
"CreatePublicConfig": {
"params": [
{
"name": "ConfigName",
"desc": ""
},
{
"name": "ConfigVersion",
"desc": ""
},
{
"name": "ConfigValue",
"desc": "yaml"
},
{
"name": "ConfigVersionDesc",
"desc": ""
},
{
"name": "ConfigType",
"desc": ""
}
],
"desc": ""
},
"DescribeImageTags": {
"params": [
{
"name": "ApplicationId",
"desc": "Id"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20 1~100"
},
{
"name": "QueryImageIdFlag",
"desc": "0: 1:"
},
{
"name": "SearchWord",
"desc": " tag "
}
],
"desc": ""
},
"DescribeServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": "Serverless"
},
"DescribeMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "ID"
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"DescribePublicConfigReleaseLogs": {
"params": [
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20"
}
],
"desc": ""
},
"DescribeApplicationAttribute": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
}
],
"desc": ""
},
"RevocationConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "ID"
}
],
"desc": ""
},
"ReleasePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ReleaseDesc",
"desc": ""
}
],
"desc": ""
},
"ReleaseConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
},
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "ReleaseDesc",
"desc": ""
}
],
"desc": ""
},
"DescribeReleasedConfig": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": "group"
},
"CreateContainGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "GroupName",
"desc": "1~60"
},
{
"name": "InstanceNum",
"desc": ""
},
{
"name": "AccessType",
"desc": "0: 1: 2NodePort"
},
{
"name": "ProtocolPorts",
"desc": ""
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "CpuLimit",
"desc": " CPU K8S limit"
},
{
"name": "MemLimit",
"desc": " MiB K8S limit"
},
{
"name": "GroupComment",
"desc": "200"
},
{
"name": "UpdateType",
"desc": "0: 1:"
},
{
"name": "UpdateIvl",
"desc": ""
},
{
"name": "CpuRequest",
"desc": " CPU K8S request"
},
{
"name": "MemRequest",
"desc": " MiB K8S request"
}
],
"desc": ""
},
"DescribePublicConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": ""
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "Limit",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "ConfigId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeGroups": {
"params": [
{
"name": "SearchWord",
"desc": ""
},
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "OrderBy",
"desc": ""
},
{
"name": "OrderType",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "GroupResourceTypeList",
"desc": ""
}
],
"desc": ""
},
"DescribeSimpleNamespaces": {
"params": [
{
"name": "NamespaceIdList",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "Limit",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "NamespaceResourceTypeList",
"desc": ""
},
{
"name": "SearchWord",
"desc": "idname"
},
{
"name": "NamespaceTypeList",
"desc": ""
},
{
"name": "NamespaceName",
"desc": ""
},
{
"name": "IsDefault",
"desc": "01"
}
],
"desc": " "
},
"DescribeConfigReleaseLogs": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "ApplicationId",
"desc": "ID"
}
],
"desc": ""
},
"CreateMicroservice": {
"params": [
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "MicroserviceName",
"desc": ""
},
{
"name": "MicroserviceDesc",
"desc": ""
}
],
"desc": ""
},
"DescribeDownloadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "PkgId",
"desc": "ID"
}
],
"desc": "TSFCOSAPICOSCOS APISDK\nCOShttps://cloud.tencent.com/document/product/436"
},
"DeployServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "PkgId",
"desc": "ID"
},
{
"name": "Memory",
"desc": " 1Gi 2Gi 4Gi 8Gi 16Gi 1Gi"
},
{
"name": "InstanceRequest",
"desc": " [1, 4] 1"
},
{
"name": "StartupParameters",
"desc": ""
}
],
"desc": "Serverless"
},
"DescribeGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"CreateConfig": {
"params": [
{
"name": "ConfigName",
"desc": ""
},
{
"name": "ConfigVersion",
"desc": ""
},
{
"name": "ConfigValue",
"desc": ""
},
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "ConfigVersionDesc",
"desc": ""
},
{
"name": "ConfigType",
"desc": ""
}
],
"desc": ""
},
"DescribeContainerGroups": {
"params": [
{
"name": "SearchWord",
"desc": "groupName"
},
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "OrderBy",
"desc": " createTimeid name createTime"
},
{
"name": "OrderType",
"desc": "101"
},
{
"name": "Offset",
"desc": "0"
},
{
"name": "Limit",
"desc": "20 1~50"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": " ID"
}
],
"desc": ""
},
"DeleteImageTags": {
"params": [
{
"name": "ImageTags",
"desc": ""
}
],
"desc": ""
},
"DescribeClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "SearchWord",
"desc": ""
},
{
"name": "OrderBy",
"desc": ""
},
{
"name": "OrderType",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
}
],
"desc": ""
},
"CreateApplication": {
"params": [
{
"name": "ApplicationName",
"desc": ""
},
{
"name": "ApplicationType",
"desc": "VCSserverless"
},
{
"name": "MicroserviceType",
"desc": "Mservice meshNG"
},
{
"name": "ApplicationDesc",
"desc": ""
},
{
"name": "ApplicationLogConfig",
"desc": ""
},
{
"name": "ApplicationResourceType",
"desc": ""
},
{
"name": "ApplicationRuntimeType",
"desc": "runtime"
}
],
"desc": ""
},
"StopGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"ShrinkGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"DeployGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "PkgId",
"desc": "ID"
},
{
"name": "StartupParameters",
"desc": ""
}
],
"desc": ""
},
"DescribeApplications": {
"params": [
{
"name": "SearchWord",
"desc": ""
},
{
"name": "OrderBy",
"desc": ""
},
{
"name": "OrderType",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "Limit",
"desc": ""
},
{
"name": "ApplicationType",
"desc": ""
},
{
"name": "MicroserviceType",
"desc": ""
},
{
"name": "ApplicationResourceTypeList",
"desc": ""
}
],
"desc": ""
},
"DeleteServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "groupId"
}
],
"desc": "Serverless"
},
"DescribeUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "PkgName",
"desc": ""
},
{
"name": "PkgVersion",
"desc": ""
},
{
"name": "PkgType",
"desc": ""
},
{
"name": "PkgDesc",
"desc": ""
}
],
"desc": "TSFCOSIdCOS APISDK\nCOShttps://cloud.tencent.com/document/product/436"
},
"DescribeConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": ""
},
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "NamespaceId",
"desc": "ID"
},
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "Limit",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "ConfigId",
"desc": "ID"
},
{
"name": "ApplicationId",
"desc": "ID"
}
],
"desc": ""
},
"StopContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
}
],
"desc": ""
},
"DescribeSimpleApplications": {
"params": [
{
"name": "ApplicationIdList",
"desc": "ID"
},
{
"name": "ApplicationType",
"desc": ""
},
{
"name": "Limit",
"desc": ""
},
{
"name": "Offset",
"desc": ""
},
{
"name": "MicroserviceType",
"desc": ""
},
{
"name": "ApplicationResourceTypeList",
"desc": ""
},
{
"name": "SearchWord",
"desc": "idname"
}
],
"desc": ""
},
"DescribePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "ID"
}
],
"desc": ""
},
"ModifyContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "AccessType",
"desc": "0: 1: 2NodePort"
},
{
"name": "ProtocolPorts",
"desc": "ProtocolPorts"
},
{
"name": "UpdateType",
"desc": "0: 1:"
},
{
"name": "UpdateIvl",
"desc": ","
}
],
"desc": ""
},
"DescribeApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
}
],
"desc": ""
},
"ShrinkInstances": {
"params": [
{
"name": "GroupId",
"desc": "ID"
},
{
"name": "InstanceIdList",
"desc": "ID"
}
],
"desc": ""
},
"ModifyUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "ID"
},
{
"name": "PkgId",
"desc": "DescribeUploadInfoID"
},
{
"name": "Result",
"desc": "COS0"
},
{
"name": "Md5",
"desc": "MD5"
},
{
"name": "Size",
"desc": ""
}
],
"desc": "COSTSF\n"
},
"AddInstances": {
"params": [
{
"name": "ClusterId",
"desc": "ID"
},
{
"name": "InstanceIdList",
"desc": "ID"
},
{
"name": "OsName",
"desc": ""
},
{
"name": "ImageId",
"desc": "ID"
},
{
"name": "Password",
"desc": ""
},
{
"name": "KeyId",
"desc": ""
},
{
"name": "SgId",
"desc": ""
},
{
"name": "InstanceImportMode",
"desc": "RTSFMagent"
}
],
"desc": "TSF"
}
} | 18.567516 | 165 | 0.392028 |
c8b067f63a4c14a9b78ac5bf7aace3e8420c7a16 | 1,729 | py | Python | workflow_scripts/test_models.py | jcwchen/models | 2fd86acdd51037570e1daefa03873237b76bd5a6 | [
"MIT"
] | 1 | 2020-12-19T14:46:23.000Z | 2020-12-19T14:46:23.000Z | workflow_scripts/test_models.py | sumit6597/models | 2fd86acdd51037570e1daefa03873237b76bd5a6 | [
"MIT"
] | null | null | null | workflow_scripts/test_models.py | sumit6597/models | 2fd86acdd51037570e1daefa03873237b76bd5a6 | [
"MIT"
] | 1 | 2021-08-08T11:47:35.000Z | 2021-08-08T11:47:35.000Z | import onnx
from pathlib import Path
import subprocess
import sys
cwd_path = Path.cwd()
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(['git', 'diff', '--name-only', '--diff-filter=AM', 'origin/master', 'HEAD'],
cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = obtain_diff.communicate()
diff_list = stdoutput.split()
# identify list of changed onnx models in model Zoo
model_list = [str(model).replace("b'","").replace("'", "") for model in diff_list if ".onnx" in str(model)]
# run lfs install before starting the tests
run_lfs_install()
print("\n=== Running ONNX Checker on added models ===\n")
# run checker on each model
failed_models = []
for model_path in model_list:
model_name = model_path.split('/')[-1]
print("Testing:", model_name)
try:
pull_lfs_file(model_path)
model = onnx.load(model_path)
onnx.checker.check_model(model)
print("Model", model_name, "has been successfully checked!")
except Exception as e:
print(e)
failed_models.append(model_path)
if len(failed_models) != 0:
print(str(len(failed_models)) +" models failed onnx checker.")
sys.exit(1)
print(len(model_list), "model(s) checked.")
| 35.285714 | 156 | 0.707924 |
c8b4dfd0fac657e7ac7e488ed975872bacfb263c | 25 | py | Python | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-02-28T21:18:16.000Z | 2020-03-13T16:45:57.000Z | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-02-28T12:42:52.000Z | 2020-03-16T03:49:09.000Z | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-03-05T13:04:25.000Z | 2020-03-13T16:46:03.000Z | from .utils import Config | 25 | 25 | 0.84 |
c8b5d127b254896268904720f95e3739d411d338 | 1,374 | py | Python | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
] | null | null | null | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
] | null | null | null | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
] | null | null | null | import glob
import pandas as pd
from tqdm import tqdm
from classifier import config
| 28.625 | 68 | 0.54294 |
c8b602b1d86d1edc850b44d842ce6f3bb89f273d | 642 | py | Python | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
] | null | null | null | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
] | null | null | null | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="urdu_digit",
version="0.0.17",
keywords=["urdu", "numeric", "digit", "converter"],
description="English to Urdu numeric digit converter.",
long_description=open('README.md').read(),
project_urls={
'Homepage': 'https://www.techtum.dev/work-urdu-digit-211001.html',
'Source': 'https://github.com/siphr/urdu-digit',
'Tracker': 'https://github.com/siphr/urdu-digit/issues',
},
author="siphr",
author_email="pypi@techtum.dev",
packages=['urdu_digit'],
platforms="any",
install_requires=[]
)
| 25.68 | 74 | 0.641745 |
c8b68cb341dae475cc25f2d74d8dcd06d0f58623 | 1,682 | py | Python | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
] | null | null | null | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
] | null | null | null | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
] | null | null | null | import collections
Interval = collections.namedtuple("Interval", "start, end")
| 29 | 116 | 0.521998 |
c8bd12730bd20c4875906f949b15caeb99026f0f | 4,874 | py | Python | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | 3 | 2019-07-27T14:00:42.000Z | 2020-01-17T17:07:51.000Z | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | null | null | null | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
] | 4 | 2019-10-22T02:58:26.000Z | 2020-10-06T09:59:26.000Z | import numpy as np
import matplotlib.pyplot as plt
| 34.083916 | 100 | 0.55437 |
c8c0726d584812a525a610e545b5c0960badaf74 | 18,223 | py | Python | tests/unit/core/tensorrt_loaders.py | ParikhKadam/NeMo | ee11f7c4666d410d91f9da33c61f4819ea625013 | [
"Apache-2.0"
] | 10 | 2020-03-17T08:32:06.000Z | 2021-04-19T19:03:50.000Z | tests/unit/core/tensorrt_loaders.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
] | 1 | 2020-06-11T00:54:42.000Z | 2020-06-11T00:54:42.000Z | tests/unit/core/tensorrt_loaders.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
] | 3 | 2020-03-10T05:10:07.000Z | 2020-12-08T01:33:35.000Z | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import time
import warnings
from collections import OrderedDict
import numpy as np
import onnx
import tensorrt as trt
from .tensorrt_format import FormatManager
from .tensorrt_runner import (
DEFAULT_SHAPE_VALUE,
TRT_LOGGER,
TensorRTRunnerV2,
default_value,
find_in_dict,
get_input_metadata_from_profile,
is_dimension_dynamic,
is_shape_dynamic,
is_valid_shape_override,
send_on_queue,
write_timestamped,
)
from nemo import logging, logging_mode
# Caches data loaded by a DataLoader for use across multiple runners.
# ONNX loaders return ONNX models in memory.
| 43.70024 | 548 | 0.610492 |
c8c0d558d52b83f545c1d622f249b8f8181f6952 | 420 | py | Python | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
] | 2 | 2019-10-08T10:49:52.000Z | 2021-10-01T11:26:31.000Z | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
] | 1 | 2019-05-16T13:48:29.000Z | 2019-05-16T13:48:49.000Z | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
] | 1 | 2019-10-08T10:49:56.000Z | 2019-10-08T10:49:56.000Z | import logging
import signal
from PySide2 import QtCore
import vstreamer_utils
| 28 | 62 | 0.754762 |
c8c12c77067e0a8b65aeb31d29a9acc363766542 | 2,345 | py | Python | serial_scripts/reset_config/test_reset_config.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | 1 | 2017-06-13T04:42:34.000Z | 2017-06-13T04:42:34.000Z | serial_scripts/reset_config/test_reset_config.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | serial_scripts/reset_config/test_reset_config.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | null | null | null | #Define environment variable FABRIC_UTILS_PATH and provide path to fabric_utils before running
import time
import os
from contrail_fixtures import *
import testtools
from tcutils.commands import *
from fabric.context_managers import settings
from tcutils.wrappers import preposttest_wrapper
from tcutils.util import *
from fabric.api import run
from fabric.state import connections
import test
from upgrade.verify import VerifyFeatureTestCases
from base import ResetConfigBaseTest
| 40.431034 | 94 | 0.690405 |
c8c174e66db5ae93829e5da36ac5e18a48241662 | 15,382 | py | Python | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
] | 1 | 2021-02-03T13:37:48.000Z | 2021-02-03T13:37:48.000Z | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
] | 8 | 2020-07-16T23:17:51.000Z | 2020-10-14T20:40:00.000Z | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
] | null | null | null | from server.services.wiki.pages.templates import OverviewPageTemplates
from server.services.wiki.pages.page_service import PageService
from server.services.wiki.mediawiki_service import MediaWikiService
from server.services.wiki.wiki_text_service import WikiTextService
from server.services.wiki.wiki_table_service import WikiTableService
from server.services.wiki.wiki_section_service import WikiSectionService
from server.models.serializers.document import OverviewPageSchema
| 39.64433 | 88 | 0.594981 |
c8c21cc5ec4a4f6297ac9cc8b0615e326672a6bb | 414 | py | Python | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | 44 | 2022-01-21T01:33:59.000Z | 2022-03-26T23:35:25.000Z | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-03-06 02:23
from django.db import migrations, models
| 21.789474 | 76 | 0.628019 |
c8c3d449685f28e78f767aafb617c4bfc465febb | 2,779 | py | Python | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
] | 17 | 2015-01-15T21:41:16.000Z | 2021-01-10T15:34:09.000Z | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
] | null | null | null | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
] | 5 | 2015-02-07T02:41:18.000Z | 2016-11-11T02:50:21.000Z | import imp
import os.path
from app import db
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
# end of file | 41.477612 | 144 | 0.77366 |
c8c574de241b0c8199ec3be2586cfc5532691047 | 5,253 | py | Python | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | from xmuda.models.SSC2d_proj3d2d import SSC2dProj3d2d
from xmuda.data.NYU.nyu_dm import NYUDataModule
from xmuda.data.semantic_kitti.kitti_dm import KittiDataModule
from xmuda.common.utils.sscMetrics import SSCMetrics
from xmuda.data.NYU.params import class_relation_freqs as NYU_class_relation_freqs, class_freq_1_4 as NYU_class_freq_1_4, class_freq_1_8 as NYU_class_freq_1_8, class_freq_1_16 as NYU_class_freq_1_16
import numpy as np
import torch
import torch.nn.functional as F
from xmuda.models.ssc_loss import get_class_weights
from tqdm import tqdm
import pickle
import os
#model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/no_mask_255/v12_removeCPThreshold_KLnonzeros_LRDecay30_NYU_1_0.0001_0.0001_CPThreshold0.0_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=030-val/mIoU=0.26983.ckpt"
model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/kitti/v12_ProjectScale2_CPAt1_8_1divlog_LargerFOV_kitti_1_FrusSize_4_WD0_lr0.0001_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=037-val/mIoU=0.11056.ckpt"
class_weights = {
'1_4': get_class_weights(NYU_class_freq_1_4).cuda(),
'1_8': get_class_weights(NYU_class_freq_1_8).cuda(),
'1_16': get_class_weights(NYU_class_freq_1_16).cuda(),
}
#dataset = "NYU"
dataset = "kitti"
if dataset == "NYU":
NYU_root = "/gpfswork/rech/kvd/uyl37fq/data/NYU/depthbin"
NYU_preprocess_dir = "/gpfsscratch/rech/kvd/uyl37fq/precompute_data/NYU"
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
full_scene_size = (240, 144, 240)
output_scene_size = (60, 36, 60)
NYUdm = NYUDataModule(NYU_root, NYU_preprocess_dir, batch_size=4, num_workers=3)
NYUdm.setup()
_C = 12
data_loader = NYUdm.val_dataloader()
else:
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
kitti_depth_root = "/gpfsscratch/rech/kvd/uyl37fq/Adabin/KITTI/"
kitti_logdir = '/gpfsscratch/rech/kvd/uyl37fq/logs/kitti'
kitti_tsdf_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/TSDF_pred_depth_adabin/kitti"
kitti_label_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/labels/kitti"
kitti_occ_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/occupancy_adabin/kitti"
kitti_sketch_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/sketch_3D/kitti"
kitti_mapping_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/mapping_adabin/kitti"
full_scene_size = (256, 256, 32)
KITTIdm = KittiDataModule(root=kitti_root,
data_aug=True,
TSDF_root=kitti_tsdf_root,
label_root=kitti_label_root,
mapping_root=kitti_mapping_root,
occ_root=kitti_occ_root,
depth_root=kitti_depth_root,
sketch_root=kitti_sketch_root,
batch_size=1,
num_workers=3)
KITTIdm.setup()
_C = 20
data_loader = KITTIdm.val_dataloader()
class_relation_weights = get_class_weights(NYU_class_relation_freqs)
model = SSC2dProj3d2d.load_from_checkpoint(model_path)
model.cuda()
model.eval()
count = 0
out_dict = {}
count = 0
write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/draw_output/kitti"
with torch.no_grad():
for batch in tqdm(data_loader):
if dataset == "NYU":
y_true = batch['ssc_label_1_4'].detach().cpu().numpy()
valid_pix_4 = batch['valid_pix_4']
else:
y_true = batch['ssc_label_1_1'].detach().cpu().numpy()
# valid_pix_1 = batch['valid_pix_1']
valid_pix_1 = batch['valid_pix_double']
batch['img'] = batch['img'].cuda()
pred = model(batch)
y_pred = torch.softmax(pred['ssc'], dim=1).detach().cpu().numpy()
y_pred = np.argmax(y_pred, axis=1)
for i in range(y_true.shape[0]):
out_dict = {
"y_pred": y_pred[i].astype(np.uint16),
"y_true": y_true[i].astype(np.uint16),
}
if dataset == "NYU":
filepath = os.path.join(write_path, batch['name'][i] + ".pkl")
out_dict["cam_pose"] = batch['cam_pose'][i].detach().cpu().numpy()
out_dict["vox_origin"] = batch['vox_origin'][i].detach().cpu().numpy()
elif dataset == "kitti":
filepath = os.path.join(write_path, batch['sequence'][i], batch['frame_id'][i] + ".pkl")
out_dict['valid_pix_1'] = valid_pix_1[i].detach().cpu().numpy()
out_dict['cam_k'] = batch['cam_k'][i].detach().cpu().numpy()
out_dict['T_velo_2_cam'] = batch['T_velo_2_cam'][i].detach().cpu().numpy()
os.makedirs(os.path.join(write_path, batch['sequence'][i]), exist_ok=True)
with open(filepath, 'wb') as handle:
pickle.dump(out_dict, handle)
print("wrote to", filepath)
count += 1
# if count == 4:
# break
# write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/output"
# filepath = os.path.join(write_path, "output.pkl")
# with open(filepath, 'wb') as handle:
# pickle.dump(out_dict, handle)
# print("wrote to", filepath)
| 44.897436 | 234 | 0.663811 |
c8c6f7ca2165cf621b2f2448c66168d6e16e7af2 | 9,695 | py | Python | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
] | 2,075 | 2019-02-25T08:54:38.000Z | 2022-03-31T10:44:50.000Z | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
] | 176 | 2019-03-12T02:58:42.000Z | 2022-03-22T20:17:23.000Z | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
] | 437 | 2019-03-11T21:36:21.000Z | 2022-03-29T02:40:53.000Z | # Author: penhe@microsoft.com
# Date: 05/30/2019
#
""" Data parallel module
"""
from collections import OrderedDict
import numpy as np
import torch
from torch.cuda.comm import broadcast_coalesced
from torch.cuda.comm import reduce_add_coalesced
from torch.nn.parallel import parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs,gather
import torch.cuda.comm as comm
import pdb
from bert.optimization import BertAdam
def optimizer_factory(args, training_steps=None, init_spec=None, no_decay=['bias', 'LayerNorm.weight']):
return optimizer_fn
| 35.643382 | 139 | 0.664569 |
c8c808427fd949238223a24b72518b4c7f83bcd8 | 1,190 | py | Python | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
] | null | null | null | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
] | null | null | null | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import User
from .models import Product
from django.contrib.auth import get_user_model
| 36.060606 | 92 | 0.657143 |
c8ca44f18c6c1244335778442d0b31143cb496f7 | 811 | py | Python | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
] | null | null | null | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
] | null | null | null | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
] | null | null | null | '''
Author: geekli
Date: 2020-12-27 10:38:46
LastEditTime: 2020-12-27 10:40:44
LastEditors: your name
Description:
FilePath: \pythonQT\ch02\multiSinal_button.py
'''
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | 25.34375 | 62 | 0.630086 |
c8cbd8c6d4128ec1fba81659c9414d125347bfa3 | 105 | py | Python | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BH_time_collocations | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 5 | 2019-06-19T19:42:21.000Z | 2021-04-20T22:43:45.000Z | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | 2 | 2020-02-25T10:19:40.000Z | 2020-03-13T15:29:01.000Z | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
] | null | null | null | from __main__ import *
hm_df = functs_df[~((functs_df.head_type == 'prep') & (functs_df.suffix))].copy()
| 35 | 81 | 0.695238 |
c8cc6707f00bfb68eb5be0a694507e862c881eb3 | 1,123 | py | Python | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | from autodc.components.hpo_optimizer.smac_optimizer import SMACOptimizer
from autodc.components.hpo_optimizer.mfse_optimizer import MfseOptimizer
from autodc.components.hpo_optimizer.bohb_optimizer import BohbOptimizer
from autodc.components.hpo_optimizer.tpe_optimizer import TPEOptimizer
| 46.791667 | 86 | 0.688335 |
c8ccf268808a95f71f44af0d1f8a0dcac8ac8aa6 | 835 | py | Python | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
] | null | null | null | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
] | 1 | 2020-05-10T12:57:46.000Z | 2020-05-10T12:59:27.000Z | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys, select, termios,tty
import os
if __name__ == "__main__":
main() | 19.880952 | 50 | 0.568862 |
c8cd1764a3562bbf6dce2fed67c34407e35a1349 | 1,516 | py | Python | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
] | null | null | null | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
] | null | null | null | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Copyright (c) 2019 Bart Massey
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
# Find maximum and minimum sample in an audio file.
import sys
import wave as wav
# Get the signal file.
wavfile = wav.open(sys.argv[1], 'rb')
# Channels per frame.
channels = wavfile.getnchannels()
# Bytes per sample.
width = wavfile.getsampwidth()
# Sample rate
rate = wavfile.getframerate()
# Number of frames.
frames = wavfile.getnframes()
# Length of a frame
frame_width = width * channels
# Get the signal and check it.
max_sample = None
min_sample = None
wave_bytes = wavfile.readframes(frames)
# Iterate over frames.
for f in range(0, len(wave_bytes), frame_width):
frame = wave_bytes[f : f + frame_width]
# Iterate over channels.
for c in range(0, len(frame), width):
# Build a sample.
sample_bytes = frame[c : c + width]
# XXX Eight-bit samples are unsigned
sample = int.from_bytes(sample_bytes,
byteorder='little',
signed=(width>1))
# Check extrema.
if max_sample == None:
max_sample = sample
if min_sample == None:
min_sample = sample
if sample > max_sample:
max_sample = sample
if sample < min_sample:
min_sample = sample
wavfile.close()
print("min: {} max: {}".format(min_sample, max_sample))
| 25.694915 | 56 | 0.638522 |
c8ce16cc98ba530c9d0d89640e062797670ba6af | 275 | py | Python | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
] | 1 | 2015-04-26T19:49:35.000Z | 2015-04-26T19:49:35.000Z | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
] | null | null | null | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
] | null | null | null | '''
A very simple test application to exercise a round trip of messages through the thywill system.
This also illustrates the bare, bare minimum implementation of the 'thywill_interface.py' module -
all it does is echo back incoming messages to the client who sent them.
''' | 45.833333 | 98 | 0.789091 |
c8ce9069c002bb7867b82767bde341a14df75d08 | 104 | py | Python | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
] | 1,013 | 2020-08-27T12:38:48.000Z | 2022-03-31T23:12:23.000Z | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
] | 217 | 2020-08-31T11:18:10.000Z | 2022-03-30T17:50:30.000Z | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
] | 54 | 2020-09-02T09:41:06.000Z | 2022-03-19T15:33:05.000Z | from tests import app
| 14.857143 | 36 | 0.721154 |
c8d09ce36295ecfe93aeeecfaa8a003ce925b428 | 6,979 | py | Python | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
] | null | null | null |
import typing
from jk_cachefunccalls import cacheCalls
from jk_cmdoutputparsinghelper import ValueParser_ByteWithUnit
from .parsing_utils import *
from .invoke_utils import run
#import jk_json
_parserColonKVP = ParseAtFirstDelimiter(delimiter=":", valueCanBeWrappedInDoubleQuotes=False, keysReplaceSpacesWithUnderscores=True)
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
def parse_proc_cpu_info(stdout:str, stderr:str, exitcode:int) -> typing.Tuple[list,dict]:
"""
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1000.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1100.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
initial apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
if exitcode != 0:
raise Exception()
cpuInfos = splitAtEmptyLines(stdout.split("\n"))
retExtra = {}
ret = []
for group in cpuInfos:
d = _parserColonKVP.parseLines(group)
if "processor" not in d:
for k, v in d.items():
retExtra[k.lower()] = v
continue
if "cache_size" in d:
d["cache_size_kb"] = ValueParser_ByteWithUnit.parse(d["cache_size"]) // 1024
del d["cache_size"]
if "bogomips" in d:
d["bogomips"] = float(d["apicid"])
elif "BogoMIPS" in d:
d["bogomips"] = float(d["BogoMIPS"])
del d["BogoMIPS"]
if "bugs" in d:
d["bugs"] = d["bugs"].split()
if "flags" in d:
d["flags"] = sorted(d["flags"].split())
elif "Features" in d:
d["flags"] = sorted(d["Features"].split())
del d["Features"]
# bool
for key in [ "fpu", "fpu_exception", "wp" ]:
if key in d:
d[key.lower()] = d[key] == "yes"
if key != key.lower():
del d[key]
# int
for key in [ "CPU_architecture", "CPU_revision", "physical_id", "initial_apicid", "cpu_cores", "core_id", "clflush_size", "cache_alignment", "apicid" ]:
if key in d:
d[key.lower()] = int(d[key])
if key != key.lower():
del d[key]
# float
for key in [ "cpu_MHz" ]:
if key in d:
d[key.lower()] = float(d[key])
if key != key.lower():
del d[key]
# str
for key in [ "CPU_implementer", "CPU_part", "CPU_variant" ]:
if key in d:
d[key.lower()] = d[key]
if key != key.lower():
del d[key]
d["processor"] = int(d["processor"])
if "siblings" in d:
d["siblings"] = int(d["siblings"])
#jk_json.prettyPrint(d)
ret.append(d)
return ret, retExtra
#
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
#
| 29.572034 | 612 | 0.71271 |
c8d14c78402ef6d14f3e0943706f524623b640ce | 900 | py | Python | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
] | null | null | null | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
] | null | null | null | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
] | null | null | null | '''
This module provides the Telegram.
'''
| 20.930233 | 61 | 0.564444 |
c8d1af14aa978ccc8ecf4f4ebec0ffa36d951d1c | 345 | py | Python | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
] | null | null | null | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
] | null | null | null | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
] | null | null | null | from testing_framework.report import report
from typing import Tuple
import html | 23 | 62 | 0.695652 |
c8d1c681c7ce88bcb176a7a0b8c693c830a7bc65 | 160 | py | Python | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | 12 | 2016-10-03T20:43:43.000Z | 2021-06-12T17:18:42.000Z | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | null | null | null | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | 10 | 2017-11-14T19:56:37.000Z | 2021-02-02T07:39:57.000Z | while(True):
inp = [int(x) for x in input().split()]
if inp[0] == 0 and inp[1] == 0:
break
print(inp[0]//inp[1], inp[0]%inp[1], "/", inp[1]) | 32 | 53 | 0.48125 |
c8d23bd00fcfedf98199c38fb1e64ea94cbde637 | 4,480 | py | Python | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
] | null | null | null | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
] | null | null | null | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import time
import rospy
import subprocess
import actionlib
from std_msgs.msg import Float32
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped
from actionlib_msgs.msg import GoalStatus, GoalStatusArray
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
| 39.646018 | 99 | 0.59375 |
c8d264727c0faf5a872f18da939f483862ce785c | 108 | py | Python | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
] | null | null | null | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
] | null | null | null | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
] | null | null | null | from backend.domain.contracts import NewClient, NewOrder, NewOrderItem
from .new_product import NewProduct
| 27 | 70 | 0.851852 |
c8d5d6f27303f0d53ce075025843560499c32f81 | 508 | py | Python | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 6 | 2019-01-29T05:58:37.000Z | 2021-11-02T22:47:02.000Z | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 9 | 2020-09-09T04:53:01.000Z | 2022-03-08T22:52:18.000Z | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 4 | 2019-01-29T07:38:55.000Z | 2021-10-16T21:06:42.000Z |
from uuid import UUID
import json
from ..mappings import *
def add_doc_audit_entry(session, doc_id, status, data):
""""Add an audit entry, requires that a commit
be run on the session afterwards
"""
if not isinstance(doc_id, UUID):
raise ValueError("Expecting UUID")
if not isinstance(data, dict):
raise ValueError("Expecting dict")
session.add(FileUsage(
document_id=doc_id.bytes,
fileusage_type=status,
data=json.dumps(data)
))
| 22.086957 | 55 | 0.65748 |
c8d758a027414f97b213413022804a7b0f68fe28 | 523 | py | Python | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
] | null | null | null | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
] | null | null | null | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
] | null | null | null | # versions of libraries used
import sys
import tweepy
import numpy as np
import pymongo
import emoji
import nltk.tokenize
import requests
print("Python version:{}".format(sys.version))
print("tweepy version:{}".format(tweepy.__version__))
print("pymongo version:{}".format(pymongo.__version__))
print("emoji version:{}".format(emoji.__version__))
print("requests version:{}".format(requests.__version__))
print("numpy version:{}".format(np.__version__))
print("nltk version:{}".format(nltk.__version__))
| 29.055556 | 58 | 0.745698 |
c8d9772ef30de66f59d67a0dc784ccc67d52e59f | 94 | py | Python | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
] | null | null | null | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
] | null | null | null | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
] | null | null | null |
count_ones(20) | 15.666667 | 27 | 0.712766 |
c8d9edb95baf53d14122148e741bd4d9e71e6992 | 6,968 | py | Python | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
] | 1 | 2019-03-15T03:10:08.000Z | 2019-03-15T03:10:08.000Z | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
] | null | null | null | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import dbm
from sklearn.datasets import load_iris
from classifer.base import BaseClassifier
from classifer.decision_tree import DecisionTreeClassifier
import numpy as np
def test():
    """Train an AdaBoost classifier on two iris features and plot its
    decision surface (adapted from the scikit-learn decision-tree demo).

    NOTE(review): this module uses Python 2 `print` statements, and
    `DecisionAdaBoostClassifier` is not imported above — presumably it is
    defined elsewhere in this file; verify before running.
    """
    n_classes = 3          # iris has three target classes
    plot_colors = "bry"    # one colour character per class
    plot_step = 0.02       # mesh resolution for the decision surface
    # Load data
    iris = load_iris()
    import matplotlib.pyplot as plt
    # We only take the two corresponding features
    pairidx = 0
    pair = [0, 1]
    X = iris.data[:, pair]
    y = iris.target
    # Shuffle samples (fixed seed for reproducibility)
    idx = np.arange(X.shape[0])
    np.random.seed(13)
    np.random.shuffle(idx)
    X = X[idx]
    y = y[idx]
    # Standardize features to zero mean / unit variance
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    X = (X - mean) / std
    # Train
    clf = DecisionAdaBoostClassifier(num_rounds=3)
    # clf = DecisionTreeClassifier()
    # print X
    print y
    clf.train(X, y)
    # Plot the decision boundary over a dense grid covering the data range
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    print '----'
    print iris.data[:1, ]
    # Classify every grid point and reshape predictions back onto the mesh
    values = np.c_[xx.ravel(), yy.ravel()]
    Z = clf.predict(values)
    print Z
    print Z.shape
    print xx.shape
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points, one colour per class
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
    plt.suptitle("Decision surface of a decision tree using paired features")
    plt.legend()
    plt.show()


if __name__ == '__main__':
    test()
| 28.325203 | 104 | 0.550373 |
c8da9080a11e6c113c5b2a18202d6e7d74fba286 | 4,942 | py | Python | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
] | 1 | 2022-02-02T07:49:58.000Z | 2022-02-02T07:49:58.000Z | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
] | null | null | null | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
] | null | null | null | import numpy as np
from bioinfo.assembly.errors import InvalidPair
from bioinfo.molecules.sequence import Sequence
| 31.679487 | 68 | 0.474909 |
c8dab9e9589a6e0d7ec3775c63cd68cd42f91ee4 | 857 | py | Python | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
] | null | null | null | from __future__ import annotations
from . import _base
| 25.205882 | 69 | 0.611435 |
c8dad2fb3e34935d8ee2d55f042a5e204873fdf4 | 187 | py | Python | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | null | null | null | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | 5 | 2018-12-27T02:52:01.000Z | 2019-01-02T01:52:55.000Z | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
] | null | null | null | import pytest
def test_with_fixture(fixture2):
assert fixture2 == 2
| 11.6875 | 32 | 0.71123 |
c8e095e4b5a713605e60ac5cfbe8f9beb652c2f1 | 390 | py | Python | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
] | 1 | 2021-10-12T12:15:00.000Z | 2021-10-12T12:15:00.000Z | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
] | null | null | null | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
] | 1 | 2021-11-20T14:51:11.000Z | 2021-11-20T14:51:11.000Z | from searcher import CLIPSearcher
from utils import get_args
if __name__ == "__main__":
    # Parse command-line options (device, store path, target dir, query, ...).
    args = get_args()
    # CLIP-based searcher backed by an on-disk feature store.
    cs = CLIPSearcher(device=args.device, store_path=args.store_path)
    # Index the directory, checkpointing the store every `save_every` items;
    # `load_new` controls whether previously unseen files get embedded.
    cs.load_dir(args.dir, save_every=args.save_every, recursive=args.recursive, load_new=(not args.dont_load_new))
    # Run the text/image query and write the top `results` matches to `outdir`.
    cs.search(texts=args.texts, images=args.images, results=args.results, outdir=args.outdir)
| 43.333333 | 114 | 0.769231 |
c8e2a3f8d1524fcc6efb93afc74fa20ef2432c75 | 2,049 | py | Python | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
] | 7 | 2020-04-02T11:11:09.000Z | 2022-02-05T23:19:51.000Z | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
] | 24 | 2020-04-22T16:55:09.000Z | 2022-03-30T20:44:39.000Z | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
] | 3 | 2020-05-08T00:50:02.000Z | 2020-12-19T00:48:56.000Z | """For entities that have a property template."""
from gemd.entity.link_by_uid import LinkByUID
from gemd.entity.setters import validate_list
from gemd.entity.template.base_template import BaseTemplate
from gemd.entity.template.property_template import PropertyTemplate
from gemd.entity.bounds.base_bounds import BaseBounds
from typing import Iterable
| 32.52381 | 84 | 0.625671 |
c8e3e5f641575e46034c6e7d21d6b9a28bd02474 | 1,547 | py | Python | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
] | null | null | null | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
] | null | null | null | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
] | null | null | null | from app.models import Subscriber
from flask_wtf import FlaskForm
from wtforms import TextAreaField, StringField, IntegerField, EmailField
from wtforms.validators import InputRequired, ValidationError
from flask import flash
# comment form
# subscriber form | 40.710526 | 76 | 0.728507 |
c8e4d42dd8ef4d4d14c2794784ca0f4e4747b37c | 278 | py | Python | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
] | 6 | 2021-02-19T02:36:06.000Z | 2021-03-20T09:38:17.000Z | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
] | 27 | 2021-01-13T06:43:44.000Z | 2021-05-12T04:55:28.000Z | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
] | 7 | 2021-01-26T04:50:11.000Z | 2021-03-04T22:26:59.000Z | import toml
| 21.384615 | 55 | 0.694245 |
c8e6c52bd4d19fdf314e6096b12ca3b0f03e5a63 | 3,214 | py | Python | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
] | 4 | 2021-03-01T18:28:34.000Z | 2021-03-11T12:20:16.000Z | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
] | null | null | null | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
] | null | null | null | """
Retrieve GoDaddy DNS settings via their developer API
See also:
https://developer.godaddy.com/doc/endpoint/domains#/
"""
import os
import time
from pprint import pprint
from typing import List
import requests
import credential_loaders
BASE_URL = "https://api.godaddy.com"
# You can easily replace these with a different CredentialLoader to match your key management system
API_KEY_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_KEY")
API_SECRET_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_SECRET")
# API_KEY_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_key.txt")
# API_SECRET_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_secret.txt")
def _get_headers() -> dict:
    """Build the sso-key Authorization header for the GoDaddy developer API.

    https://developer.godaddy.com/keys
    """
    credentials = (
        API_KEY_CRED_LOADER.load_credentials(),
        API_SECRET_CRED_LOADER.load_credentials(),
    )
    return {"Authorization": "sso-key {}:{}".format(*credentials)}
def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:
    """Call a GoDaddy developer API endpoint and return the decoded JSON body.

    Only GET endpoints are supported to keep access read-only.

    :param url_suffix: path below the API root, e.g. "v1/domains"
    :param base_url: API root; defaults to the production GoDaddy API
    """
    headers = _get_headers()
    # Join with plain string formatting rather than os.path.join: os.path.join
    # uses the platform path separator, which would produce a backslash in the
    # URL on Windows.
    url = "{}/{}".format(base_url.rstrip("/"), url_suffix.lstrip("/"))
    resp = requests.get(url, headers=headers)
    return resp.json()
def get_domains() -> List[str]:
    """Return the names of all domains registered under this API key."""
    # The endpoint returns one dict per domain, e.g.:
    # {'domain': 'mydomain.com', 'domainId': 12345678, 'status': 'ACTIVE',
    #  'expires': '2020-06-25T03:08:44.000Z', 'renewAuto': True, ...}
    return [entry["domain"] for entry in _call_endpoint("v1/domains")]
def get_domain_dns_records(domain):
    """Return the DNS entries for *domain*.

    Each entry is a dict such as
    {'data': '160.153.162.20', 'name': '_dmarc', 'ttl': 3600, 'type': 'A'}.
    Raises when the API reports the domain as unknown.
    """
    records = _call_endpoint("v1/domains/{}/records".format(domain))
    # Error payloads come back as a dict instead of a list of records, e.g.
    # {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, ...'}
    is_unknown = isinstance(records, dict) and records.get('code', None) == "UNKNOWN_DOMAIN"
    if is_unknown:
        raise Exception(
            f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {records}"
        )
    return records
def print_all_dns_records():
    """Print each domain linked to this API key together with its DNS records."""
    for domain in sorted(get_domains()):
        records = get_domain_dns_records(domain)
        print(domain)
        pprint(records)
        print("*" * 50)
        # TODO: poor man's rate limiter between API calls. improve?
        time.sleep(2)
if __name__ == "__main__":
    # Script entry point: dump the DNS records of every domain on this API key.
    print_all_dns_records()
| 32.795918 | 120 | 0.671749 |
c8e80bc7bd958f10a7a1f279ed0d99283b77f722 | 1,184 | py | Python | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
] | 10 | 2019-12-29T13:38:56.000Z | 2021-03-15T07:21:52.000Z | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
] | 1 | 2021-03-15T07:45:45.000Z | 2021-03-17T11:10:53.000Z | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
] | 2 | 2020-05-03T08:33:39.000Z | 2021-02-06T16:49:54.000Z | import cv2
import numpy as np
| 28.878049 | 71 | 0.579392 |
c8e8ef9bc1df23fffd3b87a416935aa12a7c1e19 | 214 | py | Python | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .tables.pronto_soccorsi import table
| 14.266667 | 41 | 0.696262 |
c8ea55c5455ae4d69b07f53ce37792d7f4a82837 | 132 | py | Python | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
] | null | null | null | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
] | null | null | null | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
] | 1 | 2021-11-18T01:58:29.000Z | 2021-11-18T01:58:29.000Z | # -*- coding: utf-8 -*-
# @Author : Administrator
# @DateTime : 2021/10/17 20:40
# @FileName : __init__.py
# @SoftWare : PyCharm
| 18.857143 | 30 | 0.621212 |
c8ebd9a417dcbfc90f2665cef2e143f107c15986 | 497 | py | Python | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
] | null | null | null | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
] | null | null | null | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
] | null | null | null | import requests
from datetime import date, timedelta
# Query the covid19api.com "confirmed" status for a one-day window ending today.
today = date.today()
yesterday = today - timedelta(days=1)

country = "Russia"
endpoint = f"https://api.covid19api.com/country/{country}/status/confirmed"
params = {"from": str(yesterday), "to": str(today)}
# The API returns one JSON record per day in the requested window.
response = requests.get(endpoint, params=params).json()

# Sum the confirmed-case counts over the returned days.
total_confirmed = sum(day.get("Cases", 0) for day in response)

print("\n"f"Total Confirmed Covid-19 cases in {country}: {total_confirmed}")
| 29.235294 | 76 | 0.724346 |
c8ec940438930475725da4b1624b8e42cb723947 | 157 | py | Python | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
] | 1 | 2021-05-17T11:49:12.000Z | 2021-05-17T11:49:12.000Z | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
] | null | null | null | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
] | 1 | 2021-05-17T11:49:22.000Z | 2021-05-17T11:49:22.000Z | # import models from torchvision
from torchvision.models import *
# import models from efficientnet
from .efficientnet import b0, b1, b2, b3, b4, b5, b6, b7
| 31.4 | 56 | 0.764331 |
c8ee532a04ed15373dc8d2091c28d0c7dca10643 | 2,834 | py | Python | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
] | null | null | null | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
] | null | null | null | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
] | null | null | null | #This file plots the results from the MPI timing runs
import sys
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.markers as mkr
# Global plot styling.
plt_style = 'ggplot'
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['figure.titlesize'] = 12

# Load the CSV files of rank pairings and times; columns are
# (base rank A, rank-to-merge B, timestamp t). Pass the filenames directly to
# np.loadtxt instead of open() so no file handles are leaked.
StartStr = str(sys.argv[1])
EndStr = str(sys.argv[2])
start = np.loadtxt(StartStr, delimiter=',', dtype={'names': ('A', 'B', 't'), 'formats': ('i4', 'i4', 'f8')})
end = np.loadtxt(EndStr, delimiter=',', dtype={'names': ('A', 'B', 't'), 'formats': ('i4', 'i4', 'f8')})

# Index each record by its "A:B" pairing so start and end rows can be matched.
ds = [{'%s:%s' % (a, b): (a, b, t) for a, b, t in zip(start['A'], start['B'], start['t'])}]
de = [{'%s:%s' % (a, b): (a, b, t) for a, b, t in zip(end['A'], end['B'], end['t'])}]

# Earliest recorded start time over all ranks, used as the zero offset.
t0 = np.min(start['t'])

# --- 3D Rank A:B vs time diagram ---
fig = plt.figure()
plt.style.use(plt_style)
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.set_zlabel('time [s]')
ax.set_ylabel('Rank To Merge')
ax.set_xlabel('Rank Base')
# Plot the recorded times and connect ranks that have been merged together.
for a in ds[0].keys():
    ax.scatter(ds[0][a][0], ds[0][a][1], ds[0][a][2] - t0, c='r', marker='o')  # start
    ax.scatter(de[0][a][0], de[0][a][1], de[0][a][2] - t0, c='b', marker='x')  # end
    # Line between start and finish of each merge.
    ax.plot([ds[0][a][0], de[0][a][0]], [ds[0][a][1], de[0][a][1]], [ds[0][a][2] - t0, de[0][a][2] - t0], c='k')
ax.set_zlim3d([0, np.max(end['t']) - t0])
ax.set_ylim3d([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
ax.set_xlim3d([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
plt.show()
# Save the 3D plot output.
plt.savefig('3d_%s_%s.pdf' % (StartStr, EndStr))
plt.clf()
plt.style.use(plt_style)

# --- 2D connections diagram ---
# Horizontal guide line per MPI rank. NOTE: the original used Python 2's
# `xrange`, which raises NameError on Python 3; `range` works on both.
for ii in range(np.max([start['A'], start['B']])):
    plt.axhline(ii, xmin=0, xmax=1, linewidth=0.5)
# Draw lines between the start and end for reducing 2 data sets.
for a in ds[0].keys():
    plt.plot([ds[0][a][2] - t0, de[0][a][2] - t0], [ds[0][a][1], de[0][a][0]], linestyle='-', linewidth=0.5, c='k', alpha=0.8)
plt.scatter(start['t'] - t0, start['B'], marker='x', c='r', alpha=0.8)
plt.scatter(end['t'] - t0, end['A'], marker='o', c='b', alpha=0.8)
plt.xlabel('time [s]')
plt.ylabel('MPI rank')
plt.title('%s_%s' % (StartStr, EndStr))
plt.xlim([0, np.max(end['t']) - t0])
plt.ylim([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
plt.show()
# Save the 2D plot output.
plt.savefig('2d_%s_%s.pdf' % (StartStr, EndStr))
| 38.821918 | 155 | 0.61856 |
c8efd5f50e23a88b242e0e5832ddd548e4a5108c | 1,809 | py | Python | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
] | 25 | 2020-06-30T16:46:43.000Z | 2022-01-04T15:27:49.000Z | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
] | 3 | 2020-11-25T15:09:33.000Z | 2021-05-08T11:25:14.000Z | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
] | 2 | 2021-06-17T11:21:49.000Z | 2021-12-02T13:07:15.000Z | from typing import Iterator
from entitykb import Span, interfaces, Doc
| 27.409091 | 73 | 0.63571 |
c8effc674c65f81f1f4c9fdac1c750120b3d16ef | 716 | py | Python | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
] | 1 | 2022-01-27T22:29:38.000Z | 2022-01-27T22:29:38.000Z | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
] | null | null | null | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
] | null | null | null | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from click.testing import CliRunner
from octavia_cli import entrypoint
| 27.538462 | 116 | 0.734637 |
c8f2a4e3254c600092c6d8f19d958953e7b804a3 | 5,261 | py | Python | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
] | 1 | 2020-12-01T17:10:14.000Z | 2020-12-01T17:10:14.000Z | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
] | 1 | 2021-09-19T13:38:02.000Z | 2021-09-19T13:38:02.000Z | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
] | null | null | null | import json
import logging
import random
from datetime import datetime
from typing import Optional
from paho.mqtt.client import MQTTMessage
from enocean.protocol.constants import PACKET
from enocean.protocol.packet import RadioPacket
from src.command.switch_command import SwitchCommand
from src.common.json_attributes import JsonAttributes
from src.common.switch_state import SwitchState
from src.device.base.cyclic_device import CheckCyclicTask
from src.device.base.scene_actor import SceneActor
from src.device.eltako.fsr61_eep import Fsr61Eep, Fsr61Action, Fsr61Command
from src.device.misc.rocker_switch_tools import RockerSwitchTools, RockerAction, RockerButton
from src.enocean_connector import EnoceanMessage
from src.tools.enocean_tools import EnoceanTools
from src.tools.pickle_tools import PickleTools
| 40.469231 | 117 | 0.698536 |
c8f361858524234ea8e385c43bd790d28e9507fd | 1,960 | py | Python | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
] | 20 | 2015-03-11T11:21:32.000Z | 2021-10-11T16:03:27.000Z | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
] | 48 | 2015-01-15T18:41:01.000Z | 2022-01-05T13:53:58.000Z | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
] | 16 | 2015-01-14T21:53:46.000Z | 2019-09-04T23:05:27.000Z | import numpy as np
import neuroml
import neuroml.arraymorph as am
| 31.612903 | 67 | 0.656122 |