hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fee39b66b3b2ef9dd7dd901d2d89a2d3c684442c | 11,043 | py | Python | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
725. Split Linked List in Parts
Medium
0Given the head of a singly linked list and an integer k, split the linked list into k consecutive linked list parts.
The length of each part should be as equal as possible: no two parts should have a size differing by more than one. This may lead to some parts being null.
The parts should be in the order of occurrence in the input list, and parts occurring earlier should always have a size greater than or equal to parts occurring later.
Return an array of the k parts.
Example 1:
Input: head = [1,2,3], k = 5
Output: [[1],[2],[3],[],[]]
Explanation:
The first element output[0] has output[0].val = 1, output[0].next = null.
The last element output[4] is null, but its string representation as a ListNode is [].
Example 2:
Input: head = [1,2,3,4,5,6,7,8,9,10], k = 3
Output: [[1,2,3,4],[5,6,7],[8,9,10]]
Explanation:
The input has been split into consecutive parts with size difference at most 1, and earlier parts are a larger size than the later parts.
Constraints:
The number of nodes in the list is in the range [0, 1000].
0 <= Node.val <= 1000
1 <= k <= 50
"""
# V0
# IDEA : LINKED LIST OP + mod op
# V0'
# V0'
# IDEA : LINKED LIST OP
# V1
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/109284/Elegant-Python-with-Explanation-45ms
### Test case : dev
# V1'
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/139360/Simple-pythonic-solution.-Beats-100
def get_length(root):
    """Return the number of nodes in the singly linked list starting at *root*.

    A ``None`` head is treated as an empty list and yields 0.
    """
    count = 0
    node = root
    while node is not None:
        count += 1
        node = node.next
    return count
# V1''
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/237516/python-solution-beat-100
# V1'''
# http://bookshadow.com/weblog/2017/11/13/leetcode-split-linked-list-in-parts/
# V1''''
# https://blog.csdn.net/fuxuemingzhu/article/details/79543931
# V1'''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : CREATE NEW LISTS
# time complexity : O(N+K)
# spce complexity : O(N,K)
# V1''''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : SPLIT INPUT LIST
# time complexity : O(N+K)
# spce complexity : O(K)
# V2
# Time: O(n + k)
# Space: O(1) | 29.845946 | 167 | 0.472698 |
fee526d6327eadfd2a1c6fc5732f854eab5a5bb2 | 1,645 | py | Python | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | 1 | 2020-11-19T23:41:28.000Z | 2020-11-19T23:41:28.000Z | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
"""
def ecdf(sorted_views):
for view, data in sorted_views.iteritems():
yvals = np.arange(len(data))/float(len(data))
plt.plot(data, yvals, label=view)
plt.grid(True)
plt.xlabel('jaccard')
plt.ylabel('CDF')
lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.savefig("ecdf.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
clear()
"""
#def ecdf_polished(sorted_views):
| 25.703125 | 78 | 0.6231 |
fee57ff8598ad386cc6460807e129b503a56f217 | 1,740 | py | Python | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | import pystim
bin_path = None # TODO correct.
vec_path = None # TODO correct.
trials_path = None # TODO correct.
stimulus = pystim.stimuli.flashed_images.load(bin_path, vec_path, trials_path)
print(stimulus.nb_frames)
print(stimulus.nb_diplays)
print(stimulus.nb_trials)
print(stimulus.nb_conditions)
print(stimulus.condition_nbs)
print(stimulus.condition_nbs_sequence)
# print(stimulus.nb_repetitions) # ill-defined?
print(stimulus.get_nb_repetitions(condition_nb))
print(stimulus.get_frame(display_nb))
print(stimulus.get_frame_by_display_nb(display_nb))
print(stimulus.get_nb_displays(trial_nb))
print(stimulus.get_display_nbs(trial_nb))
print(stimulus.get_nb_displays(condition_nb, condition_trial_nb))
print(stimulus.get_display_nbs(condition_nb, condition_trial_nb))
# Une condition c'est des paramtres et une (ou une suite) de binary frames.
# TODO stimulus doit permettre la gnration.
# TODO stimulus doit permettre de vrifier son intgrit.
# TODO stimulus doit faciliter l'analyse.
stimulus.get_trial_display_extend(trial_nb)
stimulus.get_trial_display_extend(condition_nb, condition_trial_nb)
stimulus.get_trial_display_extends(condition_nb)
condition = stimulus.get_condition(condition_nb) # une condition -> plusieurs trials, plusieurs displays
trial = stimulus.get_trial(trial_nb) # un trial -> une condition, plusieurs displays
display = stimulus.get_display(display_nb) # un display -> un trial, une condition
stimulus.get_display_nbs_extent(trial_nb)
stimulus.get_time_extent(trial_nb)
psr = response.get_peristimulus_responses(stimulus.get_trial_display_extends(condition_nb))
# Analyse.
# 1. Pour chaque enregistrement.
# a. Visualizer le taux de dcharge au cours temps (pour chaque neurone). | 34.8 | 105 | 0.820115 |
fee65bcaf5d8cc11fa9804e94169f7ab6dcff8da | 427 | py | Python | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | #### neeed to make sure google still work for sure
# this may have to run on non-python devs' boxes, try/catch an install of the requests lib to be SURE
try:
import requests
except:
import os
os.sys('easy_install pip')
os.sys('pip install requests')
import requests
#r = requests.get('http://www.google.com/')
r = requests.get('http://google.com')
if r.status_code = 200:
print "yep, it still there"
| 25.117647 | 101 | 0.683841 |
fee67822f155f266cc796b6f601f1860ad8b8823 | 4,760 | py | Python | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 298 | 2015-01-31T11:43:22.000Z | 2022-03-15T02:18:21.000Z | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 359 | 2015-01-17T16:56:42.000Z | 2022-02-08T05:27:08.000Z | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 109 | 2015-02-03T13:02:45.000Z | 2021-12-21T12:57:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 10.10 from Kane 1985."""
from __future__ import division
from sympy import expand, solve, symbols, sin, cos, S
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces, partial_velocities
from util import potential_energy
# Define generalized coordinates, speeds, and constants:
q0, q1, q2 = dynamicsymbols('q0:3')
q0d, q1d, q2d = dynamicsymbols('q0:3', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
LA, LB, LP = symbols('LA LB LP')
p1, p2, p3 = symbols('p1:4')
A1, A2, A3 = symbols('A1:4')
B1, B2, B3 = symbols('B1:4')
C1, C2, C3 = symbols('C1:4')
D11, D22, D33, D12, D23, D31 = symbols('D11 D22 D33 D12 D23 D31')
g, mA, mB, mC, mD, t = symbols('g mA mB mC mD t')
TA_star, TB_star, TC_star, TD_star = symbols('TA* TB* TC* TD*')
## --- reference frames ---
E = ReferenceFrame('E')
A = E.orientnew('A', 'Axis', [q0, E.x])
B = A.orientnew('B', 'Axis', [q1, A.y])
C = B.orientnew('C', 'Axis', [0, B.x])
D = C.orientnew('D', 'Axis', [0, C.x])
## --- points and their velocities ---
pO = Point('O')
pA_star = pO.locatenew('A*', LA * A.z)
pP = pO.locatenew('P', LP * A.z)
pB_star = pP.locatenew('B*', LB * B.z)
pC_star = pB_star.locatenew('C*', q2 * B.z)
pD_star = pC_star.locatenew('D*', p1 * B.x + p2 * B.y + p3 * B.z)
pO.set_vel(E, 0) # Point O is fixed in Reference Frame E
pA_star.v2pt_theory(pO, E, A) # Point A* is fixed in Reference Frame A
pP.v2pt_theory(pO, E, A) # Point P is fixed in Reference Frame A
pB_star.v2pt_theory(pP, E, B) # Point B* is fixed in Reference Frame B
# Point C* is moving in Reference Frame B
pC_star.set_vel(B, pC_star.pos_from(pB_star).diff(t, B))
pC_star.v1pt_theory(pB_star, E, B)
pD_star.set_vel(B, pC_star.vel(B)) # Point D* is fixed rel to Point C* in B
pD_star.v1pt_theory(pB_star, E, B) # Point D* is moving in Reference Frame B
# --- define central inertias and rigid bodies ---
IA = inertia(A, A1, A2, A3)
IB = inertia(B, B1, B2, B3)
IC = inertia(B, C1, C2, C3)
ID = inertia(B, D11, D22, D33, D12, D23, D31)
# inertia[0] is defined to be the central inertia for each rigid body
rbA = RigidBody('rbA', pA_star, A, mA, (IA, pA_star))
rbB = RigidBody('rbB', pB_star, B, mB, (IB, pB_star))
rbC = RigidBody('rbC', pC_star, C, mC, (IC, pC_star))
rbD = RigidBody('rbD', pD_star, D, mD, (ID, pD_star))
bodies = [rbA, rbB, rbC, rbD]
## --- generalized speeds ---
kde = [u1 - dot(A.ang_vel_in(E), A.x),
u2 - dot(B.ang_vel_in(A), B.y),
u3 - dot(pC_star.vel(B), B.z)]
kde_map = solve(kde, [q0d, q1d, q2d])
for k, v in kde_map.items():
kde_map[k.diff(t)] = v.diff(t)
# kinetic energy of robot arm E
K = sum(rb.kinetic_energy(E) for rb in bodies).subs(kde_map)
print('K = {0}'.format(msprint(K)))
# find potential energy contribution of the set of gravitational forces
forces = [(pA_star, -mA*g*E.x), (pB_star, -mB*g*E.x),
(pC_star, -mC*g*E.x), (pD_star, -mD*g*E.x)]
## --- define partial velocities ---
partials = partial_velocities([f[0] for f in forces],
[u1, u2, u3], E, kde_map)
## -- calculate generalized active forces ---
Fr, _ = generalized_active_forces(partials, forces)
V = potential_energy(Fr, [q0, q1, q2], [u1, u2, u3], kde_map)
#print('V = {0}'.format(msprint(V)))
print('\nSetting C = g*mD*p1, 1, 2, 3 = 0')
V = V.subs(dict(zip(symbols('C 1 2 3'), [g*mD*p1, 0, 0, 0] )))
print('V = {0}'.format(msprint(V)))
Z1 = u1 * cos(q1)
Z2 = u1 * sin(q1)
Z3 = -Z2 * u2
Z4 = Z1 * u2
Z5 = -LA * u1
Z6 = -(LP + LB*cos(q1))
Z7 = u2 * LB
Z8 = Z6 * u1
Z9 = LB + q2
Z10 = Z6 - q2*cos(q1)
Z11 = u2 * Z9
Z12 = Z10 * u1
Z13 = -sin(q1) * p2
Z14 = Z9 + p3
Z15 = Z10 + sin(q1)*p1 - cos(q1)*p3
Z16 = cos(q1) * p2
Z17 = Z13*u1 + Z14*u2
Z18 = Z15 * u1
Z19 = Z16*u1 - u2*p1 + u3
Z20 = u1 * Z5
Z21 = LB * sin(q1) * u2
Z22 = -Z2 * Z8
Z23 = Z21*u1 + Z2*Z7
Z24 = Z1*Z8 - u2*Z7
Z25 = Z21 - u3*cos(q1) + q2*sin(q1)*u2
Z26 = 2*u2*u3 - Z2*Z12
Z27 = Z25*u1 + Z2*Z11 - Z1*u3
Z28 = Z1*Z12 - u2*Z11
Z29 = -Z16 * u2
Z30 = Z25 + u2*(cos(q1)*p1 + sin(q1)*p3)
Z31 = Z13 * u2
Z32 = Z29*u1 + u2*(u3 + Z19) - Z2*Z18
Z33 = Z30*u1 + Z2*Z17 - Z1*Z19
Z34 = Z31*u1 + Z1*Z18 - u2*Z17
K_expected = S(1)/2*(A1*u1**2 + (B1 + C1)*Z1**2 + (B2 + C2)*u2**2 +
(B3 + C3)*Z2**2 + Z1*(D11*Z1 + D12*u2 + D31*Z2) +
u2*(D12*Z1 + D22*u2 + D23*Z2) +
Z2*(D31*Z1 + D23*u2 + D33*Z2) + mA*Z5**2 +
mB*(Z7**2 + Z8**2) + mC*(Z11**2 + Z12**2 + u3**2) +
mD*(Z17**2 + Z18**2 + Z19**2))
V_expected = g*((mB*LB + mC*Z9 + mD*Z14)*sin(q1) + mD*p1*cos(q1))
assert expand(K - K_expected) == 0
assert expand(V - V_expected) == 0
| 33.521127 | 76 | 0.602731 |
fee67e3507fde627d604b24556de9fa5e1ddebf0 | 1,179 | py | Python | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | 1 | 2022-02-16T01:24:17.000Z | 2022-02-16T01:24:17.000Z | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from src.view import PairwiseView
import numpy as np
| 35.727273 | 112 | 0.603053 |
feea04b5b8f70213610fd5b8726978dd6e62c7f1 | 1,013 | py | Python | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | # prompt user to enter how much they weigh in pounds
weight = int(input ("How much do you weigh (in pounds)? "))
# prompt user to enter their height in inches
height = int(input ("What is your height (in inches)? "))
# this converts weight to kilograms
weight_in_kg = weight / 2.2
# this converts height to centimeters
height_in_meter = height * 2.54 / 100
# this calculates BMI
bmi = round(weight_in_kg / (height_in_meter ** 2), 1)
if bmi <= 18.5:
print("Oh no, your BMI is", bmi, "which means you are underwewight. Eat some food!")
elif bmi > 18.5 and bmi < 25:
print('Congratulations! Your BMI is', bmi, 'which means you are in the normal range. Keep up the good work!')
elif bmi > 25 and bmi < 30:
print('Uh oh, your BMI is', bmi, 'which means you are overweight. Make healthy choices and exercise!')
elif bmi > 30:
print('Oh boy, your BMI is', bmi, 'which means you are obese. GO SEE YOUR DOCTOR~')
else:
print('Uh oh, something went wrong.')
| 31.65625 | 115 | 0.664363 |
feee07121fe76d5736e52eb5411adc869715e8db | 7,031 | py | Python | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | 1 | 2021-12-20T11:10:59.000Z | 2021-12-20T11:10:59.000Z | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | null | null | null | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | 1 | 2021-12-02T14:40:12.000Z | 2021-12-02T14:40:12.000Z | # Day9 - 2021 Advent of code
# source: https://adventofcode.com/2021/day/9
import os
import numpy as np
#let's start
if __name__ == '__main__':
clear_console()
start_the_engine() | 43.94375 | 114 | 0.54345 |
feee0df189f0b37958204462a48904755aa19b63 | 7,420 | py | Python | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from datetime import date, datetime
# Class handles commands related to console players
# Add this class to the cog list
def setup(bot):
    """Extension entry point used by discord.py's ``bot.load_extension``.

    Registers the ``ConsoleCommands`` cog on the supplied bot instance so its
    commands become available.
    """
    bot.add_cog(ConsoleCommands(bot))
| 51.172414 | 119 | 0.568329 |
feeebbc5a748ddb1157bf558ba36f40a432ef1a6 | 666 | py | Python | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 11 | 2018-04-22T20:34:53.000Z | 2022-03-12T12:02:47.000Z | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 3 | 2018-01-11T14:54:46.000Z | 2018-04-26T13:45:18.000Z | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 3 | 2019-05-14T13:36:14.000Z | 2020-09-02T16:13:57.000Z | R"""
try to get the worksheet name from a worksheet
run -pyf C:\Users\swharden\Documents\GitHub\PyOriginTools\documentation\demonstrations\abfFromWks.py
"""
import sys
if False:
# this code block will NEVER actually run
sys.path.append('../') # helps my IDE autocomplete
sys.path.append('../../') # helps my IDE autocomplete
sys.path.append('../../../') # helps my IDE autocomplete
import PyOriginTools as OR
import PyOrigin
if __name__=="__main__":
bookName,sheetName=OR.activeBookAndSheet()
worksheetPage=PyOrigin.WorksheetPages(bookName)
print(worksheetPage[0])
# for item in worksheetPage:
# print(item)
print("DONE") | 30.272727 | 100 | 0.711712 |
feef852c484bcfaf650545d694c36f762735f100 | 803 | py | Python | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | 5 | 2020-09-22T17:35:24.000Z | 2020-09-22T19:45:46.000Z | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1 on 2021-08-19 15:49
import django.db.models.deletion
from django.db import migrations, models
| 29.740741 | 176 | 0.595268 |
fef0f2eca41493ff175b1ce22f370a3502ed826a | 50 | py | Python | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | from .features import *
from .conditions import *
| 16.666667 | 25 | 0.76 |
fef10be702d297731f0eada02c3e9a2ec0107a0f | 5,932 | py | Python | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 7 | 2020-08-21T02:19:15.000Z | 2021-12-30T02:02:40.000Z | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 1 | 2021-04-21T13:50:53.000Z | 2021-04-25T02:34:48.000Z | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 1 | 2020-12-02T07:15:13.000Z | 2020-12-02T07:15:13.000Z | import numpy as np
import h5py
from datetime import datetime
from geopy.distance import distance
import argparse
import pickle
import json
import os
def coord_distance(coords):
    """Great-circle distance in meters between the first two points of *coords*.

    geopy.distance.distance expects (lat, lon) tuples, whereas this dataset
    stores points as [lon, lat], so each pair is swapped before the call.
    """
    first, second = coords[0], coords[1]
    return distance((first[1], first[0]), (second[1], second[0])).meters
parser = argparse.ArgumentParser(description="extral trajectory's temporal related feature")
parser.add_argument("-region_name", type=str, default="region_porto_top100", help="")
args = parser.parse_args()
if __name__ == "__main__":
    # Temporal/statistical trajectory features to extract for each trip.
    selected_feature = ['time_of_day', 'day_of_week', 'avg_speed', 'max_speed', 'trip_distance', 'trip_time']
    # Per-region experiment configuration (file paths, city name, ...).
    with open('../hyper-parameters.json', 'r') as f:
        hyper_param = json.loads(f.read())
    # Per-region normalization statistics for the selected features.
    with open('normalize_param.json', 'r') as f:
        norm_param = json.loads(f.read())
    # NOTE(review): TestedFeatureExtractor and get_saved_path are defined
    # outside this excerpt -- presumably the extractor reads trajectories from
    # the region's HDF5 files and writes feature files to the returned paths.
    feature_extractor = TestedFeatureExtractor(selected_feature, norm_param[args.region_name])
    train_h5_path = hyper_param[args.region_name]['filepath']
    test_h5_path = hyper_param[args.region_name]['testpath']
    # Extract features for both the train and test splits of the region.
    feature_extractor.extract_from_h5(train_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'train'))
    feature_extractor.extract_from_h5(test_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'test'))
fef114610ec0d475191a1220ffe83885004935bc | 2,545 | py | Python | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | from clawpack.petclaw.solution import Solution
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pl
from matplotlib import rc
import numpy as np
import os
#
| 27.074468 | 98 | 0.574853 |
fef15a29a302098c87559c64e7c95311ad1af7bc | 2,285 | py | Python | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | 1 | 2020-06-08T14:06:36.000Z | 2020-06-08T14:06:36.000Z | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | null | null | null | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | null | null | null | import torch
from typing import List
def kl_div(mu, sigma):
    """
    KL-divergence between a diagonal multivariate normal,
    and a standard normal distribution (with zero mean and unit variance)
    """
    variance = sigma.pow(2)
    return 0.5 * torch.mean(mu.pow(2) + variance - torch.log(variance) - 1.0)
def kld_gaussian(mu, log_sigma, nu=0.0, rho=1.0):
    """
    KL-divergence between a diagonal multivariate normal,
    and a standard normal distribution

    N(mu, exp(log_sigma)^2) is compared against N(nu, rho^2); with the
    defaults nu=0, rho=1 the reference is the standard normal.

    NOTE(review): compared with the closed-form diagonal-Gaussian KL,
    ``mean_term`` divides by ``rho`` rather than ``rho ** 2``, and the
    constant subtracted is 1.0 rather than the number of dimensions.
    Both coincide with the textbook formula only for rho == 1 (the
    default), and only up to an additive constant -- confirm whether
    that is intentional before using a non-unit ``rho``.
    """
    device = mu.device
    # Lift the python scalar hyper-parameters onto the inputs' device.
    nu = torch.as_tensor(nu, device=device)
    rho = torch.as_tensor(rho, device=device)
    # log(sigma^2 / rho^2)
    delta_variance = 2.0 * (log_sigma - torch.log(rho))
    # sum of sigma^2/rho^2 - log(sigma^2/rho^2)
    variance_term = torch.sum(torch.exp(delta_variance) - delta_variance)
    mean_term = torch.sum((mu - nu) ** 2 / rho)
    return 0.5 * (mean_term + variance_term - 1.0)
| 31.736111 | 122 | 0.617068 |
fef388e9c0a8cc5d31503d18e82095b931d385f7 | 13,762 | py | Python | main.py | ooshyun/filterdesign | 59dbea191b8cd44aa9f2d02d3787b5805d486ae2 | [
"MIT"
] | 1 | 2021-12-27T00:38:32.000Z | 2021-12-27T00:38:32.000Z | main.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | main.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | import os
import json
import numpy as np
from numpy import log10, pi, sqrt
import scipy.io.wavfile as wav
from scipy.fftpack import *
from src import (
FilterAnalyzePlot,
WaveProcessor,
ParametricEqualizer,
GraphicalEqualizer,
cvt_char2num,
maker_logger,
DEBUG,
)
if DEBUG:
PRINTER = maker_logger()
LIBRARY_PATH = "./" # First of all, it need to set the library(or this project) path
def filter_process():
    """Comparison between time domain and frequency domain using WavProcessor class

    Applies the same +6 dB peaking boost at ~1033.6 Hz to a white-noise wav
    twice -- once as an IIR biquad in the time domain, once as a per-bin gain
    in the frequency domain -- writes both results, and prints the average
    per-call processing time of each path when it was recorded.
    """
    from src import peaking, shelf
    data_path = LIBRARY_PATH + "/test/data/wav/"
    file_name = "White Noise.wav"
    outfile_path = LIBRARY_PATH + "/test/result/wav/"
    infile_path = os.path.join(data_path, file_name)
    fs, data = wav.read(infile_path)
    gain = 6
    fc = 1033.59375
    # time-domain path: biquad peaking filter (RBJ form) applied per sample
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    outfile_name = "White Noise_peak_time_domain.wav"
    peak_filter = peaking(Wn=2 * fc / fs, Q=1 / np.sqrt(2), dBgain=gain)
    wave_processor.filter_time_domain_list = peak_filter
    wave_processor.run(savefile_path=outfile_path + outfile_name)
    if len(wave_processor.time_filter_time) != 0:
        # average elapsed time per time-domain filter call
        print(
            sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
        )
    # frequency-domain path: multiply the FFT bin at fc by the linear gain
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    outfile_name = "White Noise_peaking_freq_domain.wav"
    fft_size = 256 # it should be designed before running
    # fft_band holds the center frequency of each positive-frequency bin;
    # fc was chosen to land exactly on one of them (bin 6 at fs=44100).
    fft_band = np.arange(1, fft_size // 2 + 1) * fs / fft_size
    coeff_frequency = np.ones(shape=(fft_size // 2 + 1,))
    coeff_frequency[np.argwhere(fft_band == fc)] = 10 ** (gain / 20)
    wave_processor.filter_freq_domain_list = coeff_frequency
    wave_processor.run(savefile_path=outfile_path + outfile_name)
    if len(wave_processor.time_filter_freq) != 0:
        # average elapsed time per frequency-domain filter call
        print(
            sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
        )
def serial_equalizer_plot():
"""Test frequency response for IIR filter cascade
"""
from src import peaking
data_path = LIBRARY_PATH + "/test/data/wav/"
infile_path = os.path.join(data_path, "White Noise.wav")
fs, _ = wav.read(infile_path)
ploter = FilterAnalyzePlot()
parametric_filter = ParametricEqualizer(fs)
fc_band = np.array([1000, 4000, 8000])
for f in fc_band:
peak_filter = peaking(Wn=2 * f / fs, dBgain=6, Q=4)
parametric_filter.coeff = peak_filter
ploter.filters = parametric_filter
ploter.plot(type=["freq", "phase", "pole"])
def serial_equalizer_process():
"""Test processing to wav for IIR filter cascade
"""
from src import peaking
data_path = LIBRARY_PATH + "/test/data/wav/"
result_path = LIBRARY_PATH + "/test/result/wav/"
infile_path = os.path.join(data_path, "White Noise.wav")
fs, _ = wav.read(infile_path)
wave_processor = WaveProcessor(wavfile_path=infile_path)
fc_band = np.array([1000, 4000, 8000])
for f in fc_band:
peak_filter = peaking(Wn=2 * f / fs, dBgain=12, Q=4)
b, a = peak_filter
wave_processor.filter_time_domain_list = b, a
# wave_processor.graphical_equalizer = True
wave_processor.run(
savefile_path=result_path + "/whitenoise_3peak_250_2000_8000.wav"
)
if len(wave_processor.time_filter_freq) != 0:
print(
sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
)
if len(wave_processor.time_filter_time) != 0:
print(
sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
)
def generator_test_vector_grahpical_equalizer():
"""Generate test vector for parallel strucuture equalizer called graphical equalizer
"""
sample_rate = 44100
# cuf-off freuqency case 1
cutoff_frequency = np.array(
(
20,
25,
31.5,
40,
50,
63,
80,
100,
125,
160,
200,
250,
315,
400,
500,
630,
800,
1000,
1250,
1600,
2000,
2500,
3150,
4000,
5000,
6300,
8000,
10000,
12500,
16000,
20000,
)
)
# gain
num_case = 5
test_gain_list = np.zeros(shape=(num_case, len(cutoff_frequency)))
# case 1
test_gain_list[0, :] = np.array(
[
12,
12,
10,
8,
4,
1,
0.5,
0,
0,
6,
6,
12,
6,
6,
-12,
12,
-12,
-12,
-12,
-12,
0,
0,
0,
0,
-3,
-6,
-9,
-12,
0,
0,
0,
]
)
# case 2
test_gain_list[1, 0::2] = 12
test_gain_list[1, 1::2] = -12
# case 3
test_gain_list[2, np.where(cutoff_frequency == 2000)] = 12
# case 4
test_gain_list[3, :] = np.ones_like(cutoff_frequency) * 12
# case 5
test_gain_list[4, 0::3] = 0
test_gain_list[4, 1::3] = 0
test_gain_list[4, 2::3] = 12
# cut-off frequency case 2, cutoff frequency with bandwith
f_bandwidth = np.array(
[
2.3,
2.9,
3.6,
4.6,
5.8,
7.3,
9.3,
11.6,
14.5,
18.5,
23.0,
28.9,
36.5,
46.3,
57.9,
72.9,
92.6,
116,
145,
185,
232,
290,
365,
463,
579,
730,
926,
1158,
1447,
1853,
2316,
]
)
f_upperband = np.array(
[
22.4,
28.2,
35.5,
44.7,
56.2,
70.8,
89.1,
112,
141,
178,
224,
282,
355,
447,
562,
708,
891,
1120,
1410,
1780,
2240,
2820,
3550,
4470,
5620,
7080,
8910,
11200,
14100,
17800,
22050,
]
)
f_lowerband = np.zeros_like(f_upperband)
f_lowerband[0] = 17.5
f_lowerband[1:] = f_upperband[:-1]
cutoff_frequency_bandwidth = np.zeros((2, len(cutoff_frequency)))
cutoff_frequency_bandwidth[0, :] = np.append(10, f_upperband[:-1])
cutoff_frequency_bandwidth[1, :] = cutoff_frequency
cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.reshape(
(cutoff_frequency_bandwidth.shape[0] * cutoff_frequency_bandwidth.shape[1],),
order="F",
)
test_gain_bandwidth_list = np.zeros(
shape=(num_case, cutoff_frequency_bandwidth.shape[0])
)
for id_test_gain, test_gain in enumerate(test_gain_list):
buf_test_gain = np.zeros((2, len(cutoff_frequency)))
buf_test_gain[0, :] = test_gain
buf_test_gain[1, :] = test_gain
buf_test_gain = buf_test_gain.reshape(
(buf_test_gain.shape[0] * buf_test_gain.shape[1],), order="F"
)
buf_test_gain[1:] = buf_test_gain[:-1]
buf_test_gain[0] = 0
test_gain_bandwidth_list[id_test_gain, :] = buf_test_gain[:]
cutoff_frequency = cutoff_frequency.tolist()
test_gain_list = test_gain_list.tolist()
cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.tolist()
test_gain_bandwidth_list = test_gain_bandwidth_list.tolist()
test_vector_graphical_equalizer = json.dumps(
{
"1": {
"sample_rate": sample_rate,
"cutoff_frequency": cutoff_frequency,
"test_gain": test_gain_list,
},
"2": {
"sample_rate": sample_rate,
"cutoff_frequency": cutoff_frequency_bandwidth,
"test_gain": test_gain_bandwidth_list,
},
},
indent=4,
)
with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "w") as f:
f.write(test_vector_graphical_equalizer)
if __name__ == "__main__":
PRINTER.info("Hello Digital Signal Processing World!")
"""Single filter design"""
filter_plot()
filter_process()
"""Serial structure of filters design"""
serial_equalizer_plot()
serial_equalizer_process()
"""Parallel structure of filters design"""
generator_test_vector_grahpical_equalizer()
parallel_equalizer_plot()
parallel_equalizer_wav_process()
""" Analyze filter"""
analyze_filter()
pass
| 26.113852 | 88 | 0.575861 |
fef4b3fa8786cd370700430b9b9414a5a831d2bf | 3,322 | py | Python | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
import json
from datetime import datetime
from alfred import *
TIMESTAMP_SEC_RE = r'^\d{10}$' # 1643372599
TIMESTAMP_MSEC_RE = r'^\d{13}$' # 1643372599000
# 2022-01-28 10:00:00
DATETIME_LONG_STR = r'^[1-9]\d{3}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$'
DATETIME_SHORT_STR = r'^[1-9]\d{13}$' # 20220128100000
if __name__ == '__main__':
    # Alfred passes the user's query as argv; at most two tokens are accepted
    # (e.g. a date plus a time), anything longer aborts the workflow.
    input_args = sys.argv[1:]
    if len(input_args) > 2:
        exit(1)
    input_arg = ' '.join(input_args)
    alfred_result = list()
    # NOTE(review): judge_now / judge_input are defined outside this excerpt;
    # presumably they convert between timestamps and datetime strings and
    # return Alfred result item dicts.
    if input_arg == 'now':
        alfred_result.extend(judge_now())
    else:
        alfred_result.append(judge_input(input_arg))
    # Emit the Alfred Script Filter JSON payload on stdout.
    print(json.dumps({"items": alfred_result}))
| 27.454545 | 84 | 0.609874 |
fef4d3e2153fde18995213ace718d0a7d41c56ac | 55 | py | Python | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | import ursina
app = ursina.Ursina(init_showbase=True)
| 13.75 | 39 | 0.8 |
fef5faa5a487c2ba4ddeb8aafe0c3838370c774b | 14,598 | py | Python | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | null | null | null | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 1 | 2022-03-15T06:55:48.000Z | 2022-03-15T15:38:20.000Z | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 2 | 2022-02-09T21:30:57.000Z | 2022-03-15T06:19:57.000Z | import logging
from functools import wraps
import psutil
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ForceReply, ParseMode
from telegram.ext import CommandHandler, CallbackQueryHandler, MessageHandler, Filters
from ravager.bot.helpers.constants import *
from ravager.bot.helpers.timeout import ConversationTimeout
from ravager.config import MAX_TASKS_PER_USER, STORAGE_TIME, STORAGE_SIZE, GROUP_PASSWORD, USER_PASSWORD, ALLOWLIST, \
DOWNLOAD_DIR, LOGS_DIR,HEROKU_APP,HEROKU_API_TOKEN
from ravager.database.helpers.structs import UserStruct
from ravager.database.users import UserData
from ravager.helpers.humanize import humanize
logger = logging.getLogger(__file__)
HANDLE_ADMIN_PANEL, LIMITS_PANEL, FILTERS_PANEL, SYS_INFO_PANEL, LOGS_HANDLER = range(5)
limits_panel_text = "*Limits Configuration:*\
\nDownload storage size: *{}* GB\
\nDownload storage time: *{}* Hrs\n"
filter_panel_text = "*Filters and User Configuration:*\
\nFilters Enabled: *{}*\nGroup chat password: *{}*\
\nPrivate chat password: *{}*"
sys_info_text = "*System Information*\
\n*Cpu Usage Percent:* {}%\
\n*Used Ram:* {} {}\
\n*Available Ram:* {} {}\
\n*Network Ingress:* {} {}\
\n*Network Egress:* {} {}\
\n*Total Disk Space:* {} {}\
\n*Total Disk Space Available: *{} {}"
def system_options(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
if selection_option == "sys_info":
psutil.cpu_percent(interval=0.1)
cpu_percent = psutil.cpu_percent(interval=0.1)
mem = psutil.virtual_memory()
disk_usage = psutil.disk_usage(str(DOWNLOAD_DIR))
net = psutil.net_io_counters(pernic=False, nowrap=True)
used_mem = humanize(mem.used)
available_mem = humanize(mem.available)
bytes_sent = humanize(net.bytes_sent)
bytes_recvd = humanize(net.bytes_recv)
total_disk_space = humanize(disk_usage.total)
total_free_space = humanize(disk_usage.free)
text = sys_info_text.format(cpu_percent, used_mem.size, used_mem.unit, available_mem.size, available_mem.unit,
bytes_recvd.size, bytes_recvd.unit, bytes_sent.size, bytes_sent.unit,
total_disk_space.size, total_disk_space.unit, total_free_space.size,
total_free_space.unit)
update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=self.last_step_btns(prev_menu="admin|admin_sys_info"))
return SYS_INFO_PANEL
if selection_option == "logs":
update.callback_query.edit_message_text(text="*Get yo logs*", parse_mode=ParseMode.MARKDOWN,
reply_markup=self.logs_panel())
return LOGS_HANDLER
| 50.164948 | 140 | 0.644746 |
fef71fd2689cde39a6617bb13c2101fc8e715b36 | 10,004 | py | Python | logo_rc.py | idocx/WHULibSeatReservation | 198fc62910a7937cc654069eb2f3fbf44b6e6f1d | [
"MIT"
] | 14 | 2019-02-24T01:53:37.000Z | 2021-03-27T02:21:24.000Z | logo_rc.py | Linqiaosong/WHULibSeatReservation | da89e1d3db920d41d6d74b3f83f8cdebad305457 | [
"MIT"
] | 3 | 2019-06-11T03:31:49.000Z | 2021-04-12T02:58:50.000Z | logo_rc.py | Linqiaosong/WHULibSeatReservation | da89e1d3db920d41d6d74b3f83f8cdebad305457 | [
"MIT"
] | 7 | 2019-06-06T17:31:27.000Z | 2020-11-08T13:03:49.000Z | #############################################################
# .doc
# Githubhttps://github.com/idocx/WHULibSeatReservation
#############################################################
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x03\xac\
\x00\
\x00\x67\xf6\x78\x9c\xed\x9c\x5b\x48\x53\x71\x1c\xc7\x7f\x53\xb3\
\xec\xa2\x54\x9a\x94\x3a\xcd\x6e\x56\xda\x7c\x90\xd2\x53\x82\x98\
\x46\x45\xbd\x04\xd1\xed\xa1\x1b\x58\x42\x85\x92\x97\x0a\xc1\xa0\
\x07\xbb\x88\x45\x35\xb5\x42\xc9\x9c\x36\x7c\x2b\x8d\x56\x91\x33\
\xba\x50\x10\x95\x1a\xb3\xcc\x62\xe5\x8b\xa2\xdb\xc4\x28\x0b\xb5\
\x7f\x67\x8a\x10\xd6\xff\x9c\xad\x79\x9c\xe2\xf7\x0b\x1f\x7e\xdb\
\x61\x6c\xdf\xcf\x39\x8f\xfb\x9d\x43\xa4\x22\x0f\x8a\x8e\x26\x31\
\x61\xa4\x5f\x44\xb4\x58\x7c\x95\x90\x30\xf8\x3e\x22\x91\xe8\x95\
\x78\x2c\x42\x7c\x67\xff\xc8\x1e\x11\x95\x78\x7c\x20\xeb\x09\x41\
\xdc\x96\x88\x07\x6d\x51\x2b\x8c\xb6\xf3\x4a\x42\x2f\xd9\x24\xa5\
\xfa\x6b\x1e\x76\xad\x8b\x36\xda\xcc\x4a\x12\x5c\xd5\xea\x83\xfe\
\xe8\x8f\xfe\xe8\x8f\xfe\xe8\x8f\xfe\xe8\x3f\x5e\xfa\x47\x54\xb6\
\x7e\x99\x11\x7f\xbb\xdb\x15\xc8\x53\x7b\x93\x48\xab\xe3\x73\xb9\
\x88\x28\xd7\x43\x89\xfe\x4b\xca\x3e\xb7\x7a\xab\x75\x3f\x5d\x81\
\x54\x85\x0d\x44\x85\xf5\x7c\xb4\x8f\xd0\x7f\xec\xf5\x67\x5b\xc9\
\xd3\x94\x19\xd7\x62\xca\x12\xac\x3c\x2a\xce\x1d\xe9\x4e\x32\xb4\
\xf4\xf1\x88\x2b\xad\xef\x9b\x16\xaa\xfb\xc5\x63\x4a\x88\xae\x57\
\xa9\xfe\xc6\x04\xf2\x32\x65\x0b\x1d\x4d\xd9\xab\x19\x0f\xfd\xb9\
\xc3\x2c\xd9\xd0\xcc\x45\x28\x7d\xc3\xa6\x87\xe9\xb8\xf8\xa8\xcb\
\xfb\xd0\x1f\xfd\xd1\x1f\xfd\xff\xa7\xff\xfd\x53\xdb\x59\x6e\xc9\
\x55\x2e\x87\xf2\x8a\xd9\x9c\x20\x3e\xbe\x81\x57\xbf\x79\xcc\x2e\
\x6d\x93\x42\xba\xbb\x6b\xfd\xe5\xb8\x97\xb2\x8e\x2d\x0e\xc8\xe3\
\x32\xd7\x37\xdf\x22\xdf\x0f\xfd\xd1\x1f\xfd\xd1\x1f\xfd\xd1\x1f\
\xfd\xd1\x1f\xfd\x47\xab\xbf\x31\x75\x2d\xdb\xb4\x34\x93\xcb\x2a\
\x75\xce\x47\xf1\xf7\x6f\xb8\x88\x56\xa9\xfe\xb2\x64\x09\x37\x86\
\xff\xee\x48\x65\xbc\xf7\xaf\xda\x4a\x9e\xe2\xf7\x3f\x35\x65\x09\
\x26\xe5\x88\x3d\xad\x54\x7f\x04\xf9\x9f\xb0\x81\xfc\x63\x76\x11\
\xf9\x8d\xd2\xa4\xc9\xfc\x69\xa6\x71\x37\xfd\xec\xe7\x6f\xf8\xe4\
\x9d\x67\xfb\x9e\x56\x82\xc8\x49\xfa\x63\x4f\xeb\xa4\x73\xd7\x10\
\x41\x10\x84\x97\x65\x0f\x3b\x57\x46\xd5\x5a\x0e\x68\x8c\xd6\x83\
\xee\x20\xaa\xae\x73\x2f\x55\x31\x6f\x77\xf9\xaf\x30\xda\x32\x34\
\x75\xb6\x4f\x4a\xff\xf7\xce\x43\x53\x6b\x69\x08\x7f\x60\xf5\x83\
\x3f\xfc\xe1\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\x0f\x7f\
\xf8\xc3\x1f\xfe\xee\xf7\xd7\xd4\x5a\xcd\x91\x86\x0e\x73\x64\x4d\
\xfb\xe7\x91\x64\x79\x4d\x7b\xa3\xff\xbe\x27\xf3\x88\xb4\xd3\x1d\
\xa3\xc0\xbe\xc3\xaf\x1a\x6d\xff\xa8\x7b\x9d\x66\xff\x5d\x75\x96\
\xc9\x0b\x2b\x7b\x46\x12\xef\xf9\x15\x5f\x49\x55\xf8\x5c\x74\x7b\
\xe6\x20\x65\x44\x97\xe6\xb9\xc5\x7f\x5b\x9d\xd5\xd5\x1d\xf4\xe1\
\x4c\x0a\x2e\xff\x4e\xaa\xa2\x46\x27\xfe\xe7\xae\x22\xba\x18\x0c\
\x7f\xf8\xc3\xff\xef\x34\xa5\xc5\x86\x99\xb2\xe3\x1e\x37\x65\x0b\
\x5d\x8e\x90\x77\xe5\xc2\xf7\x24\xc3\x87\x5e\xa9\x7b\x37\xec\x24\
\x56\x37\xf7\x85\xee\x30\xf4\x4b\xdd\xc3\x31\x84\x4f\x88\xfc\x2e\
\xb5\x52\xfe\x0d\x69\x31\xe1\xa2\xff\x1b\x47\xf7\x26\xce\x16\x17\
\xb0\xe4\xbb\xef\x25\x77\xc7\xed\xac\xad\x7e\xc7\xe6\xef\x34\x48\
\xee\x90\x0f\x31\x55\x5d\xde\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\
\xfc\xc7\xae\xff\xb5\x0b\x27\x58\xca\xcd\x1a\x96\xa2\xbf\x23\xc9\
\xfe\xf2\x1a\x16\xbd\xa5\x82\xcd\x0c\x29\x91\xc5\x37\xa8\xa4\xd7\
\x2b\xb0\xcc\xe6\x08\x9e\x01\xd7\x2d\xf2\xf7\x95\x29\xe7\xff\xfa\
\x78\x22\x7b\x91\xb3\x5e\x64\x83\x24\x8f\x8f\x6d\x64\xfb\xd6\xa4\
\xb1\x05\x01\x67\x64\x51\xcf\x3c\xfb\x83\x3c\x8a\xde\x3a\x84\x73\
\xd7\x7e\xc4\xfd\x1d\xa5\xfe\x68\x3c\x4b\x15\x52\x25\x77\xb8\x87\
\x08\x9b\x75\xba\xc7\x49\x27\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\
\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\x0f\x7f\xb7\xfb\x37\
\x66\xac\x61\xf9\x9b\x77\xb0\xdd\x31\x87\x65\xd9\xa6\x49\xb3\x10\
\x69\xf3\x15\x22\x9d\xa8\x60\xd6\x68\xfb\x3b\xc3\xbb\x2c\xa1\x99\
\xd7\x4f\xe9\xc0\x7f\x62\xfb\xbf\x4f\x8f\x0b\x32\x65\x0a\x7a\x91\
\x57\x6e\x23\x23\xf6\x96\xbb\xfc\x11\x04\x99\x58\x61\x0e\x67\xe8\
\xd3\x63\x61\x0e\x3e\xfb\x40\x6a\x76\xd1\x84\x98\xa1\x03\x8f\xb3\
\x10\xcf\x0a\x6f\xca\xe5\x37\xae\x47\xc0\x77\
\x00\x00\x03\xf7\
\x00\
\x00\x4e\x25\x78\x9c\xed\x9c\xef\x6f\x53\x55\x18\xc7\x4f\xb7\x75\
\xea\x2e\x1b\x6f\x08\x6b\x89\x64\x13\x0a\x84\x4c\xbb\xb6\x6a\x34\
\x22\xb9\x76\xd9\x1c\x77\x81\xd6\x0c\xcc\x24\x2e\xb6\xd8\x0a\x66\
\xbc\xd8\x4c\x11\x67\xd0\xde\xae\x64\x29\x66\x35\x75\x6d\x1c\x95\
\x00\x45\x19\x12\x34\x76\x59\x96\x69\x8a\x26\xb7\xd8\x50\x41\xcb\
\x0a\x66\x42\x25\xe2\x6d\x21\x6b\xf6\x23\xd8\xcd\x6d\xf4\xc7\xee\
\xbd\xc7\xb3\xfa\x4e\x5f\x99\x98\x88\xc9\x73\xf3\xdc\x9c\xef\xb9\
\x27\x9f\xcf\x79\xce\x1f\x70\xee\x7b\x2f\x18\x9a\x2b\x2b\xd6\x54\
\x20\x84\x2a\x99\x6d\x8d\xad\x08\xc9\x6d\xe4\xb5\x3e\x58\x4e\xbe\
\x0c\x60\x77\x2d\x19\x64\xb6\xd6\xe6\x06\x14\x1c\x7b\x78\x8a\x4c\
\xca\xf6\xe9\x77\xe8\x11\x1a\xf6\x50\xc2\x1e\x39\x99\x3f\xd4\xb5\
\x6d\x37\x21\xd6\xba\x96\x5f\x99\x67\x62\xee\x35\x84\x76\x9c\x67\
\x1a\xf5\xbb\xba\x5f\xb9\x9b\xe0\xa8\x30\x3e\x78\x23\x5b\xbf\x7b\
\xfb\x85\xed\xce\x0f\xae\x7f\xf2\xc6\xe6\x37\x3b\xea\xab\x9c\x4e\
\x67\xf2\xe6\xc7\xbe\xc3\xe3\x8f\xf4\x1d\x98\x5c\x17\xd5\x0f\xdf\
\x9a\x1e\xac\x3d\x62\xae\xa8\xda\x78\x60\x32\x71\xe6\x34\x5d\x7b\
\xb5\xa9\x4c\xdb\x44\x0d\x34\x50\x55\x6b\xd7\xa9\xa7\x92\xa3\x3d\
\x5b\x2e\x4f\x3c\xba\x18\x79\xf7\x64\xf6\xed\x67\x53\x73\xf4\xf1\
\xf9\x82\xde\xab\x62\xa0\xa0\xee\xff\x2a\x9c\xa7\xa5\x36\xe9\x62\
\x1c\x5f\x33\xe5\xaf\xd0\x32\x84\x84\x55\x6c\x2a\x20\xf5\x07\x8e\
\x8f\x91\xd5\x1f\xe3\x5f\xd2\x52\x1d\x97\x7f\x40\x4a\x98\x04\xde\
\xbe\x82\x2c\xef\xc2\xb7\x37\x9d\x1d\x21\xec\xc2\xc8\x15\x3f\x85\
\xd0\x7a\x4b\x90\x4c\xd6\x28\x6d\x32\x64\xb6\xc6\x3e\x54\x31\xad\
\xeb\x2d\x6a\x84\xdc\xee\x10\xe1\x3b\xac\x31\x39\x72\xf4\x6b\xdb\
\x48\x7e\xdf\x1d\x42\x48\xa3\x4b\xb7\x78\x55\xe7\xfa\xb5\xd5\x08\
\x3d\x1f\x6d\x27\x1d\x3c\xa9\x4b\x97\xa0\xf0\x25\x7f\x1d\xe3\x1d\
\x24\x5f\x10\x52\x28\x6d\x1b\x18\xef\xf8\x25\x90\x83\x1c\xe4\xff\
\x7b\xb9\xe3\x53\x3a\xcf\x0b\x51\x0f\xee\xa3\x97\x68\x19\xbd\x9f\
\x4d\xdf\xc0\x19\x6e\x31\xb2\x92\x55\xa0\x6e\x7e\x48\x9a\x8d\xd4\
\x0b\x56\x9d\xef\x18\x31\x9c\x9a\x32\xe2\xa3\xec\xfc\x77\x58\xcc\
\x88\x46\x47\x60\x98\x2b\x1c\x92\x92\x9d\x98\x8d\xdf\xa2\xcb\x91\
\x21\x17\xa1\x3f\x8a\x0d\xee\x25\x0d\xbc\xae\x4b\x37\x1c\x26\xfd\
\x19\xb4\xe9\xcf\x4e\x90\xf1\x98\x3b\x74\xd5\x4c\x76\xda\xe9\x0e\
\x75\xec\x23\xe3\x46\x8b\xfa\x09\x85\x4d\x96\x5c\x65\x09\xde\x5c\
\xd6\x5e\xf6\x53\x95\x04\x7b\xfa\xa2\xff\xf3\x83\x80\x03\x0e\x38\
\xe0\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\
\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\xf7\x15\xde\xfb\xb2\x34\
\x6e\x2a\x34\xd3\xd2\x04\x57\xa0\x4b\x29\x21\x8f\xa7\xeb\x71\x6e\
\x48\xbc\x5d\x83\xec\x51\xa9\x90\x11\x3d\x98\xc7\x29\x3a\x17\x62\
\xab\xbe\xb9\xd3\x89\x85\xb8\x38\x12\xd0\x25\x91\x50\x55\xdc\x2c\
\x35\xc0\x2b\x8f\x48\x79\xfc\xfb\x59\x87\xdc\x7e\x87\x13\xe3\xac\
\xb0\x9f\x5b\xa0\xc2\xf1\x04\x2f\x9e\xe1\xc5\x4e\xbb\x98\xc5\x42\
\x67\x69\x26\x37\xca\x67\xbf\x96\x16\x8d\xca\x2e\x64\x77\x69\xdb\
\xee\xba\x43\x5d\x7d\xda\xea\x7b\x9a\x62\x67\xbf\x5a\x62\xf2\xf2\
\x68\xfb\xe8\xe3\xe9\x6f\x1f\x8b\xb6\x27\x22\xc5\xf6\xbb\x3d\xda\
\x6a\x85\xc2\xf6\xd3\x98\x9f\xfa\x4a\x61\xdb\xb3\xb2\x78\xc6\xd4\
\xd6\x68\xbb\xd9\x1c\x7b\x71\x8b\x45\xfd\xcb\xab\xb1\xde\xad\xc5\
\x93\xf4\x66\x15\xb6\x9e\x1e\x90\x82\x14\xa4\x20\x05\x29\x48\x41\
\x0a\x52\x90\x82\x14\xa4\x20\x05\x29\x48\x41\x0a\x52\x90\xfe\xbb\
\xd2\x0d\xcc\x09\xfb\x4c\x8d\x30\xe9\x11\xae\xf3\x4b\xd7\xb0\x6c\
\x41\xf4\xe1\xdf\x8e\x4a\x13\xc6\x43\xcf\xc9\x1c\x98\x75\x87\xc2\
\x61\x7f\x1d\x33\x6d\x2d\xee\x80\x66\x2b\x2d\x6a\x8d\x26\xdd\xe2\
\x7d\x46\x59\xec\x03\xbd\x43\x64\x2b\xdc\x21\xef\xa0\xe1\x4f\x31\
\xe2\x35\xe9\x92\x52\x4b\x50\x75\xce\x57\xdc\xbe\x24\xfc\x14\x28\
\x40\x01\x0a\x50\x80\x02\x14\xa0\xf8\x8b\x42\x8a\x73\xf3\x01\xc9\
\x84\x67\x58\xb1\x46\xa4\x51\x98\x33\x71\x73\x3f\x70\x39\x0e\x7f\
\x51\x63\xcf\xd9\x10\x0e\x9a\xc4\x94\x0b\xcf\x6e\xca\x8c\xa8\x18\
\x7a\x69\xf9\x12\xce\xec\x69\x36\xe7\x9b\x7f\xc9\xab\x62\xa6\xf7\
\xc6\xe4\x08\x15\x2f\xd9\xac\xb6\x42\x84\x08\x11\x22\x44\x88\x10\
\x21\x42\x84\x08\x11\x22\x44\x88\x10\x21\xfe\x3d\x2e\x0c\x49\x12\
\x7b\xcf\x65\x9f\xc9\xe0\x49\xb6\x0c\x39\x22\x52\xd4\x38\x66\xf9\
\xef\x7f\x3b\x04\x05\xf5\x8f\xaa\xf0\xd6\x52\xca\xc5\x52\xc3\x3f\
\xd3\xab\x5b\x2e\x9c\xfa\x1e\x91\x87\x69\x32\x34\x06\x1b\xcc\xce\
\x3f\x00\x9c\xbc\xe1\x52\
"
qt_resource_name = b"\
\x00\x04\
\x00\x05\x13\xbf\
\x00\x4c\
\x00\x4f\x00\x47\x00\x4f\
\x00\x08\
\x05\xe2\x41\xff\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x69\x00\x63\x00\x6f\
\x00\x08\
\x05\xe2\x59\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x0e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x03\xb0\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6c\x4d\xe3\x39\x93\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x03\xb0\
\x00\x00\x01\x6c\x42\xbf\x46\x67\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
qInitResources()
| 53.784946 | 104 | 0.703419 |
fef8828761203757d50e9784d410fa779ff9303d | 563 | py | Python | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | import random
import six.moves.urllib.parse as urlparse
| 20.107143 | 54 | 0.614565 |
fef8bcaaac0327ab05b3750bfd80e03d8695818d | 2,745 | py | Python | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | # 9.1. Putting a Wrapper Around a Function
#region
# import time
# from functools import wraps
# def timethis(func):
# '''
# Decorator that reports the execution time.
# '''
# @wraps(func)
# def wrapper(*args, **kwargs):
# start = time.time()
# result = func(*args, **kwargs)
# end = time.time()
# print(func.__name__, end-start)
# return result
# return wrapper
# @timethis
# def countdown(n):
# '''
# Count down
# '''
# while n > 0:
# n -= 1
# countdown(10000000)
# class A:
# @classmethod
# def method(cls):
# pass
# class B:
# # Equivalent definition of a class method
# def method(cls):
# pass
# method = classmethod(method)
#endregion
# 9.2. Preserving Function Metadata When Writing Decorators
#region
# import time
# from functools import wraps
# def timethis(func):
# '''
# Decorator that reports the execution time.
# '''
# @wraps(func)
# def wrapper(*args, **kwargs):
# start = time.time()
# result = func(*args, **kwargs)
# end = time.time()
# print(func.__name__, end-start)
# return result
# return wrapper
# @timethis
# def countdown(n):
# '''
# Count down
# '''
# while n > 0:
# n -= 1
#endregion
### 9.3. Unwrapping a Decorator
#region
# from functools import wraps
# def decorator1(func):
# @wraps(func)
# def wrapper(*args, **kwargs):
# print('Decorator 1')
# return func(*args, **kwargs)
# return wrapper
# def decorator2(func):
# @wraps(func)
# def wrapper(*args, **kwargs):
# print('Decorator 2')
# return func(*args, **kwargs)
# return wrapper
# @decorator1
# @decorator2
# def add(x, y):
# return x + y
# add(2, 3)
# add.__wrapped__(2, 3)
#endregion
### 9.4. Defining a Decorator That Takes Arguments
#region
from functools import wraps
import logging
def logged(level, name=None, message=None):
'''
Add logging to a function. level is the logging
level, name is the logger name, and message is the
log message. If name and message aren't specified,
they default to the function's module and name.
'''
return decorate
# Example use
#endregion | 20.639098 | 59 | 0.587614 |
fefa551e8285feb448d258e854941881fb3ad2e9 | 759 | py | Python | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | 1 | 2020-08-28T16:49:32.000Z | 2020-08-28T16:49:32.000Z | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | null | null | null | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | null | null | null | import numpy as np
import torch
torch.manual_seed(0)
# PRE-PROCESSING
RAVDESS_DSET_PATH = "C:\\Users\\***\\Downloads\\RAVDESS\\"
TESS_DSET_PATH = "C:\\Users\\***\\Downloads\\TESS\\"
N_WORKERS = 15
# DATASET
emote_id = {
"01" : "neutral", "03" : "happy",
"04" : "sad", "05" : "angry"}
emote_idn = {
0 : "neutral", 1 : "happy",
2 : "sad", 3 : "angry"}
N_CATEGORIES = len(emote_id)
label_id = { n : torch.tensor(i)
for i, n in enumerate(emote_id.values())}
# AUDIO
window_duration = 0.5
LISTENER_RATE = 44100
N_FEATURES = 2
NUM_INFERENCE_WINDOW = 10
samples_per_wind = int(LISTENER_RATE * window_duration)
# TRAINING
BATCH_SIZE = 16
loader_params = { "batch_size" : BATCH_SIZE,
"shuffle" : True} | 22.323529 | 58 | 0.623188 |
fefb10e3bc54bf078e079e6dd58a9eee22dea396 | 7,752 | py | Python | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | 1 | 2022-03-22T09:09:46.000Z | 2022-03-22T09:09:46.000Z | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | 4 | 2022-03-16T12:36:12.000Z | 2022-03-22T10:53:12.000Z | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vdp/pipeline/v1alpha/pipeline_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from vdp.pipeline.v1alpha import healthcheck_pb2 as vdp_dot_pipeline_dot_v1alpha_dot_healthcheck__pb2
from vdp.pipeline.v1alpha import pipeline_pb2 as vdp_dot_pipeline_dot_v1alpha_dot_pipeline__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+vdp/pipeline/v1alpha/pipeline_service.proto\x12\x14vdp.pipeline.v1alpha\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a&vdp/pipeline/v1alpha/healthcheck.proto\x1a#vdp/pipeline/v1alpha/pipeline.proto2\xcc\x10\n\x0fPipelineService\x12\x92\x01\n\x08Liveness\x12%.vdp.pipeline.v1alpha.LivenessRequest\x1a&.vdp.pipeline.v1alpha.LivenessResponse\"7\x82\xd3\xe4\x93\x02\x31Z\x1a\x12\x18/v1alpha/health/pipeline\x12\x13/v1alpha/__liveness\x12z\n\tReadiness\x12&.vdp.pipeline.v1alpha.ReadinessRequest\x1a\'.vdp.pipeline.v1alpha.ReadinessResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1alpha/__readiness\x12\x9c\x01\n\x0e\x43reatePipeline\x12+.vdp.pipeline.v1alpha.CreatePipelineRequest\x1a,.vdp.pipeline.v1alpha.CreatePipelineResponse\"/\xda\x41\x08pipeline\x82\xd3\xe4\x93\x02\x1e:\x08pipeline\"\x12/v1alpha/pipelines\x12\x81\x01\n\x0cListPipeline\x12).vdp.pipeline.v1alpha.ListPipelineRequest\x1a*.vdp.pipeline.v1alpha.ListPipelineResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/v1alpha/pipelines\x12\x8e\x01\n\x0bGetPipeline\x12(.vdp.pipeline.v1alpha.GetPipelineRequest\x1a).vdp.pipeline.v1alpha.GetPipelineResponse\"*\xda\x41\x04name\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1alpha/{name=pipelines/*}\x12\xba\x01\n\x0eUpdatePipeline\x12+.vdp.pipeline.v1alpha.UpdatePipelineRequest\x1a,.vdp.pipeline.v1alpha.UpdatePipelineResponse\"M\xda\x41\x14pipeline,update_mask\x82\xd3\xe4\x93\x02\x30:\x08pipeline2$/v1alpha/{pipeline.name=pipelines/*}\x12\x97\x01\n\x0e\x44\x65letePipeline\x12+.vdp.pipeline.v1alpha.DeletePipelineRequest\x1a,.vdp.pipeline.v1alpha.DeletePipelineResponse\"*\xda\x41\x04name\x82\xd3\xe4\x93\x02\x1d*\x1b/v1alpha/{name=pipelines/*}\x12\xa8\x01\n\x0eLookUpPipeline\x12+.vdp.pipeline.v1alpha.LookUpPipelineRequest\x1a,.vdp.pipeline.v1alpha.LookUpPipelineResponse\";\xda\x41\tpermalink\x82\xd3\xe4\x93\x02)\x12\'/v1alpha/{permalink=pipelines/*}:lookUp\x12\xa9\x01\n\x10\x41\x63tivatePipeline\x12-.vdp.pipeline.v1
alpha.ActivatePipelineRequest\x1a..vdp.pipeline.v1alpha.ActivatePipelineResponse\"6\xda\x41\x04name\x82\xd3\xe4\x93\x02):\x01*\"$/v1alpha/{name=pipelines/*}:activate\x12\xb1\x01\n\x12\x44\x65\x61\x63tivatePipeline\x12/.vdp.pipeline.v1alpha.DeactivatePipelineRequest\x1a\x30.vdp.pipeline.v1alpha.DeactivatePipelineResponse\"8\xda\x41\x04name\x82\xd3\xe4\x93\x02+:\x01*\"&/v1alpha/{name=pipelines/*}:deactivate\x12\xb1\x01\n\x0eRenamePipeline\x12+.vdp.pipeline.v1alpha.RenamePipelineRequest\x1a,.vdp.pipeline.v1alpha.RenamePipelineResponse\"D\xda\x41\x14name,new_pipeline_id\x82\xd3\xe4\x93\x02\':\x01*\"\"/v1alpha/{name=pipelines/*}:rename\x12\xac\x01\n\x0fTriggerPipeline\x12,.vdp.pipeline.v1alpha.TriggerPipelineRequest\x1a-.vdp.pipeline.v1alpha.TriggerPipelineResponse\"<\xda\x41\x0bname,inputs\x82\xd3\xe4\x93\x02(:\x01*\"#/v1alpha/{name=pipelines/*}:trigger\x12\xae\x01\n\x1fTriggerPipelineBinaryFileUpload\x12<.vdp.pipeline.v1alpha.TriggerPipelineBinaryFileUploadRequest\x1a=.vdp.pipeline.v1alpha.TriggerPipelineBinaryFileUploadResponse\"\x0c\xda\x41\tname,file(\x01\x42\xea\x01\n\x18\x63om.vdp.pipeline.v1alphaB\x14PipelineServiceProtoP\x01ZFgithub.com/instill-ai/protogen-go/vdp/pipeline/v1alpha;pipelinev1alpha\xa2\x02\x03VPX\xaa\x02\x14Vdp.Pipeline.V1alpha\xca\x02\x14Vdp\\Pipeline\\V1alpha\xe2\x02 Vdp\\Pipeline\\V1alpha\\GPBMetadata\xea\x02\x16Vdp::Pipeline::V1alphab\x06proto3')
_PIPELINESERVICE = DESCRIPTOR.services_by_name['PipelineService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\030com.vdp.pipeline.v1alphaB\024PipelineServiceProtoP\001ZFgithub.com/instill-ai/protogen-go/vdp/pipeline/v1alpha;pipelinev1alpha\242\002\003VPX\252\002\024Vdp.Pipeline.V1alpha\312\002\024Vdp\\Pipeline\\V1alpha\342\002 Vdp\\Pipeline\\V1alpha\\GPBMetadata\352\002\026Vdp::Pipeline::V1alpha'
_PIPELINESERVICE.methods_by_name['Liveness']._options = None
_PIPELINESERVICE.methods_by_name['Liveness']._serialized_options = b'\202\323\344\223\0021Z\032\022\030/v1alpha/health/pipeline\022\023/v1alpha/__liveness'
_PIPELINESERVICE.methods_by_name['Readiness']._options = None
_PIPELINESERVICE.methods_by_name['Readiness']._serialized_options = b'\202\323\344\223\002\026\022\024/v1alpha/__readiness'
_PIPELINESERVICE.methods_by_name['CreatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['CreatePipeline']._serialized_options = b'\332A\010pipeline\202\323\344\223\002\036:\010pipeline\"\022/v1alpha/pipelines'
_PIPELINESERVICE.methods_by_name['ListPipeline']._options = None
_PIPELINESERVICE.methods_by_name['ListPipeline']._serialized_options = b'\202\323\344\223\002\024\022\022/v1alpha/pipelines'
_PIPELINESERVICE.methods_by_name['GetPipeline']._options = None
_PIPELINESERVICE.methods_by_name['GetPipeline']._serialized_options = b'\332A\004name\202\323\344\223\002\035\022\033/v1alpha/{name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['UpdatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['UpdatePipeline']._serialized_options = b'\332A\024pipeline,update_mask\202\323\344\223\0020:\010pipeline2$/v1alpha/{pipeline.name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['DeletePipeline']._options = None
_PIPELINESERVICE.methods_by_name['DeletePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002\035*\033/v1alpha/{name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['LookUpPipeline']._options = None
_PIPELINESERVICE.methods_by_name['LookUpPipeline']._serialized_options = b'\332A\tpermalink\202\323\344\223\002)\022\'/v1alpha/{permalink=pipelines/*}:lookUp'
_PIPELINESERVICE.methods_by_name['ActivatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['ActivatePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002):\001*\"$/v1alpha/{name=pipelines/*}:activate'
_PIPELINESERVICE.methods_by_name['DeactivatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['DeactivatePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002+:\001*\"&/v1alpha/{name=pipelines/*}:deactivate'
_PIPELINESERVICE.methods_by_name['RenamePipeline']._options = None
_PIPELINESERVICE.methods_by_name['RenamePipeline']._serialized_options = b'\332A\024name,new_pipeline_id\202\323\344\223\002\':\001*\"\"/v1alpha/{name=pipelines/*}:rename'
_PIPELINESERVICE.methods_by_name['TriggerPipeline']._options = None
_PIPELINESERVICE.methods_by_name['TriggerPipeline']._serialized_options = b'\332A\013name,inputs\202\323\344\223\002(:\001*\"#/v1alpha/{name=pipelines/*}:trigger'
_PIPELINESERVICE.methods_by_name['TriggerPipelineBinaryFileUpload']._options = None
_PIPELINESERVICE.methods_by_name['TriggerPipelineBinaryFileUpload']._serialized_options = b'\332A\tname,file'
_PIPELINESERVICE._serialized_start=202
_PIPELINESERVICE._serialized_end=2326
# @@protoc_insertion_point(module_scope)
| 131.389831 | 3,390 | 0.821723 |
fefbae820a9ce01089538fc58c0ca13a3a6231eb | 119 | py | Python | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | 2 | 2021-09-02T13:06:46.000Z | 2021-09-03T07:19:54.000Z | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | null | null | null | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | 1 | 2021-08-14T03:38:42.000Z | 2021-08-14T03:38:42.000Z | __author__ = "SilentJungle399"
__version__ = "1.0.0"
from .client import *
from .models import *
from .enums import *
| 17 | 30 | 0.722689 |
fefc83e00d4e08e9e4f83915c661bd7690cde11d | 211 | py | Python | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | 9 | 2019-07-07T02:57:50.000Z | 2022-01-07T10:03:30.000Z | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | from .abstractanalyzer import AbstractAnalyzer
from textblob import TextBlob
| 16.230769 | 46 | 0.729858 |
fefccd0f2f86b8b353d1a858bb9e54ee6a296e8f | 850 | py | Python | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | """3/1 adventofcode"""
with open("input.txt", "r", encoding="UTF-8") as i_file:
data = i_file.read().splitlines()
columns = [[row[i] for row in data] for i in range(len(data[0]))]
def binlst_to_int(values) -> int:
"""Returns int values of binary in list form"""
values = values[::-1]
total = 0
for i in range(len(values)):
total += values[i]*2**i
return total
def get_most(columns) -> list:
"""Returns list of most common values for each column"""
return [1 if column.count("1") > column.count("0") else 0 for column in columns]
def get_least(columns) -> list:
"""Returns list of least common values for each column"""
return [0 if column.count("0") < column.count("1") else 1 for column in columns]
print(binlst_to_int(get_most(columns))*binlst_to_int(get_least(columns)))
| 35.416667 | 85 | 0.64 |
fefdeea84966c3c376d5a46f9c21101aefc50772 | 193 | py | Python | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | 3 | 2015-12-08T17:14:31.000Z | 2016-01-29T18:46:59.000Z | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | from braces.views import AnonymousRequiredMixin
from django.views.generic import TemplateView
| 32.166667 | 56 | 0.839378 |
3a00eea590558911d75f7435e45a186ce7c2a0a1 | 30,437 | py | Python | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | 1 | 2022-02-24T02:16:55.000Z | 2022-02-24T02:16:55.000Z | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | null | null | null | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python3
from functools import partial
from datetime import datetime
import pandas as pd
from joblib import parallel_backend
import random
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
import shutil
import pathlib
import os
import math
import random
from matplotlib import pyplot
import matplotlib.pyplot as plt
import time
import copy
import random
import pickle
from joblib import Parallel, delayed
import tempfile
from xgboost import XGBClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB, BernoulliNB, CategoricalNB, ComplementNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from joblib import Parallel, delayed
import itertools
import multiprocessing
import socket
from glob import glob
from collections import OrderedDict
import logging
import mlflow
from typing import Dict, Any
import hashlib
import json
from pymrmre import mrmr
from pprint import pprint
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import RFE, RFECV
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.feature_selection import mutual_info_classif
from mlflow import log_metric, log_param, log_artifact, log_dict, log_image
from loadData import *
from utils import *
from parameters import *
from extraFeatureSelections import *
### parameters
TrackingPath = "/data/results/radFS/mlrun.benchmark"
print ("Have", len(fselParameters["FeatureSelection"]["Methods"]), "Feature Selection Methods.")
print ("Have", len(clfParameters["Classification"]["Methods"]), "Classifiers.")
# wie CV: alle parameter gehen einmal durch
# this is pretty non-generic, maybe there is a better way, for now it works.
# if we do not want scaling to be performed on all data,
# we need to save thet scaler. same for imputer.
if __name__ == "__main__":
print ("Hi.")
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
# load data first
datasets = {}
dList = ["Li2020", "Carvalho2018", "Hosny2018A", "Hosny2018B", "Hosny2018C", "Ramella2018", "Keek2020", "Park2020", "Song2020" , "Toivonen2019"]
for d in dList:
eval (d+"().info()")
datasets[d] = eval (d+"().getData('./data/')")
print ("\tLoaded data with shape", datasets[d].shape)
# avoid race conditions later
try:
mlflow.set_tracking_uri(TrackingPath)
mlflow.create_experiment(d)
mlflow.set_experiment(d)
time.sleep(3)
except:
pass
for d in dList:
print ("\nExecuting", d)
data = datasets[d]
# generate all experiments
fselExperiments = generateAllExperiments (fselParameters)
print ("Created", len(fselExperiments), "feature selection parameter settings")
clfExperiments = generateAllExperiments (clfParameters)
print ("Created", len(clfExperiments), "classifier parameter settings")
print ("Total", len(clfExperiments)*len(fselExperiments), "experiments")
# generate list of experiment combinations
clList = []
for fe in fselExperiments:
for clf in clfExperiments:
clList.append( (fe, clf, data, d))
# execute
ncpus = 16
with parallel_backend("loky", inner_max_num_threads=1):
fv = Parallel (n_jobs = ncpus)(delayed(executeExperiments)(c) for c in clList)
#
| 36.451497 | 182 | 0.610934 |
3a01b5b20e16dc59b45be5e462160adb8ae019e0 | 692 | py | Python | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy import optimize, sparse
from .AbstractDistanceAlg import AbstractDistanceAlg
| 31.454545 | 81 | 0.669075 |
3a025d2fa53d6a334efac01743db85a3f7705e2e | 757 | py | Python | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | from collections.abc import Sequence
from .filter import Filter
| 29.115385 | 112 | 0.649934 |
3a04e44a83831c5da0bf2cc7640fd1129f243146 | 97 | py | Python | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 1 | 2017-11-05T20:41:12.000Z | 2017-11-05T20:41:12.000Z | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 2 | 2021-03-31T18:43:15.000Z | 2021-12-13T19:46:28.000Z | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | null | null | null | """
:copyright: Nick Hale
:license: MIT, see LICENSE for more details.
"""
__version__ = '0.0.1'
| 16.166667 | 44 | 0.670103 |
3a078ca91eafb1c88f7c5c3ad6afd4b81ea83805 | 1,386 | py | Python | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | 10 | 2015-06-09T00:25:01.000Z | 2019-06-11T16:07:31.000Z | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | 25 | 2015-03-23T23:16:01.000Z | 2017-08-29T03:35:59.000Z | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | null | null | null | from google.protobuf import text_format
from pomagma.io import protobuf_test_pb2
from pomagma.io.protobuf import InFile, OutFile
from pomagma.util import in_temp_dir
from pomagma.util.testing import for_each
EXAMPLES = [
parse(''),
parse('''
optional_string: 'test'
'''),
parse('''
repeated_string: 'test1'
repeated_string: 'test2'
'''),
parse('''
optional_string: 'test'
repeated_string: 'test1'
repeated_string: 'test2'
optional_message: {
repeated_message: {}
repeated_message: {
optional_string: 'sub sub 1'
repeated_string: 'sub'
}
repeated_message: {
optional_string: 'sub 1'
}
repeated_message: {
repeated_string: 'sub 2'
}
}
'''),
]
| 24.75 | 55 | 0.585859 |
3a079d600f0144ca6ea7cb473635485bda6d1725 | 2,039 | py | Python | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
if __name__ == "__main__":
unittest.main()
| 33.983333 | 82 | 0.680726 |
3a081670c8619a8dbe9b2b1bb3b4d9935ec6801d | 1,577 | py | Python | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 8 | 2015-06-29T20:01:22.000Z | 2020-10-19T13:49:38.000Z | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 67 | 2015-10-05T16:57:14.000Z | 2022-03-28T19:57:36.000Z | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 6 | 2015-10-05T13:54:34.000Z | 2021-11-30T05:11:58.000Z | import re
from django.template import Library, Node, TemplateSyntaxError
from django.template.base import token_kwargs
from django.urls import Resolver404, resolve
from django.utils.html import format_html
register = Library()
| 29.203704 | 112 | 0.637286 |
3a0830f683c3bcea14ab59eb19f8a4474d9635b6 | 3,984 | py | Python | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2020-12-03T18:18:16.000Z | 2020-12-03T18:18:16.000Z | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 13 | 2021-02-22T18:27:58.000Z | 2022-02-10T08:14:10.000Z | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2021-04-27T12:38:47.000Z | 2021-04-27T12:38:47.000Z | """ Log initializer """
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from rich.logging import RichHandler
from typing import List
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
WARNING = logging.WARNING
DEFAULT_LOG_FILENAME = "superai.log"
_log_format = (
"%(asctime)s - %(levelname)s - %(filename)s - %(threadName)s - [%(name)s:%(funcName)s:%(lineno)s] - %(message)s"
)
_rich_log_format = "%(message)s - %(threadName)s"
_date_format = "%Y-%m-%d %H:%M:%S"
_style = "{"
loggers: List[logging.Logger] = []
def create_file_handler(
    log_format=_log_format,
    log_filename=DEFAULT_LOG_FILENAME,
    max_bytes=5000000,
    backup_count=25,
):
    """Build a size-based rotating file handler using the custom formatter.

    Rotates once the file exceeds *max_bytes*, keeping *backup_count* old
    files around.
    """
    rotating = RotatingFileHandler(log_filename, maxBytes=max_bytes, backupCount=backup_count)
    rotating.setFormatter(CustomFormatter(fmt=log_format, datefmt=_date_format, style=_style))
    return rotating
def create_non_cli_handler(log_format=_log_format, stream=sys.stdout):
    """Build a plain stream handler for non-interactive consoles (e.g. ECS logs)."""
    handler = logging.StreamHandler(stream)
    handler.setFormatter(CustomFormatter(fmt=log_format, datefmt=_date_format))
    return handler
def create_cli_handler():
    """Create logging handler for CLI with rich structured output.

    Returns a :class:`rich.logging.RichHandler` with rich tracebacks enabled;
    formatting (colors, columns) is handled by Rich itself, so callers pair it
    with the short ``_rich_log_format`` message format.
    """
    rich_handler = RichHandler(rich_tracebacks=True)
    return rich_handler
def get_logger(name=None, propagate=True):
    """Return the logger for *name*, track it in the module-level ``loggers``
    list (so ``init`` can attach handlers later), and set its propagation flag.
    """
    obtained = logging.getLogger(name)
    loggers.append(obtained)
    obtained.propagate = propagate
    return obtained
def exception(line):
    """Log *line* at ERROR level with the current exception's traceback attached.

    Intended to be called from inside an ``except`` block.
    """
    return logging.error(line, exc_info=True)
def debug(line):
    """Log *line* at DEBUG level on the root logger."""
    return logging.log(logging.DEBUG, line)
def warn(line):
    """Log *line* at WARNING level on the root logger.

    Delegates to ``logging.warning``: the previously used ``logging.warn``
    is a deprecated alias (deprecated since Python 3.3) and emits a
    DeprecationWarning on modern interpreters.
    """
    return logging.warning(line)
def error(line):
    """Log *line* at ERROR level on the root logger."""
    return logging.log(logging.ERROR, line)
def info(line):
    """Log *line* at INFO level on the root logger."""
    return logging.log(logging.INFO, line)
def init(filename=None, console=True, log_level=INFO, log_format=_log_format):
    """Initialize logging setup.

    :param filename: if given, additionally log to this rotating file with
        the verbose format.
    :param console: attach a console handler (Rich for interactive CLIs,
        plain stream handler when running under ECS/Jenkins).
    :param log_level: level applied to all tracked loggers and basicConfig.
    :param log_format: message format; falsy values fall back to the default.
    :returns: the logger for this module.
    """
    if not log_format:
        log_format = _log_format
    log_handlers: List[logging.Handler] = []
    if console:
        # ECS / JENKINS_URL env vars mark non-interactive environments where
        # Rich's terminal output would be noise.
        if os.getenv("ECS", False) or os.getenv("JENKINS_URL", False):
            log_handlers.append(create_non_cli_handler(log_format=log_format))
        else:
            # Use Rich for CLI
            log_handlers.append(create_cli_handler())
            # Set Format to short type for Rich (Rich renders time/level itself)
            log_format = _rich_log_format
    if filename is not None:
        # Always log to file with verbose format
        log_handlers.append(create_file_handler(log_format=_log_format, log_filename=filename))
    # Attach every handler to every logger created via get_logger so far.
    for pair in itertools.product(loggers, log_handlers):
        pair[0].addHandler(pair[1])
        pair[0].setLevel(log_level)
    # Set Logging config based on CLI/Non/CLI Format
    logging.basicConfig(format=log_format, level=log_level, handlers=log_handlers)
    log = get_logger(__name__)
    # NOTE(review): the init message is only emitted when log_level is above
    # INFO (i.e. WARNING or higher) — presumably to keep default runs quiet;
    # confirm this is intentional.
    if log_level > logging.INFO:
        log.log(level=log_level, msg=f"super.Ai logger initialized with log_level={log_level}")
    return log
| 29.511111 | 116 | 0.704317 |
3a090e5c232242360194af34105d0efa576a5d9f | 6,613 | py | Python | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | import argparse
from pathlib import Path
import torch
import torch.nn.functional as F
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
from data.data_loader import ActivDataset, loader
from models.focal_loss import FocalLoss
from models.ete_waveform import EteWave
from models.post_process import as_seaquence
torch.manual_seed(555)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='/home/sh70k/mnt/tracker_data/test', help='path to dataset')
parser.add_argument('--n-class', type=int, default=6, help='number of class')
parser.add_argument('--test_seq-len', type=int, default=200, help='fixed seaquence length')
parser.add_argument('--time-step', type=float, default=.25, help='fixed time interbal of input data')
parser.add_argument('--test-data-file-pointer-path', default='./data/test_data_file_pointer', help='path to test data file pointer')
parser.add_argument('--resume-model', default='/home/sh70k/mnt/tracker_data/results/model_ckpt_v1_average.pth', help='path to trained model')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch-size', type=int, default=1, help='input batch size')
parser.add_argument('--out-dir', default='/home/sh70k/mnt/tracker_data/results', help='folder to output data and model checkpoints')
args = parser.parse_args()
Path(args.out_dir).mkdir(parents=True, exist_ok=True),
main(args)
| 42.121019 | 145 | 0.665356 |
3a0d56385a100828a93d1a548339d663fa8c3ed6 | 4,031 | py | Python | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | from structure_helper_class import structure_helper
from model_train_helper_class import model_train_helper
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
| 47.988095 | 161 | 0.611015 |
3a0e24a4de9a8532f6e0fffca390853480dadb10 | 5,460 | py | Python | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
Store and report warnings and errors in a PoPs database.
PoPs.check() returns a nested list of warning objects:
>>> warnings = PoPs.check()
>>> print( warnings )
May include or exclude specific classes of warning using the filter command.
filter() returns a new context instance:
>>> warnings2 = warnings.filter( exclude=[warning.unnormalizedGammas] )
Or, for easier searching you may wish to flatten the list (to get warnings alone without context messages):
>>> flat = warnings.flatten()
"""
# FIXME context class and base warning class are both identical to stuff in fudge.warning. Move to external utility?
__metaclass__ = type
#
# specific warning classes:
#
| 31.37931 | 116 | 0.630952 |
3a0f2160b69e0995f3cc76e9cebbc03eb599b9f1 | 2,077 | py | Python | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | from canoser import Struct, Uint8, bytes_to_int_list, hex_to_int_list
from libra.transaction.transaction_argument import TransactionArgument, normalize_public_key
from libra.bytecode import bytecodes
from libra.account_address import Address
| 36.438596 | 93 | 0.641791 |
3a0f8c5dad18187b53b099da32a80926deec7934 | 172 | py | Python | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | null | null | null | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | 6 | 2019-11-04T22:48:39.000Z | 2019-11-14T01:18:49.000Z | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | 4 | 2019-10-29T23:24:57.000Z | 2019-11-15T01:25:46.000Z | from Calculators.Division import division
| 21.5 | 50 | 0.715116 |
3a107df57da88f96818aa6ed0682c1887ef863ef | 1,901 | py | Python | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | null | null | null | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | 9 | 2017-10-25T10:03:36.000Z | 2018-06-12T22:49:22.000Z | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
N, M, T = read_line_to_list()
candies_ = [read_line_to_list() for _ in range(N)]
collector = CollectCandies(N, M, T, candies_)
print collector.get_max_sum()
| 32.220339 | 69 | 0.579695 |
3a110cf9f81c51a45a9e039e2675a3d01dca6237 | 13,818 | py | Python | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | 1 | 2017-04-25T13:15:10.000Z | 2017-04-25T13:15:10.000Z | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | null | null | null | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | null | null | null | # ----------------------------------------------------------------------
# |
# | __init__.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-02-18 14:37:39
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import os
import sys
import textwrap
from collections import OrderedDict
# ----------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# This file may be invoked by our included version of python - all imports will
# work as expected. But sometimes, this file may be invoked by embedded versions
# of python (for example, when used as part of a Mercurial plugin). At that point,
# we need to go through a bit more work to ensure that module-level imports work
# as expected.
try:
import inflect
import six
import wrapt
# If here, everything was found and all is good
except ImportError:
# If here, we are in a foreign python environment. Hard-code an import path
# to a known location of these base-level libraries. Because the libraries are
# so basic, it doesn't matter which one we use; therefore pick the lowest common
# denominator.
fundamental_repo = GetFundamentalRepository()
python_root = os.path.join(fundamental_repo, "Tools", "Python", "v2.7.10")
assert os.path.isdir(python_root), python_root
for suffix in [ os.path.join("Windows", "Lib", "site-packages"),
os.path.join("Ubuntu", "lib", "python2.7", "site-packages"),
]:
potential_dir = os.path.join(python_root, suffix)
if os.path.isdir(potential_dir):
sys.path.insert(0, potential_dir)
break
# Try it again
import inflect
import six
import wrapt
del sys.path[0]
# ----------------------------------------------------------------------
# Backwards compatibility
from SourceRepositoryTools.Impl.Configuration import *
from SourceRepositoryTools.Impl import Constants
from SourceRepositoryTools.Impl.Utilities import DelayExecute, \
GetLatestVersion, \
GetRepositoryUniqueId, \
GetVersionedDirectory
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def CreateDependencyMap(root_dir):
# Note that this functionality if very similar to that found in ActivationData.
# The difference between the two is this function will compile a map of all repositories
# under the code dir, while the code in ActivationData will only traverse environment
# data created during setup. Theoretically, it is possible for ActivationData
# to be implemented in terms of this function, but that would be too inefficient for
# general use.
from CommonEnvironment.NamedTuple import NamedTuple
from CommonEnvironment import Shell
from CommonEnvironment import SourceControlManagement
from SourceRepositoryTools.Impl.EnvironmentBootstrap import EnvironmentBootstrap
# ----------------------------------------------------------------------
RepoInfo = NamedTuple( "RepoInfo",
"UniqueId",
"Name",
"Root",
"Configurations",
)
ConfigInfo = NamedTuple( "ConfigInfo",
"ReliesOn",
"ReliedUponBy",
)
DependencyInfo = NamedTuple( "DependencyInfo",
"Configuration",
"Dependency",
)
# ----------------------------------------------------------------------
assert os.path.isdir(root_dir), root_dir
environent = Shell.GetEnvironment()
repositories = OrderedDict()
for scm, directory in SourceControlManagement.EnumSCMDirectories(root_dir):
result = GetRepositoryUniqueId( directory,
scm=scm,
throw_on_error=False,
)
if result is None:
continue
repo_name, repo_id = result
assert repo_id not in repositories, (repo_id, directory, repositories[repo_id].Root)
repo_bootstrap_data = EnvironmentBootstrap.Load(directory, environment=environent)
repo_bootstrap_data.Name = repo_name
repo_bootstrap_data.Id = repo_id
repo_bootstrap_data.Root = directory
repo_bootstrap_data.PriorityModifier = 0
repositories[repo_id] = repo_bootstrap_data
# Order by priority
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
for repo_id in six.iterkeys(repositories):
Walk(repo_id, 1)
priority_values = list(six.iteritems(repositories))
priority_values.sort(key=lambda x: x[1].PriorityModifier, reverse=True)
# Convert the repositories into a structure that is easier to process
results = OrderedDict()
for unique_id, repo_info in priority_values:
results[unique_id] = RepoInfo( unique_id,
repo_info.Name,
repo_info.Root,
OrderedDict(),
)
for config_name in six.iterkeys(repo_info.Configurations):
results[unique_id].Configurations[config_name] = ConfigInfo([], [])
# Populate the dependencies
for unique_id, repo_info in priority_values:
for config_name, config_info in six.iteritems(repo_info.Configurations):
# It is possible that a dependency is included more than once (as will be the case if someone
# includes Common_Enviroment as a dependency even though a dependency on Common_Enviroment is
# implied). Ensure that we are only looking at unique dependencies.
these_dependencies = []
dependency_lookup = set()
for dependency in config_info.Dependencies:
if dependency.Id in dependency_lookup:
continue
these_dependencies.append(( dependency, repositories[dependency.Id].PriorityModifier ))
dependency_lookup.add(dependency.Id)
# Ensure that the dependencies are ordered in priority order
these_dependencies.sort(key=lambda x: x[0].Id, reverse=True)
for dependency, priority_modifier in these_dependencies:
results[unique_id].Configurations[config_name].ReliesOn.append(DependencyInfo(dependency.Configuration, results[dependency.Id]))
results[dependency.Id].Configurations[dependency.Configuration].ReliedUponBy.append(DependencyInfo(config_name, results[unique_id]))
# Ensure that we can index by repo path as well as id
for unique_id in list(six.iterkeys(results)):
results[results[unique_id].Root] = results[unique_id]
return results
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetRepositoryRootForFile(filename):
    """Walk up the directory tree from *filename* and return the first
    directory containing the repository id file; raise if none is found
    before the filesystem root.
    """
    candidate = os.path.dirname(filename)
    previous = None
    # os.path.dirname of the filesystem root is the root itself, so the
    # walk terminates once candidate stops changing (root checked too).
    while candidate != previous:
        if os.path.isfile(os.path.join(candidate, Constants.REPOSITORY_ID_FILENAME)):
            return candidate
        previous, candidate = candidate, os.path.dirname(candidate)
    raise Exception("Unable to find the repository root for '{}'".format(filename))
| 45.453947 | 285 | 0.481473 |
3a11220a149a467396eed9e2f60bcf713ed632ac | 3,213 | py | Python | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 138 | 2017-07-25T06:48:28.000Z | 2022-03-31T12:23:36.000Z | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 11 | 2017-08-17T13:45:54.000Z | 2021-06-04T09:06:53.000Z | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 74 | 2017-08-18T17:04:46.000Z | 2022-03-21T14:30:51.000Z | # Databricks notebook source exported at Mon, 14 Mar 2016 03:21:05 UTC
# MAGIC %md
# MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand), and *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome).
# COMMAND ----------
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Module Three Lectures
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 5: Semi-Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/qzMs9Sq_DHw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/pMSGGZVSwqo')
# COMMAND ----------
displayHTML('https://youtube.com/embed/NJyBQ-cQ3Ac')
# COMMAND ----------
displayHTML('https://youtube.com/embed/G_67yUxdDbU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Llof8ZgCHFE')
# COMMAND ----------
displayHTML('https://youtube.com/embed/KjzoBzCxHMs')
# COMMAND ----------
displayHTML('https://youtube.com/embed/25YMAapjJgw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/otrnf8MQ8S8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/8vpmMbmUAiA')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Wc7zJG-N2B8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/c2MFJI_NWVw')
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 6: Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/lODYQTgyqLk')
# COMMAND ----------
displayHTML('https://youtube.com/embed/BZuv__KF4qU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/khFzRxjk2Tg')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tAepBMlGvak')
# COMMAND ----------
displayHTML('https://youtube.com/embed/XAyWtVtBTlI')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Zp0EF2Dghik')
# COMMAND ----------
displayHTML('https://youtube.com/embed/iAqgcaKERHM')
# COMMAND ----------
displayHTML('https://youtube.com/embed/kaX4I2jENJc')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tBsNkJyFr2w') | 30.6 | 749 | 0.698101 |
3a11c774870f73e9df814c0fb0e907ad67a018a8 | 2,075 | py | Python | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | from unittest import mock
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from einsteinpy.coordinates import SphericalDifferential
from einsteinpy.plotting import StaticGeodesicPlotter
def test_plot_calls_draw_attractor_Manualscale(dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m, attractor_radius_scale=1500)
cl.plot(sph_obj, el, ss)
assert cl._attractor_present
assert cl.attractor_radius_scale == 1500
assert cl.get_curr_plot_radius != -1
def test_plot_calls_draw_attractor_AutoScale(dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m)
cl.plot(sph_obj, el, ss)
assert cl._attractor_present
assert cl.get_curr_plot_radius != -1
| 28.040541 | 63 | 0.700241 |
3a14941cbf1878d6614fada903d6f5559aa474e0 | 367 | py | Python | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | #from inv_ind.py import inverted_index
import search
| 28.230769 | 81 | 0.599455 |
3a1626ac2fa1019fb590d26ad03b0ec329ab6d9d | 2,017 | py | Python | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | from enum import Enum
import typer
from fasta_reader import read_fasta
from deciphon_cli.core import ScanPost, SeqPost
from deciphon_cli.requests import get_json, get_plain, post_json
__all__ = ["app"]
app = typer.Typer()
| 24.901235 | 84 | 0.67526 |
3a163271adf00fd1d184016bb403b5d130a4068f | 1,655 | py | Python | neuralmaterial/lib/models/vgg.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 4 | 2022-01-31T14:26:39.000Z | 2022-02-06T06:34:27.000Z | neuralmaterial/lib/models/vgg.py | NejcHirci/material_addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 2 | 2022-01-30T10:35:04.000Z | 2022-01-30T10:35:04.000Z | neuralmaterial/lib/models/vgg.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
| 33.77551 | 113 | 0.578852 |
3a16438d4a6793d41974ba3f9e345b3deca9076f | 296 | py | Python | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 3 | 2017-02-02T19:58:57.000Z | 2021-08-10T14:43:37.000Z | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 4 | 2016-01-15T14:18:37.000Z | 2016-03-06T15:06:31.000Z | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 2 | 2019-10-12T02:05:49.000Z | 2022-03-08T16:25:17.000Z | from portfolio.models import Transaction, Security, Price, Account
from portfolio.models import PriceTracker
from django.contrib import admin
admin.site.register(Transaction)
admin.site.register(Security)
admin.site.register(Price)
admin.site.register(PriceTracker)
admin.site.register(Account)
| 29.6 | 66 | 0.841216 |
3a16bef75430d1f8616b4661d929e57eb96f5d11 | 1,295 | py | Python | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 16 | 2019-11-28T13:26:37.000Z | 2022-02-09T09:53:10.000Z | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 1 | 2021-03-26T20:31:48.000Z | 2021-07-15T08:52:47.000Z | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 3 | 2020-08-14T23:23:25.000Z | 2021-12-24T14:02:35.000Z | import os
import shutil
| 32.375 | 95 | 0.565251 |
3a16fcd29e32261f583e0fe17a97b6df4dbfd030 | 391 | py | Python | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | """ The components module has all optical components that are used in optics
"""
| 11.848485 | 76 | 0.557545 |
3a193908dfb0eb3ea9c064b546eae9b145317435 | 10,915 | py | Python | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | from twisted.internet.defer import succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
from txraft import Entry, RaftNode, MockRPC, STATE
from txraft.commands import AppendEntriesCommand, RequestVotesCommand
| 30.319444 | 93 | 0.599542 |
3a19793608f407d01e4af46fb22f949e028fb9e8 | 6,867 | py | Python | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null |
import os, sys
sys.path.append(os.path.expanduser("~/workspace/"))
from pyutils.common import *
if __name__ == "__main__":
BASE_DIR = "/nvme/log/p/2021-02-01/"
# load_all_fe_metrics(f"{BASE_DIR}/0124/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0124/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0125/aws_CDN_akamai2_expLatency_unavail1_1000G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0125/aws_C2DN_akamai2_expLatency_unavail1_43_1000G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0127/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0127/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0130/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0130/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
load_all_fe_metrics(f"{BASE_DIR}/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
load_all_fe_metrics(f"{BASE_DIR}/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
| 42.388889 | 128 | 0.642493 |
3a1a4878173988f64e8012e0966e1a78c639eef8 | 4,116 | py | Python | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | null | null | null | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | 10 | 2020-01-03T16:56:27.000Z | 2022-01-13T00:41:57.000Z | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | null | null | null | """
Django settings for ToDo project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4@_rz2!t@z1jvzsw84+42xxr1v2yz7qhop$khg($i@8s5@73yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
ALLOWED_HOSTS = ['10.10.131.76', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Custom Apps
'CreateCard',
#Crispy forms
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ToDo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ToDo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# This is where static files will be collected.
STATIC_ROOT = os.path.join(BASE_DIR, 'Static_Root')
# apart from looking in 'my_app/static', this setting will also
# look for static files mentioned in below directories.
# Remove the contents if you have one.
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static"),
# ]
# Media settings
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "Media_Root")
# TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
# TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
# TWILIO_CALLER_ID = os.environ['TWILIO_CALLER_ID']
#for Twilio
# SECURITY NOTE(review): live-looking Twilio credentials are hard-coded in
# source control. Rotate these keys and switch to the environment-variable
# versions commented out above so secrets never live in the repository.
TWILIO_ACCOUNT_SID='ACf444eea774e6a2e4e0b81cd4b8cb3a8d'
TWILIO_AUTH_TOKEN='4dd33e0cf293066f9df8d7d385d454f7'
TWILIO_CALLER_ID='+18042943446'
#Crispy form
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| 27.078947 | 91 | 0.710641 |
3a1ad1cbd5fa6fd57f60b6cfe90e8e847de62504 | 89 | py | Python | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 3 | 2021-05-28T06:46:36.000Z | 2021-06-14T13:39:25.000Z | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 22 | 2021-04-28T12:31:58.000Z | 2022-03-09T18:29:12.000Z | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 1 | 2021-06-01T12:48:54.000Z | 2021-06-01T12:48:54.000Z | from . import (
canopy,
evapotranspiration,
radiation,
snow,
soil,
)
| 11.125 | 23 | 0.573034 |
3a1b3de82b0cb02451c59c3a93b30506f022268a | 188 | py | Python | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T22:08:27.000Z | 2019-10-09T22:08:27.000Z | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | 7 | 2020-06-05T23:45:57.000Z | 2022-02-10T10:40:54.000Z | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from django_security_headers_example.core.views import LandingPageView
urlpatterns = [
path("", view=LandingPageView.as_view(), name="landing_page"),
]
| 20.888889 | 70 | 0.776596 |
3a1bb607068330f96d4bdb50c12759ee1c1a9528 | 14,071 | py | Python | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 1,690 | 2017-11-29T20:13:37.000Z | 2022-03-31T12:58:11.000Z | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 2,762 | 2017-12-04T05:18:03.000Z | 2022-03-31T23:40:11.000Z | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 961 | 2017-11-30T16:44:03.000Z | 2022-03-30T23:12:09.000Z | from __future__ import absolute_import
import mock
import pytest
import pandas as pd
from collections import OrderedDict
from sagemaker.analytics import ExperimentAnalytics
| 40.66763 | 100 | 0.538341 |
3a1c1e3d3d934a3c220e33611b61500c0a74317b | 14,244 | py | Python | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 15 | 2019-09-06T06:47:08.000Z | 2022-01-17T06:39:54.000Z | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 69 | 2019-09-06T12:03:19.000Z | 2022-03-26T14:30:53.000Z | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 13 | 2019-09-11T10:54:20.000Z | 2021-11-23T09:09:19.000Z | # Generated by Django 2.1.7 on 2019-04-04 12:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 53.750943 | 713 | 0.582982 |
3a20f5e777be4409e899dec4e5460fecff5677e0 | 10,325 | py | Python | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | # coding:utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import math
import re
import sys
from Vocab import Vocab
from Dataset import Dataset
from RNN_RNN import RNN_RNN
import os, json, argparse, random
sys.path.append('../../')
from myrouge.rouge import get_rouge_score
# Command-line configuration for the SummaRuNNer baseline.
parser = argparse.ArgumentParser(description='SummaRuNNer')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints1/')
parser.add_argument('-embed_dim', type=int, default=100)
# NOTE(review): presumably the vocabulary size; 100 looks small, so it is
# likely reset from the embedding file at load time -- confirm.
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-hidden_size', type=int, default=200)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=800)
parser.add_argument('-seg_num', type=int, default=10)
# train
parser.add_argument('-lr', type=float, default=1e-3)
parser.add_argument('-max_norm', type=float, default=5.0)
parser.add_argument('-batch_size', type=int, default=5)
parser.add_argument('-epochs', type=int, default=8)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-embedding', type=str, default='../../word2vec/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../word2vec/word2id.json')
parser.add_argument('-train_dir', type=str, default='../../data/bbc_opt/train/')
parser.add_argument('-valid_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-sent_trunc', type=int, default=20)
parser.add_argument('-doc_trunc', type=int, default=10)
parser.add_argument('-blog_trunc', type=int, default=80)
parser.add_argument('-valid_every', type=int, default=100)
# test
parser.add_argument('-load_model', type=str, default='')
parser.add_argument('-test_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-ref', type=str, default='outputs/ref/')
parser.add_argument('-hyp', type=str, default='outputs/hyp/')
parser.add_argument('-sum_len', type=int, default=1)
parser.add_argument('-mmr', type=float, default=0.75)
# other
parser.add_argument('-test', action='store_true')
# NOTE(review): argparse `type=bool` is a trap -- bool('False') is True, so
# any non-empty value enables it. Harmless here only because the flag is
# overwritten below from torch.cuda.is_available().
parser.add_argument('-use_cuda', type=bool, default=False)
use_cuda = torch.cuda.is_available()
args = parser.parse_args()
# Seed every RNG in play (CUDA, torch CPU, python, numpy) for reproducibility.
if use_cuda:
    torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
args.use_cuda = use_cuda
# rouge_1_f
# MMR
# loss, rouge
if __name__ == '__main__':
if args.test:
test()
else:
train()
| 36.743772 | 120 | 0.606683 |
3a236c93064f118a008812da513e38be43b9a0c5 | 3,512 | py | Python | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | import os
from random import shuffle
import pascalvoc_to_yolo
| 46.210526 | 229 | 0.825456 |
3a26a3c6be42741ef5f1bdf670939b37671499bb | 547 | py | Python | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-18 21:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 24.863636 | 122 | 0.645338 |
3a278df76c850ba375f90a83b4923f079000c2f6 | 1,385 | py | Python | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import sys
import syslog # FIXME: use systemd.journal.send()?
import gi
gi.require_version('Notify', '0.7')
import gi.repository.Notify # noqa: E402
__doc__ = """ an ersatz xterm that says "No!" and quits """
# Tell the central server.
# FIXME: ends up in user journal, not system journal.
# Does rsyslog forward user journal??
who = os.environ.get('XUSER', os.geteuid())
syslog.openlog('noterm4u', facility=syslog.LOG_AUTH)
syslog.syslog(f'{who} tried to open a terminal ({sys.argv[1:]}).')
# Tell the end user.
gi.repository.Notify.init("Terminal")
gi.repository.Notify.Notification.new(
summary='Not allowed',
body='Your attempt to perform a blocked action has been reported.',
icon='dialog-warning-symbolic').show()
# https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html#Exit-Status says
# If a command is not found, the child process created to execute it returns a status of 127.
# If a command is found but is not executable, the return status is 126.
# Pretend to whoever called us, that we are not instaled.
# Probably has no effect whatsoever.
# UPDATE: if we do this, we get a big popup:
#
# Failed to execute default Terminal Emulator.
# Input/output error.
# [ ] Do not show this message again
# [ Close ]
#
# That's a bit shit, so DON'T exit with an error.
# exit(127)
| 34.625 | 95 | 0.704693 |
3a2b8a858ee6da50e87c4cd8bfce4156f67a9cc7 | 844 | py | Python | lgtv.py | aakropotkin/PyWebOSTV | 4c060541b397dc20f79049fa9390c1b6b1a7050b | [
"MIT"
] | null | null | null | lgtv.py | aakropotkin/PyWebOSTV | 4c060541b397dc20f79049fa9390c1b6b1a7050b | [
"MIT"
] | null | null | null | lgtv.py | aakropotkin/PyWebOSTV | 4c060541b397dc20f79049fa9390c1b6b1a7050b | [
"MIT"
] | null | null | null | #! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "[python3] ++ (with pkgs.python37Packages; [ requests future ws4py pytest pylint coveralls twine wheel ])"
# <<END Extended Shebang>>
import json
from pywebostv.discovery import *
from pywebostv.connection import *
from pywebostv.controls import *
with open('/home/camus/.lgtv.json') as f:
store = json.load(f)
client = WebOSClient(store['hostname'])
client.connect()
for status in client.register(store):
if status == WebOSClient.PROMPTED:
print("Please accept the connect on the TV!")
elif status == WebOSClient.REGISTERED:
print("Registration successful!")
ctrl = InputControl(client)
system = SystemControl(client)
media = MediaControl(client)
app = ApplicationControl(client)
inp = InputControl(client)
inp.connect_input()
# vim: set filetype=python :
| 28.133333 | 133 | 0.728673 |
3a2e8191805b6dc90c6ff13576324c98a0708604 | 2,102 | py | Python | lutin_lua.py | generic-library/lua | 1dddc5e025d94bd62ae6ca9e9e3f2cd11ed23a35 | [
"MIT"
] | null | null | null | lutin_lua.py | generic-library/lua | 1dddc5e025d94bd62ae6ca9e9e3f2cd11ed23a35 | [
"MIT"
] | null | null | null | lutin_lua.py | generic-library/lua | 1dddc5e025d94bd62ae6ca9e9e3f2cd11ed23a35 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
| 18.438596 | 58 | 0.569458 |
3a328bda03f529d92fa1c790651cd4083a64c3f3 | 2,657 | py | Python | tests/lib/io/test_marshall.py | yukgu/covid-model-seiir-pipeline | 3433034d3f089938e7993b6321d570365bdf62db | [
"BSD-3-Clause"
] | 23 | 2020-05-25T00:20:32.000Z | 2022-01-18T10:32:09.000Z | tests/lib/io/test_marshall.py | yukgu/covid-model-seiir-pipeline | 3433034d3f089938e7993b6321d570365bdf62db | [
"BSD-3-Clause"
] | 15 | 2020-06-15T16:34:22.000Z | 2021-08-15T22:11:37.000Z | tests/lib/io/test_marshall.py | yukgu/covid-model-seiir-pipeline | 3433034d3f089938e7993b6321d570365bdf62db | [
"BSD-3-Clause"
] | 11 | 2020-05-24T21:57:29.000Z | 2021-09-07T18:21:15.000Z | import pandas
import pytest
from covid_model_seiir_pipeline.lib.io import RegressionRoot
from covid_model_seiir_pipeline.lib.io.marshall import (
CSVMarshall,
ParquetMarshall,
)
| 37.422535 | 105 | 0.694016 |
3a3466847842fadedb0751fe60c731009684a618 | 727 | py | Python | bot/commands/settings.py | mercdev-corp/repsoter | 5ab98e84556143d4688ae5497443916fa63431b0 | [
"MIT"
] | 2 | 2020-10-26T09:26:13.000Z | 2022-03-22T18:10:01.000Z | bot/commands/settings.py | mercdev-corp/repsoter | 5ab98e84556143d4688ae5497443916fa63431b0 | [
"MIT"
] | null | null | null | bot/commands/settings.py | mercdev-corp/repsoter | 5ab98e84556143d4688ae5497443916fa63431b0 | [
"MIT"
] | 2 | 2020-02-11T08:11:19.000Z | 2022-03-20T18:16:41.000Z | from telegram import Update
from telegram.ext import CallbackContext, CommandHandler
from bot.settings import settings
from bot.utils import get_log
from ._utils import require_owner
log = get_log(__name__)
handler = CommandHandler('settings', command)
| 30.291667 | 80 | 0.621733 |
3a34c3856763aba4f082175e4e23858129d09e5b | 3,595 | py | Python | civbot/commands/cmd_add_game.py | thyjukki/Civi-Botti-2.0 | 7b9ff6bf3e97b90f61286e7688db731f91365e88 | [
"MIT"
] | null | null | null | civbot/commands/cmd_add_game.py | thyjukki/Civi-Botti-2.0 | 7b9ff6bf3e97b90f61286e7688db731f91365e88 | [
"MIT"
] | 3 | 2020-04-28T09:19:11.000Z | 2021-06-01T23:21:32.000Z | civbot/commands/cmd_add_game.py | thyjukki/Civi-Botti-2.0 | 7b9ff6bf3e97b90f61286e7688db731f91365e88 | [
"MIT"
] | null | null | null | import telegram
from telegram.ext import CommandHandler, ConversationHandler, MessageHandler, \
Filters
from civbot.commands.cmd_cancel import cancel_all
from civbot.models import User, Subscription
SELECT = 1
# noinspection PyUnusedLocal
| 28.307087 | 79 | 0.628929 |
3a351e34d111e613d1ab5005378d1998b8366f78 | 982 | bzl | Python | internal/run.bzl | kennethzfeng/rules_nomad | b5c000b3c860157917f2af0eebc689ea8c2f796d | [
"MIT"
] | null | null | null | internal/run.bzl | kennethzfeng/rules_nomad | b5c000b3c860157917f2af0eebc689ea8c2f796d | [
"MIT"
] | null | null | null | internal/run.bzl | kennethzfeng/rules_nomad | b5c000b3c860157917f2af0eebc689ea8c2f796d | [
"MIT"
] | null | null | null | # Rule nomad_run generates a runner script to execute nomad run with the given
# job file.
#
# NOTE(kfeng): This rule currently assumes that the nomad executable is
# installed on the host machine, and is in one of the directories listed in
# the PATH environment variable. In the future, this project may fetch
# the nomad executable directly instead of relying on the executable on
# the host machine.
# The rule is marked executable, so `bazel run` invokes the runner script
# produced by _impl (expected to be defined earlier in this file).
nomad_run = rule(
    implementation = _impl,
    attrs = {
        # The Nomad job specification file to pass to `nomad run`.
        "job": attr.label(
            allow_single_file = True,
            mandatory = True,
        ),
    },
    executable = True,
)
| 28.057143 | 78 | 0.657841 |
3a354a29d377cbf952a940a0b75110dea65c2d7e | 1,355 | py | Python | tutorials/W1D4_Optimization/solutions/W1D4_Tutorial1_Solution_9732cf5a.py | carsen-stringer/course-content-dl | 27749aec56a3d2a43b3890483675ad0338a2680f | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | tutorials/W1D4_Optimization/solutions/W1D4_Tutorial1_Solution_9732cf5a.py | carsen-stringer/course-content-dl | 27749aec56a3d2a43b3890483675ad0338a2680f | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | tutorials/W1D4_Optimization/solutions/W1D4_Tutorial1_Solution_9732cf5a.py | carsen-stringer/course-content-dl | 27749aec56a3d2a43b3890483675ad0338a2680f | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | def rmsprop_update(loss, params, grad_sq, lr=1e-1, alpha=0.8):
"""Perform an RMSprop update on a collection of parameters
Args:
loss (tensor): A scalar tensor containing the loss whose gradient will be computed
params (iterable): Collection of parameters with respect to which we compute gradients
grad_sq (iterable): Moving average of squared gradients
lr (float): Scalar specifying the learning rate or step-size for the update
alpha (float): Moving average parameter
"""
# Clear up gradients as Pytorch automatically accumulates gradients from
# successive backward calls
zero_grad(params)
# Compute gradients on given objective
loss.backward()
for (par, gsq) in zip(params, grad_sq):
# Update estimate of gradient variance
gsq.data = alpha * gsq.data + (1-alpha) * par.grad.data**2
# Update parameters
par.data -= lr * (par.grad.data / (1e-8 + gsq.data)**0.5)
set_seed(2021)
model = MLP(in_dim=784, out_dim=10, hidden_dims=[])
print('\n The model parameters before the update are: \n')
print_params(model)
loss = loss_fn(model(X), y).to(DEVICE)
grad_sq = [0.0001*i for i in list(model.parameters())]
## Uncomment below to test your function
rmsprop_update(loss, list(model.parameters()), grad_sq=grad_sq, lr=1e-2)
print('\n The model parameters after the update are: \n')
print_params(model) | 39.852941 | 90 | 0.724723 |
3a35de756e73312c8d8aa96bb05d403a7ba20ad8 | 4,289 | py | Python | tridentstream/inputs/rfs/handler.py | tridentstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | 6 | 2020-01-03T14:50:09.000Z | 2021-09-13T01:44:31.000Z | tridentstream/inputs/rfs/handler.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | tridentstream/inputs/rfs/handler.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | import logging
from urllib.parse import urljoin
import requests
from thomas import Item, StreamerBase, router
from unplugged import Schema, fields
from twisted.internet import threads
from ...exceptions import NotModifiedException, PathNotFoundException
from ...plugins import InputPlugin
from ...stream import Stream
logger = logging.getLogger(__name__)
| 30.41844 | 96 | 0.609466 |
3a35e243be4e6577ec779fc127c120ca3ef47d2e | 741 | py | Python | twitter/test_api.py | jsnowacki/aws-cdk-twitter-sentiment | 364291cf5976cf13eb277cd2945a324b048b1df9 | [
"MIT"
] | null | null | null | twitter/test_api.py | jsnowacki/aws-cdk-twitter-sentiment | 364291cf5976cf13eb277cd2945a324b048b1df9 | [
"MIT"
] | null | null | null | twitter/test_api.py | jsnowacki/aws-cdk-twitter-sentiment | 364291cf5976cf13eb277cd2945a324b048b1df9 | [
"MIT"
] | null | null | null | from api import get_secret, get_tweepy_api, TwitterApiSecret
import json
SECRET_NAME = "TwitterAPIKeys"
| 30.875 | 60 | 0.727395 |
3a3672cb76e143ae0a5005d6285eaadd341c12b6 | 34,698 | py | Python | SAI/bm/sai_adapter/test/ptf_tests/tests/sail2_new.py | bocon13/stratum-sonic | 9be75505869ee81d30ef9b65276f7d55f495658f | [
"Apache-2.0"
] | null | null | null | SAI/bm/sai_adapter/test/ptf_tests/tests/sail2_new.py | bocon13/stratum-sonic | 9be75505869ee81d30ef9b65276f7d55f495658f | [
"Apache-2.0"
] | null | null | null | SAI/bm/sai_adapter/test/ptf_tests/tests/sail2_new.py | bocon13/stratum-sonic | 9be75505869ee81d30ef9b65276f7d55f495658f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thrift SAI interface L2 tests
"""
import sys
# sys.path.append('../')
# from sai_types import *
import socket
from switch import *
import sai_base_test
import random
| 49.357041 | 136 | 0.645426 |
3a37961a35f717a520a82adff518def2441c92f7 | 2,024 | py | Python | app/main/service/exp_service.py | ayoyin/REST-API | 965cda0f87ba8055ee78e9300ca80d5ed79a41c8 | [
"MIT"
] | 1 | 2021-06-01T14:35:11.000Z | 2021-06-01T14:35:11.000Z | app/main/service/exp_service.py | ayoyin/REST-API | 965cda0f87ba8055ee78e9300ca80d5ed79a41c8 | [
"MIT"
] | 10 | 2021-05-26T22:27:59.000Z | 2021-06-03T21:04:43.000Z | app/main/service/exp_service.py | ayoyin/REST-API | 965cda0f87ba8055ee78e9300ca80d5ed79a41c8 | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from model.exp_model import Experience, ExperienceSchema | 31.625 | 83 | 0.682312 |
3a387d0d5be89499283e51eedf3d994a0ac9cdc2 | 4,850 | py | Python | SearchService/query_converter.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | SearchService/query_converter.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | SearchService/query_converter.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | """ Code for turning a GAE Search query into a SOLR query. """
import logging
import sys
from constants import INDEX_NAME_FIELD, INDEX_LOCALE_FIELD
from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER
sys.path.append(APPSCALE_PYTHON_APPSERVER)
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
def prepare_solr_query(index, gae_query, projection_fields,
                       sort_fields, limit, offset):
  """ Builds the HTTP parameter dict for a Solr search request.

  The Solr 'q' always restricts results to documents belonging to the
  given index (via the internal index-name field); the user's GAE query,
  when present, is parsed and ANDed onto that restriction.

  Args:
    index: An Index for the query to run.
    gae_query: A str representing query sent by user.
    projection_fields: A list of fields to fetch for each document.
    sort_fields: a list of tuples of form (<FieldName>, "desc"/"asc")
    limit: a max number of document to return.
    offset: an integer representing offset.
  Returns:
    A dict containing http query params to be sent to Solr.
  """
  if not isinstance(gae_query, unicode):
    gae_query = unicode(gae_query, 'utf-8')
  logging.debug(u'GAE Query: {}'.format(gae_query))

  # Always scope the query to this index; AND on the user query if any.
  query_parts = ['{}:{}'.format(INDEX_NAME_FIELD, index.name)]
  if gae_query:
    query_tree = query_parser.ParseAndSimplify(gae_query)
    logging.debug(u'Tree dump: {}'.format(query_tree.toStringTree()))
    query_parts.append(_create_query_string(index.name, query_tree))

  params = {}
  params['q'] = ' AND '.join(query_parts)
  # Use edismax as the parsing engine for more query abilities.
  params['defType'] = 'edismax'
  # Restrict matching to 'id' plus the fields declared in the index schema.
  params['qf'] = ' '.join(['id'] + [field['name'] for field in index.schema])

  if projection_fields:
    # Only fetch the requested fields plus the bookkeeping fields.
    projected = ['id', INDEX_NAME_FIELD, INDEX_LOCALE_FIELD]
    projected += ['{}_{}'.format(index.name, field_name)
                  for field_name in projection_fields]
    params['fl'] = ' '.join(projected)

  if sort_fields:
    params['sort'] = ','.join(_get_sort_list(index.name, sort_fields))

  # Pagination window.
  params['rows'] = limit
  params['start'] = offset
  logging.debug(u'Solr request params: {}'.format(params))
  return params
def _get_sort_list(index_name, sort_fields):
""" Generates a list of Solr sort expressions:
strings containing fields name and direction.
Args:
index_name: A str representing full index name (appID_namespace_index).
sort_fields: A list of tuples of form (<FieldName>, "desc"/"asc").
Returns:
A list containing fields with direction to order by.
"""
#TODO deal with default values of sort expressions.
field_list = []
for field_name, direction in sort_fields:
new_field = '{}_{} {}'.format(index_name, field_name, direction)
field_list.append(new_field)
return field_list
def _create_query_string(index_name, query_tree):
  """ Creates a SOLR query string from a antlr3 parse tree.

  Recursively walks the tree produced by the GAE search query parser:
  conjunction/disjunction/negation nodes become parenthesised AND/OR/NOT
  groups, and comparison leaves become quoted Solr terms.

  Args:
    index_name: A str representing full index name (appID_namespace_index).
    query_tree: A antlr3.tree.CommonTree.
  Returns:
    A string which can be sent to SOLR.
  """
  query_tree_type = query_tree.getType()
  # Boolean operator nodes carry child expressions that must be converted
  # first, then joined.
  has_nested = query_tree_type in [
    QueryParser.CONJUNCTION, QueryParser.DISJUNCTION, QueryParser.NEGATION
  ]
  if has_nested:
    # Processes nested query parts
    nested = [
      _create_query_string(index_name, child)
      for child in query_tree.children
    ]
    if query_tree_type == QueryParser.CONJUNCTION:
      return '({})'.format(' AND '.join(nested))
    if query_tree_type == QueryParser.DISJUNCTION:
      return '({})'.format(' OR '.join(nested))
    if query_tree_type == QueryParser.NEGATION:
      return 'NOT ({})'.format(' AND '.join(nested))
  # Process leaf of the tree
  if query_tree_type in query_parser.COMPARISON_TYPES:
    field, match = query_tree.children
    if field.getType() == QueryParser.GLOBAL:
      # Untargeted ("global") term: emit just the quoted value so edismax
      # matches it against all query fields.
      value = query_parser.GetQueryNodeText(match).strip('"')
      escaped_value = value.replace('"', '\\"')
      return '"{}"'.format(escaped_value)
    else:
      # Field-targeted term: Solr field names are namespaced as
      # "<index_name>_<field_name>" (see prepare_solr_query).
      field_name = query_parser.GetQueryNodeText(field)
      value = query_parser.GetQueryNodeText(match).strip('"')
      internal_field_name = '{}_{}'.format(index_name, field_name)
      escaped_value = value.replace('"', '\\"')
      oper = _get_operator(query_tree_type)
      return '{}{}"{}"'.format(internal_field_name, oper, escaped_value)
  else:
    # NOTE(review): ParsingError is not imported or defined in the visible
    # module scope -- if this branch is ever reached it would raise
    # NameError instead. Confirm where ParsingError should come from.
    raise ParsingError('Unexpected query tree type: {}'.format(query_tree_type))
  # TODO handle range operators
def _get_operator(op_code):
  """ Returns the string equivalent of the operation code.

  Args:
    op_code: An int which maps to a comparison operator.
  Returns:
    A str, the SOLR operator which maps from the operator code.
  """
  # TODO
  # NOTE(review): only equality is handled, and every other op code also
  # falls through to ':' (SOLR's field:value separator). Range/inequality
  # operators still need their own mappings (see the TODO above this
  # function about range operators).
  if op_code == QueryParser.EQ:
    return ':'
  return ':'
| 33.680556 | 80 | 0.703711 |
3a393e7c4f3f1d263e29f99079506e54bfc2ef8b | 367 | py | Python | scripts/hackathon/create_evaluable_CAG.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 25 | 2018-03-03T11:57:57.000Z | 2022-01-16T21:19:54.000Z | scripts/hackathon/create_evaluable_CAG.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 385 | 2018-02-21T16:52:06.000Z | 2022-02-17T07:44:56.000Z | scripts/hackathon/create_evaluable_CAG.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 19 | 2018-03-20T01:08:11.000Z | 2021-09-29T01:04:49.000Z | import sys
import pickle
if __name__ == "__main__":
    # CLI entry point: forwards the first two command-line arguments
    # (presumably input and output paths -- confirm against the caller).
    # NOTE(review): create_evaluable_CAG is neither defined nor imported in
    # this fragment; it must be provided elsewhere for the script to run.
    create_evaluable_CAG(sys.argv[1], sys.argv[2])
| 24.466667 | 58 | 0.6703 |
3a3c0a988b2a4e559c53ae9edf07f389f8af9b71 | 774 | py | Python | texture_dat_vulnerability/texture_gen.py | krystalgamer/spidey-tools | 59648b5305e829718c22ec8fd91c795f7551d89d | [
"MIT"
] | 15 | 2017-07-04T20:27:43.000Z | 2022-03-21T21:30:55.000Z | texture_dat_vulnerability/texture_gen.py | krystalgamer/spidey-tools | 59648b5305e829718c22ec8fd91c795f7551d89d | [
"MIT"
] | 7 | 2017-12-04T11:13:07.000Z | 2020-07-27T18:42:23.000Z | texture_dat_vulnerability/texture_gen.py | krystalgamer/spidey-tools | 59648b5305e829718c22ec8fd91c795f7551d89d | [
"MIT"
] | 5 | 2018-08-21T17:02:22.000Z | 2022-03-21T21:18:46.000Z | byt = open("xor_key.bin", "rb").read()
final = "\x00\x00\x00\x00\x6A\x00\x6A\x00\x68".encode()
final = [e for e in final]
final.append(0x26)
final.append(0xFC)
final.append(0x19)
final.append(0x00)
final.append(0x6A)
final.append(0x00)
final = [e for e in final]
final.append(0xB8)
final.append(0xC8)
final.append(0x59)
final.append(0x51)
final.append(0x00)
final.append(0xFF)
final.append(0xE0)
pwn_str = "Game has been pwnd\x00".encode()
for e in pwn_str:
final.append(e)
while len(final) != 0x220:
final.append(0x61)
final.append(0x14)
final.append(0xFC)
final.append(0x19)
final.append(0x00)
final = bytearray(bytes(final))
for index,_ in enumerate(final[4:]):
final[4+index] ^= byt[index%0x190]
with open("texture.dat", "wb") as f:
f.write(final)
| 20.368421 | 55 | 0.706718 |
3a3c22b7737a192dfe1f9e9024ae59ca8fe3e8e0 | 3,721 | py | Python | inclearn/convnet/my_resnet.py | romilbhardwaj/incremental_learning.pytorch | 77097ef4dd4fc6b6c35d13ef66856d6f8a15598d | [
"MIT"
] | 3 | 2019-07-01T14:43:05.000Z | 2019-12-27T13:26:52.000Z | inclearn/convnet/my_resnet.py | rahulvigneswaran/incremental_learning.pytorch | 786ecda7dbce5977894737d61cd5e3a30f61aac6 | [
"MIT"
] | null | null | null | inclearn/convnet/my_resnet.py | rahulvigneswaran/incremental_learning.pytorch | 786ecda7dbce5977894737d61cd5e3a30f61aac6 | [
"MIT"
] | null | null | null | ''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def resnet_rebuffi(n=5):
return CifarResNet(n=n)
| 27.562963 | 108 | 0.58452 |
3a3d256dc2972ac84c9fb003786b75e70d7fb65f | 406 | py | Python | IO/__init__.py | killian-mahe/the_eternal_kingdom | 82798246e4c5608b508487407c9d4154fd59f615 | [
"MIT"
] | 2 | 2020-03-27T15:01:22.000Z | 2020-04-30T20:09:00.000Z | IO/__init__.py | killian-mahe/the_eternal_kingdom | 82798246e4c5608b508487407c9d4154fd59f615 | [
"MIT"
] | null | null | null | IO/__init__.py | killian-mahe/the_eternal_kingdom | 82798246e4c5608b508487407c9d4154fd59f615 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
IO
~~~~~~~~~~~~~~~~~~~
A Python module for Input and Ouput interactions
:copyright: (c) 2020 Killian Mah
:license: MIT, see LICENSE for more details.
"""
__title__ = 'io'
__author__ = 'Killian Mah'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Killian Mah'
__version__ = '0.0.1'
from .terminal import Terminal
from .keyboard import Keyboard
from .file import File | 18.454545 | 48 | 0.687192 |
3a3ec3da72c85292efaee127eb5ad56d111e5946 | 2,095 | py | Python | src/nlplib/general/thread.py | rectangletangle/nlplib | 7dcc0daf050a73c03b7d7f0257ad0b862586a6e3 | [
"BSD-2-Clause"
] | 1 | 2015-11-18T12:59:52.000Z | 2015-11-18T12:59:52.000Z | src/nlplib/general/thread.py | rectangletangle/nlplib | 7dcc0daf050a73c03b7d7f0257ad0b862586a6e3 | [
"BSD-2-Clause"
] | null | null | null | src/nlplib/general/thread.py | rectangletangle/nlplib | 7dcc0daf050a73c03b7d7f0257ad0b862586a6e3 | [
"BSD-2-Clause"
] | null | null | null | ''' Tools for dealing with multithreaded programs. '''
from concurrent.futures import ThreadPoolExecutor, as_completed
from nlplib.general.iterate import chunked
__all__ = ['simultaneously']
if __name__ == '__main__' :
from nlplib.general.unittest import UnitTest
__test__(UnitTest())
__demo__()
| 32.734375 | 119 | 0.673031 |
3a3fde2cf2ecbd1e9eca3699e4a52186eb8eddb3 | 781 | py | Python | gazepattern/eyedetector/migrations/0005_experiment.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | null | null | null | gazepattern/eyedetector/migrations/0005_experiment.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | 12 | 2020-06-05T22:56:39.000Z | 2022-02-10T10:35:13.000Z | gazepattern/eyedetector/migrations/0005_experiment.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | 1 | 2019-10-06T23:40:45.000Z | 2019-10-06T23:40:45.000Z | # Generated by Django 2.2.5 on 2019-09-28 19:25
from django.db import migrations, models
import django.db.models.deletion
| 32.541667 | 142 | 0.618438 |
3a40757daf1bd20cc9fcc10f04000eea8ce07c26 | 108 | py | Python | reversestring.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | reversestring.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | reversestring.py | fairoz-ahmed/Python_Practice | e498f81fca02f0773f1c6e9f93e5f1cf1f94eb89 | [
"MIT"
] | null | null | null | inp=input("Enter a string: ")
rev=0
# NOTE(review): despite the prompt text, this reverses the digits of a
# *number*, not a string. Under Python 3, input() returns a str and
# `inp>0` raises TypeError; the code assumes Python 2 input() semantics,
# where the typed value is evaluated (to an int here) -- confirm the
# intended interpreter before reuse.
while (inp>0):
    dig=inp%10       # peel off the least-significant digit
    rev=rev*10+dig   # append it to the reversed number
    inp=inp//10      # drop the processed digit
print(rev)
3a43287b070e57b4e1131e9830fa7848ee4816f3 | 1,424 | py | Python | appdaemon/apps/exhaust/exhaust.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | 3 | 2019-10-27T06:10:26.000Z | 2020-07-21T01:27:11.000Z | appdaemon/apps/exhaust/exhaust.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | null | null | null | appdaemon/apps/exhaust/exhaust.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | [
"MIT"
] | null | null | null | import globals
| 40.685714 | 85 | 0.614466 |
3a434ceb156d2330f24628b42fbe27c084ea9e69 | 474 | py | Python | meregistro/apps/registro/models/AnexoBaja.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | meregistro/apps/registro/models/AnexoBaja.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | meregistro/apps/registro/models/AnexoBaja.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.Anexo import Anexo
from django.core.exceptions import ValidationError
import datetime
| 24.947368 | 54 | 0.71519 |
3a4437265de98cfb27b3d5feaa4dc75634628d02 | 2,159 | py | Python | test/test.py | fmaida/rosie | 3906d11231aadaf9095f00fde8a73bc186403660 | [
"MIT"
] | null | null | null | test/test.py | fmaida/rosie | 3906d11231aadaf9095f00fde8a73bc186403660 | [
"MIT"
] | null | null | null | test/test.py | fmaida/rosie | 3906d11231aadaf9095f00fde8a73bc186403660 | [
"MIT"
] | null | null | null | import os
import unittest
from rosie import Rosie
from rosie import DocumentNotFound
# from test import create
# create(100)
| 35.393443 | 89 | 0.593793 |
3a4470dbdf1585da275d005ec538924932b37f02 | 2,726 | py | Python | server/tests/test_api.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 2 | 2019-09-02T06:56:46.000Z | 2019-09-15T08:43:54.000Z | server/tests/test_api.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 11 | 2019-08-27T19:08:24.000Z | 2019-10-18T01:45:54.000Z | server/tests/test_api.py | lachierussell/FadZmaq | deb89c35df05603552ce95627ac8400c6788fbcb | [
"BSD-2-Clause"
] | 1 | 2019-10-25T05:42:48.000Z | 2019-10-25T05:42:48.000Z | # @file
#
# FadZmaq Project
# Professional Computing. Semester 2 2019
#
# Copyright FadZmaq 2019 All rights reserved.
# @author Lachlan Russell 22414249@student.uwa.edu.au
# @author Jordan Russell jordanrussell@live.com
import json
# Tests that the server is up at all.
# Not implemented
# Not implemented
# Basic test the profile API
# To be expanded when we receive data from DB -Jordan
# Not implemented yet
| 29.311828 | 106 | 0.739178 |
3a44e47df6767fcc400ca98f82e16bb29f7143a3 | 7,728 | py | Python | HeifImagePlugin.py | uploadcare/heif-image-plugin | 164230d08472403b709e2d0c78e8de0207e9312a | [
"MIT"
] | 6 | 2021-12-09T16:57:55.000Z | 2022-03-22T13:34:53.000Z | HeifImagePlugin.py | uploadcare/heif-image-plugin | 164230d08472403b709e2d0c78e8de0207e9312a | [
"MIT"
] | 5 | 2021-11-24T15:59:35.000Z | 2022-03-11T16:29:53.000Z | HeifImagePlugin.py | uploadcare/heif-image-plugin | 164230d08472403b709e2d0c78e8de0207e9312a | [
"MIT"
] | 1 | 2022-02-07T11:59:30.000Z | 2022-02-07T11:59:30.000Z | import inspect
import subprocess
import tempfile
from copy import copy
from weakref import WeakKeyDictionary
import piexif
import pyheif
from cffi import FFI
from PIL import Image, ImageFile
from pyheif.error import HeifError
ffi = FFI()
_keep_refs = WeakKeyDictionary()
pyheif_supports_transformations = (
'transformations' in inspect.signature(pyheif.HeifFile).parameters
)
HEIF_ENC_BIN = 'heif-enc'
def _rotate_heif_file(heif):
"""
Heif files already contain transformation chunks imir and irot which are
dominate over Orientation tag in EXIF.
This is not aligned with other formats behaviour and we MUST fix EXIF after
loading to prevent unexpected rotation after resaving in other formats.
And we come up to there is no reasons to force rotation of HEIF images
after loading since we need update EXIF anyway.
"""
orientation = heif.transformations['orientation_tag']
if not (1 <= orientation <= 8):
return heif
exif = {'0th': {piexif.ImageIFD.Orientation: orientation}}
if heif.exif:
try:
exif = piexif.load(heif.exif)
exif['0th'][piexif.ImageIFD.Orientation] = orientation
except Exception:
pass
new_heif = copy(heif)
new_heif.transformations = dict(heif.transformations, orientation_tag=0)
new_heif.exif = piexif.dump(exif)
return new_heif
def _extract_heif_exif(heif_file):
"""
Unlike other helper functions, this alters heif_file in-place.
"""
heif_file.exif = None
clean_metadata = []
for item in heif_file.metadata or []:
if item['type'] == 'Exif':
if heif_file.exif is None:
if item['data'] and item['data'][0:4] == b"Exif":
heif_file.exif = item['data']
else:
clean_metadata.append(item)
heif_file.metadata = clean_metadata
Image.register_open(HeifImageFile.format, HeifImageFile, check_heif_magic)
Image.register_save(HeifImageFile.format, _save)
Image.register_mime(HeifImageFile.format, 'image/heif')
Image.register_extensions(HeifImageFile.format, [".heic", ".avif"])
# Don't use this extensions for saving images, use the ones above.
# They have added for quick file type detection only (i.g. by Django).
Image.register_extensions(HeifImageFile.format, [".heif", ".hif"])
| 34.044053 | 84 | 0.62073 |
3a459c0bdc8968f8dba096a55ee2a81baf847594 | 1,510 | py | Python | examples/example.py | TannerBurns/cloc | 67753bc6148779db7a2bfb07e4410f12fa3de593 | [
"MIT"
] | 2 | 2020-03-04T14:15:07.000Z | 2020-03-06T19:32:42.000Z | examples/example.py | TannerBurns/cloc | 67753bc6148779db7a2bfb07e4410f12fa3de593 | [
"MIT"
] | null | null | null | examples/example.py | TannerBurns/cloc | 67753bc6148779db7a2bfb07e4410f12fa3de593 | [
"MIT"
] | null | null | null | from cloc import grp, cmd, opt, arg, mixins
from cloc.types import Choices
"""Test Code ->"""
class UserCmds(mixins.List, mixins.Echo):
u = UserCmds(users=['user1', 'user2'])
user2 = UserCmds(users=['user1', 'user2', 'user3'])
perms = PermissionCmds(roles=['admin', 'user', 'dev'], services=['test_service1'])
cli.add_command(u)
cli.add_command(group2)
group2.add_command(test)
group2.add_command(user2)
group2.add_command(permission_group)
permission_group.add_command(perms)
if __name__ == '__main__':
cli() | 22.878788 | 93 | 0.63245 |
3a48d584ca2b00f4953c04fc6e6edaf62e4524b4 | 111 | py | Python | lab001/load.py | DavidJRichards/fpga_101 | 9aa3e85211e47c63c29af36960fd767fe88f4d82 | [
"BSD-2-Clause"
] | 2 | 2021-08-15T20:19:11.000Z | 2021-08-16T07:28:36.000Z | lab001/load.py | DavidJRichards/fpga_101 | 9aa3e85211e47c63c29af36960fd767fe88f4d82 | [
"BSD-2-Clause"
] | null | null | null | lab001/load.py | DavidJRichards/fpga_101 | 9aa3e85211e47c63c29af36960fd767fe88f4d82 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
os.system("openocd -f wukong.cfg -c 'init; pld load 0 build/top.bit; exit' ")
| 27.75 | 77 | 0.693694 |
3a490f04946e54025d2f9929396fe594e1a1e7a5 | 3,916 | py | Python | utils/comm_mqtt.py | peacemaker07/iot_making_for_raspberry_pi | d37d1256ea99794ff1dde4de0cadcbee1e5d6679 | [
"MIT"
] | null | null | null | utils/comm_mqtt.py | peacemaker07/iot_making_for_raspberry_pi | d37d1256ea99794ff1dde4de0cadcbee1e5d6679 | [
"MIT"
] | null | null | null | utils/comm_mqtt.py | peacemaker07/iot_making_for_raspberry_pi | d37d1256ea99794ff1dde4de0cadcbee1e5d6679 | [
"MIT"
] | null | null | null | import json
import time
from utils.helper import RedisClient
from paho.mqtt.client import MQTT_ERR_SUCCESS
import paho.mqtt.client as mqtt
from utils.date_time import TimeMeasure
import tasks as tasks_mqtt
from utils.message import MsgShadowGet, MsgShadowUpdate
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| 24.628931 | 113 | 0.59142 |
3a4b65fb4152f97b12ef78ecb2e26b90659acced | 255 | py | Python | servo-test.py | dthompson-personal/pi-robot-shop | 19ed4bc2727bc1681b7aed906fd95f58cc2f9fbe | [
"MIT"
] | 1 | 2019-01-08T00:12:38.000Z | 2019-01-08T00:12:38.000Z | servo-test.py | dthompson-personal/pi-robot-shop | 19ed4bc2727bc1681b7aed906fd95f58cc2f9fbe | [
"MIT"
] | null | null | null | servo-test.py | dthompson-personal/pi-robot-shop | 19ed4bc2727bc1681b7aed906fd95f58cc2f9fbe | [
"MIT"
] | null | null | null | # simple servo test for PCA9685 with HS422
from servo.servo import *
from time import sleep
pca = PCA9685()   # PWM servo controller board (see header comment: PCA9685 + HS422)
pca.setZero(0)    # zero channel 0 -- presumably centers the servo; confirm against servo.servo
sleep(2)          # give the servo time to settle before sweeping
# Sweep the servo on channel 0 from -67 degrees up to +66, then from +67
# back down toward 0, in 1-degree steps with a 50 ms pause per step.
# NOTE(review): xrange is Python 2 only; under Python 3 this needs range().
for a in xrange(-67,67,1):
    pca.setAngle(0,a)
    sleep(0.05)
for a in xrange(67,0,-1):
    pca.setAngle(0,a)
    sleep(0.05)
| 18.214286 | 42 | 0.686275 |
3a4cbefcb62071a2d988ae8d1ba6c3ebd094217e | 1,386 | py | Python | lists_dictionary/Hello France.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | [
"MIT"
] | null | null | null | lists_dictionary/Hello France.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | [
"MIT"
] | null | null | null | lists_dictionary/Hello France.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | [
"MIT"
] | null | null | null | items = input().split("|") # items to buy
budged = int(input())  # starting budget (sic: "budged" throughout)
profit = 0
profit_price_list = []  # marked-up (+40%) resale price of each purchased item
profit_list = []        # per-item profit (40% of the purchase price)
profit_price = 0
# Each entry is "<category>-><price>". An item is bought only if its price
# is inside the category's allowed range; a purchase that would drop the
# budget to zero or below is rolled back.
for index in items:
    profit = 0
    profit_price = 0
    separator = index.split("->")
    if separator[0] == "Clothes":
        if not 0 < float(separator[1]) <= 50:
            continue
    elif separator[0] == "Shoes":
        if not 0 < float(separator[1]) <= 35:
            continue
    elif separator[0] == "Accessories":
        if not 0 < float(separator[1]) <= 20.50:
            continue
    budged -= float(separator[1])  # calculating budged left
    profit_price += float(separator[1]) * 1.40  # calculating the price with 40% increase
    profit += float(separator[1]) * 0.40  # calculating the profit after the 40% increase for each item
    profit_price_list.append(round(profit_price, 2))  # list with the increased prices
    profit_list.append(profit)  # list with every items' profit
    if budged <= 0:
        # Could not actually afford this item: refund it and undo the
        # bookkeeping done just above.
        budged += float(separator[1])
        profit_price_list.pop()
        profit_list.pop()
        continue
profit_price = sum(profit_list)
price_after_40 = sum(profit_price_list)
budged += price_after_40  # everything bought is assumed sold at the marked-up price
print(*profit_price_list)
print(f"Profit: {profit_price:.2f}")
print(); print()
if budged >= 150:
    print("Hello, France!")
else:
    print("Time to go.")
3a4f446c605bd2f4c43cf5fa28a98484cf88ee19 | 1,209 | py | Python | lims/addressbook/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 12 | 2017-03-01T10:39:36.000Z | 2022-01-04T06:17:19.000Z | lims/addressbook/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 29 | 2017-04-25T14:05:08.000Z | 2021-06-21T14:41:53.000Z | lims/addressbook/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 4 | 2017-10-11T16:22:53.000Z | 2021-02-23T15:45:21.000Z | from rest_framework import viewsets
from rest_framework.serializers import ValidationError
from .models import Address
from .serializers import AddressSerializer
from lims.permissions.permissions import IsAddressOwner, IsAddressOwnerFilter
from lims.shared.mixins import AuditTrailViewMixin
| 35.558824 | 82 | 0.718776 |
3a4f4e40f01a34131b926552b927be814c889324 | 7,875 | py | Python | vision/crop_image_on_faces.py | timmahrt/toybox | 1c063428ba85d26c8d9229b020503f6f57df2219 | [
"MIT"
] | null | null | null | vision/crop_image_on_faces.py | timmahrt/toybox | 1c063428ba85d26c8d9229b020503f6f57df2219 | [
"MIT"
] | null | null | null | vision/crop_image_on_faces.py | timmahrt/toybox | 1c063428ba85d26c8d9229b020503f6f57df2219 | [
"MIT"
] | null | null | null | '''
Created on Sep 8, 2018
Use autocropFaces() to crop out the material around faces in an image,
where the faces are automatically detected.
See the bottom for an example use script.
Used this as a starting reference point:
https://docs.opencv.org/3.3.0/d7/d8b/tutorial_py_face_detection.html
@author: tmahrt
'''
import os
from os.path import join
import cv2
from matplotlib import pyplot as plt
from PIL import Image
TRAINING_DATA_PATH = '/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'
def outputDebug(imgFn,
                faces,
                faceRegion=None,
                helperRegion=None,
                finalCropRegion=None):
    '''
    Shows the image with detected faces and crop regions drawn on top

    Rectangle colours (the image is in OpenCV's BGR order): each face is
    blue, faceRegion red, helperRegion green, finalCropRegion yellow.
    Blocks until the matplotlib window is closed.
    '''
    img = cv2.imread(imgFn)

    # Every detected face gets its own rectangle.
    for face in faces:
        _drawRectangle(img, face, (255, 0, 0))

    # Optional summary regions, drawn only when supplied.
    regionColorPairs = [
        (faceRegion, (0, 0, 255)),         # tight fit around all faces
        (helperRegion, (0, 255, 0)),       # intermediate debugging region
        (finalCropRegion, (255, 255, 0)),  # region that will be cropped
    ]
    for region, color in regionColorPairs:
        if region is not None:
            _drawRectangle(img, region, color)

    plt.imshow(_convertBgrToRGB(img))
    plt.show()
def _convertBgrToRGB(img):
# https://stackoverflow.com/questions/15072736/extracting-a-region-from-an-image-using-slicing-in-python-opencv/15074748#15074748
return img[:, :, ::-1]
def _drawRectangle(img, xywh, color):
    # Draws a 2px-thick rectangle on img (in place) from an (x, y, w, h)
    # tuple; cv2.rectangle wants two opposite corners, so the width and
    # height are converted into the bottom-right corner.
    x, y, w, h = xywh
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
def encapsulateSubsquares(regionList):
    '''
    Given a list of squares, return a square that tightly fits all subsquares

    Input is a list of the form [(x, y, w, h), ...]
    Output is the [x, y, w, h] that wholly includes all input
    '''
    # Bounding edges over all regions: leftmost/topmost origins and the
    # rightmost/bottommost far corners.
    left = min(x for x, _, _, _ in regionList)
    top = min(y for _, y, _, _ in regionList)
    right = max(x + w for x, _, w, _ in regionList)
    bottom = max(y + h for _, y, _, h in regionList)

    return [left, top, right - left, bottom - top]
def modifyAspectRatio(sourceXYWH, targetRatio):
    '''
    Changes the ratio of the input square to be that of the target ratio

    The origin is untouched.  Exactly one dimension is adjusted (truncated
    to int): the width when the target ratio (w/h) is wider than the
    source's, otherwise the height.
    '''
    x, y, w, h = sourceXYWH
    if targetRatio > w / h:
        # Source is too narrow: widen to reach the target ratio.
        w = int(h * targetRatio)
    else:
        # Source is too wide (or already matching): adjust the height.
        h = int(w / targetRatio)
    return [x, y, w, h]
def relativeRecenter(sourceXYWH, targetXYWH):
    '''
    Centers a square with respect to the center of a different square

    The source's width and height are preserved; only its origin moves so
    that both squares share the same center (coordinates truncated to int).
    '''
    targetX, targetY, targetW, targetH = targetXYWH
    w, h = sourceXYWH[2], sourceXYWH[3]

    centerX = targetX + targetW / 2.0
    centerY = targetY + targetH / 2.0

    return (int(centerX - w / 2.0), int(centerY - h / 2.0), w, h)
def keepInsideImage(sourceXYWH, imageWH):
    '''
    Forces a square to be within the image that contains it

    Width and height are never altered; only the origin is shifted (and
    truncated to int), so a square larger than the image cannot fully fit.
    '''
    x, y, w, h = sourceXYWH
    imgW, imgH = imageWH

    newLeft = x
    if x < 0 and x + w > imgW:
        # Overflows both sides: shift left by the horizontal overflow.
        newLeft = imgW - (x + w)
    elif x < 0:
        newLeft = 0
    elif x + w > imgW:
        newLeft = imgW - w

    newTop = y
    if y < 0 and y + h > imgH:
        # NOTE(review): asymmetric with the horizontal both-sides case
        # above (which uses imgW - right); preserved from the original,
        # but worth confirming it is intentional.
        newTop = imgH / 2.0 - h
    elif y < 0:
        newTop = 0
    elif y + h > imgH:
        newTop = imgH - h

    return [int(newLeft), int(newTop), w, h]
def enforceMinSize(sourceXYWH, targetWH, imgWH):
    '''
    Increase the crop region to the target, but don't exceed the img dimensions

    The origin is left untouched; each dimension is independently raised to
    at least the target size and then capped at the image size.
    '''
    grownW = max(targetWH[0], sourceXYWH[2])
    grownH = max(targetWH[1], sourceXYWH[3])
    return (sourceXYWH[0],
            sourceXYWH[1],
            min(imgWH[0], grownW),
            min(imgWH[1], grownH))
def autocropFaces(fn, outputFN, recognizer, targetWH=None, debug=False):
    '''
    Will crop an image based on all of the faces it automatically detects

    fn:       path of the image to read
    outputFN: path the cropped image is saved to
    recognizer: object exposing recognize(fn) -> list of (x, y, w, h) faces
    targetWH: e.g. (300, 200); if specified, the output will be that size.
              The area around the detected heads will be enlarged to permit
              the necessary aspect ratio before scaling occurs. If the image
              is smaller than the target, whitespace will be filled in.
    debug:    if True, an image will pop up showing detected faces and the
              region that will be cropped. The image must be closed before
              the code will continue.
    '''
    faceList = recognizer.recognize(fn)
    # Smallest rectangle that contains every detected face.
    faceRegion = encapsulateSubsquares(faceList)

    img = Image.open(fn)
    imgWH = (img.width, img.height)

    if targetWH is not None:
        # Grow the face region to at least the target size (capped at the
        # image size)...
        sizedFaceRegion = enforceMinSize(faceRegion, targetWH, imgWH)
        # ...then force it to the target aspect ratio...
        proportionedFaceRegion = modifyAspectRatio(sizedFaceRegion,
                                                   targetWH[0] / targetWH[1])
        # ...and re-center: first the sized region over the faces, then the
        # re-proportioned region over that, finally clamped into the image.
        regionToCenterIn = relativeRecenter(sizedFaceRegion,
                                            faceRegion)
        adjustedFaceRegion = relativeRecenter(proportionedFaceRegion,
                                              regionToCenterIn)
        adjustedFaceRegion = keepInsideImage(adjustedFaceRegion, imgWH)

        # If the crop region is smaller than the targetWH, fill in
        # the empty space with a white background
        newImg = Image.new('RGB',
                           (adjustedFaceRegion[2], adjustedFaceRegion[3]),
                           (255, 255, 255))
        newImg.paste(img, (-adjustedFaceRegion[0], -adjustedFaceRegion[1]))
        img = newImg

        if debug is True:
            outputDebug(fn, faceList, faceRegion, sizedFaceRegion,
                        finalCropRegion=adjustedFaceRegion)
    else:
        # No target size requested: just crop tightly around the faces.
        img = img.crop(faceRegion)

    if targetWH is not None:
        img = img.resize(targetWH)

    img.save(outputFN)
# Example use
if __name__ == "__main__":
    inputPath = os.path.abspath("../data/faces/")
    outputPath = os.path.abspath("../data/faces/output")
    targetWH = (300, 200)
    if not os.path.exists(outputPath):
        os.mkdir(outputPath)
    # NOTE(review): FaceRecognizer, getThumbnailName and NoFacesException
    # are not visible in this fragment -- presumably defined elsewhere in
    # the module; confirm before running standalone.
    _recognizer = FaceRecognizer()
    # Crop every .jpg in the input directory; images in which no faces are
    # detected are reported and skipped.
    for _fn in os.listdir(inputPath):
        if ".jpg" not in _fn:
            continue
        inputFn = join(inputPath, _fn)
        outputFn = join(outputPath, getThumbnailName(_fn))
        try:
            autocropFaces(inputFn, outputFn, _recognizer, targetWH, debug=True)
        except NoFacesException:
            print("No faces in: " + inputFn)
            continue
| 30.761719 | 133 | 0.610159 |
3a5276bb48c6b9ee88490cc0b0a29ff3c27d3bba | 2,920 | py | Python | aiida_lsmo/workchains/multistage_ddec.py | ltalirz/aiida-lsmo | 38a839af63686320ab070fada89241860e095b9e | [
"MIT"
] | null | null | null | aiida_lsmo/workchains/multistage_ddec.py | ltalirz/aiida-lsmo | 38a839af63686320ab070fada89241860e095b9e | [
"MIT"
] | null | null | null | aiida_lsmo/workchains/multistage_ddec.py | ltalirz/aiida-lsmo | 38a839af63686320ab070fada89241860e095b9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""MultistageDdecWorkChain workchain"""
from __future__ import absolute_import
from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory
from aiida.common import AttributeDict
from aiida.engine import WorkChain, ToContext
# import sub-workchains
Cp2kMultistageWorkChain = WorkflowFactory('cp2k.multistage') # pylint: disable=invalid-name
Cp2kDdecWorkChain = WorkflowFactory('ddec.cp2k_ddec') # pylint: disable=invalid-name
# import calculations
DdecCalculation = CalculationFactory('ddec') # pylint: disable=invalid-name
# import aiida data
CifData = DataFactory('cif') # pylint: disable=invalid-name
| 45.625 | 106 | 0.743151 |
3a5286d6d3711424348d457dbffee994d0ef9214 | 2,997 | py | Python | ambari-server/src/test/python/TestServerUtils.py | panfeiyy/ambari | 24077510723ede93d3024784f0b04422adaf56d6 | [
"Apache-2.0"
] | 16 | 2018-05-24T10:28:24.000Z | 2021-08-05T03:13:26.000Z | ambari-server/src/test/python/TestServerUtils.py | panfeiyy/ambari | 24077510723ede93d3024784f0b04422adaf56d6 | [
"Apache-2.0"
] | 8 | 2020-06-18T17:31:19.000Z | 2022-03-02T08:32:03.000Z | ambari-server/src/test/python/TestServerUtils.py | panfeiyy/ambari | 24077510723ede93d3024784f0b04422adaf56d6 | [
"Apache-2.0"
] | 17 | 2018-07-06T08:57:00.000Z | 2021-11-04T11:00:36.000Z | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
os.environ["ROOT"] = ""
from mock.mock import patch, MagicMock
from unittest import TestCase
import platform
from ambari_commons import os_utils
os_utils.search_file = MagicMock(return_value="/tmp/ambari.properties")
import shutil
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
with patch("os.access", return_value = MagicMock(return_value=True)):
with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
from ambari_server.serverUtils import get_ambari_server_api_base
from ambari_server.serverConfiguration import CLIENT_API_PORT, CLIENT_API_PORT_PROPERTY, SSL_API, DEFAULT_SSL_API_PORT, SSL_API_PORT
| 38.922078 | 140 | 0.746079 |
3a533adcbaa3e599ac553a4a4afcfe1138f8018d | 828 | py | Python | docs/md2ipynb.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | 7 | 2021-07-20T21:46:28.000Z | 2022-01-12T04:18:14.000Z | docs/md2ipynb.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | docs/md2ipynb.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | 3 | 2021-08-28T06:01:27.000Z | 2022-01-12T04:18:13.000Z | import sys
import time
from itertools import chain
from pathlib import Path
import nbformat
import notedown
if __name__ == "__main__":
assert len(sys.argv) >= 2, "usage: input.md"
here = Path(".")
files = list(chain.from_iterable(map(here.glob, sys.argv[1:])))
for file in files:
convert(file)
| 23.657143 | 78 | 0.669082 |
3a54d0fda33a47ced2ba7f11cd011f05493c2833 | 40 | py | Python | datasets/__init__.py | ML-Cai/LaneDetector | 4e56faf45cf592812284b0bfee149bba4658fac9 | [
"MIT"
] | null | null | null | datasets/__init__.py | ML-Cai/LaneDetector | 4e56faf45cf592812284b0bfee149bba4658fac9 | [
"MIT"
] | null | null | null | datasets/__init__.py | ML-Cai/LaneDetector | 4e56faf45cf592812284b0bfee149bba4658fac9 | [
"MIT"
] | null | null | null | from .tu_simple_lane import TusimpleLane | 40 | 40 | 0.9 |
3a54d4dcf4ae3d1438f9199425e3106b7a85632f | 147 | py | Python | src/Python/Turtle/06B-circle.py | programmer1017/MathTech | 5d7a9692d77a4a379824f691ae1cba3b0d2d4d59 | [
"MIT"
] | null | null | null | src/Python/Turtle/06B-circle.py | programmer1017/MathTech | 5d7a9692d77a4a379824f691ae1cba3b0d2d4d59 | [
"MIT"
] | null | null | null | src/Python/Turtle/06B-circle.py | programmer1017/MathTech | 5d7a9692d77a4a379824f691ae1cba3b0d2d4d59 | [
"MIT"
] | null | null | null | import turtle as t
n = 50
t. bgcolor("black")
t. color("green")
t. speed(0)
for x in range(n):
t. circle(80)
t. lt(360/n)
| 12.25 | 24 | 0.52381 |
3a5562123f0c3dc18461e7e454e66d71a8d213a8 | 29 | py | Python | dashboard/dashboardmenu/__init__.py | PyFlux/PyFlux | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | null | null | null | dashboard/dashboardmenu/__init__.py | PyFlux/PyFlux | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | 10 | 2020-03-24T17:09:56.000Z | 2021-12-13T20:00:15.000Z | dashboard/dashboardmenu/__init__.py | PyFlux/PyFlux-Django-Html | 8abae10261e276bf4942aed8d54ef3b5498754ca | [
"Apache-2.0"
] | null | null | null | from .dashboard_menu import * | 29 | 29 | 0.827586 |
3a5679211ddca25bc7c34ee2ad4a2a92de9f338e | 25,389 | py | Python | kessk_web/device/views.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 9 | 2019-09-30T04:24:39.000Z | 2021-07-15T06:08:20.000Z | kessk_web/device/views.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 6 | 2020-05-14T03:13:32.000Z | 2022-02-10T10:23:46.000Z | kessk_web/device/views.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 2 | 2020-12-19T07:12:01.000Z | 2021-05-24T02:21:15.000Z | # The 3-Clause BSD License
# Copyright (C) 2019, KessK, all rights reserved.
# Copyright (C) 2019, Kison.Y, all rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import hashlib
import random
import string
import time
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import JsonResponse
from django.shortcuts import render
from rest_framework.decorators import api_view
from common.AliyunIot import AliyunIot
from common.ExceptionAPI import AValidation400Error, response_json
from common.WechatCommonView import WechatCommonView
from common.config import ErrorCodes, DEVICE_MASK, DEVICE_NAME_DEFAULT, ALIYUN_IOT_CONTROL_APP_PRODUCT_KEY
from device.models import Device, DeviceBind, ControlDevice, AliyunIotRules
from device.wexinSignature import Signature
from rest_framework import status, generics
#
# class BindDeviceAPI(generics.CreateAPIView):
#
# def post(self, request, *args, **kwargs):
# print("ok")
class DeviceBindAction():
    def bind_device(self, device_name=None, origin_user=None):
        """
        Bind ``self.device`` to ``self.user`` and wire up its Aliyun IoT rule.

        Binding steps (historical steps 3 and 5-7 are no longer used):
        Step1. Create a binding record if one does not already exist.
        Step2-5. Ensure the rule action from the device to the control device exists.

        :param device_name: optional display name; falls back to
            ``self.get_user_device_name()`` when omitted.
        :param origin_user: the sharing user recorded on the binding, if any.
        :return: the active DeviceBind row for this user/device pair.
        """
        already_bound = DeviceBind.objects.filter(
            user=self.user, device=self.device, onActive=True
        ).exists()
        if not already_bound:
            chosen_name = self.get_user_device_name() if device_name is None else device_name
            new_bind = DeviceBind(
                device=self.device,
                user=self.user,
                origin_user=origin_user,
                device_name=chosen_name,
                onActive=True,
            )
            new_bind.save()
            # Forward the device's uplink topic to the user's control device
            # (only needed when the binding is first created).
            self._deviceRule.create_device2control_action()
        return DeviceBind.objects.get(user=self.user, device=self.device, onActive=True)
class ControlDeviceAction():
    def create_control_device(self):
        """
        Create the current user's control device when it does not exist.

        Each user has exactly one control device; when missing, one is
        registered through the Aliyun IoT API and persisted.

        :return: the user's ControlDevice row.
        """
        if not ControlDevice.objects.filter(user=self.user).exists():
            response = self._aliyun.register_control_device()
            print('Aliyun response is ')
            print(response)
            # A None response means Aliyun registration failed; nothing is saved.
            if response is not None:
                ControlDevice(
                    user=self.user,
                    product_name='KessK_Controllor',
                    device_name=response['DeviceName'],
                    product_key=response['ProductKey'],
                    device_secret=response['DeviceSecret'],
                ).save()
        return ControlDevice.objects.get(user=self.user)

    def create_device2control_rule(self, device_bind, rule_name=None):
        """
        Create the Aliyun IoT rule from the esp8266 device to the control
        device.  It is created at most once per (topic, binding) pair.

        :param device_bind: the DeviceBind row linking user and device.
        :param rule_name: optional rule name; derived from the device name
            when omitted.
        :return: the AliyunIotRules row for this topic/binding.
        """
        topic = "/" + device_bind.device.device_name + "/user/update"
        if rule_name is None:
            rule_name = device_bind.device.device_name + "_2control_rule"
        existing = AliyunIotRules.objects.filter(short_topic=topic, bind_device=device_bind)
        if not existing.exists():
            data = self._aliyun.create_rule(
                rule_name=rule_name,
                topic=topic,
                product_key=device_bind.device.product_key,
            )
            # Persist the rule only when Aliyun actually created it.
            if data is not None:
                record = AliyunIotRules(
                    name=device_bind.device.device_name + self.user.first_name,
                    short_topic=topic,
                    ruleid=data["RuleId"],
                    bind_device=device_bind,
                    requestid=data["RequestId"]
                )
                record.save()
                data["rule_name"] = rule_name
        return AliyunIotRules.objects.get(short_topic=topic, bind_device=device_bind)
class DeviceRule():
    """
    Manage the Aliyun IoT rules and rule actions that forward MQTT messages
    between an esp8266 device (``self.device``) and the user's virtual
    control device (``self.control_device``).

    Collaborators expected on ``self`` (set up elsewhere in the project):
    ``device``, ``control_device`` and ``_aliyun`` (an AliyunIot client).
    """

    def __md5(self, text):
        """
        Return the hexadecimal MD5 digest of ``text``; used to derive
        deterministic Aliyun rule names from device names.

        NOTE(review): this helper was missing from the class even though
        ``create_device_rule`` / ``create_control_rule`` call it — Python
        name mangling (``_DeviceRule__md5``) means it must be defined in
        this class, so those methods previously raised AttributeError.
        A plain hex MD5 is the natural reconstruction; confirm it satisfies
        Aliyun's rule-name constraints (length/charset).

        :param text: arbitrary string to hash.
        :return: 32-character lowercase hex digest.
        """
        return hashlib.md5(text.encode('utf-8')).hexdigest()

    def create_control_device(self, user):
        """
        Create a control device for ``user`` when it does not exist.
        Each user has only one control device.

        :param user: the Django User who owns the control device.
        :return: the user's ControlDevice row.
        """
        if not ControlDevice.objects.filter(user=user).exists():
            response = self._aliyun.register_control_device()
            print('Aliyun response is ')
            print(response)
            # A None response means Aliyun registration failed; nothing is saved.
            if response is not None:
                control_device = ControlDevice(
                    user=user,
                    product_name='KessK_Controllor',
                    device_name=response['DeviceName'],
                    product_key=response['ProductKey'],
                    device_secret=response['DeviceSecret'],
                )
                control_device.save()
        return ControlDevice.objects.get(user=user)

    def create_device_rule(self):
        """
        Create (once) the Aliyun IoT rule from the esp8266 device to the
        control devices.

        :return: the device's AliyunIotRules row.
        """
        name = self.__md5(self.device.device_name + "_2control_rule")
        # NOTE(review): unlike create_control_rule this topic has no leading
        # "/" — verify the asymmetry is intended on the Aliyun side.
        topic = self.device.device_name + "/user/update"
        return self.create_rule(name, topic, self.device.product_key, self.device.id, False)

    def create_control_rule(self):
        """
        Create (once) the Aliyun IoT rule from the control device to the
        esp8266 devices.

        :return: the control device's AliyunIotRules row.
        """
        name = self.__md5(self.control_device.device_name + "_2device_rule")
        topic = "/" + self.control_device.device_name + "/user/update"
        return self.create_rule(name, topic, self.control_device.product_key, self.control_device.id, True)

    def create_device2control_action(self):
        """
        Create the republish action from the esp8266 to the control device
        and start the underlying rule.

        :return: the action's AliyunIotRules row.
        """
        device_rule = self.create_device_rule()
        # Republish to the control device's /user/get topic (topicType 1).
        configuration = "{\"topic\":\"/" + self.control_device.product_key + "/" + self.control_device.device_name + "/user/get\",\"topicType\":1}"
        action = self.create_rule_action(device_rule.ruleid, configuration, self.control_device.id, True)
        self._aliyun.start_rule(device_rule.ruleid)
        return action

    def create_control2device_action(self):
        """
        Create the republish action from the control device to the esp8266
        and start the underlying rule.

        :return: the action's AliyunIotRules row.
        """
        device_rule = self.create_control_rule()
        configuration = "{\"topic\":\"/" + self.device.product_key + "/" + self.device.device_name + "/user/get\",\"topicType\":1}"
        action = self.create_rule_action(device_rule.ruleid, configuration, self.device.id, False)
        self._aliyun.start_rule(device_rule.ruleid)
        return action

    def delete_device2control_action(self):
        """
        Delete the rule action from the esp8266 to the control device,
        both on Aliyun and locally.  A missing local record is a no-op.
        """
        device_rule = self.create_device_rule()
        try:
            device_action = AliyunIotRules.objects.get(
                ruleid=device_rule.ruleid, isAction=True,
                device_id=self.control_device.id, isControlDevice=True)
        except AliyunIotRules.DoesNotExist:
            return
        self._aliyun.delete_rule_action(device_action.action_id)
        device_action.delete()

    def delete_control2device_action(self):
        """
        Delete the rule action from the control device to the esp8266,
        both on Aliyun and locally.  A missing local record is a no-op.
        """
        device_rule = self.create_control_rule()
        try:
            device_action = AliyunIotRules.objects.get(
                ruleid=device_rule.ruleid, isAction=True,
                device_id=self.device.id, isControlDevice=False)
        except AliyunIotRules.DoesNotExist:
            return
        self._aliyun.delete_rule_action(device_action.action_id)
        device_action.delete()

    def create_rule_action(self, relu_id, configuration, device_id, is_control):
        """
        Create an Aliyun IoT rule action; at most one action per device
        (or control device) in each rule.

        :param relu_id: id of the rule the action belongs to.
        :param configuration: REPUBLISH JSON configuration string.
        :param device_id: local id of the target device row.
        :param is_control: True when the target is a control device.
        :return: the action's AliyunIotRules row.
        """
        if not AliyunIotRules.objects.filter(ruleid=relu_id, action_config=configuration, isAction=True, device_id=device_id, isControlDevice=is_control).exists():
            data = self._aliyun.create_rule_action(relu_id, configuration)
            # Persist only when Aliyun acknowledged the action.
            if data is not None:
                aliyun_iot_relu_ = AliyunIotRules(
                    name=str(relu_id) + '_action_',
                    ruleid=relu_id,
                    isAction=True,
                    device_id=device_id,
                    action_id=data["ActionId"],
                    isControlDevice=is_control,
                    requestid=data["RequestId"],
                    action_type="REPUBLISH",
                    action_config=configuration,
                )
                aliyun_iot_relu_.save()
        return AliyunIotRules.objects.get(ruleid=relu_id, action_config=configuration, isAction=True, device_id=device_id, isControlDevice=is_control)

    def create_rule(self, rule_name, topic, product_key, device_id, is_control):
        """
        Create an Aliyun IoT rule; created at most once per device or
        control device.  The rule is started later by the *_action helpers.

        :param rule_name: name of the rule on Aliyun.
        :param topic: short MQTT topic the rule listens on.
        :param product_key: Aliyun product key of the source device.
        :param device_id: local id of the source device row.
        :param is_control: True when this is the control device's rule.
        :return: the device's AliyunIotRules row.
        """
        if not AliyunIotRules.objects.filter(short_topic=topic, isControlDevice=is_control, device_id=device_id).exists():
            data = self._aliyun.create_rule(rule_name=rule_name, topic=topic, product_key=product_key)
            if data is not None:
                aliyun_iot_relu = AliyunIotRules(
                    name=rule_name,
                    short_topic=topic,
                    ruleid=data["RuleId"],
                    isControlDevice=is_control,
                    device_id=device_id,
                    requestid=data["RequestId"]
                )
                aliyun_iot_relu.save()
        return AliyunIotRules.objects.get(short_topic=topic, isControlDevice=is_control, device_id=device_id)
def check_login(request):
    """
    Return whether the request's session carries a logged-in user id.

    :param request: Django HttpRequest whose session is inspected.
    :return: True when 'userid' is present (and not None) in the session,
        False otherwise.  Note a falsy-but-present id (e.g. 0) still counts
        as logged in, matching the original behavior.
    """
    return request.session.get('userid') is not None