hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8da9e0178f00d72e18dd60857fa82ec6eecb27f0
| 10,063
|
py
|
Python
|
examples/exchange_demag/test_exchange_demag.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 10
|
2018-03-24T07:43:17.000Z
|
2022-03-26T10:42:27.000Z
|
examples/exchange_demag/test_exchange_demag.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 21
|
2018-03-26T15:08:53.000Z
|
2021-07-10T16:11:14.000Z
|
examples/exchange_demag/test_exchange_demag.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 7
|
2018-04-09T11:50:48.000Z
|
2021-06-10T09:23:25.000Z
|
import os
import logging
import matplotlib
matplotlib.use('Agg')
import pylab as p
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag
from finmag.util.meshes import from_geofile, mesh_volume
import pytest
logger = logging.getLogger(name='finmag')
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
REL_TOLERANCE = 5e-4
Ms = 0.86e6
unit_length = 1e-9
mesh = from_geofile(os.path.join(MODULE_DIR, "bar30_30_100.geo"))
def run_finmag():
    """Run the finmag simulation and store data in averages.txt.

    Writes magnetisation averages to averages.txt and exchange/demag
    energies to energies.txt for 61 time steps; at step 10 also samples
    the energy densities along the bar axis and stores them as .npy files.
    """
    sim = Sim(mesh, Ms, unit_length=unit_length)
    sim.alpha = 0.5
    sim.set_m((1, 0, 1))

    exchange = Exchange(13.0e-12)
    sim.add(exchange)
    demag = Demag(solver="FK")
    sim.add(demag)

    logger.info("Time integration")
    times = np.linspace(0, 3.0e-10, 61)
    # Context managers guarantee both files are closed even if the
    # integration raises part-way through (the originals leaked on error).
    with open(os.path.join(MODULE_DIR, "averages.txt"), "w") as fh, \
         open(os.path.join(MODULE_DIR, "energies.txt"), "w") as fe:
        for counter, t in enumerate(times):
            # Integrate up to the next requested time.
            sim.run_until(t)

            # Save averages to file
            mx, my, mz = sim.m_average
            fh.write(str(t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")

            # Energies
            E_e = exchange.compute_energy()
            E_d = demag.compute_energy()
            fe.write(str(E_e) + " " + str(E_d) + "\n")

            # Energy densities, sampled once (at the 10th step).
            if counter == 10:
                _save_energy_densities(exchange, demag)


def _save_energy_densities(exchange, demag):
    """Sample exchange/demag energy densities at (15, 15, z) for z in 0..99
    and store them as .npy arrays in MODULE_DIR."""
    exch_energy = exchange.energy_density_function()
    demag_energy = demag.energy_density_function()
    finmag_exch = [exch_energy([15, 15, i]) for i in range(100)]
    finmag_demag = [demag_energy([15, 15, i]) for i in range(100)]
    # Store data
    np.save(os.path.join(MODULE_DIR, "finmag_exch_density.npy"), np.array(finmag_exch))
    np.save(os.path.join(MODULE_DIR, "finmag_demag_density.npy"), np.array(finmag_demag))
if __name__ == '__main__':
    # Run the simulation, then compare results against reference data.
    # NOTE(review): the test_compare_* functions are not defined in this
    # chunk of the file — presumably defined elsewhere in it; confirm.
    run_finmag()
    test_compare_averages()
    test_compare_energies()
    test_compare_energy_density()
| 37.408922
| 114
| 0.644241
|
8daa3414a09b9f3c7c95225a1a7fdf929b8d3dfe
| 440
|
py
|
Python
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2019-09-25T23:23:49.000Z
|
2019-09-25T23:23:49.000Z
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2020-04-20T20:53:27.000Z
|
2020-04-20T20:53:27.000Z
|
BPt/default/options/samplers.py
|
sahahn/ABCD_ML
|
a8b1c48c33f3fdc046c8922964f1c456273238da
|
[
"MIT"
] | 1
|
2019-06-21T14:44:40.000Z
|
2019-06-21T14:44:40.000Z
|
from ..helpers import get_obj_and_params, all_from_objects
from ...extensions.samplers import OverSampler
# Registry of available samplers: key -> (sampler class, accepted param presets).
SAMPLERS = {
    'oversample': (OverSampler, ['default']),
}
# NOTE(review): all_from_objects presumably flattens the registry into the
# set of exposed object keys — confirm against its definition in ..helpers.
all_obj_keys = all_from_objects(SAMPLERS)
| 25.882353
| 68
| 0.756818
|
a5c09a3f2e4a0708c742cfe5829c7e01efbe2a70
| 300
|
py
|
Python
|
ABC151-200/ABC192/abc192_c.py
|
billyio/atcoder
|
9d16765f91f28deeb7328fcc6c19541ee790941f
|
[
"MIT"
] | 1
|
2021-02-01T08:48:07.000Z
|
2021-02-01T08:48:07.000Z
|
ABC151-200/ABC192/abc192_c.py
|
billyio/atcoder
|
9d16765f91f28deeb7328fcc6c19541ee790941f
|
[
"MIT"
] | null | null | null |
ABC151-200/ABC192/abc192_c.py
|
billyio/atcoder
|
9d16765f91f28deeb7328fcc6c19541ee790941f
|
[
"MIT"
] | null | null | null |
# ABC192 C: starting from N, repeat K times:
#   ans <- (digits of ans sorted descending) - (digits of ans sorted ascending)
N, K = map(int, input().split())
ans = N
for _ in range(K):
    digits = sorted(str(ans))
    ascending = int(''.join(digits))
    descending = int(''.join(reversed(digits)))
    ans = descending - ascending
print(ans)
| 27.272727
| 41
| 0.566667
|
a5c0fa60cac177d2865547e53143112bdfdc7111
| 1,008
|
py
|
Python
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 15
|
2017-03-11T18:25:04.000Z
|
2022-03-31T19:54:31.000Z
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 2
|
2018-10-17T15:08:36.000Z
|
2021-06-08T13:34:56.000Z
|
testing.py
|
madjabal/morphine
|
2c76b10a7276936042913d609ad773fbc08b0887
|
[
"MIT"
] | 2
|
2018-07-25T15:15:54.000Z
|
2019-06-14T11:16:41.000Z
|
# Python modules
import time
from datetime import timedelta
def consistency(func, args, expected, n=10**4):
    """Analyze and report on the consistency of a function.

    Calls ``func(*args)`` ``n`` times, tallying how often the result equals
    ``expected``, and periodically reports progress via ``show`` (defined
    elsewhere in this file — not visible in this chunk).
    """
    print('\n[CONSISTENCY TEST] {0}'.format(func.__doc__.format(*args)))
    start = time.time()
    interval = start  # timestamp of the last progress report
    tally = 0  # number of calls whose result matched `expected`
    for i in range(n):
        isCorrect = func(*args) == expected
        tally += (1 if isCorrect else 0)
        diff = time.time() - interval
        # Throttle progress output to at most one update per 10 ms.
        if diff > 0.01:
            interval = time.time()
            show(tally, (i+1), time.time() - start, (i+1)/n)
    # Final report after all n runs.
    show(tally, n, time.time() - start, (i+1)/n, '\n')
def max_over(n, func, args=None):
    """Compute the maximum value returned by func(args) in n runs.

    NOTE: the running maximum starts at 0, so a function that only returns
    negative values yields 0 (original behaviour, preserved).
    """
    best = 0
    for _ in range(n):
        value = func(*args) if args else func()
        if value > best:
            best = value
    return best
| 30.545455
| 108
| 0.558532
|
a5c112fb1800922ae32e15c8c2c3119937a66895
| 520
|
py
|
Python
|
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
misc/python/fibonacci.py
|
saranshbht/codes-and-more-codes
|
0bd2e46ca613b3b81e1196d393902e86a43aa353
|
[
"MIT"
] | null | null | null |
from itertools import permutations
from collections import Counter
import time
# Timing marker before the work starts.
print(time.time())

words = ["dgajkhdjkjfkl", "ahfjkh", "jfskoj", "hfakljfio", "fjfjir",
         "jiosj", "jiojf", "jriosj", "jiorjf", "jhhhhaskgasjdfljjriof"]

for word in words:
    counts = Counter(word)
    # "Dynamic" if some ordering of the character counts contains a value
    # equal to the sum of the previous two (Fibonacci-like), else "Not".
    fibonacci_like = any(
        any(perm[j] == perm[j - 1] + perm[j - 2] for j in range(2, len(perm)))
        for perm in permutations(counts.values())
    )
    print("Dynamic" if fibonacci_like else "Not")

# Timing marker after the work is done.
print(time.time())
| 18.571429
| 117
| 0.646154
|
a5c55462952f35e96e4d815b3891933e684d12b8
| 784
|
py
|
Python
|
rhasspy/speech.py
|
Wil-Peters/HomeAutomation
|
ab4f78d9fad42093435732233e99003f12dca5e7
|
[
"MIT"
] | 2
|
2020-04-09T20:29:15.000Z
|
2021-01-20T09:21:02.000Z
|
rhasspy/speech.py
|
Wil-Peters/HomeAutomation
|
ab4f78d9fad42093435732233e99003f12dca5e7
|
[
"MIT"
] | null | null | null |
rhasspy/speech.py
|
Wil-Peters/HomeAutomation
|
ab4f78d9fad42093435732233e99003f12dca5e7
|
[
"MIT"
] | null | null | null |
import configparser
import os
from typing import ByteString
import requests
from core.speaker import Speaker
from core.texttospeech import TextToSpeechGenerator
| 29.037037
| 88
| 0.715561
|
a5c6922a61844f38e222e52aacc04701fb1c3022
| 4,953
|
py
|
Python
|
main.py
|
rodrigobercinimartins/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | 1
|
2020-04-06T17:55:04.000Z
|
2020-04-06T17:55:04.000Z
|
main.py
|
rodrigobercini/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | null | null | null |
main.py
|
rodrigobercini/export-import-por-mesorregiao-brasil
|
73b8126e593eec63ae29eb81a2967f566ec93bc9
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import ssl
# I'm getting SSL certificates issues when downloading files from MDIC.
# The code below is a hack to get around this issue.
try:
    # Private CPython hook; present since 2.7.9/3.4.3.
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Older Python: certificate verification is not enforced, nothing to patch.
    pass
else:
    # NOTE: disables certificate checking process-wide; trusted hosts only.
    ssl._create_default_https_context = _create_unverified_https_context

if __name__ == '__main__':
    # NOTE(review): ExportsByMesoregion is not defined in this chunk of the
    # file — presumably imported/defined elsewhere in it; confirm.
    ExportsObject = ExportsByMesoregion(start_year=2020, end_year=2020, transaction_type='imports')
    ExportsObject.download_data_and_aggregate_by_meso()
| 43.447368
| 173
| 0.657985
|
a5c927734733b551301c1522c13b6095afdcc07d
| 903
|
py
|
Python
|
backend/customers/migrations/0001_initial.py
|
cbreezy623/modabella
|
b68bcc8aca903887d31489baae609ed70fe3dba7
|
[
"Apache-2.0"
] | null | null | null |
backend/customers/migrations/0001_initial.py
|
cbreezy623/modabella
|
b68bcc8aca903887d31489baae609ed70fe3dba7
|
[
"Apache-2.0"
] | null | null | null |
backend/customers/migrations/0001_initial.py
|
cbreezy623/modabella
|
b68bcc8aca903887d31489baae609ed70fe3dba7
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-10-02 01:11
from django.db import migrations, models
import phonenumber_field.modelfields
| 33.444444
| 127
| 0.604651
|
a5ca6ea7872c55e908f6afc4233961e95a90159a
| 1,366
|
py
|
Python
|
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | 16
|
2022-02-05T15:51:13.000Z
|
2022-02-05T17:38:54.000Z
|
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | null | null | null |
sendUAV/recevier.py
|
RobEn-AAST/AI-UAVC
|
732683fd5821d492b772cc5f966e86aed164a68c
|
[
"MIT"
] | null | null | null |
from socket import socket, AF_INET, SOCK_STREAM, IPPROTO_TCP
import struct
import pickle
if __name__ == "__main__":
    # NOTE(review): ServerSock is not defined in this chunk of the file —
    # presumably a socket-wrapping class defined elsewhere in it; confirm.
    server = ServerSock(5500)
    # Print every incoming message forever.
    while True:
        print(server.getMessage())
| 29.695652
| 82
| 0.51757
|
a5cabad6e15a3e94d18ccf5c8c5a2de2396af9ef
| 3,867
|
py
|
Python
|
graphdot/minipandas/dataframe.py
|
yhtang/GraphDot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 9
|
2020-02-14T18:07:39.000Z
|
2021-12-15T12:07:31.000Z
|
graphdot/minipandas/dataframe.py
|
yhtang/graphdot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2020-03-19T19:07:26.000Z
|
2021-02-24T06:08:51.000Z
|
graphdot/minipandas/dataframe.py
|
yhtang/graphdot
|
3d5ed4fbb2f6912052baa42780b436da76979691
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-10-17T06:11:18.000Z
|
2021-05-07T11:56:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import numpy as np
import pandas as pd
from .series import Series
| 32.495798
| 79
| 0.573054
|
a5cb7a30978758aaea2edade994cdb342894093c
| 21,620
|
py
|
Python
|
pedal/questions/loader.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 14
|
2019-08-22T03:40:23.000Z
|
2022-03-13T00:30:53.000Z
|
pedal/questions/loader.py
|
pedal-edu/pedal
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 74
|
2019-09-12T04:35:56.000Z
|
2022-01-26T19:21:32.000Z
|
pedal/questions/loader.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 2
|
2021-01-11T06:34:00.000Z
|
2021-07-21T12:48:07.000Z
|
"""
instructions: blah blah blah
settings:
tifa:
enabled: True
unit test by function (bool): Whether to test each function entirely before moving onto the
next one, or to first check that all functions have been defined, and then
checking their parameters, etc. Defaults to True.
show case details (bool): Whether to show the specific args/inputs that caused a test case
to fail.
rubric:
functions:
total: 100
definition: 10
signature: 10
cases: 80
global:
variables:
name:
type:
value:
inputs:
prints:
# Sandbox, type checking
functions:
documentation: "any" or "google"
coverage: 100%
tests: int
name: do_complicated_stuff
arity: int
signature: int, int -> float
signature: int, int, list[int], (int->str), dict[str:list[int]] -> list[int]
parameters:
name: banana
exactly:
regex:
includes:
within:
type: int
cases:
- arguments (list): 5, 4
inputs (list):
returns (Any):
equals: 27.3
is:
is not: _1
name (str): Meaningful name for tracking purposes? Or possibly separate into label/id/code
hint (str): Message to display to user
prints:
exactly:
regex:
startswith:
endswith:
plots:
# Cait
syntax:
prevent:
___ + ___
# Override any of our default feedback messages
messages:
FUNCTION_NOT_DEFINED: "Oops you missed a function"
"""
from pedal.core.commands import set_success, give_partial
from pedal.core.feedback_category import FeedbackCategory
from pedal.questions.constants import TOOL_NAME
from pedal.sandbox.commands import get_sandbox
from pedal.utilities.comparisons import equality_test
# Settings key controlling whether failing-case feedback includes a table
# of the specific args/inputs that caused each failure.
SETTING_SHOW_CASE_DETAILS = "show case details"
DEFAULT_SETTINGS = {
    SETTING_SHOW_CASE_DETAILS: True
}

# Example question specification illustrating the expected schema
# (see the module docstring for the full format).
EXAMPLE_DATA = {
    'functions': [{
        'name': 'do_complicated_stuff',
        'signature': 'int, int, [int] -> list[int]',
        'cases': [
            {'arguments': "5, 4, 3", 'returns': "12"},
        ]
    }]
}
def check_function_defined(function, function_definitions, settings=None):
    """Verify that the function described by ``function`` was defined.

    Args:
        function (dict): Question spec for one function; must contain 'name'.
        function_definitions (dict): Map of function name -> definition node.
        settings (dict): Unused here; accepted for a uniform check signature.

    Returns:
        The definition node for the named function.

    Raises:
        FeedbackException: If no definition with the expected name exists.
            (NOTE(review): FeedbackException is not imported in this chunk;
            presumably imported elsewhere in the file — confirm.)
    """
    # 1. Is the function defined syntactically?
    # 1.1. With the right name?
    function_name = function['name']
    if function_name not in function_definitions:
        raise FeedbackException(FeedbackCategory.SPECIFICATION, 'missing_function', function_name=function_name)
    definition = function_definitions[function_name]
    return definition
def check_function_signature(function, definition, settings=None):
    """Check the student's definition against the spec's signature info.

    Supports four spec styles: 'arity' (parameter count), 'parameters'
    (per-parameter name/type), 'returns' (return type), and 'signature'
    ("param types -> return type" shorthand).

    Args:
        function (dict): Question spec for one function.
        definition: AST-like node of the student's function definition.
        settings (dict): Unused here; accepted for a uniform check signature.

    Returns:
        bool: True when every requested check passes.

    Raises:
        FeedbackException: On the first mismatch found.
    """
    function_name = function['name']
    # 1.2. With the right parameters and return type?
    # 1.2.1 'arity' style - simply checks number of parameters
    if 'arity' in function or 'parameters' in function:
        expected_arity = function['arity'] if 'arity' in function else len(function['parameters'])
        actual_arity = len(definition.args.args)
        if actual_arity < expected_arity:
            raise FeedbackException(FeedbackCategory.SPECIFICATION, 'insufficient_args',
                                    function_name=function_name, expected_arity=expected_arity,
                                    actual_arity=actual_arity)
        elif actual_arity > expected_arity:
            raise FeedbackException(FeedbackCategory.SPECIFICATION, 'excessive_args',
                                    function_name=function_name, expected_arity=expected_arity,
                                    actual_arity=actual_arity)
    # 1.2.2 'parameters' style - checks each parameter's name and type
    if 'parameters' in function:
        expected_parameters = function['parameters']
        actual_parameters = definition.args.args
        # zip silently ignores extra parameters on either side; arity is
        # checked above when both styles are combined.
        for expected_parameter, actual_parameter in zip(expected_parameters, actual_parameters):
            actual_parameter_name = get_arg_name(actual_parameter)
            if 'name' in expected_parameter:
                if actual_parameter_name != expected_parameter['name']:
                    raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_name',
                                            function_name=function_name,
                                            expected_parameter_name=expected_parameter['name'],
                                            actual_parameter_name=actual_parameter_name
                                            )
            if 'type' in expected_parameter:
                actual_parameter_type = parse_type(actual_parameter)
                # TODO: Handle non-string expected_parameter types (dict)
                expected_parameter_type = parse_type_value(expected_parameter['type'], True)
                if not type_check(expected_parameter_type, actual_parameter_type):
                    raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_type',
                                            function_name=function_name,
                                            parameter_name=actual_parameter_name,
                                            expected_parameter_type=expected_parameter_type,
                                            actual_parameter_type=actual_parameter_type)
    # 1.2.3. 'returns' style - checks the return type explicitly
    if 'returns' in function:
        expected_returns = parse_type_value(function['returns'], True)
        actual_returns = parse_type(definition.returns)
        if actual_returns != "None":
            if not type_check(expected_returns, actual_returns):
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_returns",
                                        function_name=function_name, expected_returns=expected_returns,
                                        actual_returns=actual_returns)
        elif expected_returns != "None":
            # Student declared no return annotation but one was expected.
            raise FeedbackException(FeedbackCategory.SPECIFICATION, "missing_returns",
                                    function_name=function_name, expected_returns=expected_returns)
    # 1.2.4. 'signature' style - shortcut for specifying the types
    if 'signature' in function:
        expected_signature = function['signature']
        actual_returns = parse_type(definition.returns)
        actual_parameters = ", ".join(parse_type(actual_parameter.annotation)
                                      for actual_parameter in definition.args.args)
        actual_signature = "{} -> {}".format(actual_parameters, actual_returns)
        if not type_check(expected_signature, actual_signature):
            raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_signature",
                                    function_name=function_name, expected_signature=expected_signature,
                                    actual_signature=actual_signature)
    # All good here!
    return True
def check_function_value(function, values, settings):
    """
    2. Does the function exist in the data?

    :param function: question spec dict (must contain 'name')
    :param values: mapping of student-defined top-level names to values
    :param settings: check settings (unused here)
    :return: the callable bound to the function's name
    """
    name = function['name']
    # 2.1. The name must be bound in the student's top-level values.
    if name not in values:
        raise FeedbackException(FeedbackCategory.SPECIFICATION, "function_not_available", function_name=name)
    candidate = values[name]
    # 2.2. ...and bound to something callable, not e.g. an int or string.
    if not callable(candidate):
        raise FeedbackException(FeedbackCategory.SPECIFICATION, "name_is_not_function", function_name=name)
    # All good here
    return candidate
def check_case(function, case, student_function):
    """Run one test case against the student's function inside the sandbox.

    :param function: question spec dict for the function (contains 'name')
    :param case: one case spec; may contain 'name', 'message', 'inputs',
        'arguments', 'prints', 'returns'
    :param student_function: the student's callable (the actual call goes
        through the sandbox by name)
    :return: status, arg, input, error, output, return, message
    """
    function_name = function['name']
    test_case = TestCase(function_name, case.get('name'))
    # Get callable
    sandbox = get_sandbox(MAIN_REPORT)
    sandbox.clear_output()
    # Potential bonus message
    if 'message' in case:
        test_case.add_message(case['message'])
    # Queue up the inputs
    if 'inputs' in case:
        test_case.add_inputs(case['inputs'])
        sandbox.set_input(test_case.inputs)
    else:
        sandbox.clear_input()
    # Pass in the arguments and call the function
    if 'arguments' in case:
        test_case.add_arguments(case['arguments'])
        result = sandbox.call(function_name, *test_case.arguments)
    # NOTE(review): `result` is only bound when 'arguments' is present; a
    # case spec without 'arguments' would raise NameError below — confirm
    # whether argument-less cases are supposed to be supported.
    # Store actual values
    test_case.add_prints_returns(sandbox.output, result)
    # Check for errors
    if sandbox.exception:
        test_case.add_error(sandbox.exception)
    # 4. Check out the output
    if 'prints' in case:
        test_case.add_expected_prints(case['prints'])
        if not output_test(sandbox.output, case['prints'], False, .0001):
            test_case.fail()
    # 5. Check the return value
    if 'returns' in case:
        test_case.add_expected_returns(case['returns'])
        if not equality_test(result, case['returns'], True, .0001):
            test_case.fail()
    # TODO: Check the plots
    # Return results
    return test_case
# TODO: blockpy-feedback-unit => pedal-test-cases in BlockPy Client
TEST_TABLE_TEMPLATE = """<table class='pedal-test-cases table table-sm table-bordered table-hover'>
<tr class='table-active'>
<th></th>
<th>Arguments</th>
<th>Expected</th>
<th>Returned</th>
</tr>
{body}
</table>"""
TEST_TABLE_FOOTER = "</table>"
TEST_TABLE_ROW_HEADER = "<tr class='table-active'>"
TEST_TABLE_ROW_NORMAL = "<tr>"
TEST_TABLE_ROW_FOOTER = "</tr>"
TEST_TABLE_ROW_INFO = "<tr class='table-info'>"
GREEN_CHECK = " <td class='green-check-mark'>✔</td>"
RED_X = " <td>❌</td>"
CODE_CELL = " <td><code>{}</code></td>"
COLUMN_TITLES = ["", "Arguments", "Inputs", "Errors", "Expected", "Expected", "Returned", "Printed"]
def make_table(cases):
    """Render a list of test-case results as an HTML table.

    Args:
        cases: iterable of test-case results exposing success, arguments,
            has_error/error, expected_returns, returns, has_message/message.

    Returns:
        str: HTML markup built from TEST_TABLE_TEMPLATE.
    """
    rows = []
    for case in cases:
        rows.append(" <tr>")
        rows.append(GREEN_CHECK if case.success else RED_X)
        rendered_args = ", ".join(repr(argument) for argument in case.arguments)
        rows.append(CODE_CELL.format(rendered_args))
        if case.has_error:
            # An error replaces both the expected and returned cells.
            rows.append(" <td colspan='2'>Error: <code>{}</code></td>".format(str(case.error)))
        else:
            rows.append(CODE_CELL.format(repr(case.expected_returns)))
            rows.append(CODE_CELL.format(repr(case.returns)))
        if not case.success and case.has_message:
            # Extra full-width row carrying the case's hint message.
            rows.append(" </tr><tr><td colspan='4'>{}</td>".format(case.message))
        rows.append(" </tr>")
    return TEST_TABLE_TEMPLATE.format(body="\n".join(rows))
#if ((any(args) and any(inputs)) or
# (any(expected_outputs) and any(expected_returns)) or
# (any(actual_outputs) and any(actual_returns))):
# # Complex cells
# pass
#else:
# Simple table
# Make header
# row_mask = [True, any(args), any(inputs), False,
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons),
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons)]
# header_cells = "".join("<th>{}</th>".format(title) for use, title in zip(row_mask, COLUMN_TITLES) if use)
# body = [TEST_TABLE_ROW_HEADER.format(header_cells)]
# for case in zip(
# statuses, args, inputs, errors, actual_outputs, actual_returns,
# expected_outputs, expected_returns):
# status, case = case[0], case[1:]
# print(row_mask[1:], case)
# def make_code(values):
# if values == None:
# return "<code>None</code>"
# elif isinstance(values, int):
# return "<code>{!r}</code>".format(values)
# else:
# return ", ".join("<code>{}</code>".format(repr(value)) for value in values)
# body.append(
# TEST_TABLE_ROW_NORMAL+
# (GREEN_CHECK if case[0] else RED_X)+
# "\n".join(" <td>{}</td>".format(make_code(values))
# for use, values in zip(row_mask[1:], case) if use)+
# "</tr>\n"
# )
# # Make each row
# table = "{}\n{}\n{}".format(TEST_TABLE_HEADER, "\n ".join(body), TEST_TABLE_FOOTER)
# return table
def check_cases(function, student_function, settings):
    """Run every case listed for `function` and report aggregate failures.

    Args:
        function: question spec dict (uses 'name' and optional 'cases').
        student_function: the student's callable, forwarded to check_case.
        settings: dict of check settings (reads SETTING_SHOW_CASE_DETAILS).

    Raises:
        FeedbackException: when at least one case fails — including an HTML
            detail table when SETTING_SHOW_CASE_DETAILS is enabled,
            otherwise only the pass/fail counts.
    """
    function_name = function['name']
    if 'cases' in function:
        cases = function['cases']
        test_cases = [check_case(function, case, student_function) for case in cases]
        success_cases = sum(test.success for test in test_cases)
        if success_cases < len(cases):
            if settings[SETTING_SHOW_CASE_DETAILS]:
                table = make_table(test_cases)
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases",
                                        function_name=function_name,
                                        cases_count=len(cases), failure_count=len(cases)-success_cases,
                                        table=table)
            else:
                raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases_count",
                                        function_name=function_name,
                                        cases_count=len(cases), failure_count=len(cases) - success_cases)
def get_arg_name(node):
    """Return the parameter name carried by an argument node.

    Args:
        node: node exposing ``id`` (CAIT-style) and/or ``arg`` (ast.arg style).

    Returns:
        ``node.id`` when it is set, otherwise ``node.arg``.
    """
    preferred = node.id
    return node.arg if preferred is None else preferred
def check_question(data):
    """Check a single question spec and deliver the first feedback message.

    Args:
        data: question specification (see module docstring for the schema).
    """
    # load_question yields (message, label) pairs; only the first is shown.
    # NOTE(review): load_question and gently are not defined/imported in
    # this chunk — presumably available elsewhere in the module; confirm.
    results = list(load_question(data))
    if results:
        message, label = results[0]
        gently(message, label=label)
def check_pool(questions):
    """Check a pool of question variants.

    Args:
        questions: iterable of question specs.
    """
    # Not yet implemented.
    pass
def load_file(filename):
    """Load a question specification from a file.

    Args:
        filename: path to the specification file.
    """
    # Not yet implemented.
    pass
FEEDBACK_MESSAGES = {
FeedbackCategory.SPECIFICATION: {
"missing_function": "No function named `{function_name}` was found.",
"insufficient_args": ("The function named `{function_name}` "
"has fewer parameters ({actual_arity}) "
"than expected ({expected_arity})."),
"excessive_args": ("The function named `{function_name}` "
"has more parameters ({actual_arity}) "
"than expected ({expected_arity})."),
# TODO: missing_parameter that checks if parameter name exists, but is in the wrong place
"wrong_parameter_name": ("Error in definition of `{function_name}`. "
"Expected a parameter named `{expected_parameter_name}`, "
"instead found `{actual_parameter_name}`."),
"wrong_parameter_type": ("Error in definition of function `{function_name}` "
"parameter `{parameter_name}`. Expected `{expected_parameter_type}`, "
"instead found `{actual_parameter_type}`."),
"missing_returns": ("Error in definition of function `{function_name}` return type. "
"Expected `{expected_returns}`, but there was no return type specified."),
"wrong_returns": ("Error in definition of function `{function_name}` return type. "
"Expected `{expected_returns}`, instead found `{actual_returns}`."),
"wrong_signature": ("Error in definition of function `{function_name}` signature. "
"Expected `{expected_signature}`, instead found `{actual_signature}`."),
"name_is_not_function": "You defined `{function_name}`, but did not define it as a function.",
"function_not_available": ("You defined `{function_name}` somewhere in your code, "
"but it was not available in the top-level scope to be called. "
"Perhaps you defined it inside another function or scope?"),
"failed_test_cases": ("I ran your function <code>{function_name}</code> on my own test cases. "
"It failed {failure_count}/{cases_count} of my tests.\n{table}"),
"failed_test_cases_count": ("I ran your function <code>{function_name}</code> on my own test cases. "
"It failed {failure_count}/{cases_count} of my tests."),
}
}
| 35.913621
| 118
| 0.592091
|
a5cc7ebfb0f671bb1d1aeac6021cc68675439a1a
| 8,732
|
py
|
Python
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 75
|
2017-09-22T22:36:13.000Z
|
2022-03-20T16:18:27.000Z
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 7
|
2019-05-10T19:15:08.000Z
|
2021-08-24T16:03:34.000Z
|
VM/fetchLoop.py
|
djtech-dev/PyVM
|
1edda436ce7073d0cecbf16f5cab2509895d953c
|
[
"MIT"
] | 14
|
2018-07-02T02:49:46.000Z
|
2022-02-22T15:24:47.000Z
|
import enum
from .ELF import ELF32, enums
from .util import SegmentRegs, MissingOpcodeError
from .CPU import CPU32
import logging
logger = logging.getLogger(__name__)
| 33.328244
| 135
| 0.535502
|
a5cdc723644cccdf87dcd59c16c72ac9871189a8
| 2,753
|
py
|
Python
|
polecat/deploy/aws/deployment.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 4
|
2019-08-10T12:56:12.000Z
|
2020-01-21T09:51:20.000Z
|
polecat/deploy/aws/deployment.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 71
|
2019-04-09T05:39:21.000Z
|
2020-05-16T23:09:24.000Z
|
polecat/deploy/aws/deployment.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | null | null | null |
from termcolor import colored
from ...utils.feedback import feedback
from .constants import DEPLOYMENT_PREFIX, DEPLOYMENT_REGISTRY
from .exceptions import EntityDoesNotExist, EntityExists
from .operations import delete_parameter, get_parameter, set_parameter
from .project import assert_project_exists
from .utils import aws_client
def deployment_exists(project, deployment, ssm=None):
    """Return True when `deployment` is listed in the project's registry."""
    registered = get_parameter(DEPLOYMENT_REGISTRY.format(project), ssm=ssm)
    return deployment in registered
def assert_deployment_exists(project, deployment, ssm=None):
    """Raise EntityDoesNotExist unless the deployment is registered."""
    if deployment_exists(project, deployment, ssm=ssm):
        return
    raise EntityDoesNotExist(
        f'deployment {deployment} does not exist'
    )
| 38.774648
| 98
| 0.698147
|
a5cef8d918f7406a1dd78059cb13a600f918323a
| 5,897
|
py
|
Python
|
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | 1
|
2019-05-10T10:39:12.000Z
|
2019-05-10T10:39:12.000Z
|
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | null | null | null |
mlpy/regression/logistic_regression.py
|
SNUDerek/MLPy
|
0d47a8ef8522a663716cda6a831855e6482069ba
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..tools import batchGenerator
# LOGISTIC REGRESSION
# for (binary) categorical data
| 32.944134
| 89
| 0.587248
|
a5d00dc3b88e76c00327d591e70ffe150f4013d2
| 1,946
|
py
|
Python
|
esercizio_1/untitled1.py
|
navyzigz420/python_lab
|
a3496d8b170e334abfb5099bf6ee03df5e226b78
|
[
"Apache-2.0"
] | null | null | null |
esercizio_1/untitled1.py
|
navyzigz420/python_lab
|
a3496d8b170e334abfb5099bf6ee03df5e226b78
|
[
"Apache-2.0"
] | null | null | null |
esercizio_1/untitled1.py
|
navyzigz420/python_lab
|
a3496d8b170e334abfb5099bf6ee03df5e226b78
|
[
"Apache-2.0"
] | null | null | null |
bits = '110'
#print(str(valore))
# NOTE(review): turnBitsIntoInteger is not defined in this chunk of the
# file — presumably converts a bit string to its integer value; confirm.
print(turnBitsIntoInteger(bits))
| 1.3637
| 92
| 0.167009
|
a5d187baa7ec34c04b159476ef8dc6d77a915eac
| 4,175
|
py
|
Python
|
generated/azure-cli/aro/custom.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/azure-cli/aro/custom.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/azure-cli/aro/custom.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
import json
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
| 55.666667
| 180
| 0.683353
|
a5d2df25221764ec5395b74a6c3cb30a216ee3ff
| 12,269
|
py
|
Python
|
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
server.py
|
satriabw/Tugas_Sisdis
|
b1e152f35834e52806071b9b1424b114dce65148
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from random import randint
from urllib.parse import parse_qs
import socket
import sys
import json
import traceback
import os
import base64
import yaml
import datetime
import requests
import re
route = Route()
def notFound(conn, request):
    """Send a 404 response: JSON flavour for /api paths, plain text otherwise."""
    if "/api" in request.header["path"]:
        notFoundJson(conn)
        # NOTE(review): no return here, so the plain-text 404 below is also
        # written after the JSON response — confirm whether a `return` is
        # missing (badRequest has the same pattern).
    status = "404 Not Found"
    c_type = "text/plain; charset=UTF-8"
    msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
    writeResponse(conn, msgErr)
def notImplemented(conn, request):
    """Reply with a plain-text 501 Not Implemented response."""
    status = "501 Not Implemented"
    content_type = "text/plain; charset=UTF-8"
    response = renderMessage(status, str(len(status)), None, None, content_type, status)
    writeResponse(conn, response)
def badRequest(conn, request):
    """Send a 400 response: JSON flavour for /api paths, plain text otherwise."""
    if "/api" in request.header["path"]:
        badRequestJson(conn, "Please use proper http version")
        # NOTE(review): no return here, so the plain-text 400 below is also
        # written after the JSON response — confirm whether a `return` is
        # missing (notFound has the same pattern).
    status = "400 Bad Request"
    c_type = "text/plain; charset=UTF-8"
    msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
    writeResponse(conn, msgErr)
def getTime(t_raw):
    """Convert 'YYYY-MM-DD HH:MM:SS' into ISO-8601 'YYYY-MM-DDTHH:MM:SS.ffffffZ'."""
    parsed = datetime.datetime.strptime(t_raw, "%Y-%m-%d %H:%M:%S")
    # %f is the zero-padded 6-digit microsecond field; build it explicitly.
    return "{}.{:06d}Z".format(parsed.strftime("%Y-%m-%dT%H:%M:%S"), parsed.microsecond)
def getCounter():
    """Read the persisted request counter from counter.json."""
    with open('counter.json', 'r') as handle:
        return json.load(handle)["count"]
def writeCounter(c):
    """Persist the request counter value ``c`` into counter.json.

    Fix: dropped the pointless ``data = json.dump(...)`` binding — json.dump
    writes to the file object and always returns None.
    """
    with open('counter.json', 'w') as json_file:
        json.dump({"count": c}, json_file)
def getApiVersion():
    """Return the API version string declared in ./spesifikasi.yaml."""
    with open('./spesifikasi.yaml', 'r') as f:
        # safe_load avoids arbitrary Python object construction on untrusted
        # YAML and the yaml.load()-without-Loader deprecation warning.
        doc = yaml.safe_load(f)
    return doc["info"]["version"]
def notFoundJson(conn):
    """Send a JSON-formatted 404 error response."""
    json_http_error(
        conn,
        "The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.",
        "404",
        "Not Found",
    )
def methodNotAllowedJson(conn, d):
    """Send a JSON-formatted 405 error response with detail ``d``."""
    json_http_error(conn, d, "405", "Method Not Allowed")
def badRequestJson(conn, d):
    """Send a JSON-formatted 400 error response with detail ``d``."""
    json_http_error(conn, d, "400", "Bad Request")
def json_http_ok(conn, **kwargs):
    """Send a 200 JSON response: the API version merged with **kwargs."""
    payload = {'apiversion': getApiVersion(), **kwargs}
    body = json.dumps(payload)
    # Build Response
    status = "200 OK"
    content_type = "application/json; charset=UTF-8"
    response = renderMessage(status, str(len(body)), None, None, content_type, body)
    writeResponse(conn, response)
def json_http_error(conn, detail, status, title):
    """Send a JSON error response carrying detail/status/title fields."""
    body = json.dumps({'detail': detail, 'status': status, 'title': title})
    status_line = "{} {}".format(status, title)
    content_type = "application/json; charset=UTF-8"
    response = renderMessage(status_line, str(len(body)), None, None, content_type, body)
    writeResponse(conn, response)
def main():
    """Register all routes on the global router and start serving."""
    # HOST = socket.gethostbyname(socket.gethostname())
    host = "0.0.0.0"
    port = int(sys.argv[1])

    # (method, path, view) triples, registered in declaration order.
    registrations = [
        ("GET", "/", getRoot),
        ("GET", "/hello-world", getHelloWorld),
        ("GET", "/style", getStyle),
        ("GET", "/background", getBackground),
        ("GET", "/info", getInfo),
        ("GET", "/api/hello", helloAPI),
        ("GET", "/api/plusone/<:digit>", plusOneAPI),
        ("GET", "/api/spesifikasi.yaml", getSpesifikasi),
        ("POST", "/api/hello", helloAPI),
        ("POST", "/hello-world", postHelloWorld),
        ("PUT", "/api/hello", helloAPI),
        ("PATCH", "/api/hello", helloAPI),
        ("DELETE", "/api/hello", helloAPI),
        ("HEAD", "/api/hello", helloAPI),
    ]
    for method, path, view in registrations:
        route.route(method, path, view)

    # Serve the connection
    connect(host, port)
def handler(conn, req):
    """Dispatch ``req`` to its registered view; map dispatch failures to 501/404."""
    try:
        print("=== Got Request ===\n{}\n===Got Header====\n{}\n".format(
            req._raw_request, req.header))
        view = route.dispatch(cleanURL(req.header["path"]), req.header["method"])
        view(conn, req)
    except TypeError:
        # dispatch() yielded nothing callable: the path exists but not for
        # this method (501), or the path is unknown entirely (404).
        print(traceback.format_exc())
        if route.findPath(cleanURL(req.header["path"])):
            notImplemented(conn, req)
        else:
            notFound(conn, req)
def cleanURL(url):
    """Strip the query string (everything from the first '?') off ``url``."""
    path, _, _ = url.partition("?")
    return path
# Only start the server when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 31.060759
| 136
| 0.605102
|
a5d51824f01f43c1a1f7165def53773a506fe72b
| 656
|
py
|
Python
|
glia/widgets/editor/tabs.py
|
gliahq/Glia
|
4951569f2759ea886bad165b6d74a569c14bbd2a
|
[
"Apache-2.0"
] | 1
|
2020-08-20T08:22:33.000Z
|
2020-08-20T08:22:33.000Z
|
glia/widgets/editor/tabs.py
|
gliahq/Glia
|
4951569f2759ea886bad165b6d74a569c14bbd2a
|
[
"Apache-2.0"
] | 3
|
2021-04-20T18:20:45.000Z
|
2021-06-01T23:56:13.000Z
|
glia/widgets/editor/tabs.py
|
gliahq/Glia
|
4951569f2759ea886bad165b6d74a569c14bbd2a
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtWidgets import QTabWidget
from glia.widgets.editor import Editor
| 24.296296
| 69
| 0.643293
|
a5d8c45707967ee83846553d1837407bb63fcb57
| 2,296
|
py
|
Python
|
tests/test_aws_tdd.py
|
Fauxsys/offprem
|
9fe4764b24578b1ada43cfab600379b55fed038d
|
[
"MIT"
] | null | null | null |
tests/test_aws_tdd.py
|
Fauxsys/offprem
|
9fe4764b24578b1ada43cfab600379b55fed038d
|
[
"MIT"
] | null | null | null |
tests/test_aws_tdd.py
|
Fauxsys/offprem
|
9fe4764b24578b1ada43cfab600379b55fed038d
|
[
"MIT"
] | null | null | null |
""" Test Driven Development. """
import pytest
| 31.888889
| 78
| 0.761324
|
a5d993daf62319705e260124c70d45da91cc0c68
| 1,743
|
py
|
Python
|
CursoPython/dia 2/copia de clase.py
|
hamfree/PYTHON
|
2df83c1da393f05cadf0fe3f8d3173d4420eda60
|
[
"Apache-2.0"
] | null | null | null |
CursoPython/dia 2/copia de clase.py
|
hamfree/PYTHON
|
2df83c1da393f05cadf0fe3f8d3173d4420eda60
|
[
"Apache-2.0"
] | null | null | null |
CursoPython/dia 2/copia de clase.py
|
hamfree/PYTHON
|
2df83c1da393f05cadf0fe3f8d3173d4420eda60
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# pylint: disable=E1601
"""
17/02/2018
Profesores: lvaro Snchez de Cruz / ???
- Se hace un repaso de lo que se imparti el da anterior.
- Subprogramas o funciones:
- Viene de la programacin estructurada.
- Sintaxis:
def nombreFuncion([parametro1,][parametro2,]...)
(codigo de la funcion)
[return valor]
Los valores se pasan siempre por valor a las funciones.
Ahora vamos a ver un ejemplo:
def saludo()
print "Hola Mundo"
- Se explican los ejercicios propuestos del da 1.
- Ficheros
var = open('ruta al fichero','a'|'r'|'w')
- JSON
Es un formato de datos originario de JavaScript, que hablando en terminologia de python es una
lista de diccionarios, de la siguiente forma:
[
{},{},...
]
En python los JSON se pueden pasar a un diccionario muy facilmente.
"""
# modo 'r' read, que lee el fichero si existe y si no dara un error
# modo 'a' append, que aade al fichero si existe y si no lo crea
# modo 'w' write, que escribe en el fichero si existe, sobreescribiendo lo que tuviera, y si no lo crea.
# Run the file-handling demos; the functions are defined earlier in the
# full file (not visible in this excerpt).
leerFichero()
sobreescribirFichero()
leerEscribirFichero()
| 24.549296
| 104
| 0.596672
|
a5d9baaf2337daeafdfe9b9a22db73d38a684f6f
| 576
|
py
|
Python
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 3
|
2021-11-16T11:24:27.000Z
|
2021-11-21T17:11:24.000Z
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 7
|
2021-09-01T06:50:41.000Z
|
2021-09-03T23:12:07.000Z
|
functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py
|
emctl/samples
|
569f81035a6c214d4cda3687173e24003f17f95e
|
[
"MIT"
] | 4
|
2021-02-05T17:30:28.000Z
|
2021-08-16T21:26:55.000Z
|
import logging
import requests
import json
import azure.functions as func
# Base URL of the local Dapr sidecar HTTP API (default sidecar port 3500).
dapr_url = "http://localhost:3500/v1.0"
| 28.8
| 72
| 0.697917
|
a5da4714ac6a7f9235bc1e8123d0bcfaf76ea57c
| 260
|
py
|
Python
|
CH_10_testing_and_logging/T_26_logging_json_config.py
|
mastering-python/code_2
|
441af8b67402c8216c482cca7c002e1d7f0f1baa
|
[
"MIT"
] | null | null | null |
CH_10_testing_and_logging/T_26_logging_json_config.py
|
mastering-python/code_2
|
441af8b67402c8216c482cca7c002e1d7f0f1baa
|
[
"MIT"
] | null | null | null |
CH_10_testing_and_logging/T_26_logging_json_config.py
|
mastering-python/code_2
|
441af8b67402c8216c482cca7c002e1d7f0f1baa
|
[
"MIT"
] | null | null | null |
import os
import json
from logging import config

# Resolve "<this module's name>.json" next to this file.  basename() is the
# fix: the original kept the directory inside `name`, so the later join()
# duplicated the directory whenever __file__ was a relative path.
name = os.path.splitext(os.path.basename(__file__))[0]
json_filename = os.path.join(os.path.dirname(__file__),
                             f'{name}.json')

# Load the dictConfig-format logging configuration from the JSON file.
with open(json_filename) as fh:
    config.dictConfig(json.load(fh))
| 20
| 55
| 0.661538
|
a5dac3d6ca2b3d760f8736d068bcd1c838b5581c
| 2,618
|
py
|
Python
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 9
|
2019-08-13T11:07:06.000Z
|
2022-01-14T18:15:13.000Z
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 166
|
2019-08-09T18:51:05.000Z
|
2021-12-02T15:24:15.000Z
|
tests/unit/test_upstream_dataset.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 21
|
2019-08-12T15:37:31.000Z
|
2021-06-15T14:06:23.000Z
|
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
| 34.906667
| 109
| 0.615737
|
a5daeaca530d32aa4078eb1a40a959857dd7e442
| 14,531
|
py
|
Python
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-07-02T06:24:17.000Z
|
2021-07-02T06:24:17.000Z
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T12:02:46.000Z
|
2021-06-28T12:02:46.000Z
|
pmaf/sequence/_multiple/_multiple.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings

# Silence FutureWarnings emitted at import time by skbio and its
# dependencies; must run before the imports below.
warnings.simplefilter("ignore", category=FutureWarning)
from skbio import TabularMSA
from skbio.sequence import GrammaredSequence
from io import StringIO, IOBase
from shutil import copyfileobj
import copy
import numpy as np
from pmaf.internal.io._seq import SequenceIO
from pmaf.sequence._sequence._nucleotide import Nucleotide
from pmaf.sequence._metakit import MultiSequenceMetabase, NucleotideMetabase
from pmaf.sequence._shared import validate_seq_mode
from typing import Union, Optional, Any, Sequence, Generator
from pmaf.internal._typing import AnyGenericIdentifier
| 33.871795
| 91
| 0.566719
|
a5db8882e50338e2cfe3830ff393ba99f5232ba1
| 1,498
|
py
|
Python
|
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | 3
|
2018-04-28T15:55:50.000Z
|
2018-05-11T22:57:20.000Z
|
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | null | null | null |
arvore_derivacao.py
|
rjribeiro/trabalho-formais
|
358de668cc256c696fdc4b426a69cf5a3d17b511
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # Demo: derivation tree for S -> A B, with A -> a and B -> b.
    leaf_a = ArvoreDerivacao('a')
    leaf_b = ArvoreDerivacao('b')
    node_a = ArvoreDerivacao('A', leaf_a)
    node_b = ArvoreDerivacao('B', leaf_b)
    root = ArvoreDerivacao('S', node_a, node_b)
    root.print_arvore()
| 27.740741
| 85
| 0.579439
|
a5dca4db049c83c9e0aaf82c2743e38347886e01
| 1,404
|
py
|
Python
|
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | 2
|
2021-12-24T12:02:48.000Z
|
2021-12-25T00:00:22.000Z
|
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | null | null | null |
src/test.py
|
biqar/hypergraph-study
|
04b54117eb8f684a72259b27b03162efb4c18cd0
|
[
"MIT"
] | 1
|
2021-07-19T02:05:13.000Z
|
2021-07-19T02:05:13.000Z
|
import re
import sys
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Expect exactly two arguments: the input file and the iteration count.
    if len(sys.argv) != 3:
        print("Usage: pagerank <file> <iterations>", file=sys.stderr)
        sys.exit(-1)
    print("WARN: This is a naive implementation of PageRank and is given as an example!\n" +
          "Please refer to PageRank implementation provided by graphx",
          file=sys.stderr)
    # Initialize the spark context.
    spark = SparkSession\
        .builder\
        .appName("PythonPageRank")\
        .getOrCreate()
    # Loads in input file. It should be in format of:
    #     URL         neighbor URL
    #     URL         neighbor URL
    #     URL         neighbor URL
    #     ...
    # Each record is a Row; r[0] extracts the raw line string.
    lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
    print("ALL LINKS",lines.collect())
    # parseNeighbors (defined elsewhere in the file) yields (url, neighbor)
    # pairs; group all neighbors per URL and cache for reuse.
    links = lines.flatMap(lambda urls: parseNeighbors(urls)).distinct().groupByKey().cache()
    print("ALL LINKS",links.collect())
| 29.25
| 92
| 0.608262
|
a5df0a5e25ad5c8a611b093330f6ecc81a28362f
| 1,312
|
py
|
Python
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 4
|
2019-02-22T14:07:26.000Z
|
2020-04-20T05:33:39.000Z
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 1
|
2019-05-18T08:04:32.000Z
|
2019-05-20T13:39:14.000Z
|
wagtail_lightadmin/wagtail_hooks.py
|
leukeleu/wagtail_lightadmin
|
6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5
|
[
"MIT"
] | 2
|
2017-06-06T09:34:53.000Z
|
2019-09-10T16:16:12.000Z
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.module_loading import import_string
from wagtail.core import hooks
| 26.24
| 65
| 0.641768
|
a5df58684b3949214fa0f306fa78ff1bd3a232de
| 3,333
|
py
|
Python
|
examples/datamining/page_rank.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 786
|
2015-01-01T12:35:40.000Z
|
2022-03-19T04:39:22.000Z
|
examples/datamining/page_rank.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 51
|
2015-01-19T20:07:01.000Z
|
2019-10-19T21:03:06.000Z
|
examples/datamining/page_rank.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 122
|
2015-01-05T18:16:03.000Z
|
2021-07-10T12:35:22.000Z
|
# Copyright 2009-2010 Yelp
# Copyright 2013 David Marin
# Copyright 2014 Disco Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Iterative implementation of the PageRank algorithm:
This example has been ported from the mrjob project.
http://en.wikipedia.org/wiki/PageRank
The format of the input should be of the form:
node_id initial_score neighbor_1 weight_1 neighbor_2 weight_2 ...
For example, the following input is derieved from wikipedia:
$ cat input
0 1
1 1 2 1
2 1 1 1
3 1 0 0.5 1 0.5
4 1 1 0.33 3 0.33 5 0.33
5 1 1 0.5 4 0.5
6 1 1 0.5 4 0.5
7 1 1 0.5 4 0.5
8 1 1 0.5 4 0.5
9 1 4 1
10 1 4 1
$ cat input | ddfs chunk pages -
$ python page_rank.py --iterations 10 pages
The results are:
0 : 0.303085470793
1 : 3.32372143585
2 : 3.39335760361
3 : 0.360345571947
4 : 0.749335470793
5 : 0.360345571947
6 : 0.15
7 : 0.15
8 : 0.15
9 : 0.15
10 : 0.15
"""
from optparse import OptionParser
from disco.core import Job, result_iterator
from disco.worker.classic.worker import Params
from disco.worker.task_io import chain_reader
if __name__ == '__main__':
    # NOTE: this file is Python 2 (see the print statement at the bottom).
    parser = OptionParser(usage='%prog [options] inputs')
    parser.add_option('--iterations',
                      default=10,
                      help='Numbers of iteration')
    parser.add_option('--damping-factor',
                      default=0.85,
                      help='probability a web surfer will continue clicking on links')
    (options, input) = parser.parse_args()
    results = input
    # The damping factor is shared with every map/reduce task via Params.
    params = Params(damping_factor=float(options.damping_factor))
    # Each iteration feeds the previous job's output back in as its input.
    for j in range(int(options.iterations)):
        job = Job().run(input=results, map=send_score, map_reader = chain_reader, reduce=receive_score, params = params)
        results = job.wait()
    # Print the final score of every node as "<node_id> : <score>".
    for _, node in result_iterator(results):
        fields = node.split()
        print fields[0], ":", fields[1]
| 26.452381
| 120
| 0.645965
|
a5e4666915212b8f6b0b15dc2449a686ce496e42
| 5,633
|
py
|
Python
|
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
stackdriver/restapi.py
|
MarkMarine/stackdriver-client-python
|
7e5e5806d02fcf4b8633d19adbce6d64f3082083
|
[
"Apache-2.0"
] | null | null | null |
"""
restapi - base for calling rest resources
Stackdriver Public API, Copyright Stackdriver 2014
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import requests
import copy
import types
import json
import logging
# Module-level logger, named after this module for per-module log configuration.
logger = logging.getLogger(__name__)
def transport_func(func):
    """Mark ``func`` as a transport function for a transport_controller to wrap.

    The function object itself is returned unchanged; only the marker
    attribute ``_is_transport_func`` is attached.
    """
    setattr(func, '_is_transport_func', True)
    return func
| 35.427673
| 164
| 0.680987
|
a5e7acf2b322f72151a720e8d6b6a7577bf377de
| 13,896
|
py
|
Python
|
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
ventana_perceptron.py
|
musicbiker/ANNT
|
301f1090925c8937f0fd3b4955ec68ff772022ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:05:41 2019
@author: jrodriguez119
"""
import tkinter as tk
from tkinter import ttk
import crearcapas
import perceptron_multicapa
from threading import Thread
import sys
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import menu
import sklearn
#Funcin que genera la ventana de parmetros del Perceptron multicapa
def Ventana_perceptron(ventana_seleccion,X_train,Y_train,X_test,Y_test,ventana_inicio):
#Crear ventana
ventana_perceptron = tk.Toplevel(ventana_seleccion)
ventana_perceptron.geometry('725x600+500+200')
#Insertar menu
menu.menu(ventana_perceptron,ventana_inicio)
#Esconder ventana previa
ventana_seleccion.withdraw()
#Ttulo
labeltitulo = ttk.Label(ventana_perceptron,text = "Parmetros necesarios para el Perceptrn",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo.pack(pady=10)
#Frame donde alojar los widget de entrada
lframe = ttk.Frame(ventana_perceptron)
lframe.pack()
#------------------------ entrada de datos ---------------------------------
#Tamao de lote
tamlot = tk.IntVar()
lbtamlote = ttk.Label(lframe,text = "Tamao lote: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbtamlote.grid(column=0, row=0 ,pady=5,sticky=tk.W)
etamlot = ttk.Entry(lframe,width=5, textvariable = tamlot)
etamlot.grid(column=1, row=0,pady=5,sticky=tk.E)
#Optimizador
opt =tk.StringVar()
lbopt = ttk.Label(lframe, text="Optimizador: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbopt.grid(column=0, row=1,pady=5,sticky=tk.W)
cbopt=ttk.Combobox(lframe,width=9,state="readonly",textvariable = opt)
cbopt["values"] = ["SGD", "RMSProp","Adam","Adagrad"]
cbopt.grid(column = 1 ,row = 1,pady=5,columnspan=2)
cbopt.current(0)
#Proporcin de validacin
pv = tk.DoubleVar()
pv.set(0.2)
lbpv = ttk.Label(lframe,text = "Proporcin de Validacin :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpv.grid(column=0, row=2 ,pady=5,sticky=tk.W)
epv = ttk.Entry(lframe,width=5, textvariable = pv)
epv.grid(column=1, row=2,pady=5,sticky=tk.E)
#Nmero de capas ocultas
nco = tk.IntVar()
lbnco = ttk.Label(lframe,text = "Nmero capas ocultas :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbnco.grid(column=0, row=3 ,pady=5,sticky=tk.W)
enco = ttk.Entry(lframe,width=5, textvariable = nco)
enco.grid(column=1, row=3,pady=5,sticky=tk.E)
#Funcin Loss
fl =tk.StringVar()
lbfl = ttk.Label(lframe, text="Funcin Loss: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbfl.grid(column=0, row=4,pady=5,sticky=tk.W)
cbfl=ttk.Combobox(lframe,width=21,state="readonly",textvariable = fl)
cbfl["values"] = ["kullback_leibler_divergence","mean_squared_error", "categorical_hinge",
"categorical_crossentropy","binary_crossentropy","poisson","cosine_proximity"]
cbfl.grid(column = 1 ,row = 4,pady=5,columnspan=2,sticky=tk.E)
cbfl.current(3)
#Mtodo de parada
labeltitulo1 = ttk.Label(ventana_perceptron,text = "Mtodo de parada",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo1.pack(pady=10)
lframe1 = ttk.Frame(ventana_perceptron)
lframe1.pack()
#Tipo de parada
#Parada por nmero de iteraciones
mp=tk.IntVar()
bat1= ttk.Radiobutton(lframe1, value=0,variable=mp)
bat1.grid(column=0, row=0)
nui=tk.IntVar()
lbnui = ttk.Label(lframe1, text="Nmero de iteraciones: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbnui.grid(column=1, row=0,pady=5,sticky=tk.W)
enui = ttk.Entry(lframe1,width=5, textvariable = nui)
enui.grid(column=2, row=0,pady=5,sticky=tk.E)
#Parada por control de un parmetro
bat2 = ttk.Radiobutton(lframe1, value=1,variable=mp)
bat2.grid(column=0, row=1)
lbparada = ttk.Label(lframe1, text="Parada temprana: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbparada.grid(column = 1, row = 1,sticky=tk.W )
#Parmetro a controlar
lbcon = ttk.Label(lframe1, text=" Parmetro a controlar: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbcon.grid(column = 1, row = 2,pady=5,sticky=tk.W )
con =tk.StringVar()
cbcon=ttk.Combobox(lframe1,width=9,state="readonly",textvariable = con)
cbcon["values"] = ["loss","val_loss", "acc","val_acc"]
cbcon.grid(column = 2 ,row = 2,pady=5,sticky=tk.E)
cbcon.current(0)
#Delta mnima de evolucin
delt =tk.DoubleVar()
delt.set(0.001)
lbdelt = ttk.Label(lframe1, text=" Delta min: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbdelt.grid(column=1, row=3,pady=5,sticky=tk.W)
edelt = ttk.Entry(lframe1,width=5, textvariable = delt)
edelt.grid(column=2, row=3,pady=5,sticky=tk.E)
#Paciencia para realizar la parada
pat =tk.IntVar()
pat.set(3)
lbpat = ttk.Label(lframe1, text=" Paciencia: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpat.grid(column=1, row=4,pady=5,sticky=tk.W)
epat = ttk.Entry(lframe1,width=5, textvariable = pat)
epat.grid(column=2, row=4,pady=5,sticky=tk.E)
#Funcin que abre una ventana externa y nos permite crear nuestro modelo editando las capas ocultas
btnmodelo = ttk.Button(ventana_perceptron, text = "Crear modelo",style='my.TButton', command=crearmodelo)
btnmodelo.pack(pady=50)
lframe2 = ttk.Frame(ventana_perceptron)
lframe2.pack(side= "bottom")
btntrain = ttk.Button(lframe2, text = "Entrenar",style='my.TButton', command=entrenar)
btntrain.grid(row = 0, column = 1, padx = 20, pady=15)
btnatras = ttk.Button(lframe2, text = "Atras",style='my.TButton', command=atras)
btnatras.grid(row=0,column=0, padx = 20, pady=15)
| 40.750733
| 151
| 0.573258
|
a5e93ad8745db2b82f7503c050a79a9fd3c06143
| 419
|
py
|
Python
|
tests/search/test_search_onedrive.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
tests/search/test_search_onedrive.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
tests/search/test_search_onedrive.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from tests.graph_case import GraphTestCase
| 23.277778
| 99
| 0.706444
|
a5ea06e0a07718613f62378639588110228f7035
| 728
|
py
|
Python
|
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
secu/tests/user_post_test.py
|
wancy86/tornado-seed
|
bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3
|
[
"MIT"
] | null | null | null |
import requests
from ..base.test import BaseTestCase, AuthorizedTestCase
import uuid
import common
| 26.962963
| 59
| 0.542582
|
a5ea9efb676efad8c603777a80d368a57ffbe7ba
| 2,204
|
py
|
Python
|
arbeitsplan/management/commands/meldungConsistent.py
|
hkarl/svpb
|
29aab0065ff69c7c4d52812508167514d635cab9
|
[
"Apache-2.0"
] | 3
|
2015-02-20T14:53:17.000Z
|
2020-12-01T19:29:14.000Z
|
arbeitsplan/management/commands/meldungConsistent.py
|
hkarl/svpb
|
29aab0065ff69c7c4d52812508167514d635cab9
|
[
"Apache-2.0"
] | 67
|
2015-01-06T19:48:59.000Z
|
2022-03-20T16:56:22.000Z
|
arbeitsplan/management/commands/meldungConsistent.py
|
hkarl/svpb
|
29aab0065ff69c7c4d52812508167514d635cab9
|
[
"Apache-2.0"
] | 2
|
2015-12-07T09:21:10.000Z
|
2015-12-30T18:36:53.000Z
|
"""
Define a command that should be run from a crontab.
This one should check consistency of Meldungen:
at most one Meldung per Aufgabe, per User.
"""
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.utils import translation
from django.conf import settings
from django.core.mail import send_mail
import arbeitsplan.models as models
import datetime
import pprint
from collections import defaultdict
| 29.783784
| 72
| 0.587114
|
a5ece29e8598f0696a2017f159b9027891f278ea
| 1,215
|
py
|
Python
|
app/migrations/0013_auto_20200907_1056.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | 1
|
2021-10-15T14:37:33.000Z
|
2021-10-15T14:37:33.000Z
|
app/migrations/0013_auto_20200907_1056.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
app/migrations/0013_auto_20200907_1056.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-09-07 10:56
import app.models
from django.db import migrations, models
| 31.153846
| 114
| 0.568724
|
a5ef7047358651b5620e1896751f01c69ce61941
| 6,404
|
py
|
Python
|
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | 2
|
2021-02-07T23:58:36.000Z
|
2021-02-08T01:03:25.000Z
|
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
products_and_services_client/models/monthly_price.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento so referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MonthlyPrice):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.841026
| 392
| 0.600562
|
a5f29eaf88394cd5b49f0dfad22427d8e4654b7c
| 481
|
py
|
Python
|
sqlalchemy_i18n/expressions.py
|
EdwardBetts/sqlalchemy-i18n
|
9cf515be75be6e319416579b32528e1a096c03cf
|
[
"BSD-3-Clause"
] | 31
|
2015-02-26T11:08:43.000Z
|
2022-03-18T11:53:30.000Z
|
sqlalchemy_i18n/expressions.py
|
EdwardBetts/sqlalchemy-i18n
|
9cf515be75be6e319416579b32528e1a096c03cf
|
[
"BSD-3-Clause"
] | 13
|
2015-01-05T09:40:59.000Z
|
2022-01-18T23:57:28.000Z
|
sqlalchemy_i18n/expressions.py
|
EdwardBetts/sqlalchemy-i18n
|
9cf515be75be6e319416579b32528e1a096c03cf
|
[
"BSD-3-Clause"
] | 13
|
2015-01-08T08:24:15.000Z
|
2022-02-05T01:59:41.000Z
|
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.functions import GenericFunction
| 26.722222
| 57
| 0.756757
|
a5f2ce8d23f3ea07c4d73928966352c760c23c7e
| 47
|
py
|
Python
|
scripts/portal/OutElfKingRoom.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/portal/OutElfKingRoom.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/portal/OutElfKingRoom.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# 101050010
# Portal script: warp the player out of the Elf King's room.
# NOTE(review): `sm` appears to be the script manager injected by the game
# server runtime — confirm against the server's script API.
sm.warp(101050000, 7)  # destination map 101050000, target portal 7
sm.dispose()  # end the script so the client regains control
| 11.75
| 21
| 0.723404
|
a5f31b512ae3b988c292e1211f6d15cfb61624fc
| 839
|
py
|
Python
|
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
suppy/simulator/atomics/divergence_atomic.py
|
bmaris98/suppy
|
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
|
[
"BSD-3-Clause"
] | null | null | null |
from suppy.utils.stats_constants import DIVERGENCE, TYPE
from typing import Any, Dict
from suppy.simulator.atomics.atomic import Atomic
| 31.074074
| 56
| 0.647199
|
a5f71728f2d90dfa8913ba58e0714da23be50b98
| 2,673
|
py
|
Python
|
train/compute/python/pytorch_benchmark.py
|
sazanovd/param
|
595b81ceb64d8d106d05ab67d2c73e8465d06921
|
[
"MIT"
] | null | null | null |
train/compute/python/pytorch_benchmark.py
|
sazanovd/param
|
595b81ceb64d8d106d05ab67d2c73e8465d06921
|
[
"MIT"
] | null | null | null |
train/compute/python/pytorch_benchmark.py
|
sazanovd/param
|
595b81ceb64d8d106d05ab67d2c73e8465d06921
|
[
"MIT"
] | null | null | null |
import logging
from .lib.init_helper import init_logging, load_modules
# Initialize logging format before loading all other modules
logger = init_logging(logging.INFO)
import argparse
from .lib import pytorch as lib_pytorch
from .lib.config import BenchmarkConfig
from .lib.pytorch.benchmark import (
make_default_benchmark,
ExecutionPass,
get_benchmark_options,
)
from .workloads import pytorch as workloads_pytorch
# Script entry point; main() is defined elsewhere in this module.
if __name__ == "__main__":
    main()
| 29.7
| 99
| 0.674523
|
a5f7a4ecfa05bf78a585981771c76de8e093cf7a
| 5,180
|
py
|
Python
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 8
|
2017-11-28T22:36:37.000Z
|
2020-10-20T06:46:19.000Z
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 25
|
2017-12-27T19:05:41.000Z
|
2022-03-15T18:35:22.000Z
|
database/BuildDatabase.py
|
chanzuckerberg/scoreboard
|
7ebf783819d0f5b4dd54092201f709b8644c85a4
|
[
"MIT"
] | 1
|
2018-04-23T11:16:41.000Z
|
2018-04-23T11:16:41.000Z
|
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Boolean, String, DateTime, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
import datetime
import os
import json
database = {
'pg_user': os.environ['SCOREBOARD_PG_USERNAME'],
'pg_pass': os.environ['SCOREBOARD_PG_PASSWORD'],
'pg_host': os.environ.get('SCOREBOARD_PG_HOST', 'localhost'),
'pg_port': os.environ.get('SCOREBOARD_PG_PORT', 5432),
'pg_database': os.environ.get('SCOREBOARD_PG_DATABASE', 'scoreboard')
}
# Build database
engine = create_engine(
"postgresql://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_database}".format(**database))
Base = declarative_base()
Base.metadata.create_all(engine)
# Load Data
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
with open("initialize.json") as fh:
initialize_data = json.load(fh)
for challenge in initialize_data["challenges"]:
datasets = challenge.pop('datasets', [])
new_challenge = Challenge(**challenge)
session.add(new_challenge)
session.flush()
session.refresh(new_challenge)
challenge_id = new_challenge.id
for dataset in datasets:
dataset["challenge_id"] = challenge_id
new_dataset = Dataset(**dataset)
session.add(new_dataset)
for admin in initialize_data["admins"]:
new_user = User(github_username=admin, is_admin=True)
session.add(new_user)
email_settings = initialize_data["email_settings"]
settings = AdminEmailSettings(email_provider=email_settings["email_provider"],
email_address= email_settings["admin_email"],
email_pass=email_settings["admin_pass"])
session.add(settings)
session.commit()
| 37
| 121
| 0.72471
|
a5f7a9890ad3c832bc0b2c81569f369cddac6df1
| 6,767
|
py
|
Python
|
docs/tutorials_source/generate_images_tutorial.py
|
XavierXinweiWang/UnrealCV_Ubuntu
|
4fdbb32f2096a4c95c0912d018ff4adb4801fb8b
|
[
"MIT"
] | null | null | null |
docs/tutorials_source/generate_images_tutorial.py
|
XavierXinweiWang/UnrealCV_Ubuntu
|
4fdbb32f2096a4c95c0912d018ff4adb4801fb8b
|
[
"MIT"
] | null | null | null |
docs/tutorials_source/generate_images_tutorial.py
|
XavierXinweiWang/UnrealCV_Ubuntu
|
4fdbb32f2096a4c95c0912d018ff4adb4801fb8b
|
[
"MIT"
] | null | null | null |
# Need at least 20 # characters
"""
===============
Generate Images
===============
This ipython notebook demonstrates how to generate an image dataset with rich
ground truth from a virtual environment.
"""
####################
import time; print(time.strftime("The last update of this file: %Y-%m-%d %H:%M:%S", time.gmtime()))
####################
# Load some python libraries
# The dependencies for this tutorials are
# PIL, Numpy, Matplotlib
from __future__ import division, absolute_import, print_function
import os, sys, time, re, json
import numpy as np
import matplotlib.pyplot as plt
imread = plt.imread
def imread8(im_file):
''' Read image as a 8-bit numpy array '''
im = np.asarray(Image.open(im_file))
return im
###############################
# Connect to the game
# ===================
# Load unrealcv python client, do :code:`pip install unrealcv` first.
from unrealcv import client
client.connect()
if not client.isconnected():
print('UnrealCV server is not running. Run the game downloaded from http://unrealcv.github.io first.')
sys.exit(-1)
###############################
# Make sure the connection works well
res = client.request('vget /unrealcv/status')
# The image resolution and port is configured in the config file.
print(res)
##############################
# Load a camera trajectory
# ========================
traj_file = './camera_traj.json' # Relative to this python script
import json; camera_trajectory = json.load(open(traj_file))
# We will show how to record a camera trajectory in another tutorial
##############################
# Render an image
# ===============
idx = 1
loc, rot = camera_trajectory[idx]
# Set position of the first camera
client.request('vset /camera/0/location {x} {y} {z}'.format(**loc))
client.request('vset /camera/0/rotation {pitch} {yaw} {roll}'.format(**rot))
# Get image
res = client.request('vget /camera/0/lit lit.png')
print('The image is saved to %s' % res)
# It is also possible to get the png directly without saving to a file
res = client.request('vget /camera/0/lit png')
im = read_png(res)
print(im.shape)
# Visualize the image we just captured
plt.imshow(im)
##############################
# Ground truth generation
# =======================
# Generate ground truth from this virtual scene
res = client.request('vget /camera/0/object_mask png')
object_mask = read_png(res)
res = client.request('vget /camera/0/normal png')
normal = read_png(res)
# Visualize the captured ground truth
plt.imshow(object_mask)
plt.figure()
plt.imshow(normal)
###############################
# Depth is retrieved as a numpy array
# For UnrealCV < v0.3.8, the depth is saved as an exr file, but this has two issues. 1. Exr is not well supported in Linux 2. It depends on OpenCV to read exr file, which is hard to install
res = client.request('vget /camera/0/depth npy')
depth = read_npy(res)
plt.imshow(depth)
##############################
# Get object information
# ======================
# List all the objects of this virtual scene
scene_objects = client.request('vget /objects').split(' ')
print('Number of objects in this scene:', len(scene_objects))
# TODO: replace this with a better implementation
id2color = {} # Map from object id to the labeling color
for obj_id in scene_objects:
color = Color(client.request('vget /object/%s/color' % obj_id))
id2color[obj_id] = color
# print('%s : %s' % (obj_id, str(color)))
#############################
# Parse the segmentation mask
id2mask = {}
for obj_id in scene_objects:
color = id2color[obj_id]
mask = match_color(object_mask, [color.R, color.G, color.B], tolerance = 3)
if mask is not None:
id2mask[obj_id] = mask
# This may take a while
# TODO: Need to find a faster implementation for this
##############################
# Print statistics of this virtual scene and this image
# =====================================================
# Load information of this scene
with open('object_category.json') as f:
id2category = json.load(f)
categories = set(id2category.values())
# Show statistics of this frame
image_objects = id2mask.keys()
print('Number of objects in this image:', len(image_objects))
print('%20s : %s' % ('Category name', 'Object name'))
for category in categories:
objects = [v for v in image_objects if id2category.get(v) == category]
if len(objects) > 6: # Trim the list if too long
objects[6:] = ['...']
if len(objects) != 0:
print('%20s : %s' % (category, objects))
##############################
# Show the annotation color of some objects
ids = ['SM_Couch_1seat_5', 'SM_Vase_17', 'SM_Shelving_6', 'SM_Plant_8']
# for obj_id in ids:
obj_id = ids[0]
color = id2color[obj_id]
print('%s : %s' % (obj_id, str(color)))
# color_block = np.zeros((100,100, 3)) + np.array([color.R, color.G, color.B]) / 255.0
# plt.figure(); plt.imshow(color_block); plt.title(obj_id)
##############################
# Plot only one object
mask = id2mask['SM_Plant_8']
plt.figure(); plt.imshow(mask)
##############################
# Show all sofas in this image
couch_instance = [v for v in image_objects if id2category.get(v) == 'Couch']
mask = sum(id2mask[v] for v in couch_instance)
plt.figure(); plt.imshow(mask)
##############################
# Change the annotation color, fixed in v0.3.9
# You can use this to make objects you don't care the same color
client.request('vset /object/SM_Couch_1seat_5/color 255 0 0') # Change to pure red
client.request('vget /object/SM_Couch_1seat_5/color')
res = client.request('vget /camera/0/object_mask png')
object_mask = read_png(res)
plt.imshow(object_mask)
##############################
# Clean up resources
# ==================
client.disconnect()
| 33.171569
| 189
| 0.632186
|
a5f94be0d65ac72db51e3348feb82e21a6da2f05
| 458
|
py
|
Python
|
blogs/migrations/0003_auto_20161006_1654.py
|
fenglb/mysite
|
a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab
|
[
"CC0-1.0"
] | null | null | null |
blogs/migrations/0003_auto_20161006_1654.py
|
fenglb/mysite
|
a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab
|
[
"CC0-1.0"
] | null | null | null |
blogs/migrations/0003_auto_20161006_1654.py
|
fenglb/mysite
|
a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-06 08:54
from __future__ import unicode_literals
from django.db import migrations, models
| 21.809524
| 62
| 0.617904
|
a5fa4568c42ee2ffd61fc83bd38a5e610ae98af1
| 1,660
|
py
|
Python
|
examples/ieffileupdate.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 6
|
2016-04-10T17:32:44.000Z
|
2022-03-13T18:41:21.000Z
|
examples/ieffileupdate.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 19
|
2017-06-23T08:21:53.000Z
|
2017-07-26T08:23:03.000Z
|
examples/ieffileupdate.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 6
|
2016-10-26T16:04:38.000Z
|
2019-04-25T23:55:06.000Z
|
"""
Summary:
Example use of the fmp package to update file paths in an .ief file
and save the ief file under a new name.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
import os
from ship.utils.fileloaders import fileloader as fl
# Contains functions for updating file paths and reading/writing files
from ship.utils import filetools
def iefExample():
"""update some key file paths in an ief file.
Updates the .dat file, .tcf file, and results file paths referenced by
the ief file and save it under a new ief file name.
"""
# Load the tuflow model with a tcf file
ief_file = r'C:\path\to\an\isis\ieffile.ief'
loader = fl.FileLoader()
ief = loader.loadFile(ief_file)
# Get the referenced fmp .dat and .tcf files
dat_path = ief.getValue('Datafile')
tcf_path = ief.getValue('2DFile')
results_path = ief.getValue('Results')
# Update the dat, results and tcf file names
root, ext = os.path.splitext(dat_path)
new_dat = root + '_Updated' + ext
root, ext = os.path.splitext(results_path)
new_results = root + '_Updated' + ext
root, ext = os.path.splitext(tcf_path)
new_tcf = root + '_Updated' + ext
ief.setValue('Datafile', new_dat)
ief.setValue('Results', new_results)
ief.setValue('2DFile', new_tcf)
# Update the filename and write contents to disk
ief.path_holder.filename += '_Updated'
ief_path = ief.path_holder.absolutePath()
ief.write(ief_path)
if __name__ == '__main__':
iefExample()
| 26.349206
| 75
| 0.654217
|
a5fbd11dbc9a0e80007cdb92a40b5c8dd7191ce7
| 8,387
|
py
|
Python
|
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/w3af/w3af/core/data/url/HTTPRequest.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
"""
HTTPRequest.py
Copyright 2010 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import copy
import socket
import urllib2
from w3af.core.data.dc.headers import Headers
from w3af.core.data.dc.utils.token import DataToken
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.request.request_mixin import RequestMixIn
from w3af.core.data.url.constants import MAX_HTTP_RETRIES
| 34.514403
| 99
| 0.623465
|
a5ff1935416c4a799dc3631e3b180db7559793bf
| 817
|
py
|
Python
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
Autodesk/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 147
|
2016-07-15T18:53:55.000Z
|
2022-01-30T04:36:39.000Z
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 151
|
2016-07-15T21:35:11.000Z
|
2019-10-10T08:57:29.000Z
|
moldesign/_notebooks/nbscripts/gen_toc.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 33
|
2016-08-02T00:04:51.000Z
|
2021-09-02T10:05:04.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os
from nbformat import v4
if __name__ == '__main__':
with open(sys.argv[1], 'r') as nbfile:
nb = v4.reads(nbfile.read())
print('Contents\n=======\n---')
for cell in nb.cells:
if cell['cell_type'] == 'markdown':
for line in cell['source'].splitlines():
header = parse_line(line)
if header is None: continue
ilevel, name = header
print(' '*(ilevel-1) + ' - [%s](#%s)'%(name, name.replace(' ','-')))
| 19.452381
| 85
| 0.532436
|
570013af797984af8b152ded17b276b76de011a7
| 1,102
|
py
|
Python
|
server/gaiaApi/gaia/serializer.py
|
JawaBaliIBM/Gaia
|
12572330c637cec559f8f122ecc2bd3af3dcf64e
|
[
"Apache-2.0"
] | 6
|
2021-07-31T10:52:36.000Z
|
2022-03-19T17:10:55.000Z
|
server/gaiaApi/gaia/serializer.py
|
JawaBaliIBM/Gaia
|
12572330c637cec559f8f122ecc2bd3af3dcf64e
|
[
"Apache-2.0"
] | 3
|
2021-07-24T08:17:53.000Z
|
2021-08-10T14:41:46.000Z
|
server/gaiaApi/gaia/serializer.py
|
JawaBaliIBM/Gaia
|
12572330c637cec559f8f122ecc2bd3af3dcf64e
|
[
"Apache-2.0"
] | 1
|
2021-07-31T10:15:45.000Z
|
2021-07-31T10:15:45.000Z
|
from rest_framework import serializers
| 42.384615
| 62
| 0.784029
|
5701301232a4492ca4d517aea40acc01301fe2f8
| 329
|
py
|
Python
|
aoc2020/d01_report_repair/methods.py
|
sflis/aoc2020
|
ef6ee81c18b6ec8332b150638b3d78772fe8327a
|
[
"Unlicense"
] | null | null | null |
aoc2020/d01_report_repair/methods.py
|
sflis/aoc2020
|
ef6ee81c18b6ec8332b150638b3d78772fe8327a
|
[
"Unlicense"
] | null | null | null |
aoc2020/d01_report_repair/methods.py
|
sflis/aoc2020
|
ef6ee81c18b6ec8332b150638b3d78772fe8327a
|
[
"Unlicense"
] | null | null | null |
import numpy as np
| 25.307692
| 59
| 0.653495
|
570168b655cd4c5fe01f67c0408794d1cfd928aa
| 2,306
|
py
|
Python
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 84
|
2020-12-20T20:39:19.000Z
|
2022-02-02T01:01:12.000Z
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 1
|
2020-12-25T01:07:09.000Z
|
2020-12-25T04:05:19.000Z
|
tests/test_persona.py
|
holnburger/persine
|
cb26d1e275f7ed7e1048bc1e6b66b71386c3e602
|
[
"MIT"
] | 9
|
2020-12-23T03:10:35.000Z
|
2021-09-08T14:44:18.000Z
|
import pytest
from persine import Persona
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from unittest.mock import Mock
| 26.204545
| 77
| 0.598439
|
57018df18d3cbc94d73679782950464b4f793c17
| 26,556
|
py
|
Python
|
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
inference.py
|
QuPengfei/learnable-triangulation-pytorch
|
861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import cv2
import os
import h5py
from collections import defaultdict
from mvn.models.triangulation import RANSACTriangulationNet, AlgebraicTriangulationNet, VolumetricTriangulationNet
from mvn.models.loss import KeypointsMSELoss, KeypointsMSESmoothLoss, KeypointsMAELoss, KeypointsL2Loss, VolumetricCELoss
from mvn.utils import img, multiview, op, vis, misc, cfg
from mvn.utils.img import get_square_bbox, resize_image, crop_image, normalize_image, scale_bbox
from mvn.utils.multiview import Camera
from mvn.utils.multiview import project_3d_points_to_image_plane_without_distortion as project
from mvn.datasets import utils as dataset_utils
from mvn.utils.img import image_batch_to_torch
retval = {
'subject_names': ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'],
'camera_names': ['54138969', '55011271', '58860488', '60457274'],
'action_names': [
'Directions-1', 'Directions-2',
'Discussion-1', 'Discussion-2',
'Eating-1', 'Eating-2',
'Greeting-1', 'Greeting-2',
'Phoning-1', 'Phoning-2',
'Posing-1', 'Posing-2',
'Purchases-1', 'Purchases-2',
'Sitting-1', 'Sitting-2',
'SittingDown-1', 'SittingDown-2',
'Smoking-1', 'Smoking-2',
'TakingPhoto-1', 'TakingPhoto-2',
'Waiting-1', 'Waiting-2',
'Walking-1', 'Walking-2',
'WalkingDog-1', 'WalkingDog-2',
'WalkingTogether-1', 'WalkingTogether-2']
}
h5_file="data/human36m/extra/una-dinosauria-data/h36m/cameras.h5"
bbox_file="data/human36m/extra/bboxes-Human36M-GT.npy"
#retval['bboxes'] = fill_bbox(bbox_file)
#retval['cameras'] = fill_cameras(h5_file)
def loadHuman36mLabel(path,train = True, withDamageAction=True, retain_every_n_frames_in_test=1):
"""
this load the label, including bouding box, camera matrices
"""
test = not train
labels = np.load(path, allow_pickle=True).item()
train_subjects = ['S1', 'S5', 'S6', 'S7', 'S8']
test_subjects = ['S9', 'S11']
train_subjects = list(labels['subject_names'].index(x) for x in train_subjects)
test_subjects = list(labels['subject_names'].index(x) for x in test_subjects)
indices = []
if train:
mask = np.isin(labels['table']['subject_idx'], train_subjects, assume_unique=True)
indices.append(np.nonzero(mask)[0])
if test:
mask = np.isin(labels['table']['subject_idx'], test_subjects, assume_unique=True)
if not withDamageAction:
mask_S9 = labels['table']['subject_idx'] == labels['subject_names'].index('S9')
damaged_actions = 'Greeting-2', 'SittingDown-2', 'Waiting-1'
damaged_actions = [labels['action_names'].index(x) for x in damaged_actions]
mask_damaged_actions = np.isin(labels['table']['action_idx'], damaged_actions)
mask &= ~(mask_S9 & mask_damaged_actions)
indices.append(np.nonzero(mask)[0][::retain_every_n_frames_in_test])
labels['table'] = labels['table'][np.concatenate(indices)]
return labels
if __name__ == "__main__":
#infer("alg",max_num=2, crop=True)
infer_videos("alg",max_num=1000, save_images_instead=False, crop=True)
| 42.018987
| 185
| 0.640496
|
57028ca06deb47d996805621dce315dc63a9dc8f
| 4,846
|
py
|
Python
|
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/Linux/tcp_sockets.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
TCP Linux sockets with netstat
"""
import re
import sys
import socket
import lib_util
import lib_common
from lib_properties import pc
from sources_types import addr as survol_addr
# Many advantages compared to psutil:
# The Python module psutil is not needed
# psutil gives only sockets if the process is accessible.
# It is much faster.
# On the other it is necessary to run netstat in the shell.
# $ netstat -aptn
# (Not all processes could be identified, non-owned process info
# will not be shown, you would have to be root to see it all.)
# Active Internet connections (servers and established)
# Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
# tcp 0 0 192.168.0.17:8000 0.0.0.0:* LISTEN 25865/python
# tcp 0 0 127.0.0.1:427 0.0.0.0:* LISTEN -
# tcp 0 0 0.0.0.0:5900 0.0.0.0:* LISTEN 4119/vino-server
# tcp 0 0 192.168.122.1:53 0.0.0.0:* LISTEN -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:60685 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:22 192.168.0.14:60371 ESTABLISHED -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58478 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.15:38960 TIME_WAIT -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58658 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.14:59694 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:44634 192.168.0.14:58690 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:ssh 192.168.0.14:63599 ESTABLISHED -
# tcp 0 0 fedora22:42042 176.103.:universe_suite ESTABLISHED 23512/amule
# tcp6 0 0 [::]:wbem-http [::]:* LISTEN -
# tcp6 0 0 [::]:wbem-https [::]:* LISTEN -
# tcp6 0 0 [::]:mysql [::]:* LISTEN -
# tcp6 0 0 [::]:rfb [::]:* LISTEN 4119/vino-server
# tcp6 0 0 [::]:50000 [::]:* LISTEN 23512/amule
# tcp6 0 0 [::]:43056 [::]:* LISTEN 4125/httpd
# tcp6 0 0 [::]:http [::]:* LISTEN -
# tcp6 0 0 [::]:ssh [::]:* LISTEN -
# tcp6 0 0 localhost:ipp [::]:* LISTEN -
# tcp6 0 0 [::]:telnet [::]:* LISTEN -
#
if __name__ == '__main__':
Main()
| 38.460317
| 98
| 0.577383
|
57033e68edf1bc714421c03684cc8349a3a89d3f
| 5,832
|
py
|
Python
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 137
|
2018-03-22T15:45:30.000Z
|
2022-03-17T09:39:07.000Z
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 5
|
2018-09-25T19:44:34.000Z
|
2020-12-19T11:26:41.000Z
|
models.py
|
JiaMingLin/residual_adapters
|
a3d32b4fb6c3c252f5adc1ad178b026a111c1a08
|
[
"Apache-2.0"
] | 40
|
2018-04-04T12:36:54.000Z
|
2022-02-19T05:46:36.000Z
|
# models.py
# created by Sylvestre-Alvise Rebuffi [srebuffi@robots.ox.ac.uk]
# Copyright The University of Oxford, 2017-2020
# This code is made available under the Apache v2.0 licence, see LICENSE.txt for details
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import config_task
import math
# No projection: identity shortcut
def resnet26(num_classes=10, blocks=BasicBlock):
return ResNet(blocks, [4,4,4],num_classes)
| 39.945205
| 133
| 0.627743
|
5703b571d61c82b1ad4a982fd9b8632e8dd15fd8
| 1,028
|
py
|
Python
|
Parte 02/Projeto 01.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
Parte 02/Projeto 01.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
Parte 02/Projeto 01.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
#Condies Aninhadas
#if -> elif -> elif -> else # pode usar quantos elif quiser
#Aprovando Emprstimo
casa = float(input('Qual o valor da casa: R$ '))
salario = float(input('Qual o valor do salrio: R$ '))
tempo = int(input('Quanto anos para pagar? '))
salario30 = salario * 0.30
prestacao = casa / (tempo * 12)
if salario30 >= prestacao and tempo >= 15:
print('Emprstimo no excede 30 % do seu slario')
print('-='*30)
print('EMPRSTIMO APROVADO COM RESTRIES')
print('-='*30)
elif salario30 >= prestacao and tempo < 15:
print('Emprstimo no excede 30 % e pagar em ',tempo)
print('-='*30)
print('EMPRSTIMO APROVADO SEM RESTRIES')
print('-='*30)
else:
print('Emprstimo excede 30% do seu salrio')
print('-='*30)
print('EMPRSTIMO NEGADO')
print('-='*30)
print('Para pagar a casa de R$ {:.2f}.\nCom o salrio que recebe de R$ {:.2f}.\nEm {} anos, voc deve pagar mensalmente R$ {:.2f}'.format(casa,salario,tempo,prestacao))
#\n quebra linha
#end=' ' puxa a linha de baixo
| 38.074074
| 168
| 0.657588
|
57053e08159134b657dc6dde4b49efc028c6a0a2
| 2,196
|
py
|
Python
|
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
main.py
|
GauravP2001/courseSniperBot
|
c3e05d2890f10177ee847a961b957d5e63e7d0ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import discord
import os
import requests
import asyncio
import psycopg2
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt=f"%m/%d/%Y %H:%M:%S %Z")
logger = logging.getLogger("Snipe Bot")
client = commands.Bot(command_prefix=".")
scheduler = AsyncIOScheduler()
DATABASE_URL = os.environ.get("DATABASE_URL")
conn = psycopg2.connect(DATABASE_URL, sslmode="require")
cur = conn.cursor()
# with conn:
# cur.execute("CREATE TABLE coursesToBeFound (index VARCHAR primary key);")
# cur.execute("INSERT INTO coursesToBeFound (index) VALUES (%s)", ("00150",))
# cur.execute("DELETE FROM coursesToBeFound where index = %s", ("00150",))
# cur.execute("SELECT * from coursesToBeFound;")
# for row in cur:
# print(row[0])
sectionsFound = []
if __name__ == "__main__":
logger.info("Starting")
scheduler.add_job(check_courses, "interval", seconds=10)
scheduler.start()
client.run(os.environ.get("token"))
| 28.519481
| 105
| 0.658015
|
57071627a3f7ead2f2e5161d076288e623b02921
| 160
|
py
|
Python
|
src/utilities/grammar.py
|
sonishreyas/news_scraper
|
7cd1bd9eb14fb903fc7b190b04191237da0a1d23
|
[
"MIT"
] | null | null | null |
src/utilities/grammar.py
|
sonishreyas/news_scraper
|
7cd1bd9eb14fb903fc7b190b04191237da0a1d23
|
[
"MIT"
] | null | null | null |
src/utilities/grammar.py
|
sonishreyas/news_scraper
|
7cd1bd9eb14fb903fc7b190b04191237da0a1d23
|
[
"MIT"
] | null | null | null |
from gingerit.gingerit import GingerIt
| 26.666667
| 38
| 0.74375
|
57075fadbef4087df6eac236abcbc48b853a6d54
| 619
|
py
|
Python
|
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
'''
Escreva um programa que leia dois nmeros inteiros e compare- os, mostrando na tela uma mensagem:
- O primeiro valor maior
- O segundo valor maior
- no existe valor maior, os dois so iguais
'''
# Ler dois nmeros inteiros
n1 = int(input('Informe o primeiro nmero: '))
n2 = int(input('Informe o segundo nmero: '))
# Operadores Lgicos
n1_maior = n1 > n2
n2_maior = n2 > n1
# Estrutura Condicional if, elif, else.
if n1_maior:
print('O nmero {} o maior!'.format(n1))
elif n2_maior:
print('O nmero {} o maior!'.format(n2))
else:
print('Os nmeros so iguais!')
| 22.925926
| 101
| 0.663974
|
57088093d1d0b3cfd26c3d3201f0bca2db2decb3
| 324
|
py
|
Python
|
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABS/ABC085C.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 18
| 59
| 0.435185
|
5708b5548cafd7c9ca9620325b1633201ca209f8
| 1,127
|
py
|
Python
|
mail_open_xchange/__openerp__.py
|
OdooCommunityWidgets/IDEAS-FOR-MODULES
|
74c588f6b6058119b8953650b6cb325fe5506cfd
|
[
"MIT"
] | 1
|
2015-05-27T19:56:29.000Z
|
2015-05-27T19:56:29.000Z
|
mail_open_xchange/__openerp__.py
|
OdooCommunityWidgets/IDEAS-FOR-MODULES
|
74c588f6b6058119b8953650b6cb325fe5506cfd
|
[
"MIT"
] | null | null | null |
mail_open_xchange/__openerp__.py
|
OdooCommunityWidgets/IDEAS-FOR-MODULES
|
74c588f6b6058119b8953650b6cb325fe5506cfd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
{
'name': 'Open-Xchange Odoo',
'version': '1.0',
'category': 'Social Network',
'sequence': 2,
'summary': 'Discussions, Mailing Lists, News',
'description': """
Open-Xchange Integration
=========================
This module is designed to be a standard open-xchange inbox inside Odoo to allow for the use of email inside the Odoo framework as an option alongside Odoo's own mail module.
I would like to slowly add features to this to further integrate Open-Xchange inside Odoo to allow for easier migration to Odoo for those that are not interested in using Odoo's default mail module to completely replace their emails.
Main Features
-------------
* Open-Xchange webmail interface inside Odoo.
* Multi-inbox handling by Open-Xchange.
* More features to be added later to further integrate Open-Xchange with Odoo.
""",
'author': 'Luke Branch',
'website': 'https://github.com/OdooCommunityWidgets/IDEAS-FOR-MODULES/mail_open_xchange',
'depends': ['base', 'base_setup', 'mail'],
'data': [
'',
],
'installable': False,
'application': True,
}
| 38.862069
| 233
| 0.674357
|
5708df6ade016849aefe1a0044ec7ee2d375c82f
| 10,853
|
py
|
Python
|
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_pulse_prop.py
|
ibegleris/w-fopo
|
e44b83b8ec54d01bb34b89805378a2b0659dfe6f
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.path.append('src')
from functions import *
import numpy as np
from numpy.testing import assert_allclose
"-----------------------Full soliton--------------------------------------------"
def pulse_propagations(ram, ss, nm, N_sol=1, cython = True, u = None):
    """Soliton regression test: propagate an N_sol-order sech pulse.

    Builds a single-pump simulation window centred on 1550 nm, launches a
    soliton whose width follows from the N-soliton condition, and integrates
    it over the full span with ``pulse_propagation``.

    Parameters
    ----------
    ram : Raman flag forwarded to the integrand / raman objects.
    ss : self-steepening flag forwarded to the integrand.
    nm : number of modes; the ``nm == 1`` branch collapses the Q matrices.
    N_sol : soliton order; sets T0 = sqrt(N^2 |beta2| / (gamma P0)).
    cython : accepted for API compatibility but NOT forwarded — the
        integrand below is constructed with ``cython=False``.
        TODO confirm this is intentional.
    u : unused; the launch field is always rebuilt below.

    Returns
    -------
    (u, U, maxerr) : final time-domain field, final spectrum, and the
    per-step error tolerance handed to the integrator.
    """
    n2 = 2.5e-20  # n2 for silica [m/W]
    # 0.0011666666666666668 # loss [dB/m]
    alphadB = np.array([0 for i in range(nm)])  # lossless fibre (one entry per mode)
    gama = 1e-3  # w/m
    "-----------------------------General options------------------------------"
    maxerr = 1e-13  # maximum tolerable error per step
    "----------------------------Simulation parameters-------------------------"
    N = 10
    z = np.array([0,70])  # total distance [m]
    nplot = 10  # number of plots
    nt = 2**N  # number of grid points
    #dzstep = z/nplot  # distance per step
    dz_less = 1
    dz = 1  # starting guess value of the step
    lam_p1 = 1550
    lamda_c = 1550e-9
    lamda = lam_p1*1e-9
    beta2 = -1e-3
    P0_p1 = 1
    betas = np.array([0, 0, beta2])
    # Soliton scaling: T0 from the N-soliton condition, then the sech FWHM.
    T0 = (N_sol**2 * np.abs(beta2) / (gama * P0_p1))**0.5
    TFWHM = (2*np.log(1+2**0.5)) * T0
    int_fwm = sim_parameters(n2, nm, alphadB)
    int_fwm.general_options(maxerr, raman_object, ss, ram)
    int_fwm.propagation_parameters(N, z, nplot, dz_less, 1)
    int_fwm.woble_propagate(0)
    fv, where = fv_creator(lam_p1,lam_p1 + 25,0, 100, int_fwm)
    #fv, where = fv_creator(lam_p1, , int_fwm, prot_casc=0)
    sim_wind = sim_window(fv, lamda, lamda_c, int_fwm, fv_idler_int=1)
    # Build the (here flat, zero) attenuation profile over the whole window.
    loss = Loss(int_fwm, sim_wind, amax=int_fwm.alphadB)
    alpha_func = loss.atten_func_full(sim_wind.fv, int_fwm)
    int_fwm.alphadB = alpha_func
    int_fwm.alpha = int_fwm.alphadB
    dnerr = [0]
    index = 1
    master_index = 0
    a_vec = [2.2e-6]  # presumably the core radius [m] — confirm with get_Qs
    Q_large,M1,M2 = get_Qs(nm, gama, fv, a_vec, dnerr, index, master_index, lamda, n2)
    if nm ==1:
        # Single-mode case: collapse the mode-coupling matrices to scalars.
        M1, M2, Q_large= np.array([1]), np.array([1]), Q_large[:,0,0]
        betas = betas[np.newaxis, :]
    # sys.exit()
    Dop = dispersion_operator(betas, int_fwm, sim_wind)
    print(Dop.shape)
    integrator = Integrator(int_fwm)
    # NOTE(review): the function's ``cython`` argument is ignored here.
    integrand = Integrand(int_fwm.nm,ram, ss, cython = False, timing = False)
    dAdzmm = integrand.dAdzmm
    RK = integrator.RK45mm
    dAdzmm = integrand.dAdzmm
    pulse_pos_dict_or = ('after propagation', "pass WDM2",
                         "pass WDM1 on port2 (remove pump)",
                         'add more pump', 'out')
    #M1, M2, Q = Q_matrixes(1, n2, lamda, gama=gama)
    raman = raman_object(int_fwm.ram, int_fwm.how)
    raman.raman_load(sim_wind.t, sim_wind.dt, M2, nm)
    if raman.on == 'on':
        hf = raman.hf
    else:
        hf = None
    u = np.empty(
        [ int_fwm.nm, len(sim_wind.t)], dtype='complex128')
    U = np.empty([int_fwm.nm,
                  len(sim_wind.t)], dtype='complex128')
    sim_wind.w_tiled = np.tile(sim_wind.w + sim_wind.woffset, (int_fwm.nm, 1))
    # Launch field: sech envelope with peak power P0, shifted to the pump offset.
    u[:, :] = ((P0_p1)**0.5 / np.cosh(sim_wind.t/T0)) * \
        np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
    U[:, :] = fftshift(sim_wind.dt*fft(u[:, :]))
    gam_no_aeff = -1j*int_fwm.n2*2*pi/sim_wind.lamda
    u, U = pulse_propagation(u, U, int_fwm, M1, M2.astype(np.int64), Q_large[0].astype(np.complex128),
                             sim_wind, hf, Dop[0], dAdzmm, gam_no_aeff,RK)
    U_start = np.abs(U[ :, :])**2
    # Remove the analytic soliton phase exp(i z/2) and the carrier offset so the
    # result can be compared directly with the launched pulse.
    u[:, :] = u[:, :] * \
        np.exp(1j*z[-1]/2)*np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
    """
    fig1 = plt.figure()
    plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
    plt.savefig('1.png')
    fig2 = plt.figure()
    plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
    plt.savefig('2.png')
    fig3 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('3.png')
    fig4 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('4.png')
    fig5 = plt.figure()
    plt.plot(fftshift(sim_wind.w),(np.abs(U[1,:])**2 - np.abs(U[1,:])**2 ))
    plt.savefig('error.png')
    fig6 = plt.figure()
    plt.plot(sim_wind.t,np.abs(u[1,:])**2 - np.abs(u[1,:])**2)
    plt.xlim(-10*T0, 10*T0)
    plt.savefig('error2.png')
    plt.show()
    """
    return u, U, maxerr
def test_bire_pass():
    """Birefringence rotation must preserve per-mode power at every step."""
    angles = np.random.uniform(0, 2 * pi, 100)
    rotator = birfeg_variation(angles, 2)
    shape = (2, 2 ** 14)
    field = 10 * (np.random.randn(*shape) + 1j * np.random.randn(*shape))
    for step in range(100):
        rotated = rotator.bire_pass(field, step)
        # A pure polarisation rotation is unitary, so |field|^2 is invariant.
        assert_allclose(np.abs(field) ** 2, np.abs(rotated) ** 2)
        field = 1 * rotated
| 36.056478
| 105
| 0.540404
|
570a3a32cbbdc85ab026871552208d720276a1d7
| 1,089
|
py
|
Python
|
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | 1
|
2019-06-13T16:05:36.000Z
|
2019-06-13T16:05:36.000Z
|
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | null | null | null |
download.py
|
wujushan/AndroidHeatMap
|
1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
if __name__ == '__main__':
url = 'https://jjdong5.com/get_file/4/1fa69b06c6276768e95cc0c04d85feec693488a588/13000/13287/13287_360p.m3u8'
download(url)
| 34.03125
| 113
| 0.575758
|
570a7fbde091be0d15c77144e4caa11f184860d3
| 4,945
|
py
|
Python
|
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
tests/watermarks_test.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from time import sleep
from absl import app
from absl import flags
import erdos.graph
from erdos.op import Op
from erdos.utils import frequency
from erdos.message import Message
from erdos.data_stream import DataStream
from erdos.timestamp import Timestamp
from erdos.message import WatermarkMessage
INTEGER_FREQUENCY = 10 # The frequency at which to send the integers.
class SecondOperator(Op):
    """ Second operator that listens in on the numbers and reports their
    sum when the watermark is received.

    Numbers are buffered per batch (window) as they arrive; when the
    watermark for a batch fires, the buffered window is summed and the
    aggregate is emitted on the "sum_out" stream.
    """
    def __init__(self, name):
        """ Initializes the attributes to be used."""
        super(SecondOperator, self).__init__(name)
        # Maps batch number -> list of numbers received for that batch.
        self.windows = defaultdict(list)
    def save_numbers(self, message):
        """ Save all the numbers corresponding to a window. """
        # The first timestamp coordinate identifies the batch/window.
        batch_number = message.timestamp.coordinates[0]
        self.windows[batch_number].append(message.data)
    def execute_sum(self, message):
        """ Sum all the numbers in this window and send out the aggregate. """
        batch_number = message.timestamp.coordinates[0]
        # pop() releases the buffered window once it has been consumed.
        window_data = self.windows.pop(batch_number, None)
        #print("Received a watermark for the timestamp: {}".format(batch_number))
        #print("The sum of the window {} is {}".format(
        #    window_data, sum(window_data)))
        output_msg = Message(sum(window_data),
                             Timestamp(coordinates = [batch_number]))
        self.get_output_stream("sum_out").send(output_msg)
    def execute(self):
        """ Execute the spin() loop to continue processing messages. """
        self.spin()
class ThirdOperator(Op):
    """ Third operator that listens in on the sum and verifies correctness."""
    def __init__(self, name):
        """Initializes the attributes to be used."""
        super(ThirdOperator, self).__init__(name)
    def assert_correctness(self, message):
        """ Assert the correctness of the results."""
        batch_number = message.timestamp.coordinates[0]
        # Each batch n is expected to contain the integers
        # (n-1)*10 + 1 .. n*10, so compare against that closed-form window sum.
        sum_data = sum(range((batch_number - 1) * 10 + 1, batch_number * 10 + 1))
        # NOTE(review): despite the name, this only prints the comparison;
        # no assertion is raised on a mismatch.
        print("Received sum: {} for the batch_number {}, expected {}".format(
            message.data, batch_number, sum_data))
if __name__ == "__main__":
app.run(main)
| 37.462121
| 90
| 0.658038
|
570a9547e24dbd1a28701e76c97396c34016c792
| 1,436
|
py
|
Python
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | 1
|
2020-01-12T04:05:42.000Z
|
2020-01-12T04:05:42.000Z
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | 14
|
2020-03-24T18:11:07.000Z
|
2022-03-12T00:15:20.000Z
|
test/test_shop/views.py
|
blakelockley/django-base-shop
|
455a2f4465e90cde57719ac29dc090b14f0bd324
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django_base_shop.models import ShippingTag
from .models import ConcreteCart, ConcreteProduct
| 24.338983
| 85
| 0.660167
|
570cfc314b92388cc92855fea7600f5e8b1e443e
| 11,600
|
py
|
Python
|
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 2
|
2021-03-16T05:48:36.000Z
|
2021-10-11T01:55:48.000Z
|
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | null | null | null |
q3/q3/drivers/ui/pyqt5.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 1
|
2021-03-16T05:48:39.000Z
|
2021-03-16T05:48:39.000Z
|
# PYQT
import sys
#from ...TabPanel import TabPanel
import sip
from q3.ui.engine import qtw,qtc,qtg
from ... import consts, prop, direction
from ...ui import orientation, colors
from ...moduletype import ModuleType
from ...nodeiotype import NodeIoType
from ...q3vector import Q3Vector
from ...EventSignal import EventProps
from ..driverBase import Q3DriverBase
from enum import Enum
from ...valuetype import ValueType
from .IoLinkView import IoLinkView
from .IoNodeView import IoNodeView
from .ModuleViewImpl import ModuleViewImpl
from .GraphViewImpl import GraphViewImpl
#class IoNode:
# pass
| 34.017595
| 141
| 0.617931
|
570eadcaa613e66d764e81bda74fc4c5ac38c715
| 2,538
|
py
|
Python
|
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | 7
|
2021-10-02T03:19:59.000Z
|
2022-03-21T21:24:14.000Z
|
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | null | null | null |
2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py
|
michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
|
9de44e5ad2e8d197b0a3c1b362b0377339278bd2
|
[
"MIT"
] | 6
|
2021-08-30T02:58:02.000Z
|
2022-02-01T07:46:49.000Z
|
# import pandas, matplotlib, and seaborn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.width', 53)
pd.set_option('display.max_columns', 5)
pd.set_option('display.max_rows', 200)
pd.options.display.float_format = '{:,.0f}'.format
covidtotals = pd.read_csv("data/covidtotals.csv")
covidtotals.set_index("iso_code", inplace=True)
landtemps = pd.read_csv("data/landtemps2019avgs.csv")
# do a scatterplot of total_cases by total_deaths
ax = sns.regplot(x="total_cases_mill", y="total_deaths_mill", data=covidtotals)
ax.set(xlabel="Cases Per Million", ylabel="Deaths Per Million", title="Total Covid Cases and Deaths by Country")
plt.show()
fig, axes = plt.subplots(1,2, sharey=True)
sns.regplot(x=covidtotals.aged_65_older, y=covidtotals.total_cases_mill, ax=axes[0])
sns.regplot(x=covidtotals.gdp_per_capita, y=covidtotals.total_cases_mill, ax=axes[1])
axes[0].set_xlabel("Aged 65 or Older")
axes[0].set_ylabel("Cases Per Million")
axes[1].set_xlabel("GDP Per Capita")
axes[1].set_ylabel("")
plt.suptitle("Age 65 Plus and GDP with Cases Per Million")
plt.tight_layout()
fig.subplots_adjust(top=0.92)
plt.show()
# show the high elevation points in a different color
low, high = landtemps.loc[landtemps.elevation<=1000], landtemps.loc[landtemps.elevation>1000]
low.shape[0], low.avgtemp.mean()
high.shape[0], high.avgtemp.mean()
plt.scatter(x="latabs", y="avgtemp", c="blue", data=low)
plt.scatter(x="latabs", y="avgtemp", c="red", data=high)
plt.legend(('low elevation', 'high elevation'))
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature (Celsius)")
plt.title("Latitude and Average Temperature in 2019")
plt.show()
# show scatter plot with different regression lines by elevation group
landtemps['elevation_group'] = np.where(landtemps.elevation<=1000,'low','high')
sns.lmplot(x="latabs", y="avgtemp", hue="elevation_group", palette=dict(low="blue", high="red"), legend_out=False, data=landtemps)
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature")
plt.legend(('low elevation', 'high elevation'), loc='lower left')
plt.yticks(np.arange(-60, 40, step=20))
plt.title("Latitude and Average Temperature in 2019")
plt.tight_layout()
plt.show()
# show this as a 3D plot
fig = plt.figure()
plt.suptitle("Latitude, Temperature, and Elevation in 2019")
ax = plt.axes(projection='3d')
ax.set_xlabel("Elevation")
ax.set_ylabel("Latitude")
ax.set_zlabel("Avg Temp")
ax.scatter3D(landtemps.elevation, landtemps.latabs, landtemps.avgtemp)
plt.show()
| 39.046154
| 130
| 0.754137
|
570ee5f940cb42c1bdadbd336c5d3471836f1133
| 7,359
|
py
|
Python
|
tests/test/base/test_map_set.py
|
Eve-ning/reamber_base_py
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 10
|
2020-06-28T11:16:36.000Z
|
2021-08-09T21:41:43.000Z
|
tests/test/base/test_map_set.py
|
Eve-ning/reamberPy
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 35
|
2020-06-18T13:05:50.000Z
|
2022-02-18T10:13:35.000Z
|
tests/test/base/test_map_set.py
|
Eve-ning/reamber_base_py
|
6d19c84f2c110b60e633b82b73e0516396466f56
|
[
"MIT"
] | 2
|
2021-05-26T17:05:06.000Z
|
2021-06-12T18:42:13.000Z
|
import unittest
import numpy as np
from reamber.base import Bpm, Hit, Hold, Map, MapSet
from reamber.base.lists import BpmList
from reamber.base.lists.notes import HitList, HoldList
# noinspection PyTypeChecker,DuplicatedCode
if __name__ == '__main__':
unittest.main()
| 54.110294
| 116
| 0.621552
|
570f7be4fc6a73c331b26ffda6ddfc47a075df88
| 1,252
|
py
|
Python
|
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
minifyoperation.py
|
seece/cbpp
|
b6771c7933fa07444e660eafda6f06cf60edce01
|
[
"MIT"
] | null | null | null |
import re
from util import *
from operation import Operation, OperationResult
| 24.076923
| 122
| 0.626997
|
570f8d367a6c727fc6ef795d72a90ef7bea75141
| 2,024
|
py
|
Python
|
graph.py
|
mrpatiwi/k-walk-py
|
a800f64079024716b26c0ebb9c3a2c5b6a935b78
|
[
"MIT"
] | null | null | null |
graph.py
|
mrpatiwi/k-walk-py
|
a800f64079024716b26c0ebb9c3a2c5b6a935b78
|
[
"MIT"
] | null | null | null |
graph.py
|
mrpatiwi/k-walk-py
|
a800f64079024716b26c0ebb9c3a2c5b6a935b78
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from index_matrix import Matrix
__author__ = 'Patricio Lopez Juri'
| 23.264368
| 77
| 0.535079
|
570fe23611397bcc46c1ab733771a0e34fdc4ba4
| 1,302
|
py
|
Python
|
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
ep004_helper.py
|
jpch89/effectivepython
|
97ba297bf987f346219bf8de5198c0817f5146e0
|
[
"MIT"
] | null | null | null |
from urllib.parse import parse_qs
# query string
my_values = parse_qs('red=5&blue=0&green=',
keep_blank_values=True)
# print(repr(my_values)) #
print(my_values) #
# >>>
# {'red': ['5'], 'blue': ['0'], 'green': ['']}
# blank
#
# get
print('Red: ', my_values.get('red'))
print('Green: ', my_values.get('green'))
print('Opacity: ', my_values.get('opacity'))
print('-' * 50)
#
#
# 0
# False
red = my_values.get('red', [''])[0] or 0
green = my_values.get('green', [''])[0] or 0
opacity = my_values.get('opacity', [''])[0] or 0
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
print('-' * 50)
#
#
red = int(my_values.get('red', [''])[0] or 0)
#
# 1 Python 2.5
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
# 2 if/else
green = my_values.get('green', [''])
if green[0]:
green = int(green[0])
else:
green = 0
# 3
| 23.25
| 48
| 0.609831
|
570febe7fa7b5748adbff547439e5061a41d7ecb
| 2,643
|
py
|
Python
|
Piece_class_stuff.py
|
krystianpietryka/Chess
|
e65afbe3ac51441327a5057b3677334ceb54d916
|
[
"MIT"
] | null | null | null |
Piece_class_stuff.py
|
krystianpietryka/Chess
|
e65afbe3ac51441327a5057b3677334ceb54d916
|
[
"MIT"
] | null | null | null |
Piece_class_stuff.py
|
krystianpietryka/Chess
|
e65afbe3ac51441327a5057b3677334ceb54d916
|
[
"MIT"
] | null | null | null |
from enum import Enum
import pygame
# Load sprites into pygame
# Piece class Objects
| 37.757143
| 57
| 0.650397
|
5710d9d404ac2b132ecaaa64415a70da05239921
| 656
|
py
|
Python
|
pubsub.py
|
basecue/micropython-pubsub
|
fcb6189d648515f1a7106ed5f54e332ba069793d
|
[
"Apache-2.0"
] | null | null | null |
pubsub.py
|
basecue/micropython-pubsub
|
fcb6189d648515f1a7106ed5f54e332ba069793d
|
[
"Apache-2.0"
] | null | null | null |
pubsub.py
|
basecue/micropython-pubsub
|
fcb6189d648515f1a7106ed5f54e332ba069793d
|
[
"Apache-2.0"
] | null | null | null |
from micropython import schedule
_subscribers = {}
| 19.294118
| 47
| 0.609756
|
5712c5f2bba3745161134c95e4c1fe8d35033684
| 5,808
|
py
|
Python
|
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
sc_cost_meter/utils.py
|
zaro0508/lambda-sc-cost-meter
|
2e10fa102af983f61a352ae633651fc3eaf64b19
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import logging
import os
from datetime import datetime
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def get_env_var_value(env_var):
    """Return the value of the environment variable *env_var*.

    A warning is logged when the variable is unset or empty; the
    (falsy) lookup result is returned unchanged in that case.

    :param env_var: name of the environment variable to read
    :returns: the variable's value, or ``None`` if it is not set
    """
    value = os.getenv(env_var)
    if value:
        return value
    log.warning(f'cannot get environment variable: {env_var}')
    return value
def get_marketplace_synapse_ids():
    """Return the Synapse IDs of all Marketplace customers.

    Scans the Marketplace Dynamo DB table (named by the
    MARKETPLACE_ID_DYNAMO_TABLE_NAME environment variable), which maps
    Synapse IDs to Service Catalog subscriber data.

    :return: list of Synapse ID strings; empty if the table name is not
        configured or the table has no items
    """
    id_attribute = "SynapseUserId"
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return []
    response = get_dynamo_client().scan(
        TableName=table_name,
        ProjectionExpression=id_attribute,
    )
    items = response.get("Items", [])
    # Each attribute value is wrapped in a DynamoDB type descriptor ({"S": ...}).
    return [item[id_attribute]["S"] for item in items]
def get_marketplace_customer_id(synapse_id):
    """Look up the Marketplace customer ID for a Synapse user.

    Reads the Marketplace Dynamo DB table (named by the
    MARKETPLACE_ID_DYNAMO_TABLE_NAME environment variable), which maps
    Synapse IDs to Service Catalog subscriber data.

    :param synapse_id: synapse user id
    :return: the Marketplace customer ID string, or ``None`` when the
        table is not configured or the user has no registration
    """
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return None
    attribute = 'MarketplaceCustomerId'
    response = get_dynamo_client().get_item(
        Key={
            'SynapseUserId': {
                'S': synapse_id,
            }
        },
        TableName=table_name,
        ConsistentRead=True,
        AttributesToGet=[
            attribute
        ]
    )
    item = response.get("Item")
    if item is None:
        log.info(f'cannot find registration for synapse user: {synapse_id}')
        return None
    # Unwrap the DynamoDB string type descriptor ({"S": ...}).
    return item[attribute]["S"]
def get_marketplace_product_code(synapse_id):
    """Look up the registered Marketplace product code for a Synapse user.

    Reads the Marketplace Dynamo DB table (named by the
    MARKETPLACE_ID_DYNAMO_TABLE_NAME environment variable), which maps
    Synapse IDs to Service Catalog subscriber data.

    :param synapse_id: synapse user id
    :return: the Marketplace product code string, or ``None`` when the
        table is not configured or the user has no registration
    """
    table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
    if not table_name:
        return None
    attribute = 'ProductCode'
    response = get_dynamo_client().get_item(
        Key={
            'SynapseUserId': {
                'S': synapse_id,
            }
        },
        TableName=table_name,
        ConsistentRead=True,
        AttributesToGet=[
            attribute
        ]
    )
    item = response.get("Item")
    if item is None:
        log.info(f'cannot find registration for synapse user: {synapse_id}')
        return None
    # Unwrap the DynamoDB string type descriptor ({"S": ...}).
    return item[attribute]["S"]
def get_customer_cost(customer_id, time_period, granularity):
    '''
    Get the total cost of all resources tagged with the customer_id for a given
    time_period.  The time_period and time granularity must match.
    :param customer_id: the Marketplace customer ID
    :param time_period: the cost time period (Cost Explorer TimePeriod dict)
    :param granularity: the granularity of time HOURLY|DAILY|MONTHLY
    :return: the total cost of all resources (as float) and the currency unit
    '''
    client = get_ce_client()
    response = client.get_cost_and_usage(
        TimePeriod=time_period,
        Granularity=granularity,
        Filter={
            "Tags": {
                "Key": "marketplace:customerId",
                "Values": [
                    customer_id
                ]
            }
        },
        Metrics=["UnblendedCost"]
    )
    # Only the first result period is read — assumes time_period spans exactly
    # one granularity interval (per the docstring contract).
    results_by_time = response['ResultsByTime']
    cost = results_by_time[0]["Total"]["UnblendedCost"]["Amount"]
    unit = results_by_time[0]["Total"]["UnblendedCost"]["Unit"]
    return float(cost), unit
def report_cost(cost, customer_id, product_code):
    '''
    Report the incurred cost of the customer's resources to the AWS Marketplace
    via the metering BatchMeterUsage API, under the 'costs_accrued' dimension.
    :param cost: the cost (as a float value)
    :param customer_id: the Marketplace customer ID
    :param product_code: the Marketplace product code
    '''
    # Hard-coded price per metering unit; the quantity reported is the number
    # of whole units the cost amounts to.
    cost_accrued_rate = 0.001  # TODO: use mareketplace get_dimension API to get this info
    quantity = int(cost / cost_accrued_rate)
    mrktpl_client = get_meteringmarketplace_client()
    response = mrktpl_client.batch_meter_usage(
        UsageRecords=[
            {
                'Timestamp': datetime.utcnow(),
                'CustomerIdentifier': customer_id,
                'Dimension': 'costs_accrued',
                'Quantity': quantity
            }
        ],
        ProductCode=product_code
    )
    log.debug(f'batch_meter_usage response: {response}')
    # Only a single record is submitted, so only the first result is checked.
    results = response["Results"][0]
    status = results["Status"]
    if status == 'Success':
        log.info(f'usage record: {results}')
    else:
        # TODO: need to add a retry mechanism for failed reports
        unprocessed_records = response["UnprocessedRecords"][0]
        log.error(f'unprocessed record: {unprocessed_records}')
| 31.394595
| 89
| 0.719697
|
5714de071955ec101c9d0bd2f8b9cad2f55c7b5c
| 8,000
|
py
|
Python
|
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 5
|
2017-09-01T14:05:03.000Z
|
2019-07-13T07:52:49.000Z
|
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | null | null | null |
source/metadata.py
|
sanmik/brain-network-viz
|
9c881e49c14c94e3f7ef4b7776d98c930716ee91
|
[
"MIT"
] | 1
|
2017-09-01T14:05:03.000Z
|
2017-09-01T14:05:03.000Z
|
# Library Imports
from itertools import islice
import csv
# Local Module Imports
import config
| 33.057851
| 79
| 0.58775
|
571574f8dd7e9bd961d512815c9fd6535e05f1d8
| 20,165
|
py
|
Python
|
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | null | null | null |
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | null | null | null |
src/src_python/antares_xpansion/driver.py
|
pelefebvre/antares-xpansion
|
c62ed1a982e970325dec6007eb57a9c6288ef0c7
|
[
"Apache-2.0"
] | 1
|
2021-05-27T13:06:26.000Z
|
2021-05-27T13:06:26.000Z
|
"""
Class to control the execution of the optimization session
"""
import shutil
import configparser
import glob
import os
import subprocess
import sys
from pathlib import Path
from antares_xpansion.input_checker import check_candidates_file
from antares_xpansion.input_checker import check_settings_file
from antares_xpansion.xpansion_utils import read_and_write_mps
| 41.069246
| 129
| 0.579965
|
571589111619e9fd5ae98d9e183ad96ef4ec5ca8
| 1,186
|
py
|
Python
|
CRASHLACMA/slistener.py
|
carlynorama/CRASHLACMA
|
d59890a5a0702940a0d678b600230f0f53384710
|
[
"CC0-1.0"
] | null | null | null |
CRASHLACMA/slistener.py
|
carlynorama/CRASHLACMA
|
d59890a5a0702940a0d678b600230f0f53384710
|
[
"CC0-1.0"
] | null | null | null |
CRASHLACMA/slistener.py
|
carlynorama/CRASHLACMA
|
d59890a5a0702940a0d678b600230f0f53384710
|
[
"CC0-1.0"
] | null | null | null |
from tweepy import StreamListener
import json, time, sys
| 29.65
| 108
| 0.555649
|
5715f8e7099f46fd56b68c4c665702f7dc7e68e3
| 379
|
py
|
Python
|
Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py
|
sartinicj/curso-em-video-python
|
8cb4ca05a88351c44aa4a7befc59c9596a50f268
|
[
"MIT"
] | null | null | null |
Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py
|
sartinicj/curso-em-video-python
|
8cb4ca05a88351c44aa4a7befc59c9596a50f268
|
[
"MIT"
] | null | null | null |
Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py
|
sartinicj/curso-em-video-python
|
8cb4ca05a88351c44aa4a7befc59c9596a50f268
|
[
"MIT"
] | null | null | null |
'''
N = int(input('Digite um NMERO: '))
tot = 0
for c in range(1, n+1):
if n%c == 0:
print('\033[33m', end=' ')
tot+=1
else:
print('\033[31m', end=' ')
print('{}'.format(c), end=' ')
print('\n\033[mO numero {} foi divisivel {} vezes'.format(n, tot))
if tot == 2:
print('E POR ISSO PRIMO')
else:
print('E POR ISSO NO PRIMO')
'''
| 21.055556
| 66
| 0.506596
|
57167335582e22b6c5a86bb8546c9e43ffff5640
| 158
|
py
|
Python
|
core/views.py
|
TonyLuque/marketing_website
|
2ebbbf7788c439afc8192926fa3dc3e231b1e69e
|
[
"MIT"
] | null | null | null |
core/views.py
|
TonyLuque/marketing_website
|
2ebbbf7788c439afc8192926fa3dc3e231b1e69e
|
[
"MIT"
] | null | null | null |
core/views.py
|
TonyLuque/marketing_website
|
2ebbbf7788c439afc8192926fa3dc3e231b1e69e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
| 17.555556
| 44
| 0.734177
|
57172ba3fed738751a65af00ff67b7037fd96b96
| 206
|
py
|
Python
|
qclib/qclab.py
|
hagne/qclib
|
c90e06cb22708d610126710715a5d66bd4dc0898
|
[
"MIT"
] | null | null | null |
qclib/qclab.py
|
hagne/qclib
|
c90e06cb22708d610126710715a5d66bd4dc0898
|
[
"MIT"
] | null | null | null |
qclib/qclab.py
|
hagne/qclib
|
c90e06cb22708d610126710715a5d66bd4dc0898
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import qclib.tag_times
| 25.75
| 86
| 0.747573
|
57179805f172aec6d932cd77e1ce3e7e6d275877
| 317
|
py
|
Python
|
t_server.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | 1
|
2018-03-01T02:42:52.000Z
|
2018-03-01T02:42:52.000Z
|
t_server.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | null | null | null |
t_server.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | null | null | null |
import socket
address = ('127.0.0.1', 31500)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s = socket.socket()
s.bind(address)
s.listen(5)
ss, addr = s.accept()
print 'got connected from',addr
ss.send('hihi')
ra = ss.recv(512)
print ra
ss.close()
s.close()
| 19.8125
| 78
| 0.583596
|
5717a38a4849d391d82607edadcfd400d7080783
| 1,906
|
py
|
Python
|
py_solutions_71-80/Euler_77.py
|
tijko/Project-Euler
|
d953a2bf6932c2c4e1235409fedf760add65a0ba
|
[
"MIT"
] | null | null | null |
py_solutions_71-80/Euler_77.py
|
tijko/Project-Euler
|
d953a2bf6932c2c4e1235409fedf760add65a0ba
|
[
"MIT"
] | 1
|
2022-03-15T02:49:09.000Z
|
2022-03-15T02:49:09.000Z
|
py_solutions_71-80/Euler_77.py
|
tijko/Project-Euler
|
d953a2bf6932c2c4e1235409fedf760add65a0ba
|
[
"MIT"
] | null | null | null |
# what is the first number to have 5000 different ways to sum with prime numbers?
import math
import timeit
start = timeit.default_timer()
print "Answer: %s" % euler_77()
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| 28.029412
| 82
| 0.407135
|
57199578121fb89b3db3e976c4737dd3dcc14bf5
| 2,258
|
py
|
Python
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 9
|
2019-12-30T16:32:22.000Z
|
2020-03-03T20:14:47.000Z
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 283
|
2020-02-03T15:16:03.000Z
|
2020-05-05T03:18:59.000Z
|
lambdas/get_users.py
|
charvi-a/320-S20-Track1
|
ac97504fc1fdedb1c311773b015570eeea8a8663
|
[
"BSD-3-Clause"
] | 3
|
2020-04-16T15:23:29.000Z
|
2020-05-12T00:38:41.000Z
|
from package.query_db import query
from package.lambda_exception import LambdaException
| 34.738462
| 171
| 0.574402
|
571ab14954af261729cb1d3fc0d5e206657e96fa
| 705
|
py
|
Python
|
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | null | null | null |
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPracticeWithPython
|
f7129bd1924a7961489198f0ee052d2cd1e9cf40
|
[
"MIT"
] | 1
|
2019-11-04T06:44:04.000Z
|
2019-11-04T06:46:55.000Z
|
leetCode/swap_nodes_in_pairs.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
# Title: Swap Nodes in Pairs
# Link: https://leetcode.com/problems/swap-nodes-in-pairs
def solution():
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
problem = Problem()
return problem.swap_pairs(head)
if __name__ == '__main__':
main()
| 22.741935
| 61
| 0.58156
|
571ac253ee844d994243e9c2e1443c9c4aa20002
| 16,967
|
py
|
Python
|
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
detect_actions.py
|
CTewan/ACAM_Demo
|
b76cf4ce1289b8c311dbad1588f299ff67f7eaf3
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import imageio
import tensorflow as tf
import json
import csv
import os
import sys
sys.path.append("object_detection")
sys.path.append("object_detection/deep_sort")
sys.path.append("action_detection")
import argparse
import object_detection.object_detector as obj
import action_detection.action_detector as act
import time
DISPLAY = False
SHOW_CAMS = False
np.random.seed(10)
COLORS = np.random.randint(0, 255, [1000, 3])
if __name__ == '__main__':
main()
| 38.386878
| 145
| 0.590263
|
571af6febfc1dc4cb09b37f0fb44cc848ccf1059
| 5,556
|
py
|
Python
|
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
tests/test_parametric_shapes/test_SweepMixedShape.py
|
RemDelaporteMathurin/paramak
|
10552f1b89820dd0f7a08e4a126834877e3106b4
|
[
"MIT"
] | null | null | null |
import os
import unittest
from pathlib import Path
import pytest
from paramak import SweepMixedShape
if __name__ == "__main__":
unittest.main()
| 29.089005
| 89
| 0.430166
|
571d88262578b13c16efd4393da7b28acd2cd972
| 458
|
py
|
Python
|
upload.py
|
newkisoft/newki-sql-backup
|
82690b25cfa57dc770210e5a3398954949271c0e
|
[
"MIT"
] | null | null | null |
upload.py
|
newkisoft/newki-sql-backup
|
82690b25cfa57dc770210e5a3398954949271c0e
|
[
"MIT"
] | null | null | null |
upload.py
|
newkisoft/newki-sql-backup
|
82690b25cfa57dc770210e5a3398954949271c0e
|
[
"MIT"
] | null | null | null |
from boto3.s3.transfer import S3Transfer
import boto3
import glob
import os
files = glob.glob("*.bak")
for file in files:
print(file)
uploaded = upload_to_aws(file, 'newki-backup', file)
os.remove(file)
| 28.625
| 84
| 0.69869
|
571dbed119712d82f6343f841d5c39a1d78ee427
| 996
|
py
|
Python
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 332
|
2017-06-13T10:40:05.000Z
|
2022-03-11T15:10:02.000Z
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 9
|
2017-06-16T02:36:06.000Z
|
2021-05-09T06:01:34.000Z
|
run_rnn.py
|
iqbaalmuhmd/CNNnumpyTest
|
eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b
|
[
"MIT"
] | 105
|
2017-06-15T06:40:44.000Z
|
2022-03-09T06:38:59.000Z
|
import numpy as np
from deepnet.nnet import RNN
from deepnet.solver import sgd_rnn
def text_to_inputs(path):
    """Encode a text file as integer character indices for RNN training.

    X[i] is the vocabulary index of the i-th character; y[i] is the index
    of the character that follows it.  The final character has no
    successor, so the index of '.' is appended as its target (the input
    corpus is expected to end with '.').

    :param path: path of the text file to read
    :return: (X, y, vocab_size, char_to_idx, idx_to_char)
    """
    with open(path) as f:
        text = f.read()
    vocab = set(text)
    char_to_idx = {}
    idx_to_char = {}
    for idx, char in enumerate(vocab):
        char_to_idx[char] = idx
        idx_to_char[idx] = char
    X = np.array([char_to_idx[c] for c in text])
    targets = [char_to_idx[c] for c in text[1:]]
    targets.append(char_to_idx['.'])
    y = np.array(targets)
    return X, y, len(char_to_idx), char_to_idx, idx_to_char
if __name__ == "__main__":
    # Train a character-level RNN on the bundled text corpus.
    X, y, vocab_size, char_to_idx, idx_to_char = text_to_inputs('data/Rnn.txt')
    rnn = RNN(vocab_size,vocab_size,char_to_idx,idx_to_char)
    # Positional args are presumably epochs/batch-size/learning-rate —
    # confirm against the sgd_rnn signature in deepnet.solver.
    rnn = sgd_rnn(rnn,X,y,10,10,0.1)
| 27.666667
| 79
| 0.625502
|
571ddbe314e19b402b88195037ee31e371ecdddf
| 5,421
|
py
|
Python
|
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | 1
|
2021-12-31T18:00:08.000Z
|
2021-12-31T18:00:08.000Z
|
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | null | null | null |
lcclassifier/experiments/attnstats.py
|
oscarpimentel/astro-lightcurves-classifier
|
f697b43e22bd8c92c1b9df514be8565c736dd7cc
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from . import _C
import torch
from fuzzytorch.utils import TDictHolder, tensor_to_numpy, minibatch_dict_collate
import numpy as np
from fuzzytools.progress_bars import ProgressBar, ProgressBarMulti
import fuzzytools.files as files
import fuzzytools.datascience.metrics as fcm
from fuzzytools.matplotlib.utils import save_fig
import matplotlib.pyplot as plt
import fuzzytorch.models.seq_utils as seq_utils
from scipy.optimize import curve_fit
from lchandler import _C as _Clchandler
from lchandler.plots.lc import plot_lightcurve
from .utils import check_attn_scores
EPS = _C.EPS
###################################################################################################################################################
###################################################################################################################################################
| 35.431373
| 147
| 0.65855
|
571ea096124b732422144c10209f4cc5cb3c06c7
| 1,473
|
py
|
Python
|
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | null | null | null |
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | 1
|
2019-01-04T06:37:06.000Z
|
2019-01-04T06:37:06.000Z
|
get_item_by_key.py
|
flyco2016/my_python_module_project
|
6e1ac7f074f7b57403d7b7c6adadab17a26fc27d
|
[
"Apache-2.0"
] | null | null | null |
#
if __name__ == "__main__":
test_dic = {'a': 1, 'b': 2, 'c': {'a': 1, 'b': {'b': 4}}}
r1 = getItemByKey(test_dic, 'b')
r2 = getItemByKeyInMyMethod(test_dic, 'b')
print(r1, r2, sep='\n')
| 33.477273
| 81
| 0.491514
|
571ee3c5442f3448677cce9af4cb7b0165e2aa98
| 148
|
py
|
Python
|
tests/test_tail_chunks.py
|
moskytw/tacit
|
58286a71140be150438d10acf93028ef5f78f6d1
|
[
"MIT"
] | null | null | null |
tests/test_tail_chunks.py
|
moskytw/tacit
|
58286a71140be150438d10acf93028ef5f78f6d1
|
[
"MIT"
] | null | null | null |
tests/test_tail_chunks.py
|
moskytw/tacit
|
58286a71140be150438d10acf93028ef5f78f6d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tacit import tac_slices
for chunk in tac_slices('data/ordered.list', 2):
print repr(chunk)
| 18.5
| 48
| 0.675676
|
571f77622a48c2fb03cc44698429e534d7932593
| 7,166
|
py
|
Python
|
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | 5
|
2020-09-17T12:17:13.000Z
|
2022-02-28T08:07:49.000Z
|
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | null | null | null |
calories.py
|
davidsvaughn/har-pytorch
|
334733a1e870637c9077d16fc15e0b1954a6dfc5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import json
from datetime import datetime
import psycopg2
import functools
import requests
##############################################################
## https://www.exrx.net/Calculators/WalkRunMETs
## https://www.cdc.gov/growthcharts/clinical_charts.htm
## https://help.fitbit.com/articles/en_US/Help_article/1141
##############################################################
URL = 'https://f73lzrw31i.execute-api.us-west-2.amazonaws.com/default/demo_data_server'
HEADER = {'x-api-key': 'XXXXXX'}
#############################################################################################
#############################################################################################
revibe = adict()
revibe.DBNAME = 'revibe'
revibe.HOST = 'prd.c5fw7irdcxik.us-west-2.rds.amazonaws.com'
#revibe.PORT = '5432'
revibe.USER = 'dave'
revibe.PASS = 'tnoiSLoHjEBZE6JKsFgY'
revibe.SSLMODE = 'require'
CONN = None
## revised Harris-Benedict BMR equations...
## basal metabolic rate (kCals per day)
## find index j into y that minimizes abs(x-y[j])
GC = None
# data.bday = data.bday.strftime('%Y-%m-%d')
## s : speed in mph... sec by second vector of speeds....
## w : weight in lbs
## mode : 2=='walk', 3=='run'
## returns : calories summed across all seconds
def calsum(s, w, mode=2):
su, wu = 26.8, 2.2
s = s*su
w = w/wu
if mode==3:## run mode
vo = 0.2*s
else: ## walk mode == 2
fwvo = 21.11 - 0.3593*s + 0.003*s*s - 3.5
wvo = 0.1*s
d = 30
a = np.clip((s-(100-d))/(2*d), 0, 1)
vo = wvo*(1.-a) + fwvo*a
#############################
return np.sum(vo*w) / 12000.0
###################################
if __name__ == "__main__":
pid = 135 ## 135,"1974-05-28",1,0,74,196,1
pid = 169 ## 169,"1980-12-01",1,12,72,170,2
pid = 18947 ## 18947,"2010-08-28",0,0,0,0,0
pid = 10885 ##
# dd = request_demo_data(pid)
# print(dd)
# dd = get_demo_data(pid)
# print(dd)
#############
dd = make_demo_data(bday='2010-08-28', ht='54.035', wt='69.69', sex='3')
# dd = make_demo_data(ht='70', wt='120', sex='2')
print(dd)
| 30.887931
| 110
| 0.558889
|
5720538c6a907cf78b7908572f98d96063191e14
| 1,651
|
py
|
Python
|
pagelets/conf.py
|
rysdyk/django-pagelets
|
31669771b7ecf8ade3dae64465d3fa984d88d0f9
|
[
"BSD-3-Clause"
] | null | null | null |
pagelets/conf.py
|
rysdyk/django-pagelets
|
31669771b7ecf8ade3dae64465d3fa984d88d0f9
|
[
"BSD-3-Clause"
] | null | null | null |
pagelets/conf.py
|
rysdyk/django-pagelets
|
31669771b7ecf8ade3dae64465d3fa984d88d0f9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
CONTENT_AREAS = getattr(settings, 'PAGELET_CONTENT_AREAS', (
('main', 'Main'),
))
CONTENT_AREA_DEFAULT = getattr(settings, 'PAGELET_CONTENT_AREA_DEFAULT', 'main')
CONTENT_TYPES = getattr(settings, 'PAGELET_CONTENT_TYPES', (
('html', 'HTML',
(),
{},),
('markdown', 'Markdown',
(),
{},),
('wymeditor', 'WYMeditor',
('wymeditor/jquery.wymeditor.js',),
{},),
('textile', 'Textile',
(),
{},),
)) + getattr(settings, 'PAGELET_CONTENT_TYPES_EXTRA', ())
CONTENT_TYPE_CHOICES = tuple((c[0], c[1]) for c in CONTENT_TYPES)
CONTENT_TYPE_DEFAULT = getattr(settings, 'PAGELET_CONTENT_TYPE_DEFAULT', 'html')
try:
ATTACHMENT_PATH = settings.PAGELET_ATTACHMENT_PATH
except AttributeError:
ATTACHMENT_PATH = getattr(settings, 'PAGE_ATTACHMENT_PATH', 'attachments/pages/')
# settings.PAGELET_TEMPLATE_TAGS is a list of template tag names that
# will load before each pagelet is rendered, allowing custom template
# tags to be included without including {% load <template_tag> %}
tags = set(['pagelet_tags'])
if hasattr(settings, 'PAGELET_TEMPLATE_TAGS'):
for tag in settings.PAGELET_TEMPLATE_TAGS:
tags.add(tag)
AUTO_LOAD_TEMPLATE_TAGS = '{%% load %s %%}' % ' '.join(tags)
BASE_TEMPLATES = getattr(settings, 'PAGELET_BASE_TEMPLATES', [])
BASE_TEMPLATE_DEFAULT = getattr(settings, 'PAGELET_BASE_TEMPLATE_DEFAULT', None)
INLINE_PAGELET_EXTRA = getattr(settings, 'PAGELET_INLINE_PAGELET_EXTRA', 0)
INLINE_SHARED_EXTRA = getattr(settings, 'PAGELET_INLINE_SHARED_EXTRA', 0)
INLINE_ATTACHMENT_EXTRA = getattr(settings, 'PAGELET_INLINE_ATTACHMENT_EXTRA', 0)
| 36.688889
| 85
| 0.723198
|
5721e5bf810d647e593fd1d82e6a86cb2fa7e570
| 14,744
|
py
|
Python
|
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
alphad3m/alphad3m/metalearning/grammar_builder.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
from scipy import stats
from collections import OrderedDict
from alphad3m.metalearning.resource_builder import load_metalearningdb
from alphad3m.metalearning.dataset_similarity import get_similar_datasets
from alphad3m.primitive_loader import load_primitives_by_name, load_primitives_by_id
logger = logging.getLogger(__name__)
if __name__ == '__main__':
test_dataset('185_baseball_MIN_METADATA')
| 48.983389
| 165
| 0.686788
|
5723328e5cd271a82c8d25b908bc2b420246795d
| 4,512
|
py
|
Python
|
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | 2
|
2019-10-08T08:27:41.000Z
|
2021-12-02T07:37:27.000Z
|
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | null | null | null |
deap_learning.py
|
fzjcdt/Genetic-CNN
|
6bd53f3f429434557b7fbf1122020259d910f618
|
[
"Apache-2.0"
] | null | null | null |
from deap import base, creator, tools
import random
"""
individuallist10
"""
# ****************************Types********************************
# def create(name, base, **kargs):
# Creates a new class named *name* inheriting from *base*
# A negative weight element corresponds to the minimization of
# the associated objective and positive weight to the maximization.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
# an Individual class that is derived from a list with a fitness attribute set
# to the just created fitness
"""
create("Foo", list, bar=dict, spam=1)
This above line is exactly the same as defining in the :mod:`creator`
module something like the following. ::
class Foo(list):
spam = 1
def __init__(self):
self.bar = dict()
"""
creator.create("Individual", list, fitness=creator.FitnessMin)
# ****************************Initialization********************************
IND_SIZE = 10
toolbox = base.Toolbox()
# def register(self, alias, function, *args, **kargs):
# Register a *function* in the toolbox under the name *alias*.
# *argsfunctionfunction
"""
>>> def func(a, b, c=3):
... print(a, b, c)
...
>>> tools = Toolbox()
>>> tools.register("myFunc", func, 2, c=4)
>>> tools.myFunc(3)
2 3 4
"""
toolbox.register("attribute", random.random)
# def initRepeat(container, func, n):
# Call the function *container* with a generator function corresponding
# to the calling *n* times the function *func*.
"""
>>> initRepeat(list, random.random, 2) # doctest: +ELLIPSIS,
... # doctest: +NORMALIZE_WHITESPACE
[0.6394..., 0.0250...]
"""
# IND_SIZE random.random()IndividualIndividualIndividual listIND_SIZE
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attribute, n=IND_SIZE)
# individualpopulationn
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# ****************************Operators********************************
# def cxTwoPoint(ind1, ind2):
# Executes a two-point crossover on the input :term:`sequence` individuals.
toolbox.register("mate", tools.cxTwoPoint)
# gaussian mutation with mu and sigma
# The *indpb* argument is the probability of each attribute to be mutated.
#
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
# def selTournament(individuals, k, tournsize, fit_attr="fitness"):
# Select the best individual among *tournsize* randomly chosen
# individuals, *k* times. The list returned contains
# references to the input *individuals*.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluate)
for ind in main():
print(evaluate(ind))
| 33.176471
| 89
| 0.629876
|
57262781980201cf7735ba35e8965dd0cb76ade8
| 1,674
|
py
|
Python
|
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | 2
|
2020-01-26T23:06:57.000Z
|
2021-04-12T08:36:55.000Z
|
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | null | null | null |
pacman/utils/replay_buffer.py
|
i-rme/openai-pacman
|
4a80ed023ed2bdf031990147acbbeea904b9fc8e
|
[
"MIT"
] | null | null | null |
from collections import deque
import random
import numpy as np
| 27.442623
| 93
| 0.548387
|
5726ab8f943f02dfa0eee1936447786383a1ce72
| 9,126
|
py
|
Python
|
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
tests/entities/test_creature.py
|
Flame753/ARPG
|
f931d3437a83995b43bdddc68cb5ba89922dc259
|
[
"MIT"
] | null | null | null |
# Standard library imports
from pprint import pprint
import unittest
# Local application imports
from context import entities
from entities import creatures
from entities import items
from entities import currency
from entities import slots
if __name__ == '__main__':
unittest.main()
# runner = unittest.TextTestRunner()
# runner.run(suite())
| 36.504
| 107
| 0.655928
|
5727029dde1ffe2dc5b477e3ae9e9d629cfe867a
| 729
|
py
|
Python
|
tile/io.py
|
ViliamV/tile
|
a2b105143341f250690b8034076ba9214e9ed787
|
[
"MIT"
] | null | null | null |
tile/io.py
|
ViliamV/tile
|
a2b105143341f250690b8034076ba9214e9ed787
|
[
"MIT"
] | null | null | null |
tile/io.py
|
ViliamV/tile
|
a2b105143341f250690b8034076ba9214e9ed787
|
[
"MIT"
] | null | null | null |
import collections
import re
from typing import Iterator, TextIO
from .constants import TILE_END, TILE_START, TILE_WARNING
| 24.3
| 100
| 0.650206
|
572995aff10ad23755f80a0359fa3ca259ee111e
| 199
|
py
|
Python
|
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | null | null | null |
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | null | null | null |
testfiles/benchmarks/send_multiple.py
|
marcolamartina/PASTEL
|
8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925
|
[
"MIT"
] | 1
|
2020-07-08T11:23:22.000Z
|
2020-07-08T11:23:22.000Z
|
import binascii
from pwn import *
port = 1234
server = '127.0.0.1'
sleep(1)
for i in range(10000):
r = remote(server, port)
send(r,i)
r.close()
| 15.307692
| 28
| 0.623116
|
572a6ba98328eb8f0c8ea9e03989e22fca55780e
| 271
|
py
|
Python
|
backend/users/serializers.py
|
rmisiarek/django_vue_base
|
440459fdd73209e47567fb3572c056a05dc1c45a
|
[
"MIT"
] | 2
|
2019-04-28T20:26:13.000Z
|
2020-05-04T03:18:23.000Z
|
backend/users/serializers.py
|
rmisiarek/django_vue_base
|
440459fdd73209e47567fb3572c056a05dc1c45a
|
[
"MIT"
] | 22
|
2019-12-04T22:34:42.000Z
|
2022-02-12T07:12:29.000Z
|
backend/users/serializers.py
|
shrekdev/Django_Vue_Base
|
4f200358724bce137f9c5e723036b280e4fd81e2
|
[
"MIT"
] | null | null | null |
from djoser.serializers import UserCreateSerializer
from .models import CustomUser
| 27.1
| 61
| 0.763838
|
572d902af0ded1f19ce7ceba83a724a3db7dd67b
| 30
|
py
|
Python
|
flaskslack/__init__.py
|
Jamiewu2/flask-slack-template
|
8a2168aaab03ed080a3eab186c20a488cdf2055e
|
[
"MIT"
] | null | null | null |
flaskslack/__init__.py
|
Jamiewu2/flask-slack-template
|
8a2168aaab03ed080a3eab186c20a488cdf2055e
|
[
"MIT"
] | null | null | null |
flaskslack/__init__.py
|
Jamiewu2/flask-slack-template
|
8a2168aaab03ed080a3eab186c20a488cdf2055e
|
[
"MIT"
] | null | null | null |
name = "flask_slack_template"
| 15
| 29
| 0.8
|
572e21eca46eed4b2a282d7364cf1c249e13c730
| 6,704
|
py
|
Python
|
cpu.py
|
fernandozanutto/PyNES
|
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
|
[
"Apache-2.0"
] | null | null | null |
cpu.py
|
fernandozanutto/PyNES
|
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
|
[
"Apache-2.0"
] | null | null | null |
cpu.py
|
fernandozanutto/PyNES
|
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
from time import time_ns
from bus import Bus
from instructions.generic_instructions import Instruction
from rom import ROM
from status import Status
import instructions.instructions as i_file
import instructions.jump_instructions as j_file
import instructions.load_instructions as l_file
import instructions.store_instructions as s_file
import instructions.stack_instructions as t_file
import instructions.arithmetic_instructions as a_file
import instructions.logical_instructions as log_file
import instructions.nop_instructions as n_file
import instructions.unofficial_instructions as u_file
| 33.688442
| 110
| 0.604117
|
57301fb1a81fd60d2a17effeed4478182c84a5a9
| 2,622
|
py
|
Python
|
timeouts/aa.py
|
kapsitis/ddgatve-stat
|
684fac54b9d0b8e7891f58bf1fb32605a2d87a3c
|
[
"Apache-2.0"
] | null | null | null |
timeouts/aa.py
|
kapsitis/ddgatve-stat
|
684fac54b9d0b8e7891f58bf1fb32605a2d87a3c
|
[
"Apache-2.0"
] | null | null | null |
timeouts/aa.py
|
kapsitis/ddgatve-stat
|
684fac54b9d0b8e7891f58bf1fb32605a2d87a3c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import datetime
import os
import subprocess
import re
import urllib2
import math
####################################################################
## TODO: Replace this function by another one, which simply reads all lines from a file
####################################################################
#####################################################################
## Find the right logfile; logfiles are different for different days
#####################################################################
ddat = datetime.datetime.now()
theLog = '/home/kalvis/.timeouts/access-{yyyy}-{mm}-{dd}.log'.format( \
yyyy = ddat.year, mm=ddat.month, dd=ddat.day)
yellowCount = getYellowCount(theLog)
status = getStatus()
if yellowCount >= 5:
status = "red"
if yellowCount > 1:
os.system("/home/kalvis/.timeouts/msg.py")
logline = '{user}:{time}:{status}({yellowCount})\n'.format(user=currentUser(), \
time=ddat,status=getStatus(),yellowCount=yellowCount)
if not os.path.isfile(theLog):
open(theLog, 'a').close()
with open(theLog, "a") as myfile:
myfile.write(logline)
if yellowCount >= 5:
from subprocess import call
call(["pkill","-KILL","-u","marta"])
| 27.030928
| 88
| 0.558734
|
5731a1d1075f24d7e3e476ce3662c03caca1f970
| 1,786
|
py
|
Python
|
files_to_html_md5.py
|
jasonivey/scripts
|
09f9702e5ce62abbb7699aae16b45b33711fe856
|
[
"MIT"
] | null | null | null |
files_to_html_md5.py
|
jasonivey/scripts
|
09f9702e5ce62abbb7699aae16b45b33711fe856
|
[
"MIT"
] | null | null | null |
files_to_html_md5.py
|
jasonivey/scripts
|
09f9702e5ce62abbb7699aae16b45b33711fe856
|
[
"MIT"
] | null | null | null |
import os
import sys
import hash_utils
if __name__ == '__main__':
with open('index.php', 'w') as outfile:
StartIndexPhp()
for f in os.listdir('.'):
if f == 'index.php' or f.find( '?' ) != -1 or f.find( 'System Volume Information' ) != -1 or f.find( 'RECYCLER' ) != -1:
continue
if os.path.isdir(f):
md5str = hash_utils.md5sum(f)
print(f + ' - ' + md5str)
outfile.write( '\t<tr>\n' )
outfile.write( '\t\t<td align=center><a href="' + f + '">' + f + '</a></td>\n' )
outfile.write( '\t\t<td align=center>' + md5str + '</td>\n' )
outfile.write( '\t</tr>\n' )
CloseIndexPhp( outfile )
| 43.560976
| 133
| 0.526316
|