blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed78e6a932f361509c7b2a9d7e57906f6d9a5d0f
|
b0485dc3e30dc0170391e131ec00ccb335768817
|
/_build.py
|
afeb10aaecab2d7aba8fdf9720bc4faddcca2854
|
[] |
no_license
|
leisvip/djc_helper
|
8a54483780bcb6ec3a5316a869d5652cfad393f7
|
9e5982047ce6db05f09db3d81e7f4df6303f21d7
|
refs/heads/master
| 2023-06-25T07:06:13.307176 | 2021-07-19T02:09:09 | 2021-07-19T02:09:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,837 |
py
|
# 编译脚本
import argparse
import os
import shutil
import subprocess
from _init_venv_and_requirements import init_venv_and_requirements
from log import logger, color
from util import human_readable_size, show_head_line
def build(disable_douban=False):
    """Build all helper executables with PyInstaller inside the project venv.

    :param disable_douban: forwarded to init_venv_and_requirements; when True,
        the Douban PyPI mirror is not used while installing requirements.
    """
    # Initialize path variables once; every later use goes through venv_path
    # (consistency fix: the original hard-coded ".venv" a second time below).
    venv_path = ".venv"
    # NOTE(review): "Scripts" is the Windows venv layout; on POSIX the tools
    # live under "bin" — confirm this script is Windows-only.
    pyinstaller_path = os.path.join(venv_path, "Scripts", "pyinstaller")

    # Initialize the venv and its requirements.
    init_venv_and_requirements(venv_path, disable_douban)

    show_head_line(f"将使用{venv_path}环境进行编译", color("bold_yellow"))

    # (entry script, exe name, icon path, output dir, excluded modules, extra args)
    build_configs = [
        ("main.py", "DNF蚊子腿小助手.exe", "utils/icons/DNF蚊子腿小助手.ico", ".", ["PyQt5"], []),
        ("auto_updater.py", "auto_updater.exe", "", "utils", ["PyQt5"], []),
        ("ark_lottery_special_version.py", "DNF蚊子腿小助手_集卡特别版.exe", "utils/icons/ark_lottery_special_version.ico", ".", ["PyQt5"], []),
        ("config_ui.py", "DNF蚊子腿小助手配置工具.exe", "utils/icons/config_ui.ico", ".", [], ["--noconsole"]),
    ]

    # Unpack each config tuple directly in the loop header (idiom fix).
    for idx, (src_path, exe_name, icon_path, target_dir, exclude_modules, extra_args) in enumerate(build_configs):
        prefix = f"{idx + 1}/{len(build_configs)}"

        logger.info(color("bold_yellow") + f"{prefix} 开始编译 {exe_name}")

        # Assemble the PyInstaller command line (-F = one-file bundle).
        cmd_build = [
            pyinstaller_path,
            '--name', exe_name,
            '-F',
            src_path,
        ]
        if icon_path != "":
            cmd_build.extend(['--icon', icon_path])
        for module in exclude_modules:
            cmd_build.extend(['--exclude-module', module])
        cmd_build.extend(extra_args)

        logger.info(f"{prefix} 开始编译 {exe_name},命令为:{' '.join(cmd_build)}")
        subprocess.call(cmd_build)

        logger.info("编译结束,进行善后操作")
        # Copy the built binary to its target directory.
        logger.info(f"复制{exe_name}到目标目录{target_dir}")
        if not os.path.isdir(target_dir):
            os.mkdir(target_dir)
        target_path = os.path.join(target_dir, exe_name)
        shutil.copyfile(os.path.join("dist", exe_name), target_path)

        # Remove PyInstaller's temporary outputs so the next build starts clean.
        logger.info("删除临时文件")
        for directory in ["build", "dist", "__pycache__"]:
            shutil.rmtree(directory, ignore_errors=True)
        os.remove(f"{exe_name}.spec")

        filesize = os.path.getsize(target_path)
        logger.info(color("bold_green") + f"{prefix} 编译{exe_name}结束,最终大小为{human_readable_size(filesize)}")

    logger.info("done")
def parse_args(args=None):
    """Parse command-line options for the build script.

    :param args: optional list of argument strings to parse; defaults to
        ``sys.argv[1:]`` (backward-compatible generalization that also makes
        the function unit-testable).
    :return: argparse.Namespace with a boolean ``disable_douban`` attribute.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--disable_douban", action='store_true')
    return parser.parse_args(args)
if __name__ == '__main__':
    # CLI entry point: parse flags, then run the full build.
    args = parse_args()
    build(args.disable_douban)
|
[
"fzls.zju@gmail.com"
] |
fzls.zju@gmail.com
|
42e95b4be95d83bcba4b00923df10849d38dd895
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03393/s345995802.py
|
343e2097a0d4f2b25bd715fd2830e3222965ec14
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
# Competitive-programming solution (AtCoder p03393): print the
# lexicographically smallest string with all-distinct letters that is
# strictly larger than S, or -1 if none exists.
# Presumably S itself has no repeated letters (the set logic below relies
# on it) — TODO confirm against the problem statement.
S = input()
# se = set of letters currently used by S.
se = set()
for s in S:
    se.add(s)
if len(S) < 26:
    # S does not use the whole alphabet: appending the smallest unused
    # letter is the smallest possible successor.
    for i in range(26):
        s = chr(ord('a')+i)
        if not s in se:
            print(S+s)
            exit()
else:
    # S uses all 26 letters, so nothing can be appended. Repeatedly drop
    # the last letter, then try to raise the new last position to the
    # smallest unused letter that keeps the result larger than S.
    while len(S) > 1:
        se.remove(S[-1])
        S = S[:-1]
        for i in range(ord(S[-1]), ord('z')+1):
            s = chr(i)
            if not s in se:
                print(S[:-1]+s)
                exit()
    # No position could be increased: S was the largest such string.
    print(-1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2cc201cf266ff314089da1d203b0b3d0cc31fdfd
|
1b862f34c125ce200244dd79e4fda4b5b605ce2e
|
/.history/images_20210218000603.py
|
0a459524a8dabc3b90e7a0501798a67b7e4b69cb
|
[] |
no_license
|
edwino26/CoreImages
|
26085a49cf1cb79442ae563a88354b2fdceace87
|
6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e
|
refs/heads/master
| 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 |
Lasso
|
UTF-8
|
Python
| false | false | 2,563 |
py
|
import os
# Helper to clear the console ('cls' is Windows-only).
clear = lambda : os.system('cls')
#
# %%
import glob
import cv2
import os.path
import numpy as np
import matplotlib.pyplot as plt
# %%
# Number of core segments photographed side by side in each image.
cores_per_image = 6
uvFiles = glob.glob('./Photos/*.jpg')
print(uvFiles)
# Picture path
# NOTE(review): the './Photos/' prefix is stripped before imread, so the
# image is actually read from the current directory — confirm intended.
img = cv2.imread(uvFiles[0].replace('./Photos/',''))
print(img)
# a/b collect the x/y coordinates of mouse clicks (see oneventlbuttondown).
a = []
b = []
# %%
def oneventlbuttondown(event, x, y, flags, param):
    """OpenCV mouse callback: record each left-click and mark it on img.

    Appends the click's x to module-level list ``a`` and y to ``b``, draws a
    filled dot at the click position on ``img`` and refreshes the "image"
    window.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a.append(x)
        b.append(y)
        # thickness=-1 draws a filled circle.
        cv2.circle(img, (x, y), 10, (0, 0, 255), thickness=-1)
        # cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image", img)
# Each core photo strip spans core_length depth units.
core_length = 3
vc = []  # running vertical concatenation of the cropped core strips
# NOTE(review): depth range parsed from filename slices [2:6]/[7:11]; this
# only works for paths shaped like './DDDD_DDDD.jpg' — with the './Photos/'
# glob prefix above these slices would land inside 'Photos'. Confirm.
do = int(uvFiles[0][2:6])
dn = int(uvFiles[0][7:11])
for i in range(cores_per_image):
    if i == 0:
        # First pass: show the image and collect three calibration clicks.
        cv2.namedWindow("image", cv2.WINDOW_NORMAL)
        # cv2.resizeWindow("output", 400, 300)
        cv2.setMouseCallback("image", oneventlbuttondown)
        cv2.imshow("image", img)
        print(
            'Click 1) left upper corner 2) right lower corner in leftmost core and 3) leftupper corner in second core')
        cv2.waitKey(0)
        # Click 1 = crop origin, click 2 = crop extent, click 3 = inter-core gap.
        y = b[0];
        x = a[0];
        dy = b[1] - b[0];
        dx = a[1] - a[0]
        gap = a[2] - a[1]
    if i == 3:
        # Wider gap before the 4th core in these photos — TODO confirm factor.
        midgap = gap * 4
    else:
        midgap = 0
    if i > 0: x = x + (dx + gap) + midgap
    # Crop the i-th core strip and stack it below the previous ones.
    crop_img = img[y:y + dy, x:x + dx]
    if i == 0:
        vc = crop_img
    else:
        vc = cv2.vconcat([vc, crop_img])
    # Save the individual strip, named by its starting depth.
    crop_name = str(int(uvFiles[0][2:6]) + (core_length * i)) + ".jpg"
    path = os.path.join(os.path.relpath('Cropped', start=os.curdir), crop_name)
    cv2.imwrite(path, crop_img)
# Save the full concatenated strip, named by the depth interval.
concat_name = uvFiles[0][2:6] + "-" + uvFiles[0][7:11] + ".jpg"
path = os.path.join(os.path.relpath('Cropped', start=os.curdir), concat_name)
cv2.imwrite(path, vc)
p = vc.shape
vc_gray = cv2.cvtColor(vc, cv2.COLOR_BGR2GRAY)
print(vc.shape)  # Dimensions of Image
print(vc_gray.shape)  # It is already a numpy array
print(type(vc_gray))
# print(p[:10, :10, 1 ])
# Average a vertical band of gray values to build a pseudo-log versus depth.
img_log = np.average(vc_gray[:, 80:120], axis=1)
depths = np.arange(do, dn, (dn - do) / len(img_log))
plt.figure()
# plt.subplot(1, 2, 1)
# Left panel (3/10 of the width): pseudo-log curve, depth increasing downward.
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(img_log, depths, 'green');
plt.axis([0, 120, do, dn]);
plt.gca().invert_yaxis();
plt.gca().invert_xaxis()
# plt.subplot(1, 2 ,2)
# Right panel (7/10 of the width): the grayscale core strip itself.
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(vc_gray[:, 40:120], aspect='auto', origin='upper');
plt.colorbar()
p_50 = np.percentile(img_log, 50)
plt.show()
# %%
|
[
"ortega.edwin.y@gmail.com"
] |
ortega.edwin.y@gmail.com
|
7aaf30d580238668767fc362313bb0f9006f72eb
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/azurestackhci/azure-mgmt-azurestackhci/azure/mgmt/azurestackhci/_configuration.py
|
b419fc134f8d50033f2cbf5f5c9a6e3c66cd4704
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 3,264 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class AzureStackHCIClientConfiguration(Configuration):
    """Configuration for AzureStackHCIClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Fail fast on missing required arguments, in declaration order.
        for required_name, required_value in (('credential', credential), ('subscription_id', subscription_id)):
            if required_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(required_name))
        super(AzureStackHCIClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-10-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-azurestackhci/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy may be overridden via kwargs; otherwise the
        # standard default policy is constructed from the same kwargs.
        get_policy = kwargs.get
        self.user_agent_policy = get_policy('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = get_policy('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = get_policy('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = get_policy('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = get_policy('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = get_policy('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = get_policy('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = get_policy('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = get_policy('authentication_policy')
        # Default to bearer-token auth when a credential was supplied but no
        # explicit authentication policy was passed in.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
e0a1f5a316d34042d099a185a757946ab5667625
|
3f99756553008745dcac63da942c8afe189a0bbb
|
/src/common/debug.py
|
2c2a20b77c88f20d2287098df133eea0833a9ebc
|
[] |
no_license
|
hekaistorm/DQN_wechat_jump_AI
|
b34e1d15486e4f6884221e68cb110f4b5f8fcaa6
|
2dc71f22e234bc17dd280d309103e84596754588
|
refs/heads/master
| 2020-09-06T09:05:59.478004 | 2018-02-04T05:28:06 | 2018-02-04T05:28:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,185 |
py
|
# -*- coding: utf-8 -*-
"""
这儿是debug的代码,当DEBUG_SWITCH开关开启的时候,会将各种信息存在本地,方便检查故障
"""
import os
import sys
import shutil
from PIL import ImageDraw
# Directory where every processed screenshot is backed up for debugging.
screenshot_backup_dir = '../data/backups/'
def make_debug_dir(screenshot_backup_dir):
    """Create the backup directory (and any missing parents) if absent.

    :param screenshot_backup_dir: path of the directory to create
    """
    # makedirs with exist_ok=True also creates missing parent directories
    # (the original os.mkdir raised if '../data' did not exist) and avoids
    # the check-then-create race of the previous isdir/mkdir sequence.
    os.makedirs(screenshot_backup_dir, exist_ok=True)
def backup_screenshot(ts):
    """Copy the current screenshot into the backup directory so failed
    jumps can be analysed later.

    :param ts: timestamp string used as the backup file's base name
    """
    make_debug_dir(screenshot_backup_dir)
    destination = '{}{}.png'.format(screenshot_backup_dir, ts)
    shutil.copy('autojump.png', destination)
def save_debug_screenshot(ts, im, piece_x, piece_y, board_x, board_y):
    """Annotate a debug screenshot with the detected positions and save it.

    :param ts: timestamp used in the output file name
    :param im: PIL image to draw on (modified in place)
    :param piece_x: detected piece x position
    :param piece_y: detected piece y position
    :param board_x: detected target board x position
    :param board_y: detected target board y position
    """
    make_debug_dir(screenshot_backup_dir)
    draw = ImageDraw.Draw(im)
    # Line from the piece to the target board.
    draw.line((piece_x, piece_y) + (board_x, board_y), fill=2, width=3)
    # Full-image crosshairs through the piece position...
    draw.line((piece_x, 0, piece_x, im.size[1]), fill=(255, 0, 0))
    draw.line((0, piece_y, im.size[0], piece_y), fill=(255, 0, 0))
    # ...and through the board position.
    draw.line((board_x, 0, board_x, im.size[1]), fill=(0, 0, 255))
    draw.line((0, board_y, im.size[0], board_y), fill=(0, 0, 255))
    # Solid dots marking both points.
    draw.ellipse((piece_x - 10, piece_y - 10, piece_x + 10, piece_y + 10), fill=(255, 0, 0))
    draw.ellipse((board_x - 10, board_y - 10, board_x + 10, board_y + 10), fill=(0, 0, 255))
    del draw
    im.save('{}{}{}_d.png'.format(screenshot_backup_dir, ts, str(piece_x) + '_' + str(piece_y)))
def dump_device_info():
    """Print device information (screen size, density, model, Android
    version) gathered via adb shell, plus host OS and Python version."""
    size_str = os.popen('adb shell wm size').read()
    device_str = os.popen('adb shell getprop ro.product.device').read()
    phone_os_str = os.popen('adb shell getprop ro.build.version.release').read()
    density_str = os.popen('adb shell wm density').read()
    print("""**********
Screen: {size}
Density: {dpi}
Device: {device}
Phone OS: {phone_os}
Host OS: {host_os}
Python: {python}
**********""".format(
        size=size_str.strip(),
        dpi=density_str.strip(),
        device=device_str.strip(),
        phone_os=phone_os_str.strip(),
        host_os=sys.platform,
        python=sys.version
    ))
|
[
"50673223@qq.com"
] |
50673223@qq.com
|
ee4e5ba67072d17cb87c3d167e85dfec37495d32
|
bac37a96ead59a3c4caaac63745d5748f5060195
|
/第9章 异常/异常4.py
|
7086b6c235bbdfbd54433b2b3796687261036263
|
[] |
no_license
|
pod1019/python_learning
|
1e7d3a9c10fc8c1b4e8ff31554d495df518fb385
|
a15213d33a253c3a77ab0d5de9a4f937c27693ca
|
refs/heads/master
| 2020-09-14T11:11:53.100591 | 2020-04-11T04:00:27 | 2020-04-11T04:00:27 | 223,112,718 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
# Demonstrates a try block with multiple except clauses: the first clause
# whose exception type matches the raised exception is the only one run.
try:
    a = input("请输入被除数:")
    b = input("请输入除数:")
    c = float(a)/float(b)
    print("两数相除的结果是:",c)
except ZeroDivisionError:
    # Divisor was zero.
    print("异常:除数不能为0")
except TypeError:
    # NOTE(review): float() on a non-numeric string raises ValueError, not
    # TypeError, so this clause is unlikely to trigger here — the catch-all
    # below handles that case instead.
    print("异常:除数和被除数都应该为数值类型")
except NameError:
    # A referenced variable does not exist.
    print("异常:变量不存在")
except BaseException as e:
    # Catch-all fallback: report the exception object and its type.
    print(e)
    print(type(e))
finally: # This clause always runs, whether or not an exception occurred.
    print("kkkkkkkkk")
|
[
"pod1019@163.com"
] |
pod1019@163.com
|
8336f14eb60ba8b70f687a50cfcfb4356b0cb70a
|
9360aeefb3605a3fe0c5e512e52ec3bc0942903f
|
/app.py
|
7527e6647e891d5f6706d20edee3162f0ce7496d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
eliaswalyba/facebound
|
1ff7dc32cc4bf50d14f2e6434af2adfb14300245
|
92500e61b1bc50702ea339563ee8b38b55a31169
|
refs/heads/master
| 2022-07-01T17:42:02.360416 | 2020-05-08T15:23:03 | 2020-05-08T15:23:03 | 262,851,606 | 0 | 0 |
MIT
| 2020-05-10T18:37:03 | 2020-05-10T18:37:02 | null |
UTF-8
|
Python
| false | false | 3,484 |
py
|
import cv2, os
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
from PIL import Image, ImageEnhance
@st.cache
def load_image(img):
    """Open *img* (path or file-like object) as a PIL image; the result is
    memoized by Streamlit's cache."""
    im = Image.open(img)
    return im
# Haar cascade shipped with the app under ./algos/ (matching the commented
# eye/smile cascades below). Bug fix: the original leading slash made this
# an absolute path from the filesystem root, which cannot resolve in a
# normal deployment.
FACE_CASCADE_PATH = 'algos/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(FACE_CASCADE_PATH)
# eye_cascade = cv2.CascadeClassifier('algos/haarcascade_eye.xml')
# smile_cascade = cv2.CascadeClassifier('algos/haarcascade_smile.xml')
def detect_faces(uploaded_image):
    """Run the module-level Haar cascade over an uploaded PIL image.

    :param uploaded_image: PIL image (any mode; converted to RGB here)
    :return: tuple of (annotated image array, detected face rectangles)
    """
    new_img = np.array(uploaded_image.convert('RGB'))
    temp_img = cv2.cvtColor(new_img, 1)
    gray = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
    # Detect faces (positional args: scaleFactor=1.1, minNeighbors=4).
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Draw a rectangle around each detection on the color image.
    for (x,y,w,h) in faces:
        cv2.rectangle(temp_img, (x,y), (x+w, y+h), (255,0,0), 2)
    return temp_img, faces
def main():
'''
Face Detection App
'''
st.title('Facebound')
st.text('by Fodé Diop')
options = ['Detection', 'About']
choice = st.sidebar.selectbox('Select Option', options)
if choice == 'Detection':
st.subheader('Face Detection')
image_file = st.file_uploader('Upload Image', type=['jpg', 'png', 'jpeg'])
if image_file is not None:
uploaded = Image.open(image_file)
# st.write(type(uploaded))
st.text('Original Image')
st.image(uploaded)
enhance_type = st.sidebar.radio('Enhance Type', ['Original', 'Grayscale', 'Contrast', 'Brightness', 'Blur'])
if enhance_type == 'Grayscale':
new_img = np.array(uploaded.convert('RGB'))
temp_img = cv2.cvtColor(new_img, 1)
gray = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
st.image(gray)
# Print on screen
st.write(gray)
st.write(new_img)
if enhance_type == 'Contrast':
contrast_rate = st.sidebar.slider('Contrtast', 0.5, 3.5)
enhancer = ImageEnhance.Contrast(uploaded)
img_output = enhancer.enhance(contrast_rate)
st.image(img_output)
if enhance_type == 'Brightness':
contrast_rate = st.sidebar.slider('Brigthness', 0.5, 3.5)
enhancer = ImageEnhance.Brightness(uploaded)
img_output = enhancer.enhance(contrast_rate)
st.image(img_output)
if enhance_type == 'Blur':
blur_rate = st.sidebar.slider('Blur', 0.5, 3.5)
new_img = np.array(uploaded.convert('RGB'))
temp_img = cv2.cvtColor(new_img, 1)
blurred = cv2.GaussianBlur(temp_img, (11,11), blur_rate)
st.image(blurred)
# else:
# st.image(uploaded)
# Face Detection
target = ['Face', 'Smiles', 'Eyes']
feature_choice = st.sidebar.selectbox('Find Features', target)
if st.button('Detect Faces'):
if feature_choice == 'Faces':
st.write('Print something goda damn it!!!!')
result_img, result_faces = detect_faces(uploaded)
st.image(result_img)
st.success(f'Found {len(result_faces)} faces.')
elif choice == 'About':
st.subheader('About Facebound')
st.markdown("Built with Streamlit and OpenCV by [Fodé Diop](https://www.github.com/diop)")
st.text("© Copyright 2020 Fodé Diop - MIT")
st.success("Dakar Institute of Technology")
if __name__ == '__main__':
    # Streamlit runs this module as a script.
    main()
|
[
"fodesdiop@gmail.com"
] |
fodesdiop@gmail.com
|
fd07de3d5d3a2288f381e55246f4331593b092d8
|
f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41
|
/test/test_room_space_inventory_status_item.py
|
832bf42dbe4715ab3e1fb79003804c3340495d2d
|
[] |
no_license
|
CalPolyResDev/StarRezAPI
|
012fb8351159f96a81352d6c7bfa36cd2d7df13c
|
b184e1863c37ff4fcf7a05509ad8ea8ba825b367
|
refs/heads/master
| 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,152 |
py
|
# coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.room_space_inventory_status_item import RoomSpaceInventoryStatusItem # noqa: E501
from starrez_client.rest import ApiException
class TestRoomSpaceInventoryStatusItem(unittest.TestCase):
    """Unit test stubs for the RoomSpaceInventoryStatusItem model."""

    def setUp(self):
        # No fixtures are required for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testRoomSpaceInventoryStatusItem(self):
        """Test RoomSpaceInventoryStatusItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = starrez_client.models.room_space_inventory_status_item.RoomSpaceInventoryStatusItem()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"fedorareis@gmail.com"
] |
fedorareis@gmail.com
|
12bc7081611f0cf4e76ac1ca7877c8802cf8993e
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/pyqtgraph/TextItem/main.py
|
cee6f112509d051cd992e54fb2b7de2352ab1089
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 |
MIT
| 2021-02-17T23:33:37 | 2015-11-04T23:54:32 |
Python
|
UTF-8
|
Python
| false | false | 268 |
py
|
# Minimal pyqtgraph example: plot a short line and anchor a TextItem at (3, 2).
from PyQt5 import QtGui
import pyqtgraph as pg

# NOTE(review): in PyQt5 QApplication normally lives in QtWidgets, not QtGui —
# confirm this runs with the PyQt/pyqtgraph versions in use.
app = QtGui.QApplication([])

x = [1,2,3,4,5]
y = [0,3,1,2,0]

plotWidget = pg.plot()
plotWidget.plot(x, y)

# 'f00' is a short hex color string accepted by pyqtgraph.
text = pg.TextItem("Hello World", color='f00')
plotWidget.addItem(text)
text.setPos(3, 2)

# Start the Qt event loop (blocks until the window is closed).
app.exec_()
|
[
"furas@tlen.pl"
] |
furas@tlen.pl
|
86c01d24c8a65bb62895d8f7fd5b4121e6227f36
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_032/ch15_2020_03_09_16_43_39_394880.py
|
d97f1bf48a978c5d3d9d2f3b83f3fff684be1147
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
# Reads a name and prints a special message for the exact (case-sensitive)
# name 'Chris', otherwise a generic greeting. User-facing strings are in
# Portuguese by design.
NOME=input('Digite o seu nome:')
if NOME == 'Chris':
    print('Todo mundo odeia o Chris')
else:
    print('Olá, {0}'.format(NOME))
|
[
"you@example.com"
] |
you@example.com
|
f05afaefedd21c8a8362d23218c7eb4c9d7ffa0f
|
1ffc17893d9e15fd939628bbc41c3d2633713ebd
|
/docs/tests/test_documentation_tutorial.py
|
d607bf9a8a7c3372aaf0f4fa1cdc37a04f40be05
|
[
"Apache-2.0"
] |
permissive
|
xadupre/sklearn-onnx
|
646e8a158cdded725064964494f0f8a760630aa8
|
b05e4864cedbf4f2a9e6c003781d1db8b53264ac
|
refs/heads/master
| 2023-09-01T15:58:38.112315 | 2022-12-21T01:59:45 | 2022-12-21T01:59:45 | 382,323,831 | 0 | 2 |
Apache-2.0
| 2023-01-04T13:41:33 | 2021-07-02T11:22:00 |
Python
|
UTF-8
|
Python
| false | false | 3,935 |
py
|
# SPDX-License-Identifier: Apache-2.0
"""
Tests examples from the documentation.
"""
import unittest
import os
import sys
import importlib
import subprocess
def import_source(module_file_path, module_name):
    """Import a module from an explicit file path.

    :param module_file_path: path to the ``.py`` file to load
    :param module_name: name to register the module under
    :return: the imported module object
    :raises FileNotFoundError: if the file does not exist or no import
        spec can be built for it
    """
    if not os.path.exists(module_file_path):
        raise FileNotFoundError(module_file_path)
    module_spec = importlib.util.spec_from_file_location(
        module_name, module_file_path)
    if module_spec is None:
        raise FileNotFoundError(
            "Unable to find '{}' in '{}'.".format(
                module_name, module_file_path))
    module = importlib.util.module_from_spec(module_spec)
    # Bug fix: exec_module() always returns None; the original returned that
    # None instead of the freshly executed module, so callers' "mod is not
    # None" assertions could never hold.
    module_spec.loader.exec_module(module)
    return module
class TestDocumentationTutorial(unittest.TestCase):
    """Executes every tutorial example (docs/tutorial/plot_*.py) and fails
    only on errors that are not in the known-benign list below."""

    def test_documentation_tutorial(self):
        # Resolve ../tutorial relative to this test file.
        this = os.path.abspath(os.path.dirname(__file__))
        fold = os.path.normpath(os.path.join(this, '..', 'tutorial'))
        found = os.listdir(fold)
        tested = 0
        for name in found:
            if name.startswith("plot_") and name.endswith(".py"):
                print("run %r" % name)
                try:
                    # NOTE(review): ``fold`` is the directory, not the file, so
                    # import_source raises here and the subprocess fallback
                    # below does the real work — the arguments look like they
                    # should be os.path.join(fold, name). Confirm intent.
                    mod = import_source(fold, os.path.splitext(name)[0])
                    assert mod is not None
                except FileNotFoundError:
                    # try another way: run the example as a subprocess and
                    # triage its stderr.
                    cmds = [sys.executable, "-u",
                            os.path.join(fold, name)]
                    p = subprocess.Popen(
                        cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    res = p.communicate()
                    out, err = res
                    st = err.decode('ascii', errors='ignore')
                    # Only a traceback on stderr counts as a failure; then
                    # filter out known environment-dependent errors.
                    if len(st) > 0 and 'Traceback' in st:
                        if "No such file or directory: 'dot'" in st:
                            # dot not installed, this part
                            # is tested in onnx framework
                            pass
                        elif '"dot" not found in path.' in st:
                            # dot not installed, this part
                            # is tested in onnx framework
                            pass
                        elif ("cannot import name 'LightGbmModelContainer' "
                              "from 'onnxmltools.convert.common."
                              "_container'") in st:
                            # onnxmltools not recent enough
                            pass
                        elif ('Please fix either the inputs or '
                              'the model.') in st:
                            # onnxruntime datasets changed in master branch,
                            # still the same in released version on pypi
                            pass
                        elif ('Current official support for domain ai.onnx '
                              'is till opset 12.') in st:
                            # one example is using opset 13 but onnxruntime
                            # only support up to opset 12.
                            pass
                        elif "'str' object has no attribute 'decode'" in st:
                            # unstable bug in scikit-learn<0.24
                            pass
                        elif ("This method should be overwritten for "
                              "operator") in st:
                            # raised by old version of packages
                            # used in the documentation
                            pass
                        else:
                            raise RuntimeError(
                                "Example '{}' (cmd: {} - exec_prefix='{}') "
                                "failed due to\n{}"
                                "".format(name, cmds, sys.exec_prefix, st))
                tested += 1
        if tested == 0:
            raise RuntimeError("No example was tested.")
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
xadupre.noreply@github.com
|
25eb28da4c15af658689383ec67271d21e30711e
|
2e145222a18d4509d937951f5cec4df0e26ee86f
|
/vas/sqlfire/AgentInstances.py
|
c7014e3358ef52496bfff95762d437cb06c53a4c
|
[
"Apache-2.0"
] |
permissive
|
vdreamakitex/vas-python-api
|
7627b7e3fcf76c16b1ea8b9fb670fdb708eff083
|
ce7148a2044863e078e78b47abbaafc426f732ee
|
refs/heads/master
| 2021-01-18T05:13:25.459916 | 2012-11-05T09:58:45 | 2012-11-05T09:58:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,711 |
py
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Instance import Instance
from vas.shared.MutableCollection import MutableCollection
class AgentInstances(MutableCollection):
    """Used to enumerate, create, and delete agent instances

    :ivar `vas.shared.Security.Security` security: The resource's security
    """

    def __init__(self, client, location):
        super(AgentInstances, self).__init__(client, location, 'agent-group-instances', AgentInstance)

    def create(self, installation, name, jvm_options=None):
        """Creates a new agent instance

        :param `vas.sqlfire.Installations.Installation` installation: The installation to be used by the instance
        :param str name: The name of the instance
        :param list jvm_options: The JVM options that are passed to the
                                 agent's JVM when it is started
        :rtype: :class:`vas.sqlfire.AgentInstances.AgentInstance`
        :return: The new agent instance
        """
        details = {
            'installation': installation._location,
            'name': name,
        }
        if jvm_options is not None:
            details['jvm-options'] = jvm_options
        return self._create(details, 'agent-group-instance')
class AgentInstance(Instance):
    """An agent instance

    :ivar `vas.sqlfire.Groups.Group` group: The group that contains this instance
    :ivar `vas.sqlfire.Installations.Installation` installation: The installation that this instance is using
    :ivar list jvm_options: The JVM options that are passed to the agent's JVM
        when it is started
    :ivar `vas.sqlfire.AgentLiveConfigurations.AgentLiveConfigurations` live_configurations: The instance's live
        configurations
    :ivar str name: The instance's name
    :ivar list node_instances: The instance's individual node instances
    :ivar `vas.sqlfire.AgentPendingConfigurations.AgentPendingConfigurations` pending_configurations: The instance's pending
        configurations
    :ivar `vas.shared.Security.Security` security: The resource's security
    :ivar str state: Retrieves the state of the resource from the server.
        Will be one of:

        * ``STARTING``
        * ``STARTED``
        * ``STOPPING``
        * ``STOPPED``
    """

    @property
    def jvm_options(self):
        # Cached copy of the server-side 'jvm-options' detail; refreshed by
        # reload().
        return self.__jvm_options

    def __init__(self, client, location):
        # Group/Installation/etc. are imported at the bottom of this module,
        # apparently to avoid circular imports; they resolve at call time.
        super(AgentInstance, self).__init__(client, location, Group, Installation, AgentLiveConfigurations,
                                            AgentPendingConfigurations, AgentNodeInstance, 'agent-node-instance')

    def reload(self):
        """Reloads the agent instance's details from the server"""
        super(AgentInstance, self).reload()
        self.__jvm_options = self._details['jvm-options']

    def update(self, installation=None, jvm_options=None):
        """Updates the instance

        :param `vas.sqlfire.Installations.Installation` installation: The installation to be used by the instance. If
                                                                      omitted or `None`, the configuration will not be
                                                                      changed
        :param list jvm_options: The JVM options that are passed to the agent's
                                 JVM when it is started. If omitted or `None`,
                                 the configuration will not be changed
        """
        payload = {}
        if installation:
            payload['installation'] = installation._location
        if jvm_options is not None:
            payload['jvm-options'] = jvm_options
        self._client.post(self._location, payload)
        # Re-fetch so local attributes reflect the server's applied changes.
        self.reload()

    def __str__(self):
        return "<{} name={} jvm_options={}>".format(self.__class__, self.name, self.__jvm_options)
from vas.sqlfire.AgentLiveConfigurations import AgentLiveConfigurations
from vas.sqlfire.AgentNodeInstances import AgentNodeInstance
from vas.sqlfire.AgentPendingConfigurations import AgentPendingConfigurations
from vas.sqlfire.Groups import Group
from vas.sqlfire.Installations import Installation
|
[
"bhale@vmware.com"
] |
bhale@vmware.com
|
205101325b29051add4fec00ed7a7ca59766cd56
|
f1e7c31d22ee90e5e3705352170388c09b7470d4
|
/day2/decorator4.py
|
69c18443a573abae91c47ec9c78ea8ce44331c52
|
[] |
no_license
|
taizilinger123/apple
|
d0b58eb184d4a7c5aec674e914fa67139d2419ca
|
285c10eedfa0863ad6d51efb9dea5c7d0ae540a6
|
refs/heads/master
| 2020-03-28T07:54:50.118668 | 2018-09-09T05:37:12 | 2018-09-09T05:37:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 499 |
py
|
import time
def timer(func):  # timer(test1) -> func=test1
    """Decorator that reports how long each call to *func* takes.

    :param func: the function to wrap
    :return: a wrapper that forwards all arguments, prints the elapsed
        wall-clock time, and returns *func*'s result
    """
    def deco(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)  # run the wrapped function
        stop_time = time.time()
        print("the func run time is %s" % (stop_time - start_time))
        # Bug fix: the original discarded the wrapped function's return
        # value; forward it so decorated functions stay transparent.
        return result
    return deco
@timer  # test1 = timer(test1): calling test1() now runs deco()
def test1():
    time.sleep(1)
    print('in the test1')

@timer  # test2 = timer(test2): test2(name, age) forwards through deco(*args)
def test2(name,age):
    print("test2:",name,age)

# Module-level demo calls: each prints its body plus the measured run time.
test1()
test2("alex",22)
|
[
"837337164@qq.com"
] |
837337164@qq.com
|
9a7fc8e23d82ffb80c491b1e51e26e71ab025f91
|
ce18cf6bdb1a85a65a509597b4c0ec046b855186
|
/2020年12月/换酒问题.py
|
2aae2a3d340b6efc017fbb7a2722d70f2ee89088
|
[] |
no_license
|
elssm/leetcode
|
e12e39faff1da5afb234be08e7d9db85fbee58f8
|
a38103d2d93b34bc8bcf09f87c7ea698f99c4e36
|
refs/heads/master
| 2021-06-11T06:44:44.993905 | 2021-04-28T06:14:23 | 2021-04-28T06:14:23 | 171,072,054 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
class Solution(object):
    def numWaterBottles(self, numBottles, numExchange):
        """
        :type numBottles: int
        :type numExchange: int
        :rtype: int
        """
        # Drink everything we start with, then keep trading empties for
        # full bottles until fewer than numExchange empties remain.
        total_drunk = numBottles
        empties = numBottles
        while empties // numExchange:
            traded = empties // numExchange
            total_drunk += traded
            empties = traded + empties % numExchange
        return total_drunk
|
[
"noreply@github.com"
] |
elssm.noreply@github.com
|
19051aed542c9f4efa751cfbf4908783c1d3215e
|
dd0d2a4da64200a7bea42d23122384189b900850
|
/common_digit.py
|
64c95fda4c01ff6bdc0db9231dae66fbd66e46a4
|
[] |
no_license
|
gakkistyle/comp9021
|
06ad00b47b7b0135013b014464b5f13530cad49d
|
4d0d4a2d719745528bf84ed0dfb88a43f858be7e
|
refs/heads/master
| 2022-09-24T13:10:29.609277 | 2020-06-06T16:54:42 | 2020-06-06T16:54:42 | 270,043,710 | 14 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 770 |
py
|
def average_of_digits(digit=None):
    """Return the average of the decimal digits shared by every number.

    :param digit: list of non-negative integers; ``None`` or empty yields -1
        (robustness fix: the original raised IndexError on an empty list)
    :return: mean of the distinct digits present in every number, or -1
        when the list is missing/empty or the numbers share no digit
    """
    if not digit:
        return -1
    # Distinct digits of the first number, then intersect with the rest.
    # Bug fix: the original removed entries from ``common`` while iterating
    # over it, which skips elements and can keep digits that are absent
    # from later numbers; set intersection is also linear instead of the
    # original quadratic scan.
    common = set(str(digit[0]))
    for number in digit[1:]:
        common &= set(str(number))
        if not common:
            # Early exit: no digit is shared by all numbers so far.
            return -1
    return sum(int(d) for d in common) / len(common)
print(average_of_digits([3136823,665537857,8363265,35652385]))
|
[
"1824150996@qq.com"
] |
1824150996@qq.com
|
b5ce86e5c7206e0947b0bcb912983f891ecd0ce1
|
6df76f8a6fcdf444c3863e3788a2f4b2c539c22c
|
/django code/p109/p109/asgi.py
|
dbabed799f89d9fe7ba5076c4cdafffb94c9e6d1
|
[] |
no_license
|
basantbhandari/DjangoProjectsAsDocs
|
068e4a704fade4a97e6c40353edb0a4299bd9678
|
594dbb560391eaf94bb6db6dc07702d127010b88
|
refs/heads/master
| 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 385 |
py
|
"""
ASGI config for p109 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p109.settings')
application = get_asgi_application()
|
[
"36443209+basantbhandari@users.noreply.github.com"
] |
36443209+basantbhandari@users.noreply.github.com
|
c357997cbb60325855930257b942fbd28f13b1d8
|
6130f811f3acfcb9f60939d8752bb035cadaf928
|
/examples/adspygoogle/dfp/v201311/order_service/update_orders.py
|
24750560181ff34e7c6f4beb06d0f96e0cb10b50
|
[
"Apache-2.0"
] |
permissive
|
gsembi/googleads-python-legacy-lib
|
f2e3197413c23c1192b11e54bf78c087f04a2baa
|
9de235ffb65d014dd6ba22be50659c910eca5ae2
|
refs/heads/master
| 2021-01-23T23:38:28.076465 | 2014-10-14T20:38:20 | 2014-10-14T20:38:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,707 |
py
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of a single order specified by ID.
To determine which orders exist, run get_all_orders.py."""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
from adspygoogle.dfp import DfpUtils
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201311')
# Create statement object to select a single order by an ID.
values = [{
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
query = 'WHERE id = :orderId'
statement = DfpUtils.FilterStatement(query, values)
# Get orders by statement.
response = order_service.GetOrdersByStatement(statement.ToStatement())[0]
orders = response.get('results')
if orders:
# Update each local order object by changing its notes.
updated_orders = []
for order in orders:
# Archived orders cannot be updated.
if not Utils.BoolTypeConvert(order['isArchived']):
order['notes'] = 'Spoke to advertiser. All is well.'
updated_orders.append(order)
# Update orders remotely.
orders = order_service.UpdateOrders(updated_orders)
# Display results.
if orders:
for order in orders:
print ('Order with id \'%s\', name \'%s\', advertiser id \'%s\', and '
'notes \'%s\' was updated.'
% (order['id'], order['name'], order['advertiserId'],
order['notes']))
else:
print 'No orders were updated.'
else:
print 'No orders found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ORDER_ID)
|
[
"emeralddragon88@gmail.com"
] |
emeralddragon88@gmail.com
|
ac8f9f2c4057ab0711a7c50124f020d7bd010361
|
5479cdac56abc115d3b52fbd31814dfd27262da7
|
/TaobaoSdk/Request/MarketingPromotionsGetRequest.py
|
5cd18fc86d83e7ffabe7feb005d71b73b4f75e98
|
[] |
no_license
|
xuyaoqiang-maimiao/TaobaoOpenPythonSDK
|
d9d2be6a7aa27c02bea699ed5667a9a30bf483ab
|
c82cde732e443ecb03cfeac07843e884e5b2167c
|
refs/heads/master
| 2021-01-18T07:49:57.984245 | 2012-08-21T07:31:10 | 2012-08-21T07:31:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,100 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 根据商品ID查询卖家使用该第三方工具对商品设置的所有优惠策略
# @author wuliang@maimiaotech.com
# @date 2012-08-09 12:36:54
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">根据商品ID查询卖家使用该第三方工具对商品设置的所有优惠策略</SPAN>
# <UL>
# </UL>
class MarketingPromotionsGetRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">获取API名称</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.marketing.promotions.get"
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">时间戳,如果不设置,发送请求时将使用当时的时间</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">需返回的优惠策略结构字段列表。可选值为Promotion中所有字段,如:promotion_id, promotion_title, item_id, status, tag_id等等</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Field List</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.fields = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">商品数字ID。根据该ID查询商品下通过第三方工具设置的所有优惠策略</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.num_iid = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">优惠策略状态。可选值:ACTIVE(有效),UNACTIVE(无效),若不传或者传入其他值,则默认查询全部</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">optional</SPAN>
# </LI>
# </UL>
self.status = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">标签ID</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Number</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">optional</SPAN>
# </LI>
# </UL>
self.tag_id = None
|
[
"liyangmin@maimiaotech.com"
] |
liyangmin@maimiaotech.com
|
5f8dc7f6ee3faba3e195cb8bd5c54cebab04e678
|
6eb282bbe4d43273b9e9cc8c0fa79400888cba9e
|
/tests/test_routes_image.py
|
e98c7912a9f48cdf2e6d390350ecba8cd232dc51
|
[
"MIT"
] |
permissive
|
jdelgad/memegen
|
d4300c707c5fee59aa2f5c5e0e8e606d699255ef
|
0de8e5d6bfae75843bbe0d149c7796cb044e24a7
|
refs/heads/master
| 2020-12-25T08:51:20.523408 | 2016-06-10T13:50:46 | 2016-06-10T13:50:46 | 60,996,119 | 1 | 0 | null | 2016-06-13T00:30:19 | 2016-06-13T00:30:19 | null |
UTF-8
|
Python
| false | false | 7,191 |
py
|
# pylint: disable=unused-variable,misplaced-comparison-constant,expression-not-assigned
import os
import pytest
from expecter import expect
from .conftest import load
TESTS = os.path.dirname(__file__)
ROOT = os.path.dirname(TESTS)
IMAGES = os.path.join(ROOT, "data", "images")
LATEST = os.path.join(IMAGES, "latest.jpg")
def describe_get():
def describe_visible():
def with_nominal_text(client):
path = os.path.join(IMAGES, 'iw', 'hello', 'world.jpg')
if os.path.exists(path):
os.remove(path)
response = client.get("/iw/hello/world.jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
assert os.path.isfile(path)
def with_only_1_line(client):
response = client.get("/iw/hello.jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
@pytest.mark.xfail(os.name == 'nt', reason="Windows has a path limit")
def with_lots_of_text(client):
top = "-".join(["hello"] * 20)
bottom = "-".join(["world"] * 20)
response = client.get("/iw/" + top + "/" + bottom + ".jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
def describe_hidden():
def when_jpg(client):
response = client.get("/_aXcJaGVsbG8vd29ybGQJ.jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
def describe_custom_style():
def when_provided(client):
response = client.get("/sad-biden/hello.jpg?alt=scowl")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
def it_redirects_to_lose_alt_when_default_style(client):
response = client.get("/sad-biden/hello.jpg?alt=default")
assert 302 == response.status_code
assert '<a href="/sad-biden/hello.jpg">' in \
load(response, as_json=False)
def it_redirects_to_lose_alt_when_unknown_style(client):
response = client.get("/sad-biden/hello.jpg?alt=__unknown__")
assert 302 == response.status_code
assert '<a href="/sad-biden/hello.jpg">' in \
load(response, as_json=False)
def it_keeps_alt_after_template_redirect(client):
response = client.get("/sad-joe/hello.jpg?alt=scowl")
assert 302 == response.status_code
assert '<a href="/sad-biden/hello.jpg?alt=scowl">' in \
load(response, as_json=False)
def it_keeps_alt_after_text_redirect(client):
response = client.get("/sad-biden.jpg?alt=scowl")
assert 302 == response.status_code
assert '-vote.jpg?alt=scowl">' in \
load(response, as_json=False)
def when_url(client):
url = "http://www.gstatic.com/webp/gallery/1.jpg"
response = client.get("/sad-biden/hello.jpg?alt=" + url)
expect(response.status_code) == 200
expect(response.mimetype) == 'image/jpeg'
def it_returns_an_error_with_non_image_urls(client):
url = "http://example.com"
response = client.get("/sad-biden/hello.jpg?alt=" + url)
expect(response.status_code) == 415
def it_redirects_to_lose_alt_when_unknown_url(client):
url = "http://example.com/not/a/real/image.jpg"
response = client.get("/sad-biden/hello.jpg?alt=" + url)
expect(response.status_code) == 302
expect(load(response, as_json=False)).contains(
'<a href="/sad-biden/hello.jpg">')
def it_redirects_to_lose_alt_when_bad_url(client):
url = "http:invalid"
response = client.get("/sad-biden/hello.jpg?alt=" + url)
expect(response.status_code) == 302
expect(load(response, as_json=False)).contains(
'<a href="/sad-biden/hello.jpg">')
def describe_custom_font():
def when_provided(client):
response = client.get("/iw/hello.jpg?font=impact")
expect(response.status_code) == 200
expect(response.mimetype) == 'image/jpeg'
def it_redirects_on_unknown_fonts(client):
response = client.get("/iw/hello.jpg?font=__unknown__")
expect(response.status_code) == 302
expect(load(response, as_json=False)).contains(
'<a href="/iw/hello.jpg">')
def describe_latest():
def when_existing(client):
open(LATEST, 'w').close() # force the file to exist
response = client.get("/latest.jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
def when_missing(client):
try:
os.remove(LATEST)
except FileNotFoundError:
pass
response = client.get("/latest.jpg")
assert 200 == response.status_code
assert 'image/png' == response.mimetype
def describe_redirects():
def when_missing_dashes(client):
response = client.get("/iw/HelloThere_World/How-areYOU.jpg")
assert 302 == response.status_code
assert '<a href="/iw/hello-there-world/how-are-you.jpg">' in \
load(response, as_json=False)
def when_no_text(client):
response = client.get("/live.jpg")
assert 302 == response.status_code
assert '<a href="/live/_/do-it-live!.jpg">' in \
load(response, as_json=False)
def when_aliased_template(client):
response = client.get("/insanity-wolf/hello/world.jpg")
assert 302 == response.status_code
assert '<a href="/iw/hello/world.jpg">' in \
load(response, as_json=False)
def when_jpeg_extension_without_text(client):
response = client.get("/iw.jpeg")
assert 302 == response.status_code
assert '<a href="/iw.jpg">' in \
load(response, as_json=False)
def when_jpeg_extension_with_text(client):
response = client.get("/iw/hello/world.jpeg")
assert 302 == response.status_code
assert '<a href="/iw/hello/world.jpg">' in \
load(response, as_json=False)
def describe_errors():
def when_unknown_template(client):
response = client.get("/make/sudo/give.me.jpg")
assert 200 == response.status_code
assert 'image/jpeg' == response.mimetype
# unit tests ensure this is a placeholder image
@pytest.mark.xfail(os.name == 'nt', reason="Windows has a path limit")
def when_too_much_text_for_a_filename(client):
top = "hello"
bottom = "-".join(["world"] * 50)
response = client.get("/iw/" + top + "/" + bottom + ".jpg")
assert 414 == response.status_code
assert {
'message': "Filename too long."
} == load(response)
|
[
"jacebrowning@gmail.com"
] |
jacebrowning@gmail.com
|
b5e23c5c655c526644f144779516ce18dd7a353e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/97/usersdata/194/54823/submittedfiles/lecker.py
|
f56acb6233287f3cbe81bfd2b3aa0164580158d3
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 824 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def lecker(lista):
cont=0
for i in range(0,len(lista)-1,1):
if i==0:
if lista[i]>lista[i+1]:
cont=cont+1
elif i==(len(lista)-1):
if lista[i]>lista[i-1]:
cont=cont+1
else:
if lista[i]>lista[i+1] and lista[i]>lista[i-1]:
cont=cont+1
if cont==1:
return True
else:
return False
a=[]
b=[]
n=int(input('quantidade de elementos:'))
for i in range(1,n+1,1):
valor=float(input('elementos da lista 1:'))
a.append(valor)
for i in range(1,n+1,1):
valor=float(input('elementos da lista 2:'))
b.append(valor)
if lecker(a):
print('S')
else:
print('N')
if lecker(b):
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
8aac474ed41ab941cc830699ba847bd56a96843a
|
7698a74a06e10dd5e1f27e6bd9f9b2a5cda1c5fb
|
/zzz.masterscriptsTEB_GIST/for005md.py
|
5c2e1af3abcf60dbbdff817943ffd3a973318e9a
|
[] |
no_license
|
kingbo2008/teb_scripts_programs
|
ef20b24fe8982046397d3659b68f0ad70e9b6b8b
|
5fd9d60c28ceb5c7827f1bd94b1b8fdecf74944e
|
refs/heads/master
| 2023-02-11T00:57:59.347144 | 2021-01-07T17:42:11 | 2021-01-07T17:42:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,330 |
py
|
import sys
import copy
import math
import matplotlib
import scipy
import numpy
import pylab
def read_MD_outfile(filename,totE, kE, pE, time, temp, pres):
fileh = open(filename,'r')
result_flag = False
count = 0
for line in fileh:
line = line.strip('\n')
splitline = line.split()
if "4. RESULTS" in line:
result_flag = True
elif "A V E R A G E S O V E R" in line:
result_flag = False
if (result_flag):
if "NSTEP" in line:
if (len(splitline)<11):
continue
t_time = float(splitline[5])/1000.0 # convert from ps to ns
t_temp = float(splitline[8])
t_pres = float(splitline[11])
time.append(t_time)
temp.append(t_temp)
pres.append(t_pres)
if "Etot" in line:
if (len(splitline)<8):
continue
t_totE = float(splitline[2])
t_kE = float(splitline[5])
t_pE = float(splitline[8])
totE.append(t_totE)
kE.append(t_kE)
pE.append(t_pE)
fileh.close()
return totE, kE, pE, time, temp, pres
def main():
if len(sys.argv) != 3:
print "error: this program takes 2 inputs:"
print " (1) filename that contains a list of md output files. If it doesn't exist do sth like this: "
print " ls 5609039/*.out > tmpout.txt"
print " (2) filename for png plot"
print " This should be done automatically as part of 005md.checkMDrun.csh"
exit()
filelist = sys.argv[1]
filenamepng = sys.argv[2]
# read in file with a list of mdout files.
print "filelist containing MD.out files: " + filelist
print "Plot will be saved as: " + filenamepng
filenamelist = []
fileh = open(filelist,'r')
for line in fileh:
tfile = line.strip("\n")
splitline = tfile.split(".")
if (splitline[-1] != "out"):
print "Error. %s is not a .out file" % tfile
exit()
filenamelist.append(tfile)
fileh.close()
totE = []
kE = []
pE = []
time = []
temp = []
pres = []
for filename in filenamelist:
print "reading info from file: " + filename
totE, kE, pE, time, temp, pres = read_MD_outfile(filename,totE, kE, pE, time, temp, pres)
# Plot with 5 panels; tabs [x_left,y_left,x_up,y_up].
subpanel = [ [0.2,0.1,0.3,0.2], [0.6,0.1,0.3,0.2], [0.2,0.4,0.3,0.2], [0.6,0.4,0.3,0.2], [0.2,0.7,0.3,0.2], [0.6,0.7,0.3,0.2] ]
descname = ["totE", "kE", "pE", "temp", "pres"]
fig = pylab.figure(figsize=(8,8))
for i,desc in enumerate([totE, kE, pE, temp, pres]):
#print len(desc), len(totE), len(time)
axis = fig.add_axes(subpanel[i])
#lim_min = min(math.floor(Ymin),math.floor(Xmin))
# lim_max = max(math.ceil(Ymax), math.ceil(Xmax))
im = axis.plot(time,desc,'k-') #,[0,100],[0,100],'--')
axis.set_xlabel("time (ns)")
axis.set_ylabel(descname[i])
#axis.set_title('file='+xyfilename)
#axis.set_ylim(lim_min, lim_max)
#axis.set_xlim(lim_min, lim_max)
#fig.savefig('md_analysis_fig.png',dpi=600)
fig.savefig(filenamepng,dpi=600)
main()
|
[
"tbalius@gimel.cluster.ucsf.bkslab.org"
] |
tbalius@gimel.cluster.ucsf.bkslab.org
|
7bfaaf0db70cf0354f13f8bb62ab277d818e5da2
|
972dff80b81c78082e9022084ef75e954b204471
|
/gui/system/alertmods/volume_status.py
|
44a265cdb00c201d6b3499a3c0ac6c890b8daed5
|
[] |
no_license
|
TomHoenderdos/freenas
|
34bbf9957ed5904f1296af5a57eedc95e04f1074
|
83ae0c1805ea7e57b70f436810eca3b9cc0c9825
|
refs/heads/master
| 2021-01-17T09:29:19.668079 | 2014-01-28T01:58:23 | 2014-01-28T01:58:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,124 |
py
|
import re
import subprocess
from django.utils.translation import ugettext_lazy as _
from freenasUI.storage.models import Volume
from freenasUI.system.alert import alertPlugins, Alert, BaseAlert
class VolumeStatusAlert(BaseAlert):
def on_volume_status_not_healthy(self, vol, status, message):
if message:
return Alert(
Alert.WARN,
_('The volume %(volume)s status is %(status)s:'
' %(message)s') % {
'volume': vol,
'status': status,
'message': message,
}
)
else:
return Alert(
Alert.WARN,
_('The volume %(volume)s status is %(status)s') % {
'volume': vol,
'status': status,
}
)
def volumes_status_enabled(self):
return True
def on_volume_status_degraded(self, vol, status, message):
self.log(self.LOG_CRIT, _('The volume %s status is DEGRADED') % vol)
def run(self):
if not self.volumes_status_enabled():
return
for vol in Volume.objects.filter(vol_fstype__in=['ZFS', 'UFS']):
if not vol.is_decrypted():
continue
status = vol.status
message = ""
if vol.vol_fstype == 'ZFS':
p1 = subprocess.Popen(
["zpool", "status", "-x", vol.vol_name],
stdout=subprocess.PIPE
)
stdout = p1.communicate()[0]
if stdout.find("pool '%s' is healthy" % vol.vol_name) != -1:
status = 'HEALTHY'
else:
reg1 = re.search('^\s*state: (\w+)', stdout, re.M)
if reg1:
status = reg1.group(1)
else:
# The default case doesn't print out anything helpful,
# but instead coredumps ;).
status = 'UNKNOWN'
reg1 = re.search(r'^\s*status: (.+)\n\s*action+:',
stdout, re.S | re.M)
reg2 = re.search(r'^\s*action: ([^:]+)\n\s*\w+:',
stdout, re.S | re.M)
if reg1:
msg = reg1.group(1)
msg = re.sub(r'\s+', ' ', msg)
message += msg
if reg2:
msg = reg2.group(1)
msg = re.sub(r'\s+', ' ', msg)
message += msg
if status == 'HEALTHY':
return [Alert(
Alert.OK, _('The volume %s status is HEALTHY') % (vol, )
)]
elif status == 'DEGRADED':
return [self.on_volume_status_degraded(vol, status, message)]
else:
return [
self.on_volume_status_not_healthy(vol, status, message)
]
alertPlugins.register(VolumeStatusAlert)
|
[
"wg@FreeBSD.org"
] |
wg@FreeBSD.org
|
a6fa412a4318bdd44745d738c2d2aa91cac8f9d2
|
277b9292d494db49836c93693257ecab87ebeb18
|
/ynlu/sdk/evaluation/tests/test_entity_overlapping_ratio.py
|
541945abbd95063fa16157907ee1d43443903ffe
|
[
"MIT"
] |
permissive
|
hsiaoyi0504/yoctol-nlu-py
|
90c2df421994006a49a4db7fe6f104d247201fbd
|
4cec8d52ba3dd7827bddde152c95e814e533c0f2
|
refs/heads/master
| 2020-03-23T23:41:40.055683 | 2018-04-19T08:57:42 | 2018-04-19T08:57:42 | 142,249,617 | 0 | 0 |
MIT
| 2018-07-25T05:00:05 | 2018-07-25T05:00:05 | null |
UTF-8
|
Python
| false | false | 6,265 |
py
|
from unittest import TestCase
from ..entity_overlapping_score import (
single__entity_overlapping_score,
entity_overlapping_score,
)
class OverlappingScoreTestCase(TestCase):
def test_single__entity_overlapping_score_different_length(self):
with self.assertRaises(ValueError):
single__entity_overlapping_score(
utterance="12",
entity_prediction=[
{"value": "1", "entity": "a"},
{"value": "2", "entity": "b"},
],
y_true=["a", "b", "c"],
)
def test_single__entity_overlapping_score(self):
test_cases = [
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "5", "6"],
"wrong_penalty_rate": 2.0,
},
-1.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "DONT_CARE", "6"],
"wrong_penalty_rate": 2.0,
},
-0.666666666667,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "2", "6"],
"wrong_penalty_rate": 2.0,
},
-0.33333333333333,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "DONT_CARE", "DONT_CARE"],
"wrong_penalty_rate": 2.0,
},
0.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.6666666666666667,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["5", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.3333333333333333,
),
(
{
"entity_prediction": [
{"entity": "DONT_CARE", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "DONT_CARE", "DONT_CARE"],
"wrong_penalty_rate": 2.0,
},
1.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["1", "2", "3"],
"wrong_penalty_rate": 2.0,
},
1.0,
),
]
for i, test_case in enumerate(test_cases):
with self.subTest(i=i):
result = single__entity_overlapping_score(**test_case[0])
self.assertAlmostEqual(test_case[1], result)
def test_entity_overlapping_score_different_amount(self):
with self.assertRaises(ValueError):
entity_overlapping_score(
utterances=["123", "345"],
entity_predictions=[[{"a": 1}], [{"b": 2}]],
y_trues=[["a"], ["b"], ["c"]],
)
def test_entity_overlapping_score(self):
result = entity_overlapping_score(
utterances=["123", "123"],
entity_predictions=[
[
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
[
{"entity": "DONT_CARE", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
],
y_trues=[
["5", "2", "3"],
["DONT_CARE", "DONT_CARE", "DONT_CARE"],
],
)
self.assertAlmostEqual(
(0.33333333333 + 1.0) / 2,
result,
)
|
[
"s916526000@gmail.com"
] |
s916526000@gmail.com
|
7ffcb76ec73333e2ac89d9c1b17839de77716f5e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/420/usersdata/329/87976/submittedfiles/exe11.py
|
715adcb70c57813e5b1796b83f844bcbc85024f3
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 244 |
py
|
# -*- coding: utf-8 -*-
n = int(input("digite um numero com 8 algarismos: "))
soma = 0
while n < 10000000 and n > 9999999:
resto = n % 10
n = (n - resto)/10
soma = soma + resto
print ('%d' % soma)
else:
print("NAO SEI")
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
85872ca81454d863e57c47043a303a247a75e42d
|
2a8abd5d6acdc260aff3639bce35ca1e688869e9
|
/telestream_cloud_qc_sdk/telestream_cloud_qc/models/frame_aspect_ratio_test.py
|
e350d1d1f34c6e4931d4824fe21895777c5735ce
|
[
"MIT"
] |
permissive
|
Telestream/telestream-cloud-python-sdk
|
57dd2f0422c83531e213f48d87bc0c71f58b5872
|
ce0ad503299661a0f622661359367173c06889fc
|
refs/heads/master
| 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 |
MIT
| 2018-01-22T10:07:49 | 2016-01-12T11:10:56 |
Python
|
UTF-8
|
Python
| false | false | 6,377 |
py
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class FrameAspectRatioTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'frame_aspect_ratio_numerator': 'int',
'frame_aspect_ratio_denominator': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'frame_aspect_ratio_numerator': 'frame_aspect_ratio_numerator',
'frame_aspect_ratio_denominator': 'frame_aspect_ratio_denominator',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, frame_aspect_ratio_numerator=None, frame_aspect_ratio_denominator=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""FrameAspectRatioTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._frame_aspect_ratio_numerator = None
self._frame_aspect_ratio_denominator = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if frame_aspect_ratio_numerator is not None:
self.frame_aspect_ratio_numerator = frame_aspect_ratio_numerator
if frame_aspect_ratio_denominator is not None:
self.frame_aspect_ratio_denominator = frame_aspect_ratio_denominator
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def frame_aspect_ratio_numerator(self):
"""Gets the frame_aspect_ratio_numerator of this FrameAspectRatioTest. # noqa: E501
:return: The frame_aspect_ratio_numerator of this FrameAspectRatioTest. # noqa: E501
:rtype: int
"""
return self._frame_aspect_ratio_numerator
@frame_aspect_ratio_numerator.setter
def frame_aspect_ratio_numerator(self, frame_aspect_ratio_numerator):
"""Sets the frame_aspect_ratio_numerator of this FrameAspectRatioTest.
:param frame_aspect_ratio_numerator: The frame_aspect_ratio_numerator of this FrameAspectRatioTest. # noqa: E501
:type: int
"""
self._frame_aspect_ratio_numerator = frame_aspect_ratio_numerator
@property
def frame_aspect_ratio_denominator(self):
"""Gets the frame_aspect_ratio_denominator of this FrameAspectRatioTest. # noqa: E501
:return: The frame_aspect_ratio_denominator of this FrameAspectRatioTest. # noqa: E501
:rtype: int
"""
return self._frame_aspect_ratio_denominator
@frame_aspect_ratio_denominator.setter
def frame_aspect_ratio_denominator(self, frame_aspect_ratio_denominator):
"""Sets the frame_aspect_ratio_denominator of this FrameAspectRatioTest.
:param frame_aspect_ratio_denominator: The frame_aspect_ratio_denominator of this FrameAspectRatioTest. # noqa: E501
:type: int
"""
self._frame_aspect_ratio_denominator = frame_aspect_ratio_denominator
@property
def reject_on_error(self):
"""Gets the reject_on_error of this FrameAspectRatioTest. # noqa: E501
:return: The reject_on_error of this FrameAspectRatioTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this FrameAspectRatioTest.
:param reject_on_error: The reject_on_error of this FrameAspectRatioTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this FrameAspectRatioTest. # noqa: E501
:return: The checked of this FrameAspectRatioTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this FrameAspectRatioTest.
:param checked: The checked of this FrameAspectRatioTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, FrameAspectRatioTest):
            return False
        # Equality is structural: compare the serialized field dictionaries.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, FrameAspectRatioTest):
            return True
        return self.to_dict() != other.to_dict()
|
[
"cloudsupport@telestream.net"
] |
cloudsupport@telestream.net
|
ed7b5fcf55324e383b99dd8f860e850435b47ada
|
0faf534ebb6db6f32279e5bee25b968bd425ce3a
|
/tests/core/_while/_while.py
|
b6d827a12289764a394e2ef4beffb7579457bc29
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PyHDI/veriloggen
|
e8647cb2d40737d84e31d6b89c5799bab9cbd583
|
f2b1b9567150af097eed1b5e79ba2b412854ef43
|
refs/heads/develop
| 2023-08-09T10:02:35.626403 | 2023-08-09T00:50:14 | 2023-08-09T00:50:14 | 37,813,184 | 282 | 60 |
Apache-2.0
| 2023-07-20T03:03:29 | 2015-06-21T15:05:30 |
Python
|
UTF-8
|
Python
| false | false | 1,032 |
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
def mkTest():
    """Build a Verilog test module that toggles a clock and counts to 1024.

    Uses the veriloggen DSL: each ``m.Initial(...)`` call emits an
    ``initial`` block in the generated Verilog.
    """
    m = Module('test')

    clk = m.Reg('CLK')
    rst = m.Reg('RST')
    count = m.Reg('count', width=32)

    # Waveform dump setup.
    m.Initial(
        Systask('dumpfile', '_while.vcd'),
        Systask('dumpvars', 0, clk, rst, count),
    )

    # Free-running clock.
    m.Initial(
        clk(0),
        Forever(clk(Not(clk), ldelay=5))  # forever #5 CLK = ~CLK;
    )

    # Reset pulse, then count 0..1023 on successive clock posedges.
    m.Initial(
        rst(0),
        Delay(100),
        rst(1),
        Delay(100),
        rst(0),
        Delay(1000),
        count(0),
        While(count < 1024)(
            count(count + 1),
            Event(Posedge(clk))
        ),
        Systask('finish'),
    )

    return m


if __name__ == '__main__':
    test = mkTest()
    verilog = test.to_verilog('')
    print(verilog)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
eb599ad48afd47de67a5a38758872173421836a2
|
f2a0c0cad8ccc82ac00c7fa9dbf06c5fec96089c
|
/Student_Management/main/urls.py
|
b6fe5fc9e31bae12859e560cff9d8544ad9433a3
|
[] |
no_license
|
tushargoyal22/Django-Learning
|
49bb0c97f6e344dae053a3c913a74c765a9a021b
|
eb87ac56220d7f0e1e4741cda754547180835713
|
refs/heads/master
| 2020-12-26T18:12:07.305533 | 2020-04-20T06:22:14 | 2020-04-20T06:22:14 | 237,585,513 | 0 | 0 | null | 2020-06-06T09:08:09 | 2020-02-01T08:31:48 |
CSS
|
UTF-8
|
Python
| false | false | 497 |
py
|
from django.urls import path
from main import views
# URL routes for the ``main`` app; all views are class-based (main/views.py).
urlpatterns = [
    path('', views.Index.as_view()),
    path('college/<int:pk>', views.CollegeDetail.as_view(), name='college'),
    path('colleges/', views.CollegeList.as_view()),
    path('create_college/', views.CollegeCreate.as_view()),
    path('update_college/<int:pk>', views.CollegeUpdate.as_view()),
    path('create_student/', views.StudentCreate.as_view()),
    path('delete_student/<int:pk>', views.StudentDelete.as_view())
]
|
[
"tushar22.tg.tg@gmail.com"
] |
tushar22.tg.tg@gmail.com
|
e187641d7db47cec739bd694e61860ff1f2d4b26
|
a48eaa4419b87c011abdee1eebfd04b469f4417b
|
/.history/ghostpost/views_20200211120737.py
|
0c516f78e19399fa4ac83bde5fc952b2f89adef3
|
[] |
no_license
|
Imraj423/ghostpost
|
6418d6c9561528ac8c31dd70d8aae7fac4c77cca
|
4edc559eb1f9ef0d11aae78e2b1dbd5c4903ddb5
|
refs/heads/master
| 2021-01-02T13:32:58.032239 | 2020-02-11T23:21:31 | 2020-02-11T23:21:31 | 239,644,968 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,858 |
py
|
from django.shortcuts import render
from django.contrib.auth.models import User
from ghostpost.models import ghostPost
from django.shortcuts import render, reverse, HttpResponseRedirect
from ghostpost.forms import addPost
def index(request):
    """Render the home page with every post."""
    item = ghostPost.objects.all()
    return render(request, 'index.html', {'data': item})


def detail(request):
    """Render the detail page.

    NOTE(review): this also fetches *all* posts rather than a single one —
    confirm the template really expects the full list.
    """
    item = ghostPost.objects.all()
    return render(request, 'detail.html', {'data': item})
def post_add(request):
    """Create a new post from the submitted form, or show a blank form.

    Bug fix: the original rebound ``form = addPost()`` unconditionally after
    an invalid POST, discarding the bound form and its validation errors.
    Now a fresh form is only built for non-POST requests, so an invalid
    submission re-renders with its error messages intact.
    """
    html = 'addpost.html'
    if request.method == 'POST':
        form = addPost(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            ghostPost.objects.create(
                message=data['message'],
                is_Boast=data['is_Boast']
            )
            return HttpResponseRedirect(reverse("index"))
    else:
        form = addPost()
    return render(request, html, {'form': form})
def like(request, id):
    """Increment a post's ``like`` counter, then redirect to the referrer."""
    post = ghostPost.objects.get(id=id)
    post.like += 1
    post.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))


def dislike(request, id):
    """Decrement the same ``like`` counter, then redirect to the referrer.

    NOTE(review): this reuses the shared ``like`` field — confirm a separate
    dislike count isn't intended on the model.
    """
    post = ghostPost.objects.get(id=id)
    post.like -= 1
    post.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
# NOTE(review): ``sorted`` below shadows the Python builtin at module level.
def sorted(request):
    """List posts ordered by like count, highest first."""
    html = "index.html"
    data = ghostPost.objects.all().order_by(
        "-like")
    return render(request, html, {"data": data})


def sortedt(request):
    """List posts newest-first (by ``time``)."""
    html = "index.html"
    data = ghostPost.objects.all().order_by("-time")
    return render(request, html, {"data": data})


def sortedb(request):
    """List posts with boasts first (descending ``is_Boast``).

    NOTE(review): a second ``sortedb`` defined later in this module shadows
    this function — only one of the two can be reached from the URLconf.
    """
    html = "index.html"
    data = ghostPost.objects.all().order_by("-is_Boast")
    return render(request, html, {"data": data})
def sortedb(request):
    """List only the non-boast ("roast") posts.

    Bug fix: the original called ``order_by("-is_Boast=False")`` —
    ``"is_Boast=False"`` is not a field name, so Django raises ``FieldError``
    the moment the queryset is evaluated.  Filtering on ``is_Boast=False``
    matches the apparent intent.  NOTE(review): this definition shadows the
    earlier ``sortedb`` — confirm which one the URLconf should use.
    """
    html = "index.html"
    data = ghostPost.objects.filter(is_Boast=False)
    return render(request, html, {"data": data})
|
[
"dahqniss@gmail.com"
] |
dahqniss@gmail.com
|
652e8748f26f358862132b7fc9300aa65f1f05ec
|
3ff9821b1984417a83a75c7d186da9228e13ead9
|
/No_0530_Minimum Absolute Difference in BST/minimum_absolute)difference_in_BST_by_inorder_iteration.py
|
a7418dd3d34f9db81a543e4abdb35916f72c1593
|
[
"MIT"
] |
permissive
|
brianchiang-tw/leetcode
|
fd4df1917daef403c48cb5a3f5834579526ad0c2
|
6978acfb8cb767002cb953d02be68999845425f3
|
refs/heads/master
| 2023-06-11T00:44:01.423772 | 2023-06-01T03:52:00 | 2023-06-01T03:52:00 | 222,939,709 | 41 | 12 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,094 |
py
|
'''
Description:
Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
Example:
Input:
1
\
3
/
2
Output:
1
Explanation:
The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
Note: There are at least two nodes in this BST.
'''
class TreeNode:
    """Plain binary-tree node (LeetCode style)."""

    def __init__(self, x):
        self.val = x        # node value
        self.left = None    # left child, or None
        self.right = None   # right child, or None
class Solution:
    def getMinimumDifference(self, root: "TreeNode") -> int:
        """Return the minimum absolute difference between any two node
        values in a BST, via an explicit-stack in-order traversal.

        Each stack entry is ``(node, label)``; label ``'c'`` marks a node
        whose children are already scheduled, so popping it is its in-order
        visit.  In-order visits a BST in ascending order, so the answer is
        the minimum gap between consecutive visited values.
        """
        traversal_queue = [(root, 'init')]
        min_diff, prev_node_value = float('inf'), -2**31

        while traversal_queue:
            node, label = traversal_queue.pop()
            # Bug fix: the original used ``label is not 'c'`` — identity
            # comparison against a str literal is implementation-dependent
            # and a SyntaxWarning on CPython >= 3.8.  Use equality.
            if label != 'c':
                # Push right, self ('c'), left so they pop in in-order order.
                if node.right:
                    traversal_queue.append((node.right, 'r'))
                traversal_queue.append((node, 'c'))
                if node.left:
                    traversal_queue.append((node.left, 'l'))
            else:
                min_diff = min(min_diff, node.val - prev_node_value)
                prev_node_value = node.val

        return min_diff
# n : the number of nodes in binary search tree
## Time Complexity: O( n )
#
# The overhead in time is the cost of in-order traversal, which is of O( n )
## Space Complexity: O( n )
#
# THe overhead in space is the storage for traversal_queue, which is of O( n )
def test_bench():
    """Ad-hoc smoke test: prints the answer for two small hand-built BSTs."""
    ## Test case_#1
    root_1 = TreeNode(1)
    root_1.right = TreeNode(3)
    root_1.right.left = TreeNode(2)

    # expected output:
    '''
    1
    '''
    print(Solution().getMinimumDifference(root_1))

    ## Test case_#2
    root_2 = TreeNode(5)
    root_2.left = TreeNode(1)
    root_2.right = TreeNode(10)
    root_2.right.left = TreeNode(8)
    root_2.right.right = TreeNode(13)

    # expected output:
    '''
    2
    '''
    print(Solution().getMinimumDifference(root_2))


if __name__ == '__main__':
    test_bench()
|
[
"brianchiang1988@icloud.com"
] |
brianchiang1988@icloud.com
|
35fddb176546bcdc04b5f7168fe7656d9d16c1c5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02675/s648199301.py
|
b63ed0e3bb4be52116a50e76ac3fe5f3864781f1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
# -*- coding: utf-8 -*-
def main():
    """Read integer N and print its counter word based on the last digit.

    AtCoder ABC168 A: digits {2,4,5,7,9} -> 'hon', {0,1,6,8} -> 'pon',
    3 -> 'bon'.
    """
    last_digit = int(input()) % 10
    if last_digit == 3:
        ans = 'bon'
    elif last_digit in (0, 1, 6, 8):
        ans = 'pon'
    else:
        # Remaining digits are exactly {2, 4, 5, 7, 9}.
        ans = 'hon'
    print(ans)


if __name__ == "__main__":
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
96a1d7b58328b30fde41e93d4831caca9bf6fc36
|
9aaa39f200ee6a14d7d432ef6a3ee9795163ebed
|
/Algorithm/Python/146. LRU Cache.py
|
12ae61d680fa056cf718b935addad161d26c1efe
|
[] |
no_license
|
WuLC/LeetCode
|
47e1c351852d86c64595a083e7818ecde4131cb3
|
ee79d3437cf47b26a4bca0ec798dc54d7b623453
|
refs/heads/master
| 2023-07-07T18:29:29.110931 | 2023-07-02T04:31:00 | 2023-07-02T04:31:00 | 54,354,616 | 29 | 16 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,259 |
py
|
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2016-08-04 22:39:03
# @Last modified by: WuLC
# @Last Modified time: 2016-08-04 22:40:49
# @Email: liangchaowu5@gmail.com
class LRUCache(object):
    """LeetCode 146 — LRU cache with the legacy ``get``/``set`` interface.

    ``self.keys`` holds keys in least- to most-recently-used order; every
    access moves the touched key to the right end.  ``deque.remove`` is
    O(n), so this is the simple variant, not the O(1) linked-hash one.
    """

    def __init__(self, capacity):
        """
        :type capacity: int  (assumed >= 1; a 0 capacity would underflow
                              the deque on the first insert)
        """
        # Bug fix: the original module referenced ``collections.deque``
        # without ever importing ``collections`` (NameError at runtime).
        # Import locally so the class is self-contained.
        from collections import deque
        self.capacity = capacity
        self.cache = {}          # key -> value
        self.keys = deque()      # recency order, LRU at the left end
        self.exist_keys = set()  # O(1) membership test for cached keys

    def get(self, key):
        """
        :rtype: int -- the cached value, or -1 when the key is absent
        """
        if key in self.exist_keys:
            # Refresh recency: move the key to the MRU (right) end.
            self.keys.remove(key)
            self.keys.append(key)
            return self.cache[key]
        return -1

    def set(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: nothing
        """
        if key not in self.exist_keys:
            self.exist_keys.add(key)
            if len(self.keys) == self.capacity:
                # Evict the LRU element from all three structures.
                old_key = self.keys.popleft()
                self.exist_keys.remove(old_key)
                del self.cache[old_key]
        else:
            self.keys.remove(key)
        self.keys.append(key)
        self.cache[key] = value
|
[
"liangchaowu5@gmail.com"
] |
liangchaowu5@gmail.com
|
f86f346345b1e788b5612e8ac5f117bc6c0dbce1
|
e168a4b9e7997b5266df4c1fe2afbaf0ed031fed
|
/url_shortener/profiles/forms.py
|
470c5cd6344634922a1279b0c41660591cc5b23a
|
[] |
no_license
|
AaronScruggs/urly-bird
|
756eba26f21c66e78ed93bf6f936b50fb927aaef
|
a27314afb309de42230852fc2bd35416dece46d9
|
refs/heads/master
| 2021-01-22T01:18:59.907605 | 2016-04-05T07:01:53 | 2016-04-05T07:01:53 | 55,178,264 | 0 | 0 | null | 2016-03-31T19:45:02 | 2016-03-31T19:45:01 | null |
UTF-8
|
Python
| false | false | 217 |
py
|
from django import forms
from django.contrib.auth.models import User
from profiles.models import Profile
class ImageUpdateForm(forms.ModelForm):
    """ModelForm exposing only the ``image`` field of ``Profile``."""

    class Meta:
        model = Profile
        fields = ("image",)
|
[
"aarondscruggs@gmail.com"
] |
aarondscruggs@gmail.com
|
a74b58b3e5974f4098f7a4932dfa112f9fedbc7e
|
19ddab74600f71700a6b693281d0180d5271f295
|
/程序员面试金典/01_04_回文排列.py
|
bc02963092c9dc8e4d739287a6103fd74aad53ce
|
[] |
no_license
|
zhulf0804/Coding.Python
|
4d55a430da1a8077c81feba65c13ac654aaf094a
|
46ab03e23d15ebd5434ef4dd5ae99130000b00a5
|
refs/heads/master
| 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 337 |
py
|
class Solution:
    def canPermutePalindrome(self, s: str) -> bool:
        """Return True iff some permutation of ``s`` is a palindrome.

        That holds exactly when at most one character occurs an odd number
        of times.
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1

        found_odd = False
        for occurrences in counts.values():
            if occurrences % 2 == 1:
                if found_odd:
                    return False
                found_odd = True
        return True
|
[
"zhulf0804@gmail.com"
] |
zhulf0804@gmail.com
|
16ae617aa0dff53873785822c7cb2db033f9590b
|
494e3fbbdff5cf6edb087f3103ad5f15acbc174f
|
/schedule/migrations/0002_auto_20180727_2329.py
|
d0a6847d6321e79defcf1bfbd06aa6f38fb59def
|
[] |
no_license
|
TalentoUnicamp/my
|
1209048acdedbb916b8ae8ec80761d09f6ad7754
|
3d87a33cd282d97dbbbd5f62658f231456f12765
|
refs/heads/master
| 2020-03-23T21:12:58.316033 | 2018-08-14T06:11:36 | 2018-08-14T06:11:36 | 142,090,262 | 11 | 0 | null | 2018-08-17T05:13:26 | 2018-07-24T01:53:23 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,698 |
py
|
# Generated by Django 2.0.3 on 2018-07-28 02:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: loosens null/blank constraints and choice
    sets on the schedule app's Event and Feedback models."""

    dependencies = [
        ('schedule', '0001_initial'),
    ]

    operations = [
        # NOTE(review): ``null=True`` has no effect on ManyToManyField
        # (Django ignores it with a warning) — confirm ``blank=True`` was
        # intended here and on ``attendees``.
        migrations.AlterField(
            model_name='event',
            name='attended',
            field=models.ManyToManyField(null=True, related_name='attended_events', to='user_profile.Profile'),
        ),
        migrations.AlterField(
            model_name='event',
            name='attendees',
            field=models.ManyToManyField(null=True, related_name='selected_events', to='user_profile.Profile'),
        ),
        migrations.AlterField(
            model_name='event',
            name='event_type',
            field=models.CharField(choices=[('Meta', 'Meta'), ('Keynote', 'Keynote'), ('Workshop', 'Workshop'), ('Palestra', 'Palestra')], max_length=20),
        ),
        migrations.AlterField(
            model_name='event',
            name='max_attendees',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='speaker',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='my_events', to='user_profile.Profile'),
        ),
        migrations.AlterField(
            model_name='feedback',
            name='comments',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='feedback',
            name='rating',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
[
"gustavomaronato@gmail.com"
] |
gustavomaronato@gmail.com
|
9abea3f326ea59ebd86d1c7b1d83e63ad82ffd60
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/ChenglongChen_Kaggle_HomeDepot/Kaggle_HomeDepot-master/Code/Chenglong/feature_group_distance.py
|
8be14bcf62e8f822d47294b1071b3b95a6516e0a
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 |
Python
|
UTF-8
|
Python
| false | false | 4,848 |
py
|
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: group relevance based distance features
@note: such features are not used in final submission
"""
import re
import string
import numpy as np
import pandas as pd
import config
from config import TRAIN_SIZE
from utils import dist_utils, ngram_utils, nlp_utils
from utils import logging_utils, pkl_utils, time_utils
from feature_base import BaseEstimator, StandaloneFeatureWrapper, PairwiseFeatureWrapper
# tune the token pattern to get a better correlation with y_train
# token_pattern = r"(?u)\b\w\w+\b"
# token_pattern = r"\w{1,}"
# token_pattern = r"\w+"
# token_pattern = r"[\w']+"
token_pattern = " " # just split the text into tokens
# -------------------- Group by (obs, relevance) based distance features ----------------------------------- #
# Something related to Query Expansion
class GroupRelevance_Ngram_Jaccard(BaseEstimator):
    """Single aggregation features.

    For a query ``obs``, measures the Jaccard similarity between the target
    text and the target texts of *training* rows that share the same query
    and carry a specific relevance label (query-expansion style feature).
    """

    def __init__(self, obs_corpus, target_corpus, id_list, dfTrain, target_field, relevance, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode, id_list)
        # Rows with relevance 0 are placeholders — drop them up front.
        self.dfTrain = dfTrain[dfTrain["relevance"] != 0].copy()
        self.target_field = target_field
        self.relevance = relevance
        self.relevance_str = self._relevance_to_str()
        self.ngram = ngram
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]

    def __name__(self):
        # Feature name(s); a list is produced when several aggregation
        # modes are requested.
        if isinstance(self.aggregation_mode, str):
            feat_name = "Group_%sRelevance_%s_Jaccard_%s"%(
                self.relevance_str, self.ngram_str, string.capwords(self.aggregation_mode))
        elif isinstance(self.aggregation_mode, list):
            feat_name = ["Group_%sRelevance_%s_Jaccard_%s"%(
                self.relevance_str, self.ngram_str, string.capwords(m)) for m in self.aggregation_mode]
        return feat_name

    def _relevance_to_str(self):
        # e.g. 2.5 -> "2d5" so the value is filesystem/feature-name safe.
        if isinstance(self.relevance, float):
            return re.sub("\.", "d", str(self.relevance))
        else:
            return str(self.relevance)

    def transform_one(self, obs, target, id):
        """Return Jaccard coefficients between ``target`` and every training
        target sharing query ``obs`` at the configured relevance level;
        falls back to ``[MISSING_VALUE_NUMERIC]`` when no such row exists."""
        df = self.dfTrain[self.dfTrain["search_term"] == obs].copy()
        val_list = [config.MISSING_VALUE_NUMERIC]
        # NOTE(review): ``df`` is never None after the slice/copy above, so
        # these ``is not None`` guards are effectively always true.
        if df is not None:
            df = df[df["id"] != id].copy()  # exclude the observation itself
            df = df[df["relevance"] == self.relevance].copy()
            if df is not None and df.shape[0] > 0:
                target_tokens = nlp_utils._tokenize(target, token_pattern)
                target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
                val_list = []
                for x in df[self.target_field]:
                    x_tokens = nlp_utils._tokenize(x, token_pattern)
                    x_ngrams = ngram_utils._ngrams(x_tokens, self.ngram)
                    val_list.append(dist_utils._jaccard_coef(x_ngrams, target_ngrams))
        return val_list
# -------------------------------- Main ----------------------------------
def main():
    """Generate group-relevance Jaccard features for each CV split and for
    the full training set, logging progress to a timestamped file."""
    logname = "generate_feature_group_distance_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfTrain = dfAll.iloc[:TRAIN_SIZE].copy()

    ## run python3 splitter.py first
    split = pkl_utils._load("%s/splits_level1.pkl"%config.SPLIT_DIR)
    n_iter = len(split)

    # Full label set kept for reference; only the 7 coarse levels are used.
    relevances_complete = [1, 1.25, 1.33, 1.5, 1.67, 1.75, 2, 2.25, 2.33, 2.5, 2.67, 2.75, 3]
    relevances = [1, 1.33, 1.67, 2, 2.33, 2.67, 3]
    ngrams = [1]
    obs_fields = ["search_term"]
    target_fields = ["product_title", "product_description"]
    aggregation_mode = ["mean", "std", "max", "min", "median"]

    ## for cv: build features per fold, using only that fold's training rows
    for i in range(n_iter):
        trainInd, validInd = split[i][0], split[i][1]
        dfTrain2 = dfTrain.iloc[trainInd].copy()
        sub_feature_dir = "%s/Run%d" % (config.FEAT_DIR, i+1)
        for target_field in target_fields:
            for relevance in relevances:
                for ngram in ngrams:
                    param_list = [dfAll["id"], dfTrain2, target_field, relevance, ngram, aggregation_mode]
                    pf = PairwiseFeatureWrapper(GroupRelevance_Ngram_Jaccard, dfAll, obs_fields, [target_field], param_list, sub_feature_dir, logger)
                    pf.go()

    ## for all: the same features computed against the entire training set
    sub_feature_dir = "%s/All" % (config.FEAT_DIR)
    for target_field in target_fields:
        for relevance in relevances:
            for ngram in ngrams:
                param_list = [dfAll["id"], dfTrain, target_field, relevance, ngram, aggregation_mode]
                pf = PairwiseFeatureWrapper(GroupRelevance_Ngram_Jaccard, dfAll, obs_fields, [target_field], param_list, sub_feature_dir, logger)
                pf.go()


if __name__ == "__main__":
    main()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
4fccba1e6cf207096ecb5d43ef2b1e74b10f2d7a
|
e41651d8f9b5d260b800136672c70cb85c3b80ff
|
/Notification_System/temboo/Library/Flickr/PhotoComments/LeaveComment.py
|
86bbc8411b315c8fddfd9fdd48b7df1f6c43f6c9
|
[] |
no_license
|
shriswissfed/GPS-tracking-system
|
43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c
|
1c5e90a483386bd2e5c5f48f7c5b306cd5f17965
|
refs/heads/master
| 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,616 |
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# LeaveComment
# Add a comment to a specified photo on Flickr.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class LeaveComment(Choreography):
    """Generated Temboo Choreo wrapper for Flickr's add-photo-comment call."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the LeaveComment Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(LeaveComment, self).__init__(temboo_session, '/Library/Flickr/PhotoComments/LeaveComment')

    def new_input_set(self):
        return LeaveCommentInputSet()

    def _make_result_set(self, result, path):
        return LeaveCommentResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return LeaveCommentChoreographyExecution(session, exec_id, path)


class LeaveCommentInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the LeaveComment
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
        """
        super(LeaveCommentInputSet, self)._set_input('APIKey', value)

    def set_APISecret(self, value):
        """
        Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Flickr (AKA the OAuth Consumer Secret).)
        """
        super(LeaveCommentInputSet, self)._set_input('APISecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(LeaveCommentInputSet, self)._set_input('AccessToken', value)

    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(LeaveCommentInputSet, self)._set_input('AccessTokenSecret', value)

    def set_CommentText(self, value):
        """
        Set the value of the CommentText input for this Choreo. ((required, string) The text of the comment you are adding.)
        """
        super(LeaveCommentInputSet, self)._set_input('CommentText', value)

    def set_PhotoID(self, value):
        """
        Set the value of the PhotoID input for this Choreo. ((required, integer) The id of the photo to add a comment to)
        """
        super(LeaveCommentInputSet, self)._set_input('PhotoID', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
        """
        super(LeaveCommentInputSet, self)._set_input('ResponseFormat', value)


class LeaveCommentResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the LeaveComment Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    # NOTE(review): generated parameter name ``str`` shadows the builtin;
    # left unchanged because callers may pass it by keyword.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Flickr.)
        """
        return self._output.get('Response', None)


class LeaveCommentChoreographyExecution(ChoreographyExecution):
    """Execution handle that materializes LeaveCommentResultSet results."""

    def _make_result_set(self, response, path):
        return LeaveCommentResultSet(response, path)
|
[
"shriswissfed@gmail.com"
] |
shriswissfed@gmail.com
|
ec358af8dcc747a31d12f7fb499c7a78bba2c640
|
7701773efa258510951bc7d45325b4cca26b3a7d
|
/from_trans_file_cloud/explore_pathlib.py
|
cd6ac1e600ecf9cc21bb0408817543f804917d9b
|
[] |
no_license
|
Archanciel/explore
|
c170b2c8b5eed0c1220d5e7c2ac326228f6b2485
|
0576369ded0e54ce7ff9596ec4df076e69067e0c
|
refs/heads/master
| 2022-06-17T19:15:03.647074 | 2022-06-01T20:07:04 | 2022-06-01T20:07:04 | 105,314,051 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
from pathlib import Path
# NOTE(review): backslashes only act as path separators on Windows; on POSIX
# each string below parses as a single path component, so the membership test
# prints False there and True on Windows.
root = Path('D:\\Development\\Python\\trans_file_cloud\\.git')
child = Path('D:\\Development\\Python\\trans_file_cloud\\.git\\hooks')
other = Path('/some/other/path')  # unused in the check below
# True iff ``root`` is an ancestor directory of ``child``.
print(root in child.parents)
|
[
"jp.schnyder@gmail.com"
] |
jp.schnyder@gmail.com
|
ae83c59eb63599eac7d7f45ea8229a239af25040
|
82f993631da2871933edf83f7648deb6c59fd7e4
|
/w1/L3/7.py
|
8469a86b108877706bb07df0088f4d1eea2b7434
|
[] |
no_license
|
bobur554396/PPII2021Summer
|
298f26ea0e74c199af7b57a5d40f65e20049ecdd
|
7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2
|
refs/heads/master
| 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
# - [] Iterators and Iterables
a = [1, 2, 3, 4]  # an iterable object
it = iter(a)      # single-pass iterator over the list

# Manual consumption (a fifth next() would raise StopIteration):
# print(next(it))
# print(next(it))
# print(next(it))
# print(next(it))
# print(next(it))

# The for-loop drains the iterator from its current position (here: start).
for i in it:
    print(i)
|
[
"bobur.muhsimbaev@gmail.com"
] |
bobur.muhsimbaev@gmail.com
|
f9a25ea75f1038ebb53730647439228ea1d83873
|
9102c3a5fa3a5b0202d61206973d0ea167f7a4d0
|
/July/07-IslandPerimeter.py
|
a93da08ce948ac402b6597b23157a28ceea1580f
|
[] |
no_license
|
Madhav-Somanath/LeetCode
|
8e1b39e106cec238e5a2a3acb3eb267f5c36f781
|
b6950f74d61db784095c71df5115ba10be936c65
|
refs/heads/master
| 2023-01-08T15:10:00.249806 | 2020-10-31T14:45:43 | 2020-10-31T14:45:43 | 255,654,520 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,657 |
py
|
""" You are given a map in form of a two-dimensional integer grid where 1 represents land and 0 represents water.
Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water,
and there is exactly one island (i.e., one or more connected land cells).
The island doesn't have "lakes" (water inside that isn't connected to the water around the island). One cell is a square with side length 1.
The grid is rectangular, width and height don't exceed 100. Determine the perimeter of the island. """
# SOLUTION
class Solution:
    def islandPerimeter(self, grid: List[List[int]]) -> int:
        """Return the perimeter of the single island in ``grid``.

        Each land cell (value 1) contributes one unit of perimeter per side
        that faces water (value 0) or the edge of the grid.
        """
        if not grid:
            return 0

        rows, cols = len(grid), len(grid[0])
        perimeter = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] != 1:
                    continue
                # Count exposed sides of this land cell.
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                    if nr < 0 or nc < 0 or nr == rows or nc == cols or grid[nr][nc] == 0:
                        perimeter += 1
        return perimeter
|
[
"madhav.somanath@gmail.com"
] |
madhav.somanath@gmail.com
|
a7d11fe7ad97288252922c00a7c365e7199665ed
|
43e900f11e2b230cdc0b2e48007d40294fefd87a
|
/Amazon/VideoOnsite/162.find-peak-element.py
|
5b3ada63691cf9fcf4b02f7261a2be18b71ec8d7
|
[] |
no_license
|
DarkAlexWang/leetcode
|
02f2ed993688c34d3ce8f95d81b3e36a53ca002f
|
89142297559af20cf990a8e40975811b4be36955
|
refs/heads/master
| 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 557 |
py
|
#
# @lc app=leetcode id=162 lang=python3
#
# [162] Find Peak Element
#
# @lc code=start
class Solution:
    def findPeakElement(self, nums: List[int]) -> int:
        """Binary-search for the index of any peak (strictly greater than
        both neighbors); O(log n)."""
        lo, hi = 0, len(nums) - 1
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            rises_left = nums[mid] > nums[mid - 1]
            falls_right = nums[mid] > nums[mid + 1]
            if rises_left and falls_right:
                return mid  # interior peak found directly
            if falls_right:
                hi = mid    # a peak must lie at or left of mid
            else:
                lo = mid    # a peak must lie right of mid
        # Two (or one) candidates remain; pick the larger endpoint.
        return hi if nums[lo] < nums[hi] else lo
# @lc code=end
|
[
"wangzhihuan0815@gmail.com"
] |
wangzhihuan0815@gmail.com
|
04fa896307a6d243658fb915099d337f76804cd5
|
86813bf514f3e0257f92207f40a68443f08ee44b
|
/0406 根据身高重建队列/0406 根据身高重建队列.py
|
989f32ac1430a2408dcaef254410bf9310c75be2
|
[] |
no_license
|
Aurora-yuan/Leetcode_Python3
|
4ce56679b48862c87addc8cd870cdd525c9d926c
|
720bb530850febc2aa67a56a7a0b3a85ab37f415
|
refs/heads/master
| 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 838 |
py
|
#label: 贪心算法 difficulty: medium
"""
思路
1.排序:按照身高从高到低排,升高相同的按k从小到大排
2.插入:按照排序好的顺序逐个插入新数组,插入的位置按照k来插
如示例中,排序完:
[[7,0], [7,1], [6,1], [5,0], [5,2],[4,4]]
插入的过程:
第一插:[[7,0]]
第二插:[[7,0], [7,1]]
第三插:[[7,0], [6,1],[7,1]]
第四插:[[5,0],[7,0], [6,1],[7,1]]
...
先插高的,后插矮的,即使后插的插到前面也不会有影像,因为矮
"""
class Solution(object):
    def reconstructQueue(self, people):
        """Rebuild the queue from (height, k) pairs.

        Greedy: sort by height descending (ties: k ascending), then insert
        each person at index k.  Inserting a shorter person in front of
        already-placed taller people never invalidates their k counts.

        :type people: List[List[int]]
        :rtype: List[List[int]]
        """
        # Bug fix: the original used the Python-2-only tuple-unpacking
        # lambda ``lambda (h, k): (-h, k)``, which is a SyntaxError on
        # Python 3.  Index the pair explicitly instead.
        people.sort(key=lambda p: (-p[0], p[1]))
        res = []
        for p in people:
            res.insert(p[1], p)
        return res
|
[
"noreply@github.com"
] |
Aurora-yuan.noreply@github.com
|
963b0a84d3f5586261ec0ed22a68007f2a76aa70
|
0a1356b97465cc1d5c3f661f61b3b8c51fb05d46
|
/android_binding/.buildozer/android/platform/python-for-android/testapps/testapp/main.py
|
5baa420f3c203147e6abbfe8085c24a0a8778493
|
[
"MIT",
"Python-2.0"
] |
permissive
|
Rohan-cod/cross_platform_calc
|
00360f971e4da68dd36d6836c9ddbb157f6b77d5
|
5785a5e8150d174019b330c812e7eb012cc4dd79
|
refs/heads/master
| 2022-12-22T10:29:05.317051 | 2021-06-05T10:52:44 | 2021-06-05T10:52:44 | 237,465,912 | 2 | 1 |
MIT
| 2022-12-09T05:18:55 | 2020-01-31T16:07:31 |
C
|
UTF-8
|
Python
| false | false | 4,015 |
py
|
print('main.py was successfully called')
import os
print('imported os')
print('this dir is', os.path.abspath(os.curdir))
print('contents of this dir', os.listdir('./'))
import sys
print('pythonpath is', sys.path)
import kivy
print('imported kivy')
print('file is', kivy.__file__)
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from kivy.clock import Clock
print('Imported kivy')
from kivy.utils import platform
print('platform is', platform)
kv = '''
#:import Metrics kivy.metrics.Metrics
#:import sys sys
<FixedSizeButton@Button>:
size_hint_y: None
height: dp(60)
ScrollView:
GridLayout:
cols: 1
size_hint_y: None
height: self.minimum_height
FixedSizeButton:
text: 'test pyjnius'
on_press: app.test_pyjnius()
Image:
keep_ratio: False
allow_stretch: True
source: 'colours.png'
size_hint_y: None
height: dp(100)
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 100
text_size: self.size[0], None
markup: True
text: '[b]Kivy[/b] on [b]SDL2[/b] on [b]Android[/b]!'
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: sys.version
halign: 'center'
padding_y: dp(10)
Widget:
size_hint_y: None
height: 20
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 50
text_size: self.size[0], None
markup: True
text: 'dpi: {}\\ndensity: {}\\nfontscale: {}'.format(Metrics.dpi, Metrics.density, Metrics.fontscale)
halign: 'center'
FixedSizeButton:
text: 'test ctypes'
on_press: app.test_ctypes()
FixedSizeButton:
text: 'test numpy'
on_press: app.test_numpy()
Widget:
size_hint_y: None
height: 1000
on_touch_down: print('touched at', args[-1].pos)
<ErrorPopup>:
title: 'Error'
size_hint: 0.75, 0.75
Label:
text: root.error_text
'''
class ErrorPopup(Popup):
    """Modal popup whose body label displays ``error_text`` (bound in kv)."""

    error_text = StringProperty('')


def raise_error(error):
    """Log an error and surface it to the user in an ErrorPopup."""
    print('ERROR:', error)
    ErrorPopup(error_text=error).open()
class TestApp(App):
    """Smoke-test app for python-for-android builds: exercises kivy metrics,
    pyjnius, ctypes and numpy from buttons defined in the ``kv`` string."""

    def build(self):
        root = Builder.load_string(kv)
        # Heartbeat so the log shows the app is still alive.
        Clock.schedule_interval(self.print_something, 2)
        # Clock.schedule_interval(self.test_pyjnius, 5)
        print('testing metrics')
        from kivy.metrics import Metrics
        print('dpi is', Metrics.dpi)
        print('density is', Metrics.density)
        print('fontscale is', Metrics.fontscale)
        return root

    def print_something(self, *args):
        print('App print tick', Clock.get_boottime())

    def on_pause(self):
        # Allow the Android lifecycle to pause the app instead of killing it.
        return True

    def test_pyjnius(self, *args):
        """Vibrate the device for 1s via the Android Vibrator service."""
        try:
            from jnius import autoclass
        except ImportError:
            raise_error('Could not import pyjnius')
            return
        print('Attempting to vibrate with pyjnius')
        # PythonActivity = autoclass('org.renpy.android.PythonActivity')
        # activity = PythonActivity.mActivity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        activity = PythonActivity.mActivity
        Intent = autoclass('android.content.Intent')
        Context = autoclass('android.content.Context')
        vibrator = activity.getSystemService(Context.VIBRATOR_SERVICE)
        vibrator.vibrate(1000)

    def test_ctypes(self, *args):
        # The import itself is the test: verifies ctypes is bundled.
        import ctypes

    def test_numpy(self, *args):
        # Verifies numpy is bundled and functional.
        import numpy
        print(numpy.zeros(5))
        print(numpy.arange(5))
        print(numpy.random.random((3, 3)))
TestApp().run()
|
[
"rohaninjmu@gmail.com"
] |
rohaninjmu@gmail.com
|
cf7330a35aacb57aecc3cf237fab0a5660c9e136
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/1101-1200/1155-Number of Dice Rolls With Target Sum/1155-Number of Dice Rolls With Target Sum.py
|
f54e16cb49f5483bfd0bcd1a41d19b792bf96035
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 |
MIT
| 2020-10-02T12:47:47 | 2017-08-08T05:57:26 |
C++
|
UTF-8
|
Python
| false | false | 863 |
py
|
class Solution:
    def numRollsToTarget(self, d: int, f: int, target: int) -> int:
        """Count the ways d dice with faces 1..f can sum to target (mod 1e9+7).

        Classic 2-D dynamic programming: table[i][t] is the number of ways
        to reach sum t using exactly i dice.
        """
        MOD = 10 ** 9 + 7
        table = [[0] * (target + 1) for _ in range(d + 1)]
        table[0][0] = 1  # one way to reach sum 0 with zero dice
        for dice in range(1, d + 1):
            for total in range(1, target + 1):
                # The last die shows some face 1..min(f, total).
                for face in range(1, min(f, total) + 1):
                    table[dice][total] = (table[dice][total] + table[dice - 1][total - face]) % MOD
        return table[d][target]
class Solution2:
    def numRollsToTarget(self, d: int, f: int, target: int) -> int:
        """Space-optimized variant: keep only one row of the DP table.

        ways[t] holds the number of ways to reach sum t with the dice
        processed so far (mod 1e9+7).
        """
        MOD = 10 ** 9 + 7
        ways = [1] + [0] * target  # zero dice: only sum 0 is reachable
        for _ in range(d):
            nxt = [0] * (target + 1)
            for total in range(1, target + 1):
                nxt[total] = sum(ways[total - face] for face in range(1, min(f, total) + 1)) % MOD
            ways = nxt
        return ways[target]
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
efc48cf55cecc69f2b9a01cbc950890c053e3a77
|
31bc3fdc7c2b62880f84e50893c8e3d0dfb66fa6
|
/libraries/numpy/python_369/python_369/numpy_118/built_in_scalars/uint_.py
|
31601e10986c1a268eb3ab8a0b088f9f95f7615e
|
[] |
no_license
|
tpt5cu/python-tutorial
|
6e25cf0b346b8182ebc8a921efb25db65f16c144
|
5998e86165a52889faf14133b5b0d7588d637be1
|
refs/heads/master
| 2022-11-28T16:58:51.648259 | 2020-07-23T02:20:37 | 2020-07-23T02:20:37 | 269,521,394 | 0 | 0 | null | 2020-06-05T03:23:51 | 2020-06-05T03:23:50 | null |
UTF-8
|
Python
| false | false | 1,496 |
py
|
# https://numpy.org/doc/1.18/reference/arrays.scalars.html#built-in-scalar-types
import numpy as np
def what_is_uint():
    """Demonstrate NumPy's platform-dependent unsigned-integer aliases.

    ``np.uint`` and ``np.uintc`` are aliases for concrete scalar classes
    whose width follows the platform's C ``unsigned long`` / ``unsigned
    int``; on this machine they resolve to ``numpy.uint64`` and
    ``numpy.uint32``.  For a guaranteed width use an explicitly sized
    class such as ``np.uint8``.
    """
    # Alias identity checks (both print True on an LP64 platform).
    print(np.uint is np.uint64)
    print(np.uintc is np.uint32)

    # 1 fits comfortably in a C unsigned long, so no OverflowError here.
    ary = np.array(1, dtype=np.uint)
    print(ary.dtype)  # uint64

    # Values beyond the C long range raise instead:
    #print(int(10**50))  # 100000000000000000000000000000000000000000000000000
    #np.array(10**50, dtype=np.uint)  # OverflowError: Python int too large to convert to C long

    print(type(np.uint))  # <class 'type'>

    # Constructing scalars: the aliases and the sized classes agree.
    for make_scalar in (np.uint, np.uint32, np.uintc):
        print(type(make_scalar(10)))
    print(type(np.uint8(4)))  # <class 'numpy.uint8'>


if __name__ == '__main__':
    what_is_uint()
|
[
"uif93194@gmail.com"
] |
uif93194@gmail.com
|
2d192a9d9291492a2911fb5ad35382030baf8fc5
|
fad34b6b81e93850e6f408bbc24b3070e002997d
|
/Python-DM-Text Mining-01.py
|
e4b51fba0851281217136c06054f5f0570c357bf
|
[] |
no_license
|
Sandy4321/Latent-Dirichlet-Allocation-2
|
d60c14a3abb62e05a31aaac8c9a6d9381ec9d560
|
0bf6670643c7968064e375a287448b515b077473
|
refs/heads/master
| 2021-05-05T09:57:17.304046 | 2017-07-26T16:14:22 | 2017-07-26T16:14:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,149 |
py
|
############################################################################
# Created by: Prof. Valdecy Pereira, D.Sc.
# UFF - Universidade Federal Fluminense (Brazil)
# email: valdecy.pereira@gmail.com
# Course: Data Mining
# Lesson: Text Mining
# Citation:
# PEREIRA, V. (2017). Project: LDA - Latent Dirichlet Allocation, File: Python-DM-Text Mining-01.py, GitHub repository:
# <https://github.com/Valdecy/Latent-Dirichlet-Allocation>
############################################################################
# Installing Required Libraries
import numpy as np
import pandas as pd
from nltk.tokenize import RegexpTokenizer
from random import randint
# Function: lda_tm
def lda_tm(document=None, K=2, alpha=0.12, eta=0.01, iterations=5000, dtm_matrix=False, dtm_bin_matrix=False, dtm_tf_matrix=False, dtm_tfidf_matrix=False, co_occurrence_matrix=False, correl_matrix=False):
    """Fit an LDA topic model by collapsed Gibbs sampling.

    Parameters
    ----------
    document : list of str
        Raw documents; each is lower-cased and tokenized on ``\\w+``.
    K : int
        Number of topics.
    alpha, eta : float
        Dirichlet priors for the document-topic and topic-word distributions.
    iterations : int
        Number of Gibbs sweeps over the corpus.
    dtm_matrix, dtm_bin_matrix, dtm_tf_matrix, dtm_tfidf_matrix,
    co_occurrence_matrix, correl_matrix : bool
        When True, the corresponding matrix (as a DataFrame) is appended to
        the returned list, in that order, before theta and phi.

    Returns
    -------
    list
        The requested matrices, then ``theta`` (document-topic proportions,
        ndarray) and ``phi`` (word-topic probabilities, DataFrame indexed by
        vocabulary word).

    Fixes relative to the original implementation:
    * tokenization uses stdlib ``re`` (same ``\\w+`` pattern as the nltk
      RegexpTokenizer it replaces);
    * ``dtm_tf`` is also computed when only ``dtm_tfidf_matrix`` is set
      (previously a NameError);
    * the topic-word smoothing denominator uses the vocabulary size, not
      the number of documents (standard collapsed-Gibbs full conditional);
    * ``theta`` rows are normalized by a fixed row sum (previously the sum
      was recomputed mid-normalization, so rows did not sum to 1);
    * the sampler defaults to the last topic so a floating-point shortfall
      in the accumulated CDF cannot silently reuse a stale topic.
    """
    import re  # local import keeps the module's import block unchanged

    if document is None:
        document = []
    word_pattern = re.compile(r'\w+')
    result_list = []

    # Tokenized corpus: one list of lower-cased tokens per document.
    corpus = [word_pattern.findall(text.lower()) for text in document]

    # Vocabulary in first-appearance order, plus a word -> index lookup
    # (dict lookup replaces the original O(n*V) list scans).
    uniqueWords = []
    word_index = {}
    for tokens in corpus:
        for token in tokens:
            if token not in word_index:
                word_index[token] = len(uniqueWords)
                uniqueWords.append(token)

    # Same shape as corpus, every token replaced by its vocabulary id.
    corpus_id = [[word_index[token] for token in tokens] for tokens in corpus]

    # Per-token topic assignment; filled with random topics below.
    topic_assignment = [tokens[:] for tokens in corpus]

    n_docs = len(corpus)
    n_words = len(uniqueWords)

    # ---------- optional descriptive matrices ----------
    need_dtm = (dtm_matrix or dtm_bin_matrix or dtm_tf_matrix or
                dtm_tfidf_matrix or co_occurrence_matrix or correl_matrix)
    if need_dtm:
        # Document-term count matrix.
        dtm = np.zeros(shape=(n_docs, n_words))
        for d in range(n_docs):
            for word_id in corpus_id[d]:
                dtm[d][word_id] += 1
        if dtm_matrix:
            result_list.append(pd.DataFrame(dtm, columns=uniqueWords))

    if dtm_bin_matrix or co_occurrence_matrix or correl_matrix:
        # Binary (presence/absence) document-term matrix.
        dtm_bin = (dtm > 0).astype(float)
        if dtm_bin_matrix:
            result_list.append(pd.DataFrame(dtm_bin, columns=uniqueWords))

    if dtm_tf_matrix or dtm_tfidf_matrix:
        # Term frequencies: counts normalized per document. Also computed
        # when only the tf-idf matrix is requested (original raised
        # NameError in that case). Empty documents keep all-zero rows.
        row_totals = dtm.sum(axis=1, keepdims=True)
        dtm_tf = np.divide(dtm, row_totals, out=np.zeros_like(dtm), where=row_totals > 0)
        if dtm_tf_matrix:
            result_list.append(pd.DataFrame(dtm_tf, columns=uniqueWords))

    if dtm_tfidf_matrix:
        # idf = log10(#docs / document frequency); every vocab word appears
        # in at least one document, so the denominator is never zero.
        idf = np.log10(dtm.shape[0] / (dtm > 0).sum(axis=0))
        result_list.append(pd.DataFrame(dtm_tf * idf, columns=uniqueWords))

    if co_occurrence_matrix:
        # Word-by-word document co-occurrence counts.
        co_occurrence = np.dot(dtm_bin.T, dtm_bin)
        result_list.append(pd.DataFrame(co_occurrence, columns=uniqueWords, index=uniqueWords))

    if correl_matrix:
        # Upper-triangular Pearson correlations between presence vectors.
        correl = np.zeros(shape=(n_words, n_words))
        for i in range(n_words):
            for j in range(i, n_words):
                correl[i, j] = np.corrcoef(dtm_bin[:, i], dtm_bin[:, j])[0, 1]
        result_list.append(pd.DataFrame(correl, columns=uniqueWords, index=uniqueWords))

    # ---------- Gibbs sampler initialization ----------
    for d in range(n_docs):
        for w in range(len(topic_assignment[d])):
            topic_assignment[d][w] = randint(0, K - 1)

    # cdt[d, t]: tokens in document d currently assigned to topic t.
    cdt = np.zeros(shape=(n_docs, K))
    # cwt[t, v]: times vocabulary word v is currently assigned to topic t.
    cwt = np.zeros(shape=(K, n_words))
    for d in range(n_docs):
        for w in range(len(corpus[d])):
            t = topic_assignment[d][w]
            cdt[d, t] += 1
            cwt[t, corpus_id[d][w]] += 1

    # ---------- collapsed Gibbs sampling ----------
    for it in range(iterations + 1):
        for d in range(n_docs):
            for w in range(len(corpus[d])):
                old_t = topic_assignment[d][w]
                word_num = corpus_id[d][w]
                # Remove this token's current assignment from the counts.
                cdt[d, old_t] -= 1
                cwt[old_t, word_num] -= 1
                # Full conditional p(z = t | rest). The word-topic term is
                # smoothed by the VOCABULARY size (the original mistakenly
                # used the number of documents here).
                p_z = ((cwt[:, word_num] + eta) / (np.sum(cwt, axis=1) + n_words * eta)) \
                    * ((cdt[d, ] + alpha) / (cdt[d, ].sum() + K * alpha))
                p_z_ac = np.add.accumulate(p_z / np.sum(p_z))
                u = np.random.random_sample()
                # Inverse-CDF sampling; default to the last topic so a
                # float rounding shortfall cannot leave a stale topic.
                new_t = K - 1
                for m in range(K):
                    if u <= p_z_ac[m]:
                        new_t = m
                        break
                topic_assignment[d][w] = new_t
                cdt[d, new_t] += 1
                cwt[new_t, word_num] += 1
        if it % 100 == 0:
            print('iteration:', it)

    # ---------- posterior point estimates ----------
    # theta: document-topic proportions; normalize against a FIXED row sum
    # (the original recomputed the sum while overwriting the row, so rows
    # did not sum to 1).
    theta = cdt + alpha
    theta = theta / theta.sum(axis=1, keepdims=True)
    result_list.append(theta)

    # phi: topic-word probabilities, returned transposed (words as rows).
    phi = cwt + eta
    phi = phi / phi.sum(axis=1, keepdims=True)
    result_list.append(pd.DataFrame(phi.T, index=uniqueWords))
    return result_list
############### End of Function ##############
######################## Part 2 - Usage ####################################

# Nine toy documents: four about data mining, four about playing guitar,
# and one unrelated filler document.
doc_1 = "data mining technique data mining first favourite technique"
doc_2 = "data mining technique data mining second favourite technique"
doc_3 = "data mining technique data mining third favourite technique"
doc_4 = "data mining technique data mining fourth favourite technique"
doc_5 = "friday play guitar"
doc_6 = "saturday will play guitar"
doc_7 = "sunday will play guitar"
doc_8 = "monday will play guitar"
doc_9 = "good good indeed can thank"

# Compile Documents
docs = [doc_1, doc_2, doc_3, doc_4, doc_5, doc_6, doc_7, doc_8, doc_9]

# Fit a 3-topic model with 2500 Gibbs sweeps; also request the word
# co-occurrence matrix, which is returned first in the result list.
lda = lda_tm(document = docs, K = 3, alpha = 0.12, eta = 0.01, iterations = 2500, co_occurrence_matrix = True)
########################## End of Code #####################################
|
[
"noreply@github.com"
] |
Sandy4321.noreply@github.com
|
3e30a6a777fc7d9632db4589647703d42784d301
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-dgc/huaweicloudsdkdgc/v1/model/real_time_node_status.py
|
7221161869b508adcbdee1530355437f7d8e3e9e
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,485 |
py
|
# coding: utf-8
import re
import six
class RealTimeNodeStatus:
    """Status of a single node in a real-time DGC job (generated OpenAPI model).

    Plain data holder: one property per field, plus dict/JSON conversion
    and equality helpers.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'status': 'str',
        'log_path': 'str',
        'node_type': 'str'
    }

    attribute_map = {
        'name': 'name',
        'status': 'status',
        'log_path': 'logPath',
        'node_type': 'nodeType'
    }

    def __init__(self, name=None, status=None, log_path=None, node_type=None):
        """RealTimeNodeStatus - a model defined in huaweicloud sdk.

        All fields are optional; unset fields stay None.
        """
        self._name = None
        self._status = None
        self._log_path = None
        self._node_type = None
        self.discriminator = None

        if name is not None:
            self.name = name
        if status is not None:
            self.status = status
        if log_path is not None:
            self.log_path = log_path
        if node_type is not None:
            self.node_type = node_type

    @property
    def name(self):
        """Node name of this RealTimeNodeStatus.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the node name.

        :type name: str
        """
        self._name = name

    @property
    def status(self):
        """Status of this RealTimeNodeStatus.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the node status.

        :type status: str
        """
        self._status = status

    @property
    def log_path(self):
        """Log path of this RealTimeNodeStatus (serialized as ``logPath``).

        :rtype: str
        """
        return self._log_path

    @log_path.setter
    def log_path(self, log_path):
        """Set the log path.

        :type log_path: str
        """
        self._log_path = log_path

    @property
    def node_type(self):
        """Node type of this RealTimeNodeStatus (serialized as ``nodeType``).

        :rtype: str
        """
        return self._node_type

    @node_type.setter
    def node_type(self, node_type):
        """Set the node type.

        :type node_type: str
        """
        self._node_type = node_type

    def to_dict(self):
        """Return the model's properties as a dict, recursing into values
        that themselves expose ``to_dict``; sensitive fields are masked."""
        result = {}

        # py3 idiom: dict.items() replaces six.iteritems(), removing the
        # runtime dependency on `six` for this method.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        try:
            import simplejson as json
        except ImportError:
            # Fall back to the standard library when simplejson is absent.
            import json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RealTimeNodeStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e5cc439e0b34ca33b3d001bd387a2a4479ab0b82
|
5f332fd35e0071b6c33727b1ec7b22efefd4182a
|
/lib/sconstool/util/finder_.py
|
84618a3615bf73a74897f7ec62a7cd00982180de
|
[
"MIT"
] |
permissive
|
ptomulik/scons-tool-util
|
ec4bc3f6971feda4fa918632b0f0431faf96779b
|
daab1c7db087feb988a721bf10e6b5c29c0e02b2
|
refs/heads/master
| 2021-08-07T12:20:17.485348 | 2020-04-15T18:48:28 | 2020-04-15T18:48:28 | 153,349,258 | 1 | 0 | null | 2020-04-15T18:49:16 | 2018-10-16T20:23:55 |
Python
|
UTF-8
|
Python
| false | false | 5,721 |
py
|
# -*- coding: utf-8 -*-
"""Provides the :class:`.ToolFinder` class.
"""
from . import misc_
import os
__all__ = ('ToolFinder',)
class ToolFinder(object):
"""Callable object which searches for executables.
A single ToolFinder instance searches for a single file (program), for
example a compiler executable or script interpreter. The constructor
accepts several options, for each option there is corresponding
@property (read-only) with the same name.
:Example: Typical use in a tool module
.. code-block:: python
from sconstool.util import ToolFinder
foo = ToolFinder('foo')
def generate(env):
env.SetDefault(FOO=foo(env))
# ...
def exists(env):
return env.get('FOO', foo(env))
"""
__slots__ = ('_tool', '_kw')
_ctor_kwargs = ('name',
'path',
'pathext',
'reject',
'priority_path',
'fallback_path',
'strip_path',
'strip_priority_path',
'strip_fallback_path')
def __init__(self, tool, **kw):
"""
:param str tool:
symbolic name of the tool,
:keyword str,list name:
base name of the file (program name) being searched for,
may be a list of alternative program names,
:keyword str,list path:
search path to be used instead of the standard SCons PATH,
:keyword str,list pathext:
a list of file extensions to be considered as executable,
:keyword list reject:
a list of paths to be rejected,
:keyword str,list priority_path:
extra search path to be searched prior to :attr:`.path`,
:keyword str,list fallback_path:
extra search path to be searched after :attr:`.path`,
:keyword bool strip_path:
if ``True`` (default), the leading path, if it's in :attr:`path`
list, will be stripped from the returned file path,
:keyword bool strip_priority_path:
if ``True``, the leading path, if it's in **priority_path**
list, will be stripped from the returned file path;
:keyword bool strip_fallback_path:
if ``True``, the leading path, if it's in **fallback_path** list,
will be stripped from the returned file path.
"""
self._tool = str(tool)
misc_.check_kwargs('ToolFinder()', kw, self._ctor_kwargs)
self._kw = kw
@property
def tool(self):
"""Tool name, that was passed in to the c-tor as an argument.
:rtype: str
"""
return self._tool
def __call__(self, env):
"""Performs the actual search.
:param env:
a SCons environment; provides construction variables and the
``env.WhereIs()`` method to the :class:`.ToolFinder`.
:return:
depending on options chosen at object creation, a name or a
path to the executable file found. If the program can't be
found, ``None`` is returned.
:rtype: str
"""
return self._search(env)
def _whereis(self, env, prog, where):
path = getattr(self, where)
if path and not isinstance(path, str):
# this trick enables variable substitution in list entries
path = os.path.pathsep.join(path)
return env.WhereIs(prog, path, self.pathext, self.reject)
def _adjust_result(self, env, result, where):
prog = env.subst(result[0])
strip = getattr(self, 'strip_%s' % where)
if os.path.isabs(prog) or strip:
return prog
return result[1]
def _search_in(self, env, where):
progs = self.name
if isinstance(progs, str):
progs = [progs]
for prog in progs:
found = self._whereis(env, prog, where)
if found:
return self._adjust_result(env, (prog, found), where)
return None
def _search(self, env):
for where in ('priority_path', 'path', 'fallback_path'):
found = self._search_in(env, where)
if found:
return found
return None
@classmethod
def _add_getter(cls, attr, default=None, **kw):
if isinstance(default, property):
default = default.fget
kw['defaultattr'] = default.__name__
doc = """\
The value of **%(attr)s** keyword argument passed in to the
constructor at object creation, or ``self.%(defaultattr)s`` if the
argument was omitted.
:rtype: %(rtype)s
"""
else:
doc = """\
The value of **%(attr)s** keyword argument passed in to the
constructor at object creation, or ``%(default)r`` if the
argument was omitted.
:rtype: %(rtype)s
"""
kw = dict({'doc': doc}, **kw)
misc_.add_ro_dict_property(cls, '_kw', attr, default, **kw)
TF = ToolFinder
TF._add_getter('name', TF.tool, rtype='str')
TF._add_getter('path', rtype='str,list')
TF._add_getter('priority_path', [], rtype='str,list')
TF._add_getter('fallback_path', [], rtype='str,list')
TF._add_getter('pathext', rtype='str,list')
TF._add_getter('reject', [], rtype='list')
TF._add_getter('strip_path', True, rtype='bool')
TF._add_getter('strip_priority_path', False, rtype='bool')
TF._add_getter('strip_fallback_path', False, rtype='bool')
del TF
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set ft=python et ts=4 sw=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
f6325cdee89668b585f012a30c7130e6022150fc
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/clouddirectory_write_f/schema_delete.py
|
58f82c7195d72611e6c1e62d27b86b09d9f7b063
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 866 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the repository root importable before importing ``common``:
# dirname(__file__) is this script's directory, and the outer dirname
# climbs one level up to the project root.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    apply-schema : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/clouddirectory/apply-schema.html
    create-schema : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/clouddirectory/create-schema.html
    publish-schema : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/clouddirectory/publish-schema.html
    update-schema : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/clouddirectory/update-schema.html
    """
    # Delegate to the shared helper for the `clouddirectory delete-schema`
    # operation (semantics of write_parameter live in common.execute_command).
    write_parameter("clouddirectory", "delete-schema")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
7fb7c4d5f15747a600819c85ad9266779fdb129c
|
a676d918b568964d475a3ea25c79d446b1783abf
|
/Chap0/project/ex16.py
|
cbb9237d4682e1be0fb5529a2f836c5ce46caa04
|
[] |
no_license
|
AIHackerTest/SailingChen10_Py101-004
|
35d76d32e6a21c487ce8d48f974532fb38a05051
|
3c95e04f7d54529e897beec7652e089514ee6dd5
|
refs/heads/master
| 2021-05-15T00:32:35.407998 | 2017-09-12T08:31:02 | 2017-09-12T08:31:02 | 103,240,423 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 731 |
py
|
# coding = utf-8
# Learn-Python-the-Hard-Way ex16: overwrite a file with three typed lines.
from sys import argv

script, filename = argv

print("We're going to erase %r." % filename)
print("If you don't want that, hit CTRL-C (^C).")
print("If you do want that, hit RETURN.")
input("yes or no: ")

print("Opening the file...")
with open(filename, 'w') as target:
    print("Truncating the file. Goodbye!")
    target.truncate()

    print("Now I'm going to ask you for three lines.")
    lines = [input("line %d: " % i) for i in (1, 2, 3)]

    print("I'm going to write these to the file.")
    for line in lines:
        target.write(line)
        target.write("\n")

    print("And finally,we close it.")
|
[
"xiaowan5219@gmail.com"
] |
xiaowan5219@gmail.com
|
9b5b03a445f19ee80e1454f2b69ec50d24fc9858
|
febeffe6ab6aaa33e3a92e2dbbd75783a4e32606
|
/ssseg/cfgs/annnet/cfgs_voc_resnet101os8.py
|
c5a99a673dced76b76fc8e87509c725ef4b0e15f
|
[
"MIT"
] |
permissive
|
Junjun2016/sssegmentation
|
7bbc5d53abee1e0cc88d5e989e4cff5760ffcd09
|
bf7281b369e8d7fc2f8986caaeec3ec38a30c313
|
refs/heads/main
| 2023-02-04T22:09:13.921774 | 2020-12-23T06:28:56 | 2020-12-23T06:28:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,208 |
py
|
'''define the config file for voc and resnet101os8'''
from .base_cfg import *

# ---- dataset: point the base config at Pascal VOC ----
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(type='voc', set='trainaug', rootdir='/data/VOCdevkit/VOC2012')
DATASET_CFG['test'].update(type='voc', rootdir='/data/VOCdevkit/VOC2012')

# ---- dataloader: unchanged; copied so edits stay local to this config ----
DATALOADER_CFG = DATALOADER_CFG.copy()

# ---- optimizer: train for 60 epochs ----
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(max_epochs=60)

# ---- losses: unchanged ----
LOSSES_CFG = LOSSES_CFG.copy()

# ---- model: 21 output classes (20 VOC categories + background) ----
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(num_classes=21)

# ---- common: output locations for the training / testing runs ----
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    backupdir='annnet_resnet101os8_voc_train',
    logfilepath='annnet_resnet101os8_voc_train/train.log',
)
COMMON_CFG['test'].update(
    backupdir='annnet_resnet101os8_voc_test',
    logfilepath='annnet_resnet101os8_voc_test/test.log',
    resultsavepath='annnet_resnet101os8_voc_test/annnet_resnet101os8_voc_results.pkl',
)
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
8002b8bd33ebd7c6508328204e0bcaba4abfe848
|
c527df31f9daf06c36e8025b372d137ad9c1c4c7
|
/django/call_app/admin.py
|
e2561d3e10d41c1a4aa4e22c0d7feb735b07ee77
|
[] |
no_license
|
Katerina964/callback
|
8dc7d5d230d095ec32ce1d69c4648f4564e99f87
|
741fa58779413845ccc4e478ccc2b952c6d000a0
|
refs/heads/master
| 2023-03-30T17:55:08.611493 | 2021-04-13T08:53:30 | 2021-04-13T08:53:30 | 295,951,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
from django.contrib import admin

from .models import Crmaccount, Call, Customer

# Register every call-center model with the default admin site.
for _model in (Crmaccount, Call, Customer):
    admin.site.register(_model)
|
[
"katrin.balakina@gmail.com"
] |
katrin.balakina@gmail.com
|
c98f149bd94f32fc457a90250420211834c8d90c
|
a3d2620bbf25002c7b182600c2e40f8f06555e91
|
/django_time/lab13/order/migrations/0007_remove_product_product_views.py
|
c68818c51d3abadc681f55d7596f477885ffcfe3
|
[] |
no_license
|
alejo8591/backend-lab
|
782736a82933f705f825a1194369bfe13e86c0ec
|
4a02a9552083a7c877e91b0f8b81e37a8650cf54
|
refs/heads/master
| 2016-09-03T03:53:43.878240 | 2015-11-26T06:35:38 | 2015-11-26T06:35:38 | 3,911,349 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 361 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``product_views`` field added by
    migration 0006 from the ``product`` model."""

    dependencies = [
        ('order', '0006_product_product_views'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='product_views',
        ),
    ]
|
[
"alejo8591@gmail.com"
] |
alejo8591@gmail.com
|
01994540ff9ece71215b866941314e6004992e91
|
0c6100dc16291986fab157ed0437f9203f306f1b
|
/1101- 1200/1124.py
|
d5f6e0e4c103af58a54d2712db288e17c2355229
|
[] |
no_license
|
Matuiss2/URI-ONLINE
|
4c93c139960a55f7cc719d0a3dcd6c6c716d3924
|
6cb20f0cb2a6d750d58b826e97c39c11bf8161d9
|
refs/heads/master
| 2021-09-17T09:47:16.209402 | 2018-06-30T08:00:14 | 2018-06-30T08:00:14 | 110,856,303 | 13 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,008 |
py
|
def f(l1, l2, r1, r2):
    """Return True when two circles of radii r1 and r2 fit inside an
    l1 x l2 rectangle without overlapping each other."""
    # Remaining room for the two centers along each axis.
    slack_x = l1 - r1 - r2
    slack_y = l2 - r1 - r2
    if slack_x < 0 or slack_y < 0:
        # The two radii do not even fit along one side; rules out many
        # cases cheaply.
        return False
    # Centers placed in opposite corners must be at least r1 + r2 apart,
    # and each circle's diameter must fit across the shorter side.
    centers_far_enough = slack_x * slack_x + slack_y * slack_y >= (r1 + r2) * (r1 + r2)
    diameters_fit = min(l1, l2) >= 2 * max(r1, r2)
    return centers_far_enough and diameters_fit
def main():
    """Read four integers per line until the all-zero terminator, printing
    S when the two circles fit in the rectangle and N otherwise."""
    while True:
        # Input: one line with l1, l2, r1, r2.
        l1, l2, r1, r2 = (int(v) for v in input().split()[:4])
        if l1 + l2 + r1 + r2 <= 0:
            # All zeros ends the run, per the problem statement.
            break
        # Output
        print("S" if f(l1, l2, r1, r2) else "N")
    return 0


main()  # run immediately when the script is executed
[
"noreply@github.com"
] |
Matuiss2.noreply@github.com
|
bb02514e4e44722f691e3de8784fab89c79f2dd0
|
c116a7ab1fb1ec460f526cf8cefe0abd9eac1584
|
/py/lib/utils/metrics.py
|
589ada85cb53607b1ba8cb38042382b3b9c77d89
|
[
"Apache-2.0"
] |
permissive
|
ckunlun/LightWeightCNN
|
99a60b41d27a05bae6ad6ba7d2d537010f47726e
|
b3bed250520971c80bbc170958ff7f5b698be7cc
|
refs/heads/master
| 2022-10-09T02:24:54.620610 | 2020-06-08T08:34:25 | 2020-06-08T08:34:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,734 |
py
|
# -*- coding: utf-8 -*-
"""
@date: 2020/4/27 下午8:25
@file: metrics.py
@author: zj
@description:
"""
import torch
from thop import profile
from torchvision.models import AlexNet
from models.squeeze_net import SqueezeNet
from models.squeeze_net_bypass import SqueezeNetBypass
def compute_num_flops(model):
    """Estimate compute cost and parameter size of ``model``.

    Profiles one forward pass on a dummy 1x3x224x224 input via thop and
    returns ``(gflops, params_mb)``: one MAC counts as two FLOPs, and
    parameters are assumed to be float32 (4 bytes each).
    """
    dummy_input = torch.randn(1, 3, 224, 224)
    macs, num_params = profile(model, inputs=(dummy_input,), verbose=False)
    # print(macs, num_params)
    gflops = macs * 2.0 / pow(10, 9)
    params_mb = num_params * 4.0 / 1024 / 1024
    return gflops, params_mb
def topk_accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy for a batch of class predictions.

    Args:
        output: [N, C] tensor of per-class scores for N samples.
        target: [N] tensor of ground-truth class indices.
        topk: tuple of k values; one accuracy is returned per k.

    Returns:
        list of scalar tensors, each the percentage (0-100) of samples
        whose target is among the k highest-scoring classes.
    """
    assert len(output.shape) == 2 and output.shape[0] == target.shape[0]
    k_max = max(topk)
    batch_size = target.size(0)

    # Indices of the k_max highest-scoring classes; after the transpose
    # the shape is [k_max, N] with one column per sample.
    _, top_idx = output.topk(k_max, 1, largest=True, sorted=True)
    top_idx = top_idx.t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

    # A sample counts for k when its target appears in the first k rows.
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size) for k in topk]
if __name__ == '__main__':
    # Compare compute cost and parameter size of the three architectures.
    for name in ['alexnet', 'squeezenet', 'squeezenet-bypass']:
        if name == 'alexnet':
            model = AlexNet()
        elif name == 'squeezenet':
            model = SqueezeNet()
        else:
            model = SqueezeNetBypass()
        gflops, params_size = compute_num_flops(model)
        print('{}: {:.3f} GFlops - {:.3f} MB'.format(name, gflops, params_size))
|
[
"505169307@qq.com"
] |
505169307@qq.com
|
55bc765f2a5614073dcc33b1956bac232d9e27db
|
db52e7d3bcc78b60c8c7939bc95f07cab503b3d7
|
/GUI for NMT/runnmtui.py
|
bb81a21cd0de211568f7108da7ff99f4b1fb6ab0
|
[
"BSD-3-Clause"
] |
permissive
|
krzwolk/GroundHog
|
e2d495254f5794fdbc5a0de51cf49c76e51fdc6a
|
3d3e6ec9b30f3ae22bda28914c637c10050a472b
|
refs/heads/master
| 2020-12-24T18:55:18.983124 | 2016-02-09T17:20:05 | 2016-02-09T17:20:05 | 51,382,746 | 0 | 0 | null | 2016-02-09T17:09:35 | 2016-02-09T17:09:35 | null |
UTF-8
|
Python
| false | false | 210 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry-point wrapper for the NMT GUI: simply delegates to ``nmtui.main``."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function

from nmtui import main

main()
|
[
"krzysztof@wolk.pl"
] |
krzysztof@wolk.pl
|
256a7ddfba37eb808339ceb2846b338beba828fe
|
30e8e9365725fbdd7b0ee6660595eb8fa97b4a16
|
/Semi-Supervised Learning_GAN/code.py
|
a17a4879c9e6758d1716dbf6fe64f475233c9117
|
[] |
no_license
|
moileehyeji/Discussion
|
edf0945c75a45998b13f4a4fa214587ed9bc5a75
|
d502f45edadb178f14a21201707a6b1651932499
|
refs/heads/main
| 2023-05-06T15:15:00.567930 | 2021-06-04T05:59:20 | 2021-06-04T05:59:20 | 373,735,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,125 |
py
|
# https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/sgan/sgan.py
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs("images", exist_ok=True)  # destination for sampled images

# Training hyper-parameters exposed as CLI flags.
# NOTE: parse_args() runs at import time, so importing this module with
# unrelated sys.argv entries will abort on unknown flags.
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--num_classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args()
print(opt)
# is_available() already returns a bool; `True if ... else False` was redundant.
cuda = torch.cuda.is_available()
def weights_init_normal(m):
    """DCGAN-style initializer, applied via ``model.apply``.

    Conv layers get N(0, 0.02) weights; batch-norm layers get N(1, 0.02)
    weights and zero bias. Other module types are left untouched.
    """
    cls_name = type(m).__name__
    if "Conv" in cls_name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in cls_name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
    """SGAN generator: maps a latent vector to a (channels, img_size, img_size) image."""

    def __init__(self):
        super(Generator, self).__init__()

        # NOTE(review): label_emb is created but never used in forward();
        # presumably a leftover from a conditional-GAN variant — confirm.
        self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)

        self.init_size = opt.img_size // 4  # Initial size before upsampling
        # Project the latent vector to a 128-channel init_size x init_size map.
        self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))

        # Two 2x upsampling stages bring init_size back up to img_size.
        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            # Tanh outputs in [-1, 1], matching the Normalize([0.5], [0.5]) data.
            nn.Tanh(),
        )

    def forward(self, noise):
        """Generate images from latent ``noise`` of shape [N, latent_dim]."""
        out = self.l1(noise)
        # Reshape the flat projection into a [N, 128, init_size, init_size] map.
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img
class Discriminator(nn.Module):
    """SGAN discriminator with two heads: real/fake validity and class label."""

    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """Returns layers of each discriminator block: a stride-2 conv
            (halves spatial size), LeakyReLU, dropout, optional batch-norm."""
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block

        # Four stride-2 blocks: img_size is downsampled by 2**4 overall.
        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )

        # The height and width of downsampled image
        ds_size = opt.img_size // 2 ** 4

        # Output layers: validity in (0, 1) and a (num_classes + 1)-way class
        # distribution (the extra class is used for "fake").
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
        # NOTE(review): nn.Softmax() is created without an explicit dim, and
        # its output is later fed to CrossEntropyLoss, which itself expects
        # raw logits (it applies log-softmax internally) — confirm intent.
        self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt.num_classes + 1), nn.Softmax())

    def forward(self, img):
        """Return (validity, label) for a batch of images."""
        out = self.conv_blocks(img)
        out = out.view(out.shape[0], -1)  # flatten per sample
        validity = self.adv_layer(out)
        label = self.aux_layer(out)
        return validity, label
# Loss functions
adversarial_loss = torch.nn.BCELoss()
auxiliary_loss = torch.nn.CrossEntropyLoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    auxiliary_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
# os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Tensor aliases so the same code runs on CPU or GPU.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        batch_size = imgs.shape[0]
        # Adversarial ground truths; index opt.num_classes is the extra
        # "generated" class used by the auxiliary classifier.
        valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
        fake_aux_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(FloatTensor))
        labels = Variable(labels.type(LongTensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise and labels as generator input
        z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim))))
        # Generate a batch of images
        gen_imgs = generator(z)
        # Loss measures generator's ability to fool the discriminator
        validity, _ = discriminator(gen_imgs)
        g_loss = adversarial_loss(validity, valid)
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Loss for real images
        real_pred, real_aux = discriminator(real_imgs)
        d_real_loss = (adversarial_loss(real_pred, valid) + auxiliary_loss(real_aux, labels)) / 2
        # Loss for fake images (detach so no generator gradient flows in D's step)
        fake_pred, fake_aux = discriminator(gen_imgs.detach())
        d_fake_loss = (adversarial_loss(fake_pred, fake) + auxiliary_loss(fake_aux, fake_aux_gt)) / 2
        # Total discriminator loss
        d_loss = (d_real_loss + d_fake_loss) / 2
        # Calculate discriminator accuracy
        pred = np.concatenate([real_aux.data.cpu().numpy(), fake_aux.data.cpu().numpy()], axis=0)
        gt = np.concatenate([labels.data.cpu().numpy(), fake_aux_gt.data.cpu().numpy()], axis=0)
        d_acc = np.mean(np.argmax(pred, axis=1) == gt)
        d_loss.backward()
        optimizer_D.step()
        print(
            "[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
            % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), 100 * d_acc, g_loss.item())
        )
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            # Periodically dump a 5x5 grid of generated samples.
            save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
|
[
"noreply@github.com"
] |
moileehyeji.noreply@github.com
|
a74b092a468de49c8bc506b98a8a0ff2bf39b929
|
2ce18a0d8e106065b57136927e3e73b4fa82f8fa
|
/list-comprehension/changing-generators.py
|
29c62ce4f684f20d09f39b639c0d5fa5b0a8ddf9
|
[] |
no_license
|
ColinFendrick/python-data-science-toolbox
|
3eac02f3e65cf7e63f7c297f06a35ee7cbe92216
|
83a3d4614ef825302f1881b5b9a59e65db583a00
|
refs/heads/master
| 2021-01-02T19:06:18.395930 | 2020-02-17T17:07:44 | 2020-02-17T17:07:44 | 239,757,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
# Demonstration: generator expressions are consumed lazily, one value at a time.
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
lengths = (len(member) for member in lannister)
for member_length in lengths:
    print(member_length)
|
[
"colin.fendrick@gmail.com"
] |
colin.fendrick@gmail.com
|
5b96b98122a2782bb9492808fa86015dbce11b7a
|
8b5d68c9398186cae64dbcc5b293d62d69e1921d
|
/src/python/knowledge_base/readers/structured_data_reader.py
|
7036de83e51c53d32b65ca69040eabecd3cc8e46
|
[
"Apache-2.0"
] |
permissive
|
reynoldsm88/Hume
|
ec99df21e9b9651ec3cacfb8655a510ba567abc9
|
79a4ae3b116fbf7c9428e75a651753833e5bc137
|
refs/heads/master
| 2020-07-24T21:28:39.709145 | 2019-07-10T15:43:24 | 2019-07-10T15:43:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 295 |
py
|
import json
class StructuredDataReader:
    """Loads a structured knowledge base from a JSON file onto a KB object."""

    def __init__(self):
        pass

    def read(self, kb, structured_kb_file):
        """Parse ``structured_kb_file`` as JSON and attach it to ``kb``.

        Args:
            kb: knowledge-base object; gains/overwrites a ``structured_kb``
                attribute with the parsed JSON content.
            structured_kb_file: path to a JSON file.
        """
        # Was the Python-2-only `print "..."` statement; the function form is
        # valid on both Python 2 and 3 for a single argument.
        print("StructuredDataReader READ")
        with open(structured_kb_file) as f:
            structured_kb = json.load(f)
        kb.structured_kb = structured_kb
|
[
"hqiu@bbn.com"
] |
hqiu@bbn.com
|
1a329ea8b2e8fde9c9df6ee1fd947b58d49244a3
|
f42affa951cd292e42fa47b4f4c5bfdab5c21eeb
|
/paddle.py
|
5a3c751610cf1e19d060b380d81001011fc1d8fc
|
[] |
no_license
|
thepixelboy/pong-game
|
27e5432c9ee0080d2db3f2909591a0d2ef8d35c5
|
d79fea5f8fd85dc06b906375587514a317d32bae
|
refs/heads/main
| 2023-05-06T22:22:03.107087 | 2021-05-30T12:11:50 | 2021-05-30T12:11:50 | 372,206,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 577 |
py
|
from turtle import Turtle
# Vertical step size, in pixels, for a single paddle move.
DEFAULT_MOVE = 20
class Paddle(Turtle):
    """A white, vertically stretched pong paddle that steps up and down."""
    def __init__(self, position):
        super().__init__()
        # NOTE(review): this attribute shadows the inherited Turtle.position()
        # method; consider renaming if that method is ever needed on a Paddle.
        self.position = position
        self.create_paddle()
    def create_paddle(self):
        """Give the paddle its shape/colour and move it to its start position."""
        self.shape("square")
        self.color("white")
        self.penup()
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.goto(self.position)
    def go_up(self):
        """Step the paddle up by DEFAULT_MOVE pixels."""
        self.goto(self.xcor(), self.ycor() + DEFAULT_MOVE)
    def go_down(self):
        """Step the paddle down by DEFAULT_MOVE pixels."""
        self.goto(self.xcor(), self.ycor() - DEFAULT_MOVE)
|
[
"34570952+thepixelboy@users.noreply.github.com"
] |
34570952+thepixelboy@users.noreply.github.com
|
d029186d44f62f98b226e4323b39b616d5f990a0
|
fb97ccbd6aa0933f991c429c0e30081ce0f1fd90
|
/Python/_interview_cake/9_valid_bst.py
|
596335f493c2f0de60817cd5c0c1ec068d7cae43
|
[] |
no_license
|
01-Jacky/PracticeProblems
|
a6c9b1dabc794ca52624870e48dcb84b1b69af67
|
5714fdb2d8a89a68d68d07f7ffd3f6bcff5b2ccf
|
refs/heads/master
| 2022-03-23T12:24:13.834902 | 2019-12-31T08:11:19 | 2019-12-31T08:11:19 | 81,617,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,111 |
py
|
"""
Validate a BST
1)
Max of left sub tree must be < than root value
Min of right sub tree must be > than root value
"""
def is_bst(root, min=float('-inf'), max=float('inf')):
    """Recursively verify the BST property: every node's value must lie
    strictly between the bounds inherited from its ancestors.  An empty
    (sub)tree is valid."""
    if root is None:
        return True
    if not (min < root.value < max):
        return False
    # Left subtree is capped by this node's value; right subtree is floored by it.
    return is_bst(root.left, min, root.value) and is_bst(root.right, root.value, max)
def is_binary_search_tree(root):
    """Iteratively validate the BST property with an explicit DFS stack.

    Each stack entry carries the exclusive (lower, upper) bounds that the
    node's value must fall strictly between; duplicates are therefore
    rejected.  Returns True for an empty tree.
    """
    if root is None:
        # Fix: the original pushed None and crashed on node.value.
        return True
    node_and_bounds_stack = [(root, -float('inf'), float('inf'))]
    # depth-first traversal
    while node_and_bounds_stack:
        node, lower_bound, upper_bound = node_and_bounds_stack.pop()
        if (node.value <= lower_bound) or (node.value >= upper_bound):
            return False
        if node.left:  # this node must be less than the current node
            node_and_bounds_stack.append((node.left, lower_bound, node.value))
        if node.right:  # this node must be greater than the current node
            node_and_bounds_stack.append((node.right, node.value, upper_bound))
    # if none of the nodes were invalid, return true (at this point we have checked all nodes)
    return True
|
[
"hklee310@gmail.com"
] |
hklee310@gmail.com
|
7a460d9abfd96d7fe1447f44197a372f74d342bc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_overcompensating.py
|
2a2eb667f086df830e2666df3c95521102fec4ca
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
from xai.brain.wordbase.verbs._overcompensate import _OVERCOMPENSATE
# class header
class _OVERCOMPENSATING(_OVERCOMPENSATE, ):
 """Auto-generated word entry for the verb form "overcompensating"."""
 def __init__(self,):
  # Inherit behavior/data from the base lemma entry.
  _OVERCOMPENSATE.__init__(self)
  self.name = "OVERCOMPENSATING"
  self.specie = 'verbs'
  self.basic = "overcompensate"
  self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d99d576a058ef5956106984d6bfadfa650d180fb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03167/s367868270.py
|
31abb3c30c5fcaa1420f7b86a38e2c7adaa479cf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
from collections import deque

MOD = 10 ** 9 + 7


def solve(h, w, maze):
    """Count paths from (0,0) to (h-1,w-1) moving only right/down, avoiding
    '#' cells, modulo 1e9+7 (AtCoder DP contest "Grid 1").

    Args:
        h, w: grid height and width.
        maze: h lists of w single-character strings ('.' open, '#' wall).

    Returns:
        Number of distinct paths modulo 10**9 + 7 (0 if unreachable).
    """
    # counts[y][x] = number of counted paths reaching (x, y).
    counts = [[0] * w for _ in range(h)]
    counts[0][0] = 1
    que = deque([(0, 0)])
    # BFS visits cells in non-decreasing Manhattan distance, so when a cell is
    # popped every one of its predecessors has already finalized its count.
    while que:
        x, y = que.popleft()
        for dx, dy in ((1, 0), (0, 1)):
            nx, ny = x + dx, y + dy
            if nx >= w or ny >= h or maze[ny][nx] == '#':
                continue
            if counts[ny][nx] == 0:
                que.append((nx, ny))
            # Fix: the accumulation must happen for EVERY predecessor, not only
            # on first discovery, or paths through later predecessors are lost.
            # (The original also compared a [x, y] list against an (h-1, w-1)
            # tuple for early exit — always False — so that check is dropped.)
            counts[ny][nx] = (counts[ny][nx] + counts[y][x]) % MOD
    return counts[h - 1][w - 1] % MOD


if __name__ == "__main__":
    h, w = map(int, input().split())
    maze = [list(input()) for _ in range(h)]
    print(solve(h, w, maze))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
81fe7eadd2418caa75ad8188bf1b5777398c7eb8
|
24f664aa2344d4f5d5e7b048ac4e85231715c4c8
|
/datasets/github/scrape_repos/indexer.py
|
dd7a16e3b4940538eab982c9b84e8157e3e56d50
|
[] |
no_license
|
speycode/clfuzz
|
79320655e879d1e0a06a481e8ec2e293c7c10db7
|
f2a96cf84a7971f70cb982c07b84207db407b3eb
|
refs/heads/master
| 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,203 |
py
|
# Copyright 2018, 2019 Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Index ContentFiles from cloned GitHub repos."""
import multiprocessing
import os
import pathlib
import random
from datasets.github.scrape_repos import github_repo
from datasets.github.scrape_repos.preprocessors import preprocessors
from datasets.github.scrape_repos.proto import scrape_repos_pb2
from labm8.py import app
from labm8.py import humanize
from labm8.py import pbutil
FLAGS = app.FLAGS
# Number of worker processes used for indexing (defaults to the CPU count).
app.DEFINE_integer(
  "indexer_processes", os.cpu_count(), "The number of indexer processes to run."
)
# Path to the LanguageCloneList proto-text file describing what to index.
app.DEFINE_string("clone_list", None, "The path to a LanguageCloneList file.")
def ImportFromLanguage(
  language: scrape_repos_pb2.LanguageToClone, pool: multiprocessing.Pool
) -> None:
  """Import contentfiles from a language specification.

  Enumerates the cloned repos' .pbtxt metadata files under the language's
  destination directory, prunes those already indexed, and indexes the rest.

  Args:
    language: The language to import.
    pool: A multiprocessing pool.

  Raises:
    ValueError: If importer field not set.
  """
  if not language.importer:
    raise ValueError("LanguageToClone.importer field not set")
  app.Log(1, "Enumerating all repos ...")
  # Fix: iterdir() already yields paths prefixed with the destination
  # directory, so use them directly.  The previous
  # `Path(language.destination_directory / f)` join double-prefixed the
  # directory whenever it was a relative path.
  all_repos = [
    github_repo.GitHubRepo(f)
    for f in pathlib.Path(language.destination_directory).iterdir()
    if f.name.endswith(".pbtxt")
  ]
  app.Log(1, "Pruning indexed repos ...")
  num_repos = len(all_repos)
  repos_to_import = [repo for repo in all_repos if not repo.IsIndexed()]
  num_todo = len(repos_to_import)
  num_pruned = num_repos - num_todo
  # Shuffle so repeated partial runs don't always start on the same repos.
  random.shuffle(repos_to_import)
  app.Log(
    1,
    "Importing %s of %s %s repos ...",
    humanize.Commas(num_todo),
    humanize.Commas(num_repos),
    language.language.capitalize(),
  )
  for i, repo in enumerate(repos_to_import):
    repo.Index(
      list(language.importer),
      pool,
      github_repo.IndexProgress(num_pruned + i, num_repos),
    )
def main(argv):
  """Main entry point."""
  extra_args = argv[1:]
  if extra_args:
    raise app.UsageError("Unknown arguments '{}'".format(", ".join(extra_args)))
  clone_list_path = pathlib.Path(FLAGS.clone_list or "")
  if not clone_list_path.is_file():
    raise app.UsageError("--clone_list is not a file.")
  clone_list = pbutil.FromFile(
    clone_list_path, scrape_repos_pb2.LanguageCloneList()
  )
  # Fail fast if the config names a preprocessor that cannot be resolved.
  for language in clone_list.language:
    for importer in language.importer:
      for p in importer.preprocessor:
        preprocessors.GetPreprocessorFunction(p)
  pool = multiprocessing.Pool(FLAGS.indexer_processes)
  for language in clone_list.language:
    ImportFromLanguage(language, pool)


if __name__ == "__main__":
  app.RunWithArgs(main)
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
ab575baf490fda95031b2f5688a47b4869525d35
|
7d172bc83bc61768a09cc97746715b8ec0e13ced
|
/odoo/migrations/0006_auto_20170628_0402.py
|
09a34635f361cf04be7b163f3380f627c20f235a
|
[] |
no_license
|
shivam1111/jjuice
|
a3bcd7ee0ae6647056bdc62ff000ce6e6af27594
|
6a2669795ed4bb4495fda7869eeb221ed6535582
|
refs/heads/master
| 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-28 04:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames the DB table backing the PromotionCodes model
    # to the explicit name 'promotion_codes'.
    dependencies = [
        ('odoo', '0005_auto_20170618_1356'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='promotioncodes',
            table='promotion_codes',
        ),
    ]
|
[
"shivam1111@gmail.com"
] |
shivam1111@gmail.com
|
ace37ff10fc593ff551992e0b65900a9501b6d8a
|
e53c7d270e26bd0fac9dedadff9b4a4ff99110ec
|
/posts/views.py
|
7d49db83c9a42fea1d31c85f873eff532ba7c0cb
|
[] |
no_license
|
kydzoster/django-message_board
|
34b33c0c240bd1dbb21bb0500db791411cca8cc6
|
df8f038fc94b02e9ec9c51b8aab8307e1bc75848
|
refs/heads/master
| 2022-08-01T09:54:13.686456 | 2020-05-27T14:17:34 | 2020-05-27T14:17:34 | 267,337,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 253 |
py
|
from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView
from .models import Post
class HomePageView(ListView):
    """Home page: renders every Post through the 'home.html' template,
    exposing the queryset to the template as 'all_posts_list'."""

    template_name = 'home.html'
    context_object_name = 'all_posts_list'
    model = Post
|
[
"kydzoster@gmail.com"
] |
kydzoster@gmail.com
|
d0ae70451d70c0b7ffb35207c06faf07fc9c01d9
|
5801d65a93670ee89fc92fc59c3948765f8c028f
|
/loan_management/loan_management/doctype/customer_expenses/customer_expenses.py
|
7c1e3f0b7e1fa4eb980bd78431ed0448b4f6de7a
|
[
"MIT"
] |
permissive
|
staumoepeau/customer_loan
|
a9205476aa4646ba08f8531c27ecd43a21165f12
|
bb9f42160bc1e17085f000b15810892337dd0465
|
refs/heads/master
| 2021-01-22T04:23:16.804892 | 2018-11-27T21:09:28 | 2018-11-27T21:09:28 | 92,459,369 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Sione Taumoepeau and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CustomerExpenses(Document):
	"""Frappe DocType controller for Customer Expenses; no custom behavior yet."""
	pass
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
57800186771cb6665475f9ebaa02f58d2a3cb52f
|
a570561df345c055a8763aefb63a153ed2a4d002
|
/django/paper_tracker/papers/urls.py
|
9a3fbdfcd3d4f5104ea9f1c501953d1a8478b644
|
[
"MIT"
] |
permissive
|
kmod/paper_tracker
|
7b089613172788360d5401434e58a31740062577
|
43dc10286e8ea3d38b888403091d18549a8106d6
|
refs/heads/master
| 2020-12-24T12:34:32.130210 | 2016-11-29T22:52:36 | 2016-11-29T22:52:36 | 72,976,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 715 |
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # Flat list of every paper.
    url(r'^papers$', views.papers_index, name='papers_index'),
    # Landing page: list of collections.
    url(r'^$', views.collections_index, name='collections_index'),
    url(r'^collection/(?P<collection_id>[0-9]+)/$', views.collection, name='collection'),
    url(r'^paper/new$', views.paper_new, name='paper_new'),
    # url(r'^paper/(?P<paper_id>[0-9]+)$', views.paper, name='paper'),
    url(r'^paper/(?P<paper_id>[0-9]+)/find_pdf$', views.paper_findpdf, name='paper_findpdf'),
    url(r'^paper/(?P<paper_id>[0-9]+)/delete$', views.paper_delete, name='paper_delete'),
    # Edit a paper within the context of a specific collection.
    url(r'^collection/(?P<collection_id>[0-9]+)/edit/(?P<paper_id>[0-9]+)$', views.cpaper, name='cpaper'),
]
|
[
"kevmod@gmail.com"
] |
kevmod@gmail.com
|
c2a5bcff0bcc1420d7abd3fe87de544b2d01d220
|
5a7a3447d434a458a7bb63f2aa11b64c284d5492
|
/thread-ing/thread-test.py
|
837138ed0f016cd25779cf75bbc034ccf39bbfbd
|
[] |
no_license
|
woshimayi/mypython
|
35792e12036a7a05f12d3ef7006637b2b03f0e2e
|
7f1eb38e8585bf6d2f21d3ad0f64dace61425875
|
refs/heads/master
| 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 |
HTML
|
UTF-8
|
Python
| false | false | 1,130 |
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: woshimayi
@license: (C) Copyright 2015-2049, Node Supply Chain Manager Corporation Limited.
@contact: xxxxxxxx@qq.com
@software: garner
@file: thread-test.py
@time: 2020/8/6 17:12
@desc:
'''
import threading
import time
# Global flag polled by print_time(); a truthy value makes worker threads exit.
exitFlag = 0
class myThread (threading.Thread):
    """Worker thread that prints the current time five times, sleeping
    ``counter`` seconds between prints (delegates to print_time)."""
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID  # numeric id, informational only
        self.name = name  # thread name shown in the log lines
        self.counter = counter  # seconds to sleep between prints
    def run(self):
        print ("开始线程:" + self.name)
        # self.counter is passed as the *delay*; the literal 5 is the count.
        print_time(self.name, self.counter, 5)
        print ("退出线程:" + self.name)
def print_time(threadName, delay, counter):
    """Print the current time ``counter`` times, sleeping ``delay`` seconds
    between prints.  Aborts the calling thread if the global ``exitFlag``
    becomes truthy.

    Args:
        threadName: label printed alongside each timestamp.
        delay: seconds to sleep between prints.
        counter: number of timestamps to print.
    """
    while counter:
        print(exitFlag)
        if exitFlag:
            # Fix: the original called threadName.exit(), which would raise
            # AttributeError (strings have no exit()).  Raising SystemExit
            # terminates only the current thread, which matches the intent.
            raise SystemExit
        time.sleep(delay)
        print ("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1
# Create the worker threads
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
# Start them
thread1.start()
thread2.start()
# Block the main thread until both workers have finished
thread1.join()
thread2.join()
|
[
"woshidamayi@Gmail.com"
] |
woshidamayi@Gmail.com
|
26a8e7dd07b21f480488e1f4a850785dfd0f4f0d
|
f98c174d9011ed29cd8d304f0e4d7042b00d0233
|
/automaton/lib/autoplatform.py
|
40fe96107a47a44e1797c0eae35c56deb42b1d0e
|
[
"MIT"
] |
permissive
|
nemec/Automaton
|
10755e544a2004b31b55bf213c516001955a89f1
|
eea2f89dc10031fba45c80eb63053480dfc3543f
|
refs/heads/master
| 2020-12-24T15:04:49.102660 | 2016-01-04T20:23:35 | 2016-01-04T20:23:35 | 703,746 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,318 |
py
|
import platform as pl
import os
# pylint: disable-msg=C0103
# This module deals with platform-specific paths
# Set the platform we are currently running on
if pl.system().lower().startswith('windows'):
    platform = 'windows'
elif pl.system().lower().startswith('darwin'):
    # Darwin is the kernel name that macOS reports.
    platform = 'mac'
else:
    # Anything else (Linux, the BSDs, ...) is treated as 'linux'.
    platform = 'linux'
def get_dir_hierarchy():
    """Return settings directories in lookup order: personal, system, local."""
    return tuple(locate() for locate in (personaldir, systemdir, localdir))
def personaldir():
    """
    The per-user settings directory.

    On Windows this lives under %APPDATA%; elsewhere it is a dot-directory
    in the user's home.
    """
    if platform == 'windows':
        return os.path.join(os.environ['APPDATA'], 'automaton')
    return os.path.expanduser('~/.automaton/')
def systemdir():
    """
    The machine-wide settings directory.

    Usually /etc/automaton; on Windows a folder under Program Files.
    """
    if platform != 'windows':
        return "/etc/automaton/"
    return os.path.join(os.environ['ProgramFiles'], 'automaton')
def localdir():
    """
    The settings directory shipped alongside the Automaton modules.

    Resolves a symlinked module file first so the path is computed relative
    to the real module location (approach borrowed from the wxPython project).
    """
    module_path = __file__
    if os.path.islink(module_path):
        module_path = os.path.realpath(module_path)
    module_dir = os.path.dirname(os.path.abspath(module_path))
    return os.path.normpath(os.path.join(module_dir, "../settings/"))
def get_existing_file(filename, strict=False):
    """
    Search the directory hierarchy for a file/path named *filename*.

    If 'strict' is false, a path where the file could be created is returned
    when no existing, writable file is found.
    If 'strict' is true, returns None when there is no existing file.
    """
    # Pass 1: look for an already-existing, writable copy of the file.
    for directory in get_dir_hierarchy():
        if not os.path.exists(directory):
            continue
        candidate = os.path.join(directory, filename)
        if os.access(candidate, os.W_OK):
            return candidate
    if strict:
        return None
    # Pass 2: find (creating on demand) a writable directory to host the file.
    for directory in get_dir_hierarchy():
        if not os.path.exists(directory):
            try:
                os.mkdir(directory)
            except IOError:
                pass
        if os.access(directory, os.W_OK):
            return os.path.join(directory, filename)
    return None
|
[
"djnemec@gmail.com"
] |
djnemec@gmail.com
|
afb78213b6b7a098c79cada1633fcf560bcdde47
|
f156f2d94c1334b60afaab93fedb25da937af7a5
|
/world/models.py
|
90bbe5d05af7a6b3c2c4eb9441a8121432a07ae1
|
[] |
no_license
|
zeroam/geodjango
|
74e0484263b23a024f453ec5c7fa68da3a2ccbc5
|
b56a79ac22a126f11bbf6addbc734b6714f516cb
|
refs/heads/master
| 2020-04-19T09:32:28.141513 | 2019-01-29T07:58:18 | 2019-01-29T07:58:18 | 168,114,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 894 |
py
|
from django.contrib.gis.db import models
class WorldBorder(models.Model):
    """GeoDjango model for one country polygon from the world-borders shapefile."""
    # Regular Django fields corresponding to the attributes in the world borders shapefile.
    name = models.CharField(max_length=50)
    area = models.IntegerField()
    # NOTE(review): verbose name 'Polulation 2005' is misspelled; it is a
    # runtime string (admin label), so changing it belongs in a separate change.
    pop2005 = models.IntegerField('Polulation 2005')
    fips = models.CharField('FIPS Code', max_length=2)
    iso2 = models.CharField('2 Digit ISO', max_length=2)
    iso3 = models.CharField('3 Digit ISO', max_length=3)
    un = models.IntegerField('United Nation Code')
    region = models.IntegerField('Region Code')
    subregion = models.IntegerField('Sub-Region Code')
    lon = models.FloatField()
    lat = models.FloatField()
    # GeoDjango-specific: a geometry field (MultiPolygonField)
    mpoly = models.MultiPolygonField()
    # Returns the string representation of the model.
    def __str__(self):
        return self.name
|
[
"imdff0803@gmail.com"
] |
imdff0803@gmail.com
|
d70057826d20d1c2123c88d7b0b4fc2374b67a16
|
49536aafb22a77a6caf249c7fadef46d63d24dfe
|
/tensorflow/tensorflow/python/kernel_tests/matrix_solve_op_test.py
|
46c0c0de944b57256fb9fa5f616169edea2a8e3b
|
[
"Apache-2.0"
] |
permissive
|
wangzhi01/deeplearning-1
|
4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d
|
46ab82253d956953b8aa98e97ceb6cd290e82288
|
refs/heads/master
| 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,173 |
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
  """Functional tests for linalg_ops.matrix_solve against np.linalg.solve."""

  def _verifySolve(self, x, y, batch_dims=None):
    # Exercise every supported dtype, with and without adjoint, and with and
    # without feeding the operands through placeholders.
    for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
      if np_type == np.float32 or np_type == np.complex64:
        tol = 1e-5
      else:
        tol = 1e-12
      for adjoint in False, True:
        # Fixed: the original wrote `np_type is [np.float32, np.float64]`,
        # an identity test against a fresh list that is always False, and the
        # intended branch called `x.real()` although ndarray.real is a
        # property.  Real dtypes now drop the imaginary part explicitly
        # instead of relying on a warning-emitting complex->float cast.
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
        else:
          a = x.astype(np_type)
          b = y.astype(np_type)
        a_np = np.conj(np.transpose(a)) if adjoint else a
        if batch_dims is not None:
          a = np.tile(a, batch_dims + [1, 1])
          a_np = np.tile(a_np, batch_dims + [1, 1])
          b = np.tile(b, batch_dims + [1, 1])
        np_ans = np.linalg.solve(a_np, b)
        for use_placeholder in False, True:
          with self.test_session(use_gpu=True) as sess:
            if use_placeholder:
              a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
              b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
              tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
              out = sess.run(tf_ans, {a_ph: a, b_ph: b})
            else:
              tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
              out = tf_ans.eval()
              # Static shape is only known on the non-placeholder path.
              self.assertEqual(tf_ans.get_shape(), out.shape)
            self.assertEqual(np_ans.shape, out.shape)
            self.assertAllClose(np_ans, out, atol=tol, rtol=tol)

  def _generateMatrix(self, m, n):
    # Random complex m x n matrix with independent real and imaginary parts.
    matrix = (np.random.normal(-5, 5,
                               m * n).astype(np.complex128).reshape([m, n]))
    matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
        [m, n]))
    return matrix

  def testSolve(self):
    # Square systems of several sizes, each with 1, 2 and n right-hand sides.
    for n in 1, 2, 4, 9:
      matrix = self._generateMatrix(n, n)
      for nrhs in 1, 2, n:
        rhs = self._generateMatrix(n, nrhs)
        self._verifySolve(matrix, rhs)

  def testSolveBatch(self):
    # Same systems tiled across one or two leading batch dimensions.
    for n in 2, 5:
      matrix = self._generateMatrix(n, n)
      for nrhs in 1, n:
        rhs = self._generateMatrix(n, nrhs)
        for batch_dims in [[2], [2, 2], [7, 4]]:
          self._verifySolve(matrix, rhs, batch_dims=batch_dims)

  def testNonSquareMatrix(self):
    # When the solve of a non-square matrix is attempted we should return
    # an error
    with self.test_session(use_gpu=True):
      with self.assertRaises(ValueError):
        matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
        linalg_ops.matrix_solve(matrix, matrix)

  def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.test_session(use_gpu=True):
      matrix = constant_op.constant([[1., 0.], [0., 1.]])
      rhs = constant_op.constant([[1., 0.]])
      with self.assertRaises(ValueError):
        linalg_ops.matrix_solve(matrix, rhs)

  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session(use_gpu=True):
      with self.assertRaisesOpError("Input matrix is not invertible."):
        # All rows of the matrix below add to zero
        matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
                                       [0., -1., 1.]])
        linalg_ops.matrix_solve(matrix, matrix).eval()

  def testConcurrent(self):
    # Several solves sharing one session must not interfere with each other.
    with self.test_session(use_gpu=True) as sess:
      all_ops = []
      for adjoint_ in False, True:
        lhs1 = random_ops.random_normal([3, 3], seed=42)
        lhs2 = random_ops.random_normal([3, 3], seed=42)
        rhs1 = random_ops.random_normal([3, 3], seed=42)
        rhs2 = random_ops.random_normal([3, 3], seed=42)
        s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
        s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
        all_ops += [s1, s2]
      val = sess.run(all_ops)
      self.assertAllEqual(val[0], val[1])
      self.assertAllEqual(val[2], val[3])
class MatrixSolveBenchmark(test.Benchmark):
  """Benchmarks linalg_ops.matrix_solve on CPU (and GPU when available)."""
  # Shapes to benchmark; any leading dimensions are batch dimensions.
  matrix_shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1001, 1001),
      (1024, 1024),
      (2048, 2048),
      (513, 4, 4),
      (513, 16, 16),
      (513, 256, 256),
  ]
  def _GenerateTestData(self, matrix_shape, num_rhs):
    # Build a diagonally-dominant (hence well-conditioned) matrix and an
    # all-ones right-hand side, tiled across any leading batch dimensions.
    batch_shape = matrix_shape[:-2]
    matrix_shape = matrix_shape[-2:]
    assert matrix_shape[0] == matrix_shape[1]
    n = matrix_shape[0]
    matrix = (np.ones(matrix_shape).astype(np.float32) /
              (2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
    rhs = np.ones([n, num_rhs]).astype(np.float32)
    matrix = variables.Variable(
        np.tile(matrix, batch_shape + (1, 1)), trainable=False)
    rhs = variables.Variable(
        np.tile(rhs, batch_shape + (1, 1)), trainable=False)
    return matrix, rhs
  def benchmarkMatrixSolveOp(self):
    run_gpu_test = test.is_gpu_available(True)
    # Each (adjoint, shape, nrhs) combination gets a fresh graph and session.
    for adjoint in False, True:
      for matrix_shape in self.matrix_shapes:
        for num_rhs in 1, 2, matrix_shape[-1]:
          with ops.Graph().as_default(), \
              session.Session() as sess, \
              ops.device("/cpu:0"):
            matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
            x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(x),
                min_iters=25,
                store_memory_usage=False,
                name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
                      "adjoint_{adjoint}").format(
                          matrix_shape=matrix_shape,
                          num_rhs=num_rhs,
                          adjoint=adjoint))
          if run_gpu_test:
            with ops.Graph().as_default(), \
                session.Session() as sess, \
                ops.device("/gpu:0"):
              matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
              x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
              variables.global_variables_initializer().run()
              self.run_op_benchmark(
                  sess,
                  control_flow_ops.group(x),
                  min_iters=25,
                  store_memory_usage=False,
                  name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
                        "{num_rhs}_adjoint_{adjoint}").format(
                            matrix_shape=matrix_shape, num_rhs=num_rhs,
                            adjoint=adjoint))
if __name__ == "__main__":
test.main()
|
[
"hanshuobest@163.com"
] |
hanshuobest@163.com
|
284a051b4199ebc1e0859e2bc7ce26faacac59c5
|
b7a97c2919807983cd418d9262a1246fff9d95a1
|
/apps/feeder/models/order.py
|
77d0d82a6596d396f812baa2efc04c2fd78f327f
|
[] |
no_license
|
PUYUP/kirimsaran
|
da2f439c70979ab88ef2e62e3b2a73c2278ce077
|
250dddddc3d22429c26eed6bfeaf054666f0c110
|
refs/heads/main
| 2023-08-04T10:11:23.016982 | 2021-09-29T00:59:11 | 2021-09-29T00:59:11 | 397,851,918 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,508 |
py
|
from django.core.validators import RegexValidator
from django.db import models, transaction
from django.conf import settings
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from .abstract import AbstractCommonField
from ..utils import save_random_identifier
class AbstractOrder(AbstractCommonField):
    """A user's order against a broadcast/fragment, keyed by a short random
    alphanumeric identifier generated on first save."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='orders'
    )
    broadcast = models.ForeignKey(
        'feeder.Broadcast',
        on_delete=models.SET_NULL,
        related_name='orders',
        null=True,
        blank=True
    )
    fragment = models.ForeignKey(
        'feeder.Fragment',
        on_delete=models.SET_NULL,
        related_name='orders',
        null=True,
        blank=True
    )
    # Filled in save() on first insert via save_random_identifier().
    identifier = models.CharField(
        max_length=7,
        editable=False,
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9]*$',
                message=_("Can only contain the letters a-Z and 0-9."),
                code='invalid_identifier'
            ),
        ]
    )
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        # NOTE(review): broadcast is nullable; this raises AttributeError when
        # broadcast is None — confirm whether a fallback is wanted.
        return self.broadcast.label
    @transaction.atomic
    def save(self, *args, **kwargs):
        # Generate random identifier
        if not self.pk and not self.identifier:
            # We pass the model instance that is being saved
            self.identifier = save_random_identifier(self)
        return super().save(*args, **kwargs)
    @transaction.atomic
    def insert_meta(self, meta_dict):
        """Bulk-create OrderMeta rows from a list of kwargs dicts."""
        OrderMeta = apps.get_registered_model('feeder', 'OrderMeta')
        bulk_meta = []
        for meta in meta_dict:
            o = OrderMeta(order=self, **meta)
            bulk_meta.append(o)
        if len(meta_dict) > 0:
            try:
                OrderMeta.objects.bulk_create(
                    bulk_meta,
                    ignore_conflicts=False
                )
            except Exception as e:
                # NOTE(review): failures are only printed, never re-raised or
                # logged — confirm this best-effort behavior is intended.
                print(e)
    @transaction.atomic
    def insert_order_item(self, item_dict):
        """Bulk-create OrderItem rows for each dict that carries a 'target'."""
        OrderItem = apps.get_registered_model('feeder', 'OrderItem')
        bulk_item = []
        for item in item_dict:
            target = item.get('target', None)
            if target:
                o = OrderItem(order=self, target=target)
                bulk_item.append(o)
        if len(bulk_item) > 0:
            try:
                OrderItem.objects.bulk_create(
                    bulk_item,
                    ignore_conflicts=False
                )
            except Exception as e:
                # NOTE(review): errors swallowed after printing — confirm.
                print(e)
class AbstractOrderMeta(AbstractCommonField):
    # Free-form key/value metadata attached to an order.
    order = models.ForeignKey(
        'feeder.Order',
        on_delete=models.CASCADE,
        related_name='metas'
    )
    meta_key = models.CharField(max_length=255)
    meta_value = models.TextField()
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        return self.meta_key
class OrderItemManager(models.Manager):
    """Manager that snapshots the target's pricing data onto each item at
    bulk-creation time, so items keep their terms if the target later changes."""
    @transaction.atomic
    def bulk_create(self, objs, **kwargs):
        # Copy price/method/value from the linked target before insertion.
        for item in objs:
            tgt = getattr(item, 'target', None)
            if tgt:
                item.price = tgt.price
                item.method = tgt.method
                item.value = tgt.value
        return super().bulk_create(objs, **kwargs)
class AbstractOrderItem(AbstractCommonField):
    # A line item; price/method/value are snapshotted from the target so the
    # item keeps its terms even if the target changes or is deleted later.
    order = models.ForeignKey(
        'feeder.Order',
        on_delete=models.CASCADE,
        related_name='items'
    )
    target = models.ForeignKey(
        'feeder.Target',
        on_delete=models.SET_NULL,
        related_name='items',
        null=True,
        blank=True
    )
    price = models.IntegerField(default=0)
    method = models.CharField(max_length=255)
    value = models.CharField(max_length=255)
    objects = OrderItemManager()
    class Meta:
        abstract = True
        app_label = 'feeder'
        ordering = ['-create_at']
    def __str__(self) -> str:
        return str(self.price)
    @transaction.atomic
    def save(self, *args, **kwargs):
        """Copy pricing fields from the target on first save only."""
        # NOTE(review): assumes self.target is set on creation — confirm callers.
        if not self.pk:
            self.price = self.target.price
            self.method = self.target.method
            self.value = self.target.value
        return super().save(*args, **kwargs)
|
[
"hellopuyup@gmail.com"
] |
hellopuyup@gmail.com
|
9cbf73b313ceadb9a84a8983a41d3478ed5d80c4
|
bd4144e919786b4aded4345a2a69ed79e0922946
|
/1월 3주차/공통조상.py
|
2f554303324d304c1362d167d0514050064e797d
|
[] |
no_license
|
2020-ASW/kwoneyng-Park
|
670ee027a77c1559f808a51aaf58f27ab3bb85b9
|
3ef556889bbf3f2762c01fdfd10b59869d5e912f
|
refs/heads/master
| 2023-05-14T16:14:04.227511 | 2021-06-11T08:00:37 | 2021-06-11T08:00:37 | 321,286,504 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,294 |
py
|
from math import log2
def makeTree(cur, parent):
    """Recursive DFS: set each node's depth, fill the binary-lifting ancestor
    table dp, and accumulate subtree sizes into cnt. Returns cur's subtree size."""
    depth[cur] = depth[parent] + 1 # child depth = parent depth + 1
    dp[cur][0] = parent
    for i in range(1,mxL):
        upper = dp[cur][i-1] #1 2^n
        if upper == 0:
            break
        # 2^i-th ancestor = 2^(i-1)-th ancestor of the 2^(i-1)-th ancestor.
        dp[cur][i] = dp[upper][i-1]
        # dp[13][2] = dp[6][1]
    for child in narr[cur]:
        cnt[cur] += makeTree(child, cur)
    return cnt[cur]
def find(a,b):
    """Lowest common ancestor of a and b via the binary-lifting table dp:
    first equalise depths, then jump both nodes upward together."""
    if depth[a] == depth[b]:
        # start
        for i in range(mxL):
            if dp[a][i] == dp[b][i]:
                if i == 0:
                    return dp[a][0]
                # First shared ancestor found at jump i: recurse just below it.
                return find(dp[a][i-1], dp[b][i-1])
    if depth[a] < depth[b]:
        a,b = b,a
    for i in range(mxL):
        if depth[b] > depth[dp[a][i]]:
            # NOTE(review): when i == 0 this indexes dp[a][-1] — verify input
            # depths guarantee the very first jump never overshoots.
            return find(dp[a][i-1],b)
for T in range(1,int(input())+1):
    # v: vertices, e: edges, st/ed: query nodes.
    v,e,st,ed = map(int,input().split())
    data = list(map(int,input().split()))
    narr = [[] for _ in range(v+1)]
    mxL = int(log2(v))+1 # maximum number of binary-lifting jumps
    for i in range(e):
        narr[data[i*2]].append(data[i*2+1])
    depth = [0]*(v+1)
    depth[0] = -1
    dp = [[0]*mxL for _ in range(v+1)] # dp[node][2^n-th ancestor]
    cnt = [1]*(v+1)
    makeTree(1,0)
    # Answer: the LCA of st and ed, plus the size of its subtree.
    ans = find(st,ed)
    rs = cnt[ans]
    print(ans, rs)
|
[
"nan308@naver.com"
] |
nan308@naver.com
|
96f31bfeb86c80ba89858cec03aa42169c5c1f39
|
9f98ed0db445cd69e22eea9e6cfefa929111fe7f
|
/setup.py
|
8afab05cdee21e2c40619e9211f70e7c7243323a
|
[] |
no_license
|
zhuyoucai168/talospider
|
670c34fc75e709814c1dd9f9f72e0a21e07dee47
|
da4f0bdc6f6046c306be5c36d9016b74794823b0
|
refs/heads/master
| 2020-08-29T05:39:57.661905 | 2019-02-22T06:55:48 | 2019-02-22T06:55:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 470 |
py
|
#!/usr/bin/env python
from setuptools import find_packages, setup
# Packaging metadata for the talospider scraping micro-framework.
setup(
    name='talospider',
    version='0.0.6',
    author='Howie Hu',
    description="A simple,lightweight scraping micro-framework",
    author_email='xiaozizayang@gmail.com',
    install_requires=['lxml', 'requests', 'cchardet', 'cssselect'],
    url="https://github.com/howie6879/talospider/blob/master/README.md",
    packages=find_packages(),
    # Ship the bundled data files (e.g. user-agent lists) with the package.
    package_data={'talospider': ['utils/*.txt']})
|
[
"xiaozizayang@gmail.com"
] |
xiaozizayang@gmail.com
|
0121af025b75095b667e0d0416853d7206c880a4
|
ac83d1ddb84ecc904c73bdf779f458bd77efc98c
|
/src/programy/config/brain/binaries.py
|
730ef746b85832414db26d3fdd3828a61fc3a8a2
|
[
"MIT"
] |
permissive
|
secrecy27/chatbot
|
77829f32a15e17563f038663aebebdb71e52c5a7
|
e65a753cf665a4d6d97b57703431cba5331e4f0b
|
refs/heads/master
| 2022-07-24T08:39:57.788009 | 2020-07-16T03:55:21 | 2020-07-16T03:55:21 | 130,678,143 | 4 | 4 |
NOASSERTION
| 2022-07-06T19:49:14 | 2018-04-23T10:12:01 |
Python
|
UTF-8
|
Python
| false | false | 2,747 |
py
|
"""
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.section import BaseSectionConfigurationData
class BrainBinariesConfiguration(BaseSectionConfigurationData):
    """Configuration for saving/loading the brain as a binary dump."""
    def __init__(self):
        BaseSectionConfigurationData.__init__(self, "binaries")
        # Defaults used when no 'binaries' section is present in the config.
        self._save_binary = False
        self._load_binary = False
        self._binary_filename = None
        self._load_aiml_on_binary_fail = False
    @property
    def save_binary(self):
        return self._save_binary
    @property
    def load_binary(self):
        return self._load_binary
    @property
    def binary_filename(self):
        return self._binary_filename
    @property
    def load_aiml_on_binary_fail(self):
        return self._load_aiml_on_binary_fail
    def load_config_section(self, configuration_file, configuration, bot_root):
        """Populate fields from the 'binaries' config section, if present."""
        binaries = configuration_file.get_section("binaries", configuration)
        if binaries is not None:
            # NOTE(review): missing_value=None replaces the False defaults with
            # None when an option is absent — confirm that is intended.
            self._save_binary = configuration_file.get_option(binaries, "save_binary", missing_value=None)
            self._load_binary = configuration_file.get_option(binaries, "load_binary", missing_value=None)
            binary_filename = configuration_file.get_option(binaries, "binary_filename", missing_value=None)
            if binary_filename is not None:
                # Expand the configured path relative to the bot root directory.
                self._binary_filename = self.sub_bot_root(binary_filename, bot_root)
            self._load_aiml_on_binary_fail = configuration_file.get_option(binaries, "load_aiml_on_binary_fail", missing_value=None)
        else:
            YLogger.warning(self, "'binaries' section missing from bot config, using to defaults")
|
[
"secrecy418@naver.com"
] |
secrecy418@naver.com
|
582e6d7977304ec94ff5e09011134c56548fddee
|
8644a2174c3cb7ccfe211a5e49edffbcc3a74a46
|
/HackerrankSolutions/ProblemSolving/DataStructures/LinkedList/Easy/insert_node_doubly_ll.py
|
30a3ceddc485daee86a8b335eec39479fd28e2eb
|
[] |
no_license
|
bhavya2403/Learning-Python
|
9e7cc9dee21172321fb217cae27c8072357f71ce
|
3898211b357fbab320010a82a4811b68611d0422
|
refs/heads/main
| 2023-03-24T03:19:49.989965 | 2021-03-22T20:11:04 | 2021-03-22T20:11:04 | 315,962,811 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,170 |
py
|
class DoublyLinkedListNode:
    """A single node of a doubly linked list."""
    def __init__(self, node_data):
        self.data = node_data
        self.next = None
        self.prev = None


class DoublyLinkedList:
    """Doubly linked list supporting appends at the tail."""
    def __init__(self):
        self.head = None
        self.tail = None

    def insert_node(self, node_data):
        """Append a new node holding node_data at the tail."""
        node = DoublyLinkedListNode(node_data)
        if not self.head:
            self.head = node
        else:
            self.tail.next = node
            node.prev = self.tail
        self.tail = node


def sortedInsert(head, data):
    """Insert data into the sorted doubly linked list starting at head.

    Returns the (possibly new) head of the list. Assumes head is not None.
    """
    node = DoublyLinkedListNode(data)
    # New smallest value: node becomes the new head.
    if data < head.data:
        node.next = head
        head.prev = node
        node.prev = None
        head = node
        return head
    curr = head
    while curr:
        if curr.next is None:
            # Largest value: append at the tail.
            curr.next = node
            node.prev = curr
            node.next = None
            break
        if curr.data < data < curr.next.data or curr.data == data:
            # Splice between curr and curr.next. The successor's prev pointer
            # must be updated BEFORE curr.next is reassigned; the original code
            # reassigned curr.next first, so `curr.next.prev = node` set the
            # new node's prev to itself and broke every backward traversal.
            node.next = curr.next
            node.prev = curr
            curr.next.prev = node
            curr.next = node
            break
        curr = curr.next
    return head
|
[
"noreply@github.com"
] |
bhavya2403.noreply@github.com
|
3c40d89ebe58b12e75def3e0190a55e9fe582789
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py
|
84eaca27405633ca786ead28b974db2f7f527e5c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 3,652 |
py
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# ANN (Asymmetric Non-local Network) with a ResNet-50-D8 backbone trained on
# PASCAL VOC 2012 (augmented) under the default runtime and 20k-iter schedule.
_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
# VOC: 20 object classes + background = 21; override both segmentation heads.
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
c3b5005a2b16bf465392034a5dd8560026528ce1
|
9318b1885946f639f1446431abc6ec4fa33fc9ac
|
/Cisco_python/module_4/act-3.py
|
4a247a9c4472018c19b3a620743bb178d2405f56
|
[] |
no_license
|
mcewenar/PYTHON_INFO_I_BASIC
|
1d365bcd3d0186c8955e3cde2605831717d0a412
|
e5c3278969b420e7ce03bf7903cf57e63865aaca
|
refs/heads/master
| 2023-06-04T02:26:42.124304 | 2021-06-22T02:48:08 | 2021-06-22T02:48:08 | 326,510,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,371 |
py
|
#Tu tarea es escribir y probar una función que toma tres argumentos (un año, un mes y un día del mes)
#y devuelve el día correspondiente del año, o devuelve None si cualquiera de los argumentos no es válido.
#Debes utilizar las funciones previamente escritas y probadas. Agrega algunos casos de prueba al código.
#Esta prueba es solo el comienzo.
def isYearLeap(year):
    """Gregorian leap-year test: divisible by 4, excluding century years
    that are not divisible by 400."""
    if year % 4 != 0:
        return False
    return year % 100 != 0 or year % 400 == 0
def daysInMonth(year, month):
    """Number of days in (year, month), or None when the month is out of
    range or the year predates the Gregorian calendar (< 1582)."""
    if not (1 <= month <= 12) or year < 1582:
        return None
    if month == 2:
        return 29 if isYearLeap(year) else 28
    return 31 if month in (1, 3, 5, 7, 8, 10, 12) else 30
def dayOfYear(year, month, day):
    """1-based ordinal of (year, month, day) within its year, or None when
    any component is invalid."""
    total = 0
    # Sum the lengths of all months before the target month.
    for earlier in range(1, month):
        length = daysInMonth(year, earlier)
        if length is None:
            return None
        total += length
    length = daysInMonth(year, month)
    if length is None:
        return None
    if 1 <= day <= length:
        return total + day
    return None
# Simple REPL: prompt (in Spanish) until valid integers are supplied.
while True:
    try:
        x=int(input("Ingrese un año: "))
        y=int(input("Ingrese el mes: "))
        z=int(input("Ingrese el día: "))
        print(dayOfYear(x, y, z))
    except ValueError:
        # Non-numeric input: warn and ask again.
        print("No se permite ingresar datos alfanuméricos")
|
[
"dmcewena@hotmail.com"
] |
dmcewena@hotmail.com
|
a9ffbf6927f011eca02197d776c8cdf231525322
|
42ff36f4c6c967d2f39bf75c1f24c8b5911a8491
|
/whileloop.py
|
b570fd612b80873ea6da85ac6f2859b7c5ebf077
|
[] |
no_license
|
cal1log/python
|
0d47b688e619d0cdd464267225f76fff7d3101a4
|
c8196c40e5505d4e83301ada97dd384611660778
|
refs/heads/main
| 2023-06-29T13:11:31.869976 | 2021-07-27T22:36:04 | 2021-07-27T22:36:04 | 366,841,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 213 |
py
|
#!/usr/bin/env python3
i = 1
''' incremental while loop '''
# Prints the greeting five times, counting i up from 1 to 5.
while i <= 5:
    print('hello calilog')
    i += 1
print()
i = 5
''' decremental while loop '''
# Prints the greeting five times, counting i down from 5 to 1.
while i >= 1:
    print('hello calilog')
    i -= 1
|
[
"orlago250183@gmail.com"
] |
orlago250183@gmail.com
|
16bec49a939949dec19469329515808a53e2b58d
|
ddd35c693194aefb9c009fe6b88c52de7fa7c444
|
/Live 10.1.18/_NKFW2/ResettingMixerComponent.py
|
c2477c605980a00da5595cf0a5b14ce75043c10b
|
[] |
no_license
|
notelba/midi-remote-scripts
|
819372d9c22573877c7912091bd8359fdd42585d
|
e3ec6846470eed7da8a4d4f78562ed49dc00727b
|
refs/heads/main
| 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,013 |
py
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: C:\ProgramData\Ableton\Live 9.7 Suite\Resources\MIDI Remote Scripts\_NKFW2\ResettingMixerComponent.py
# Compiled at: 2017-10-14 18:54:45
from itertools import izip_longest
from _Framework.CompoundComponent import CompoundComponent
from _Framework.SubjectSlot import subject_slot
from ResettingChannelStripComponent import ResettingChannelStripComponent
from Utils import right_justify_track_components
# Module-level alias: the justification strategy used by _reassign_tracks.
justify_function = right_justify_track_components
class ResettingMixerComponent(CompoundComponent):
    """ ResettingMixerComponent works with a SlaveManager to control a group of
    ResettingChannelStripComponents. """
    def __init__(self, slave_manager, num_tracks=8, right_just_returns=True, name='Resetting_Mixer_Control', *a, **k):
        super(ResettingMixerComponent, self).__init__(name=name, *a, **k)
        self._right_justify_returns = bool(right_just_returns)
        self._channel_strips = []
        # One resetting strip per controllable track.
        for _ in xrange(num_tracks):
            strip = self.register_component(ResettingChannelStripComponent())
            self._channel_strips.append(strip)
        # Subscribe to the manager's track-offset changes and apply the
        # current offset immediately.
        self._reassign_tracks.subject = slave_manager
        self._reassign_tracks(slave_manager.track_offset)
    def set_reset_volume_buttons(self, buttons):
        """ Sets the buttons to use for resetting volume. """
        # izip_longest pairs leftover strips with None, clearing their button.
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_volume_button(button)
    def set_reset_pan_buttons(self, buttons):
        """ Sets the buttons to use for resetting pan. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_pan_button(button)
    def set_reset_send_a_buttons(self, buttons):
        """ Sets the buttons to use for resetting send A. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_send_a_button(button)
    def set_reset_send_b_buttons(self, buttons):
        """ Sets the buttons to use for resetting send B. """
        for strip, button in izip_longest(self._channel_strips, buttons or []):
            strip.set_reset_send_b_button(button)
    @subject_slot('track_offset')
    def _reassign_tracks(self, offset):
        """ Maps the tracks starting at offset onto the channel strips. """
        tracks = self._reassign_tracks.subject.tracks_to_use
        if self._right_justify_returns:
            justify_function(self.song(), tracks, offset, self._channel_strips)
        else:
            # Plain left-to-right mapping; strips past the end get no track.
            for index, comp in enumerate(self._channel_strips):
                track_offset = offset + index
                if track_offset in xrange(len(tracks)):
                    comp.set_track(tracks[track_offset])
                else:
                    comp.set_track(None)
        return
|
[
"notelba@example.com"
] |
notelba@example.com
|
c306ee028f03366c34bdca2afb22d77a7303c459
|
a6f70134a9bfdcc630e67a6d05c174d35496ada3
|
/Sum of Inverse of Numbers^n.py
|
bb4e17b7740256803b5bc189aaea48aee10de4d2
|
[] |
no_license
|
nauman-sakharkar/Python-2.x
|
9c0e9d9e5968631e44ab595175ddcbe0a1b615ad
|
31df433481d75c7b76a40b2fc372fa6fefbb779f
|
refs/heads/master
| 2022-10-08T17:20:46.387977 | 2020-06-10T07:36:03 | 2020-06-10T07:36:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 147 |
py
|
# Read how many terms to sum and the base number from the user.
n = int(input("Enter the Number Of Times = "))
q = int(input("Enter The Number = "))
# Accumulate the finite geometric series (1/q) + (1/q)**2 + ... + (1/q)**n.
# Renamed the accumulator from `sum`, which shadowed the built-in sum().
total = 0
for i in range(1, n + 1):
    total = total + ((1 / q) ** i)
print("", total)
|
[
"50130960+nauman-sakharkar@users.noreply.github.com"
] |
50130960+nauman-sakharkar@users.noreply.github.com
|
4ea666bc8e896fbcd40fb73d27e4e967147c0a7b
|
3e85618c79a1a934fec543e1327e772ca081a5b9
|
/N1226.py
|
f9cf0945dd8c9496a8325051fcd4c4ce8e6bba04
|
[] |
no_license
|
ghdus4185/SWEXPERT
|
72d79aa4a668452327a676a644b952bab191c79b
|
4dc74ad74df7837450de4ce55526dac7760ce738
|
refs/heads/master
| 2020-07-16T18:31:22.153239 | 2019-12-20T04:18:30 | 2019-12-20T04:18:30 | 205,843,190 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,138 |
py
|
import sys
# Redirect stdin so the test-case input is read from a local file.
sys.stdin = open('input.txt', 'r')
def find(x,y):
    """Iterative DFS from (x, y); sets the global `possible` to 1 when a goal
    cell (value 3) is reachable through corridor cells (value 0)."""
    global di, dj, maze, possible, check
    stack = []
    stack.append([x,y])
    while stack:
        n = stack.pop()
        for k in range(4):
            ni = n[0] + di[k]
            nj = n[1] + dj[k]
            # within the 16x16 board?
            if 0 <= ni < 16 and 0 <= nj < 16:
                if maze[ni][nj] == 3:
                    possible = 1
                    return possible
                if maze[ni][nj] == 0:
                    stack.append([ni, nj])
        # Mark the popped cell visited by turning it into a wall.
        maze[n[0]][n[1]] = 1
    return possible
# Up, down, left, right neighbour offsets.
di = [-1, 1, 0, 0]
dj = [0, 0, -1, 1]
for tc in range(1, 11):
    t = int(input())
    # Each maze row arrives as a digit string; split it into single digits.
    maze = [list(map(int, ' '.join(input()).split())) for _ in range(16)]
    # Find the start cell (value 2).
    res = 0
    for i in range(16):
        for j in range(16):
            if maze[i][j] == 2:
                res = 1
                break
        if res == 1:
            break
    # NOTE(review): `check` is allocated but never used by find().
    check = [[0]*16 for _ in range(16)]
    possible = 0
    find(i, j)
    if possible == 1:
        print('#{} 1'.format(t))
    else:
        print('#{} 0'.format(t))
|
[
"ckdghdus@naver.com"
] |
ckdghdus@naver.com
|
7ede643951e1f15dbbd488aee63423bae39dbced
|
33db9e6d0a73f2353747a4c9d3223d55a38730a8
|
/apps/first_app/models.py
|
9c9e3168c45565effb1144ef8f0ded356a58890e
|
[] |
no_license
|
philmccormick23/Likes-and-Books
|
94d778df265fe9b1645f783c83358617ca6fe0c0
|
0a9b18ceb7ce33a72334900e7f9f62b10d87a796
|
refs/heads/master
| 2020-04-02T15:14:46.314382 | 2018-10-24T19:46:10 | 2018-10-24T19:46:10 | 154,559,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 753 |
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    # Basic account record; timestamps are maintained automatically by Django.
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    email = models.EmailField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Books(models.Model):
    """A book with an uploading user and many-to-many "likes" from users."""
    name = models.CharField(max_length=255)
    desc = models.CharField(max_length=255)
    # Uploader; PROTECT prevents deleting a user who still has uploads.
    upload = models.ForeignKey(User, null=True,related_name="codingdojo", on_delete=models.PROTECT)
    # Fixed: was `models.ManyToMfanyField`, an AttributeError at import time.
    users = models.ManyToManyField(User, related_name="likes")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
[
"phillipmccormick@Phillips-MacBook-Pro.local"
] |
phillipmccormick@Phillips-MacBook-Pro.local
|
0c21269cec3d106c781ee734f3a60a7415c78889
|
1792509a9accac11c837e2a18dcb3d34f1d7e30e
|
/client/category.py
|
edfa52d0b3eb42271cc8d9e90fe84f84bc763d38
|
[] |
no_license
|
kafura-kafiri/herb
|
2d3166b94e5fdacd106d6c4bc21d09f6c9cf568e
|
48329a0059e2843c72ad2d85e7bb31379f0042e5
|
refs/heads/master
| 2020-04-09T09:35:03.720161 | 2018-12-17T11:02:25 | 2018-12-17T11:02:25 | 160,238,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 478 |
py
|
import requests
# Local API endpoint for the category collection.
url = 'http://localhost:5000/categories/'
# NOTE(review): headers is defined but never passed to the requests calls below.
headers = {'content-type': 'application/json'}
# Sample fixtures: each category is a title plus its ancestor path.
_categories = [
    {
        'ancestors': ['a', 'b', 'c'],
        'title': 'd'
    }, {
        'ancestors': ['x', 'y'],
        'title': 'z'
    }
]
def fill():
    """Reset the category collection (the '*' endpoint), then POST each
    fixture to the '+' endpoint and print the server's response."""
    requests.post(url + '*')
    print()
    print('categories >>')
    for category in _categories:
        # NOTE(review): str(category) sends a Python repr, not JSON — confirm
        # the server parses this format.
        response = requests.post(url + '+', data={'json': str(category)})
        print(response.content)
|
[
"kafura.kafiri@gmail.com"
] |
kafura.kafiri@gmail.com
|
28cb89506c201fba276f34362a75f76ce01ffe95
|
f6d2385cd8eb896e17c5e72ac75abe6a0ba28659
|
/greffe1/essais.py
|
9f4aebeaa116aa85140e83a9274bb4de511e3b61
|
[] |
no_license
|
pastrouveedespeudo/greffegreffe
|
fba94c9169c3d021714eabf1a45812ca762cfe9d
|
8ebe4d555246aed26e705671014a260a23148a6a
|
refs/heads/master
| 2020-06-12T14:50:17.590418 | 2019-07-04T14:01:25 | 2019-07-04T14:01:25 | 194,335,511 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
from fonction import function
from fonction import ecrire
from fonction import lecture
from fonction import ecrire2
# Download the Yahoo front page, persist it to disk, read it back, then write
# the display copy.
page = 'https://fr.yahoo.com/?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS8&guce_referrer_sig=AQAAAMdlxFFv1CpIEQ0VuhLMZl4pjm_0Ur2KGpLoKBkg4lBqmzqdwLxulK-E29QEXf815EL1VsURfRYB-M3USUSs2fFR6tT63nGaOfQyk5mY4V9AltWx-EzQiluy32sS5KxDY0lQRsL6YmEXNMq4qWdOpBoyt2T6KtkfK9Bce2Dt8ViB'
page = function(page)
page = ecrire(page)
page_affichage = lecture()
# Fixed: `ececrire2` was a typo that raised NameError; the imported helper
# (see the imports at the top of this file) is `ecrire2`.
ecrire2(page_affichage)
|
[
"noreply@github.com"
] |
pastrouveedespeudo.noreply@github.com
|
79258c9426d558486274c453e5f1e7bd0cbb4a0a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/validPalindrome_20200803230103.py
|
15299f1055653cb18098fa47a7ef7af4c4238410
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
import re
def palindrome(str):
    """Return True if *str* reads the same forwards and backwards, ignoring
    case and every non-alphanumeric character (classic valid-palindrome check).

    The original draft lower-cased the input, stripped punctuation and
    reversed the words, but never returned a result; this completes the check.
    """
    # Lower-case first, then drop everything that is not a-z or 0-9.
    cleaned = re.sub(r'[^a-z0-9]', '', str.lower())
    return cleaned == cleaned[::-1]

palindrome("A man, a plan, a canal: Panama")
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ca7095ab3d8c8f9a438a75a24c6495f62b664b90
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_geologies.py
|
84e56d430f9355cad6a66d3b9a709b593d67b684
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 247 |
py
|
from xai.brain.wordbase.nouns._geology import _GEOLOGY
# class header
class _GEOLOGIES(_GEOLOGY, ):
    """Plural-form noun entry for "geology"; behaviour comes from _GEOLOGY."""
    def __init__(self,):
        _GEOLOGY.__init__(self)
        # Same attribute values as before, word data grouped first.
        self.basic = "geology"
        self.name = "GEOLOGIES"
        self.jsondata = {}
        self.specie = 'nouns'
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
84eef6cc65ec245e27db562aaabcc91b480142bb
|
bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75
|
/Hash Table/logger_rate_limiter.py
|
2d6dcba5dcff6b6585ced12fe6631fc0e2af2b74
|
[] |
no_license
|
harvi7/Leetcode-Problems-Python
|
d3a5e8898aceb11abc4cae12e1da50061c1d352c
|
73adc00f6853e821592c68f5dddf0a823cce5d87
|
refs/heads/master
| 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 636 |
py
|
class Logger:
    def __init__(self):
        """Initialize an empty message -> last-printed-timestamp map."""
        self._msg_dict = {}

    def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
        """Rate limiter: allow a message only if it has never been printed or
        at least 10 seconds have passed since it was last printed (timestamps
        are in whole seconds). Returning True records the new timestamp; a
        suppressed message does not update its entry.
        """
        last_seen = self._msg_dict.get(message)
        if last_seen is not None and timestamp - last_seen < 10:
            return False
        self._msg_dict[message] = timestamp
        return True
|
[
"iamharshvirani7@gmail.com"
] |
iamharshvirani7@gmail.com
|
377e0a1762965418f5d2a4d4871feeed710a71e8
|
565f95f207f49d987bdc372cd80942be95451731
|
/python/misc/hlgrep
|
d76c0a07ff98af04d294df10bef1e8a2e4b4256a
|
[] |
no_license
|
dustin/snippets
|
76724c2131546bddd35a80da16921a44a49e2262
|
5be535890f9e71e298fec601d55c469c542ea330
|
refs/heads/master
| 2023-08-18T22:01:39.546961 | 2022-12-29T07:39:40 | 2022-12-29T07:39:40 | 18,840 | 18 | 4 | null | 2013-01-06T08:10:39 | 2008-05-22T07:58:19 |
C
|
UTF-8
|
Python
| false | false | 306 |
#!/usr/bin/env python
import sys
import posix
import re
# Fetch the terminal's standout-mode on/off escape sequences via tput.
# NOTE(review): posix.popen is long-deprecated; subprocess is the modern choice.
smso=posix.popen("tput smso").read()
rmso=posix.popen("tput rmso").read()
# Wrap the user-supplied pattern in a capture group so each match can be
# re-emitted between the highlight codes.
expression=re.compile("(" + sys.argv[1] + ")")
# Echo stdin to stdout, highlighting every match in standout mode.
l=sys.stdin.readline()
while l != '':
    s=expression.sub(smso + '\\1' + rmso, l)
    sys.stdout.write(s)
    l=sys.stdin.readline()
|
[
"dustin@spy.net"
] |
dustin@spy.net
|
|
a35928309c1fa5bf69a6928dedc88f21e8e1bf73
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/examples/chartsheet.py
|
3edbd9dbfe920d08f7d3e6d4ecf08d471cba16e0
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 |
NOASSERTION
| 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null |
UTF-8
|
Python
| false | false | 1,774 |
py
|
#######################################################################
#
# An example of creating an Excel chart in a chartsheet with Python
# and XlsxWriter.
#
# Copyright 2013-2019, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('chartsheet.xlsx')
# Add a worksheet to hold the data.
worksheet = workbook.add_worksheet()
# Add a chartsheet. A worksheet that only holds a chart.
chartsheet = workbook.add_chartsheet()
# Add a format for the headings.
bold = workbook.add_format({'bold': 1})
# Add the worksheet data that the charts will refer to.
headings = ['Number', 'Batch 1', 'Batch 2']
data = [
    [2, 3, 4, 5, 6, 7],
    [10, 40, 50, 20, 10, 50],
    [30, 60, 70, 50, 40, 30],
]
# Headings go across row 1; each inner list becomes one column (A, B, C).
worksheet.write_row('A1', headings, bold)
worksheet.write_column('A2', data[0])
worksheet.write_column('B2', data[1])
worksheet.write_column('C2', data[2])
# Create a new bar chart.
chart1 = workbook.add_chart({'type': 'bar'})
# Configure the first series.
chart1.add_series({
    'name': '=Sheet1!$B$1',
    'categories': '=Sheet1!$A$2:$A$7',
    'values': '=Sheet1!$B$2:$B$7',
})
# Configure a second series. Note use of alternative syntax to define ranges.
# The list form is [sheetname, first_row, first_col, last_row, last_col].
chart1.add_series({
    'name': ['Sheet1', 0, 2],
    'categories': ['Sheet1', 1, 0, 6, 0],
    'values': ['Sheet1', 1, 2, 6, 2],
})
# Add a chart title and some axis labels.
chart1.set_title ({'name': 'Results of sample analysis'})
chart1.set_x_axis({'name': 'Test number'})
chart1.set_y_axis({'name': 'Sample length (mm)'})
# Set an Excel chart style.
chart1.set_style(11)
# Add the chart to the chartsheet.
chartsheet.set_chart(chart1)
# Display the chartsheet as the active sheet when the workbook is opened.
chartsheet.activate();
workbook.close()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
f485b1fe84144a2e6e02f8c6db683e8241399c64
|
831fe3255ab2dd7abb9fc79a21756012d57cb863
|
/projects/nerf/nerf/raymarcher.py
|
3be73d32299a15739202136510193efb2809c1ef
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
ksengin/pytorch3d
|
3e84365ed2499c11ef5a443c4ab28bda85e71f7e
|
1fffa20541c9fa3248e02473bee294724922d989
|
refs/heads/master
| 2021-05-16T21:24:36.150263 | 2021-03-19T07:07:23 | 2021-03-19T07:07:23 | 250,474,512 | 0 | 0 |
NOASSERTION
| 2020-03-27T08:00:17 | 2020-03-27T08:00:17 | null |
UTF-8
|
Python
| false | false | 2,796 |
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d.renderer import EmissionAbsorptionRaymarcher
from pytorch3d.renderer.implicit.raymarching import (
_check_density_bounds,
_check_raymarcher_inputs,
_shifted_cumprod,
)
class EmissionAbsorptionNeRFRaymarcher(EmissionAbsorptionRaymarcher):
    """
    This is essentially the `pytorch3d.renderer.EmissionAbsorptionRaymarcher`
    which additionally returns the rendering weights. It also skips returning
    the computation of the alpha-mask which is, in case of NeRF, equal to 1
    everywhere.
    The weights are later used in the NeRF pipeline to carry out the importance
    ray-sampling for the fine rendering pass.
    For more details about the EmissionAbsorptionRaymarcher please refer to
    the documentation of `pytorch3d.renderer.EmissionAbsorptionRaymarcher`.
    """
    def forward(
        self,
        rays_densities: torch.Tensor,
        rays_features: torch.Tensor,
        eps: float = 1e-10,
        **kwargs,
    ) -> "tuple[torch.Tensor, torch.Tensor]":
        """
        Args:
            rays_densities: Per-ray density values represented with a tensor
                of shape `(..., n_points_per_ray, 1)` whose values range in [0, 1].
            rays_features: Per-ray feature values represented with a tensor
                of shape `(..., n_points_per_ray, feature_dim)`.
            eps: A lower bound added to `rays_densities` before computing
                the absorbtion function (cumprod of `1-rays_densities` along
                each ray). This prevents the cumprod to yield exact 0
                which would inhibit any gradient-based learning.
        Returns:
            features: A tensor of shape `(..., feature_dim)` containing
                the rendered features for each ray.
            weights: A tensor of shape `(..., n_points_per_ray)` containing
                the ray-specific emission-absorbtion distribution.
                Each ray distribution `(..., :)` is a valid probability
                distribution, i.e. it contains non-negative values that integrate
                to 1, such that `weights.sum(dim=-1)==1).all()` yields `True`.
        """
        _check_raymarcher_inputs(
            rays_densities,
            rays_features,
            None,
            z_can_be_none=True,
            features_can_be_none=False,
            density_1d=True,
        )
        _check_density_bounds(rays_densities)
        # Drop the trailing singleton channel: (..., n_pts, 1) -> (..., n_pts).
        rays_densities = rays_densities[..., 0]
        # Transmittance: cumulative product of (1 - density) shifted by the
        # surface thickness; eps keeps the cumprod away from an exact zero.
        absorption = _shifted_cumprod(
            (1.0 + eps) - rays_densities, shift=self.surface_thickness
        )
        weights = rays_densities * absorption
        # Expected feature along each ray under the weight distribution.
        features = (weights[..., None] * rays_features).sum(dim=-2)
        return features, weights
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
ada1ac04d0162f1f086d1ebfc1bb718c67f74aee
|
2a34a824e1a2d3bac7b99edcf19926a477a157a0
|
/src/cr/vision/core/colors.py
|
2865015e52642389b5b3c74caf559bef6dda8111
|
[
"Apache-2.0"
] |
permissive
|
carnotresearch/cr-vision
|
a7cb07157dbf470ed3fe560ef85d6e5194c660ae
|
317fbf70c558e8f9563c3d0ba3bebbc5f84af622
|
refs/heads/master
| 2023-04-10T22:34:34.833043 | 2021-04-25T13:32:14 | 2021-04-25T13:32:14 | 142,256,002 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,002 |
py
|
'''
List of common colors in b g r format
'''
ALICEBLUE = (255, 248, 240)
ANTIQUEWHITE = (215, 235, 250)
AQUA = (255, 255, 0)
AQUAMARINE = (212, 255, 127)
AZURE = (255, 255, 240)
BEIGE = (220, 245, 245)
BISQUE = (196, 228, 255)
BLACK = (0, 0, 0)
BLANCHEDALMOND = (205, 235, 255)
BLUE = (255, 0, 0)
BLUEVIOLET = (226, 43, 138)
BROWN = (42, 42, 165)
BURLYWOOD = (135, 184, 222)
CADETBLUE = (160, 158, 95)
CHARTREUSE = (0, 255, 127)
CHOCOLATE = (30, 105, 210)
CORAL = (80, 127, 255)
CORNFLOWERBLUE = (237, 149, 100)
CORNSILK = (220, 248, 255)
CRIMSON = (60, 20, 220)
CYAN = (255, 255, 0)
DARKBLUE = (139, 0, 0)
DARKCYAN = (139, 139, 0)
DARKGOLDENROD = (11, 134, 184)
DARKGRAY = (169, 169, 169)
DARKGREEN = (0, 100, 0)
DARKGREY = (169, 169, 169)
DARKKHAKI = (107, 183, 189)
DARKMAGENTA = (139, 0, 139)
DARKOLIVEGREEN = (47, 107, 85)
DARKORANGE = (0, 140, 255)
DARKORCHID = (204, 50, 153)
DARKRED = (0, 0, 139)
DARKSALMON = (122, 150, 233)
DARKSEAGREEN = (143, 188, 143)
DARKSLATEBLUE = (139, 61, 72)
DARKSLATEGRAY = (79, 79, 47)
DARKSLATEGREY = (79, 79, 47)
DARKTURQUOISE = (209, 206, 0)
DARKVIOLET = (211, 0, 148)
DEEPPINK = (147, 20, 255)
DEEPSKYBLUE = (255, 191, 0)
DIMGRAY = (105, 105, 105)
DIMGREY = (105, 105, 105)
DODGERBLUE = (255, 144, 30)
FIREBRICK = (34, 34, 178)
FLORALWHITE = (240, 250, 255)
FORESTGREEN = (34, 139, 34)
FUCHSIA = (255, 0, 255)
GAINSBORO = (220, 220, 220)
GHOSTWHITE = (255, 248, 248)
GOLD = (0, 215, 255)
GOLDENROD = (32, 165, 218)
GRAY = (128, 128, 128)
GREEN = (0, 128, 0)
GREENYELLOW = (47, 255, 173)
GREY = (128, 128, 128)
HONEYDEW = (240, 255, 240)
HOTPINK = (180, 105, 255)
INDIANRED = (92, 92, 205)
INDIGO = (130, 0, 75)
IVORY = (240, 255, 255)
KHAKI = (140, 230, 240)
LAVENDER = (250, 230, 230)
LAVENDERBLUSH = (245, 240, 255)
LAWNGREEN = (0, 252, 124)
LEMONCHIFFON = (205, 250, 255)
LIGHTBLUE = (230, 216, 173)
LIGHTCORAL = (128, 128, 240)
LIGHTCYAN = (255, 255, 224)
LIGHTGOLDENRODYELLOW = (210, 250, 250)
LIGHTGRAY = (211, 211, 211)
LIGHTGREEN = (144, 238, 144)
LIGHTGREY = (211, 211, 211)
LIGHTPINK = (193, 182, 255)
LIGHTSALMON = (122, 160, 255)
LIGHTSEAGREEN = (170, 178, 32)
LIGHTSKYBLUE = (250, 206, 135)
LIGHTSLATEGRAY = (153, 136, 119)
LIGHTSLATEGREY = (153, 136, 119)
LIGHTSTEELBLUE = (222, 196, 176)
LIGHTYELLOW = (224, 255, 255)
LIME = (0, 255, 0)
LIMEGREEN = (50, 205, 50)
LINEN = (230, 240, 250)
MAGENTA = (255, 0, 255)
MAROON = (0, 0, 128)
MEDIUMAQUAMARINE = (170, 205, 102)
MEDIUMBLUE = (205, 0, 0)
MEDIUMORCHID = (211, 85, 186)
MEDIUMPURPLE = (219, 112, 147)
MEDIUMSEAGREEN = (113, 179, 60)
MEDIUMSLATEBLUE = (238, 104, 123)
MEDIUMSPRINGGREEN = (154, 250, 0)
MEDIUMTURQUOISE = (204, 209, 72)
MEDIUMVIOLETRED = (133, 21, 199)
MIDNIGHTBLUE = (112, 25, 25)
MINTCREAM = (250, 255, 245)
MISTYROSE = (225, 228, 255)
MOCCASIN = (181, 228, 255)
NAVAJOWHITE = (173, 222, 255)
NAVY = (128, 0, 0)
OLDLACE = (230, 245, 253)
OLIVE = (0, 128, 128)
OLIVEDRAB = (35, 142, 107)
ORANGE = (0, 165, 255)
ORANGERED = (0, 69, 255)
ORCHID = (214, 112, 218)
PALEGOLDENROD = (170, 232, 238)
PALEGREEN = (152, 251, 152)
PALETURQUOISE = (238, 238, 175)
PALEVIOLETRED = (147, 112, 219)
PAPAYAWHIP = (213, 239, 255)
PEACHPUFF = (185, 218, 255)
PERU = (63, 133, 205)
PINK = (203, 192, 255)
PLUM = (221, 160, 221)
POWDERBLUE = (230, 224, 176)
PURPLE = (128, 0, 128)
RED = (0, 0, 255)
ROSYBROWN = (143, 143, 188)
ROYALBLUE = (225, 105, 65)
SADDLEBROWN = (19, 69, 139)
SALMON = (114, 128, 250)
SANDYBROWN = (96, 164, 244)
SEAGREEN = (87, 139, 46)
SEASHELL = (238, 245, 255)
SIENNA = (45, 82, 160)
SILVER = (192, 192, 192)
SKYBLUE = (235, 206, 135)
SLATEBLUE = (205, 90, 106)
SLATEGRAY = (144, 128, 112)
SLATEGREY = (144, 128, 112)
SNOW = (250, 250, 255)
SPRINGGREEN = (127, 255, 0)
STEELBLUE = (180, 130, 70)
TAN = (140, 180, 210)
TEAL = (128, 128, 0)
THISTLE = (216, 191, 216)
TOMATO = (71, 99, 255)
TURQUOISE = (208, 224, 64)
VIOLET = (238, 130, 238)
WHEAT = (179, 222, 245)
WHITE = (255, 255, 255)
WHITESMOKE = (245, 245, 245)
YELLOW = (0, 255, 255)
YELLOWGREEN = (50, 205, 154)
|
[
"shailesh@indigits.com"
] |
shailesh@indigits.com
|
2915daa920a718772f982608d13e1abbe0e0de8f
|
96e76bcb634e0e48bcf3ae352eb235ed9bc32b36
|
/app/migrations/0020_news_date_and_time.py
|
abf6be4f45c0a5659dc925a56654a48eab0b5a70
|
[] |
no_license
|
Ectroverse/EctroverseDjango
|
cef8a8a2149271c0995f1b60676f636e5dfc23ec
|
a3dad97b4e7a89694248c21df75ebdcc37e975f0
|
refs/heads/master
| 2023-04-18T21:12:20.062646 | 2021-04-28T11:06:01 | 2021-04-28T11:06:01 | 291,338,914 | 1 | 3 | null | 2021-01-23T14:32:21 | 2020-08-29T19:50:33 |
Python
|
UTF-8
|
Python
| false | false | 409 |
py
|
# Generated by Django 3.1 on 2021-01-24 16:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0019_auto_20210124_1638'),
]
operations = [
migrations.AddField(
model_name='news',
name='date_and_time',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
[
"vsavko@gmail.com"
] |
vsavko@gmail.com
|
6c58b0de7a6aaa29da887706c57a87152a52622a
|
7208db50a22368c335e7d7d8b37a3fedb09c60e5
|
/cairis/gui/ResponsesDialog.py
|
462cb9196456757bb3e23ec2869fb3380d5121b9
|
[
"Apache-2.0"
] |
permissive
|
nebloc/cairis
|
41c7f20af56c46bddcb3927dc4aa410f6477e6ed
|
1277a148a270d5471b59fc238aa6590bc1d3044e
|
refs/heads/master
| 2020-03-24T03:51:11.908096 | 2018-07-27T16:07:36 | 2018-07-27T16:07:36 | 142,434,768 | 0 | 0 |
Apache-2.0
| 2018-07-26T11:58:24 | 2018-07-26T11:58:24 | null |
UTF-8
|
Python
| false | false | 4,350 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
import cairis.core.Risk
from ResponseDialog import ResponseDialog
from DialogClassParameters import DialogClassParameters
from ResponseDialogParameters import ResponseDialogParameters
from AcceptEnvironmentPanel import AcceptEnvironmentPanel
from TransferEnvironmentPanel import TransferEnvironmentPanel
from MitigateEnvironmentPanel import MitigateEnvironmentPanel
from DimensionBaseDialog import DimensionBaseDialog
from cairis.core.ARM import *
__author__ = 'Shamal Faily'
class ResponsesDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,RESPONSES_ID,'Responses',(800,300),'response.png')
self.theMainWindow = parent
idList = [RESPONSES_LISTRESPONSES_ID,RESPONSES_BUTTONADD_ID,RESPONSES_BUTTONDELETE_ID]
columnList = ['Name','Type']
self.buildControls(idList,columnList,self.dbProxy.getResponses,'response')
listCtrl = self.FindWindowById(RESPONSES_LISTRESPONSES_ID)
listCtrl.SetColumnWidth(0,300)
def addObjectRow(self,mitListCtrl,listRow,response):
mitListCtrl.InsertStringItem(listRow,response.name())
mitListCtrl.SetStringItem(listRow,1,response.__class__.__name__)
def onAdd(self,evt):
try:
riskDict = self.dbProxy.getDimensionNames('risk')
if (len(riskDict) == 0):
dlg = wx.MessageDialog(self,'Cannot mitigate for non-existing risks','Add response',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
responseTypes = ['Accept','Transfer','Mitigate']
from DimensionNameDialog import DimensionNameDialog
rtDlg = DimensionNameDialog(self,'response',responseTypes,'Select',(300,200))
if (rtDlg.ShowModal() == DIMNAME_BUTTONACTION_ID):
responseType = rtDlg.dimensionName()
responsePanel = MitigateEnvironmentPanel
if (responseType == 'Accept'):
responsePanel = AcceptEnvironmentPanel
elif (responseType == 'Transfer'):
responsePanel = TransferEnvironmentPanel
addParameters = ResponseDialogParameters(RESPONSE_ID,'Add response',ResponseDialog,RESPONSE_BUTTONCOMMIT_ID,self.dbProxy.addResponse,True,responsePanel,responseType)
self.addObject(addParameters)
rtDlg.Destroy()
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add response',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
try:
selectedObjt = self.objts[self.selectedLabel]
responseType = selectedObjt.responseType()
responsePanel = MitigateEnvironmentPanel
if (responseType == 'Accept'):
responsePanel = AcceptEnvironmentPanel
elif (responseType == 'Transfer'):
responsePanel = TransferEnvironmentPanel
updateParameters = ResponseDialogParameters(RESPONSE_ID,'Edit response',ResponseDialog,RESPONSE_BUTTONCOMMIT_ID,self.dbProxy.updateResponse,False,responsePanel,responseType)
self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit response',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy
def onDelete(self,evt):
try:
self.dbProxy.associateGrid(self.theMainWindow.FindWindowById(ID_REQGRID))
self.deleteObject('No response','Delete response',self.dbProxy.deleteResponse)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete response',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
|
[
"shamal.faily@googlemail.com"
] |
shamal.faily@googlemail.com
|
3d9cb190898bb0de72ad98aa055083f485cc3c08
|
f07392633118f7f6aff0a5a9b2a5c9eaab1a0299
|
/Examples/packaging/Capitalize/capitalize/capital_mod.py
|
2b4d8147fce933a404c366dee5112a3e807866e5
|
[] |
no_license
|
UWPCE-PythonCert/Py300
|
afc4abca736cfea031292db6bed996465f37604f
|
7f93d20ae66ba9a56c4dcc0c1fdafcf79db15349
|
refs/heads/master
| 2020-05-26T13:43:38.098926 | 2018-03-05T07:11:48 | 2018-03-05T07:11:48 | 85,002,542 | 4 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,092 |
py
|
#!/usr/bin/env python
"""
A really simple module, just to demonstrate packaging
"""
def capitalize_line(instr):
"""
capitalizes the input string
:param instr: the string to capitalize it should be a single line.
:type instr: string
:returns: a capitalized version of instr
"""
return " ".join( word.capitalize() for word in instr.split() )
def capitalize(infilename, outfilename):
"""
reads the contents of infilename, and writes it to outfilename, but with
every word capitalized
note: very primitive -- it will mess some files up!
this is called by the capitalize script
:param infilename: The file name you want to process
:type infilename: string
:param outfilename: the name of the new file that will be created
:type outfilename: string
:returns: None
:raises: IOError if infilename doesn't exist.
"""
infile = open(infilename, 'U')
outfile = open(outfilename, 'w')
for line in infile:
outfile.write(capitalize_line(line))
outfile.write("\n")
return None
|
[
"PythonCHB@gmail.com"
] |
PythonCHB@gmail.com
|
6dce8ab5aa0b8bd0c0ee86d7753accc09fc9c3a9
|
8fa8ded3772dd7a124c1bbb91fc109ed2b63574b
|
/mycelium/apps/groups/migrations/0024_auto__add_field_grouprule_account__add_field_group_account.py
|
6efb4463b3442d5bbdba8fed2d4d0f47a61bb622
|
[] |
no_license
|
skoczen/mycelium
|
3642b0f5e5ea03d609a3e499c7ad68092101dce0
|
da0f169163f4dc93e2dc2b0d934abf4f18c18af0
|
refs/heads/master
| 2020-04-10T09:21:46.893254 | 2014-05-20T02:27:06 | 2014-05-20T02:27:06 | 2,114,887 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,761 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("accounts", "0004_create_old_data_account"),
)
def forwards(self, orm):
# Adding field 'GroupRule.account'
db.add_column('groups_grouprule', 'account', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['accounts.Account']), keep_default=False)
# Adding field 'Group.account'
db.add_column('groups_group', 'account', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['accounts.Account']), keep_default=False)
def backwards(self, orm):
# Deleting field 'GroupRule.account'
db.delete_column('groups_grouprule', 'account_id')
# Deleting field 'Group.account'
db.delete_column('groups_group', 'account_id')
models = {
'accounts.account': {
'Meta': {'ordering': "('name',)", 'object_name': 'Account'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Plan']"}),
'subdomain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'accounts.plan': {
'Meta': {'ordering': "('name',)", 'object_name': 'Plan'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'groups.group': {
'Meta': {'ordering': "('name',)", 'object_name': 'Group'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'qi_simple_searchable_search_field': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rules_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'groups.grouprule': {
'Meta': {'ordering': "('group', 'id')", 'object_name': 'GroupRule'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'left_side': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.LeftSide']", 'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.Operator']", 'null': 'True', 'blank': 'True'}),
'right_side_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rules.RightSideType']", 'null': 'True', 'blank': 'True'}),
'right_side_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rules.leftside': {
'Meta': {'ordering': "('order',)", 'object_name': 'LeftSide'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'add_closing_paren': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allowed_operators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rules.Operator']", 'symmetrical': 'False'}),
'allowed_right_side_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rules.RightSideType']", 'symmetrical': 'False'}),
'choices': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'query_string_partial': ('django.db.models.fields.TextField', [], {})
},
'rules.operator': {
'Meta': {'ordering': "('order',)", 'object_name': 'Operator'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'query_string_partial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'use_filter': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'rules.rightsidetype': {
'Meta': {'object_name': 'RightSideType'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['groups']
|
[
"steven@quantumimagery.com"
] |
steven@quantumimagery.com
|
133eb8ff9bdd88c775a362eb91c937e712aea0bb
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/workloads/setup.py
|
4eb445f63f094bb7c215e4c40b08e0266e1db3d9
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 |
MIT
| 2023-09-14T10:48:57 | 2017-10-11T16:27:31 |
Python
|
UTF-8
|
Python
| false | false | 1,704 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '0.1.0a1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='workloads',
version=VERSION,
description='Microsoft Azure Command-Line Tools Workloads Extension.',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions/tree/main/src/workloads',
classifiers=CLASSIFIERS,
packages=find_packages(exclude=["tests"]),
package_data={'azext_workloads': ['azext_metadata.json']},
install_requires=DEPENDENCIES
)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
0b7dfd99cdf13d9ecafbc21d8fc4f34870cc081b
|
e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d
|
/crawlers/urllib2s/urllib2_posts.py
|
e58f5ccf7fb07608478bd5d3e0cbb37eff0ded44
|
[] |
no_license
|
neuroph12/nlpy
|
3f3d1a8653a832d6230cb565428ee0c77ef7451d
|
095976d144dacf07414bf7ee42b811eaa67326c1
|
refs/heads/master
| 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 257 |
py
|
import urllib
import urllib2
url = 'http://www.douban.com/accounts/login'
values = {'form_email': '',
'form_password': ''}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
resp = urllib2.urlopen(req)
html = resp.read()
print(html)
|
[
"anderscui@gmail.com"
] |
anderscui@gmail.com
|
66259f17ed43af8cc07fab9f59f2c6e11087508a
|
e84f8bcf2ea91ac12f9850a6f487b8b6bff09235
|
/pyfr/backends/cuda/types.py
|
0cc8c1b194cd8f1297244c06bf5c39a0ec500c80
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
Aerojspark/PyFR
|
2bdbbf8a1a0770dc6cf48100dc5f895eb8ab8110
|
b59e67f3aa475f7e67953130a45f264f90e2bb92
|
refs/heads/master
| 2021-01-14T08:51:48.893378 | 2014-09-01T15:02:28 | 2014-09-01T15:02:28 | 24,884,060 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,726 |
py
|
# -*- coding: utf-8 -*-
import collections
import itertools as it
import numpy as np
import pycuda.driver as cuda
import pyfr.backends.base as base
class CUDAMatrixBase(base.MatrixBase):
def onalloc(self, basedata, offset):
self.basedata = int(basedata)
self.data = self.basedata + offset
self.offset = offset
# Process any initial value
if self._initval is not None:
self._set(self._initval)
# Remove
del self._initval
def _get(self):
# Allocate an empty buffer
buf = np.empty(self.datashape, dtype=self.dtype)
# Copy
cuda.memcpy_dtoh(buf, self.data)
# Slice to give the expected I/O shape
return buf[...,:self.ioshape[-1]]
def _set(self, ary):
# Allocate a new buffer with suitable padding and assign
buf = np.zeros(self.datashape, dtype=self.dtype)
buf[...,:self.ioshape[-1]] = ary
# Copy
cuda.memcpy_htod(self.data, buf)
@property
def _as_parameter_(self):
return self.data
def __long__(self):
return self.data
class CUDAMatrix(CUDAMatrixBase, base.Matrix):
def __init__(self, backend, ioshape, initval, extent, tags):
super(CUDAMatrix, self).__init__(backend, backend.fpdtype, ioshape,
initval, extent, tags)
class CUDAMatrixRSlice(base.MatrixRSlice):
@property
def _as_parameter_(self):
return self.parent.basedata + self.offset
def __long__(self):
return self.parent.basedata + self.offset
class CUDAMatrixBank(base.MatrixBank):
def __long__(self):
return self._curr_mat.data
class CUDAConstMatrix(CUDAMatrixBase, base.ConstMatrix):
def __init__(self, backend, initval, extent, tags):
ioshape = initval.shape
super(CUDAConstMatrix, self).__init__(backend, backend.fpdtype,
ioshape, initval, extent, tags)
class CUDAView(base.View):
def __init__(self, backend, matmap, rcmap, stridemap, vshape, tags):
super(CUDAView, self).__init__(backend, matmap, rcmap, stridemap,
vshape, tags)
self.mapping = CUDAMatrixBase(backend, np.int32, (1, self.n),
self.mapping, None, tags)
if self.nvcol > 1:
self.cstrides = CUDAMatrixBase(backend, np.int32, (1, self.n),
self.cstrides, None, tags)
if self.nvrow > 1:
self.rstrides = CUDAMatrixBase(backend, np.int32, (1, self.n),
self.rstrides, None, tags)
class CUDAMPIMatrix(CUDAMatrix, base.MPIMatrix):
def __init__(self, backend, ioshape, initval, extent, tags):
# Call the standard matrix constructor
super(CUDAMPIMatrix, self).__init__(backend, ioshape, initval, extent,
tags)
# Allocate a page-locked buffer on the host for MPI to send/recv from
self.hdata = cuda.pagelocked_empty((self.nrow, self.ncol),
self.dtype, 'C')
class CUDAMPIView(base.MPIView):
pass
class CUDAQueue(base.Queue):
def __init__(self, backend):
super(CUDAQueue, self).__init__(backend)
# Last kernel we executed
self._last = None
# CUDA stream and MPI request list
self._stream_comp = cuda.Stream()
self._stream_copy = cuda.Stream()
self._mpireqs = []
# Items waiting to be executed
self._items = collections.deque()
def __lshift__(self, items):
self._items.extend(items)
def __mod__(self, items):
self.run()
self << items
self.run()
def __nonzero__(self):
return bool(self._items)
def _exec_item(self, item, rtargs):
if item.ktype == 'compute':
item.run(self._stream_comp, self._stream_copy, *rtargs)
elif item.ktype == 'mpi':
item.run(self._mpireqs, *rtargs)
else:
raise ValueError('Non compute/MPI kernel in queue')
self._last = item
def _exec_next(self):
item, rtargs = self._items.popleft()
# If we are at a sequence point then wait for current items
if self._at_sequence_point(item):
self._wait()
# Execute the item
self._exec_item(item, rtargs)
def _exec_nowait(self):
while self._items and not self._at_sequence_point(self._items[0][0]):
self._exec_item(*self._items.popleft())
def _wait(self):
last = self._last
if last and last.ktype == 'compute':
self._stream_comp.synchronize()
self._stream_copy.synchronize()
elif last and last.ktype == 'mpi':
from mpi4py import MPI
MPI.Prequest.Waitall(self._mpireqs)
self._mpireqs = []
self._last = None
def _at_sequence_point(self, item):
return self._last and self._last.ktype != item.ktype
def run(self):
while self._items:
self._exec_next()
self._wait()
@staticmethod
def runall(queues):
# First run any items which will not result in an implicit wait
for q in queues:
q._exec_nowait()
# So long as there are items remaining in the queues
while any(queues):
# Execute a (potentially) blocking item from each queue
for q in it.ifilter(None, queues):
q._exec_next()
q._exec_nowait()
# Wait for all tasks to complete
for q in queues:
q._wait()
|
[
"freddie@witherden.org"
] |
freddie@witherden.org
|
91f594c0d623009fa6d5f267254ce89dd81b5e16
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02846/s097636759.py
|
3a28136572804ebc45464a64aaca2efeebe9c309
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,115 |
py
|
import sys
sys.setrecursionlimit(10**9)
INF=10**18
def input():
return sys.stdin.readline().rstrip()
def main():
def nibutan(ok,ng):
while abs(ok-ng) > 1:
mid = (ok + ng) // 2
if solve(mid,2):
ok = mid
else:
ng = mid
return ok
def solve(mid,n):
dif=(d_0+d_1)*(mid-1)
c=0
if dif*(dif+d_0) == 0:
c+=1
elif dif*(dif+d_0) < 0:
c+=1
if (dif+d_0)*(dif+d_0+d_1) < 0:
c+=1
if c==n:
return True
else:
return False
T=list(map(int,input().split()))
A=list(map(int,input().split()))
B=list(map(int,input().split()))
d_0=T[0]*(A[0]-B[0])
d_1=T[1]*(A[1]-B[1])
if d_0==-d_1:
print('infinity')
elif d_0*(d_0+d_1)<0:
if (d_0*2+d_1)*(d_0*2+d_1*2)<0:
n=nibutan(2,10**40)
ans=n*2-1
ans+=solve(n+1,1)
print(ans)
else:
print(1)
else:
print(0)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.