hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0643deae65bf97584696f33e80afdf35b197abcf | 1,677 | py | Python | robit/core/alert.py | stratusadv/robit | 7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27 | [
"MIT"
] | null | null | null | robit/core/alert.py | stratusadv/robit | 7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27 | [
"MIT"
] | 1 | 2021-11-01T18:51:04.000Z | 2021-11-01T18:51:04.000Z | robit/core/alert.py | stratusadv/robit | 7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime, timedelta
from robit.core.health import Health
| 37.266667 | 119 | 0.627907 |
06440df8fe03e6136138c3851bd70beb0db8af44 | 5,823 | py | Python | validate_tags/validate_master.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | 9 | 2017-06-06T17:47:57.000Z | 2021-08-06T18:30:11.000Z | validate_tags/validate_master.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | null | null | null | validate_tags/validate_master.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | null | null | null | import datetime
import pytz
from utils.timezones import time_zones
from utils.CalculateHours import calc_hours
| 35.290909 | 117 | 0.493732 |
0647d34191beaa453acced9a85d7fac8926ac453 | 110 | py | Python | pre_questionnaire/admin.py | AurelienNioche/ModelingMadeEasyApp | dbb738dda204906c5f4b7aeb9c71feea961a4cce | [
"MIT"
] | null | null | null | pre_questionnaire/admin.py | AurelienNioche/ModelingMadeEasyApp | dbb738dda204906c5f4b7aeb9c71feea961a4cce | [
"MIT"
] | null | null | null | pre_questionnaire/admin.py | AurelienNioche/ModelingMadeEasyApp | dbb738dda204906c5f4b7aeb9c71feea961a4cce | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import PreQuestionnaire
admin.site.register(PreQuestionnaire)
| 18.333333 | 37 | 0.845455 |
0648e18f81ac883f3b49a5656d1320a8eddbf0ed | 5,014 | py | Python | unitorch/score/voc_map.py | fuliucansheng/UniTorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | 2 | 2022-02-05T08:52:00.000Z | 2022-03-27T07:01:34.000Z | unitorch/score/voc_map.py | Lixin-Qian/unitorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | null | null | null | unitorch/score/voc_map.py | Lixin-Qian/unitorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | 1 | 2022-03-27T07:01:13.000Z | 2022-03-27T07:01:13.000Z | import numpy as np
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
def _voc_ap(
rec,
prec,
use_07_metric=False,
):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_ap_score(
p_bboxes: List[np.ndarray],
p_scores: List[np.ndarray],
p_classes: List[np.ndarray],
gt_bboxes: List[np.ndarray],
gt_classes: List[np.ndarray],
class_id: int = None,
threshold: float = 0.5,
):
"""
Args:
p_bboxes: a list of predict bboxes
p_scores: a list of predict score for bbox
p_classes: a list of predict class id for bbox
gt_bboxes: a list of ground truth bboxes
gt_classes: a list of true class id for each true bbox
class_id: the class id to compute ap score
threshold: the threshold to ap score
"""
if class_id is not None:
gt_bboxes = [gt_bbox[gt_class == class_id] for gt_class, gt_bbox in zip(gt_classes, gt_bboxes)]
p_bboxes = [p_bbox[p_class == class_id] for p_class, p_bbox in zip(p_classes, p_bboxes)]
p_scores = [p_score[p_class == class_id] for p_class, p_score in zip(p_classes, p_scores)]
p_indexes = [np.array([i] * len(p_bboxes[i])) for i in range(len(p_bboxes))]
p_bboxes, p_scores, p_indexes = (
np.concatenate(p_bboxes),
np.concatenate(p_scores),
np.concatenate(p_indexes),
)
p_sort_indexes = np.argsort(-p_scores)
tp = np.zeros(p_scores.shape[0])
fp = np.zeros(p_scores.shape[0])
gt_bbox_status = defaultdict(set)
for idx, p_sort_index in enumerate(p_sort_indexes):
p_index = int(p_indexes[p_sort_index])
gt_bbox = gt_bboxes[p_index]
p_bbox = p_bboxes[p_sort_index]
vmax = -float("inf")
jmax = -1
if gt_bbox.size > 0:
ixmin = np.maximum(gt_bbox[:, 0], p_bbox[0])
iymin = np.maximum(gt_bbox[:, 1], p_bbox[1])
ixmax = np.minimum(gt_bbox[:, 2], p_bbox[2])
iymax = np.minimum(gt_bbox[:, 3], p_bbox[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (
(p_bbox[2] - p_bbox[0] + 1.0) * (p_bbox[3] - p_bbox[1] + 1.0)
+ (gt_bbox[:, 2] - gt_bbox[:, 0] + 1.0) * (gt_bbox[:, 3] - gt_bbox[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
vmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if vmax > threshold:
if jmax not in gt_bbox_status[p_index]:
tp[idx] = 1
gt_bbox_status[p_index].add(jmax)
else:
fp[idx] = 1
else:
fp[idx] = 1
fp = np.cumsum(fp, axis=0)
tp = np.cumsum(tp, axis=0)
rec = tp / float(sum([len(gt) for gt in gt_bboxes]))
prec = tp / np.maximum(tp + fp, np.finfo(np.float).eps)
ap = _voc_ap(rec, prec)
return ap
def voc_map_score(
p_bboxes: List[np.ndarray],
p_scores: List[np.ndarray],
p_classes: List[np.ndarray],
gt_bboxes: List[np.ndarray],
gt_classes: List[np.ndarray],
):
"""
Args:
p_bboxes: a list of predict bboxes
p_scores: a list of predict score for bbox
p_classes: a list of predict class id for bbox
gt_bboxes: a list of ground truth bboxes
gt_classes: a list of true class id for each true bbox
Returns:
a avg ap score of all classes in ground truth
"""
classes = set(list(np.concatenate(gt_classes)))
ap_scores = dict()
for thres in range(50, 100, 5):
ap_scores[thres] = [
voc_ap_score(
p_bboxes,
p_scores,
p_classes,
gt_bboxes,
gt_classes,
c,
thres / 100,
)
for c in classes
]
mAP = {iou: np.mean(x) for iou, x in ap_scores.items()}
return np.mean(list(mAP.values()))
| 33.426667 | 103 | 0.553849 |
06499670b453111a27b20a04dcc90e10b1a775c3 | 1,418 | py | Python | tests/_test_callable_py3.py | youknowone/callable | a31ea23103d7df156c30d990c5c249844ce302f6 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/_test_callable_py3.py | youknowone/callable | a31ea23103d7df156c30d990c5c249844ce302f6 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tests/_test_callable_py3.py | youknowone/callable | a31ea23103d7df156c30d990c5c249844ce302f6 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import asyncio
from callable import Callable
from signature import signature
import pytest
| 28.938776 | 85 | 0.568406 |
06499d2a878e935b1bbb4ea1ae0081bd6e6ed4b7 | 75 | py | Python | .metadata/.plugins/org.python.pydev.shared_interactive_console/history.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | null | null | null | .metadata/.plugins/org.python.pydev.shared_interactive_console/history.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | null | null | null | .metadata/.plugins/org.python.pydev.shared_interactive_console/history.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | 1 | 2021-08-01T22:39:18.000Z | 2021-08-01T22:39:18.000Z | import sys; print('%s %s' % (sys.executable or sys.platform, sys.version))
| 37.5 | 74 | 0.693333 |
064ab2c6467200414b067feed1868bd5e05cfaa4 | 1,293 | py | Python | caravel/loaders/_png.py | neurospin/pycaravel | 412d37ae1b06afcc9645e01a096bbe2674cfe47a | [
"CECILL-B"
] | null | null | null | caravel/loaders/_png.py | neurospin/pycaravel | 412d37ae1b06afcc9645e01a096bbe2674cfe47a | [
"CECILL-B"
] | null | null | null | caravel/loaders/_png.py | neurospin/pycaravel | 412d37ae1b06afcc9645e01a096bbe2674cfe47a | [
"CECILL-B"
] | 1 | 2020-08-27T13:17:00.000Z | 2020-08-27T13:17:00.000Z | # coding: utf-8
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module defines the png dataset loader.
"""
# Third party import
import imageio
# Package import
from .loader_base import LoaderBase
| 24.865385 | 74 | 0.520495 |
064ad9d1f980f75038d7cfdfdcbb95549772aa8a | 92 | py | Python | src/auth/__init__.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | src/auth/__init__.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | src/auth/__init__.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | from src.auth.adapter import *
from src.auth.domain import *
from src.auth.service import *
| 23 | 30 | 0.771739 |
064ca7c37993e4810c14d5f7e1d0f4a40a067487 | 8,098 | py | Python | video_utils.py | Domhnall-Liopa/Lip2Wav | 236ae24cd7945da8a75ddea1cfdc3da271c3c59f | [
"MIT"
] | null | null | null | video_utils.py | Domhnall-Liopa/Lip2Wav | 236ae24cd7945da8a75ddea1cfdc3da271c3c59f | [
"MIT"
] | null | null | null | video_utils.py | Domhnall-Liopa/Lip2Wav | 236ae24cd7945da8a75ddea1cfdc3da271c3c59f | [
"MIT"
] | null | null | null | import json
import random
import re
import subprocess
import tempfile
from datetime import timedelta
import cv2
import numpy as np
import requests
from vidaug import augmentors as va
# this is a static build from https://www.johnvansickle.com/ffmpeg/old-releases/ffmpeg-4.4.1-i686-static.tar.xz
# requires new ffmpeg version for:
# - duration of extracted audio == video
# - contains x264 codec in build required for clean video frames
FFMPEG_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffmpeg'
FFPROBE_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffprobe'
OLD_FFMPEG_PATH = 'ffmpeg-2.8.15'
FFMPEG_OPTIONS = '-hide_banner -loglevel panic'
VIDEO_CROP_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -ss {{start_time}} -to {{end_time}} -async 1 {{output_video_path}}'
VIDEO_INFO_COMMAND = f'{FFMPEG_PATH} -i {{input_video_path}}'
VIDEO_DURATION_COMMAND = f'{FFPROBE_PATH} {FFMPEG_OPTIONS} -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {{video_path}}'
VIDEO_TO_AUDIO_COMMAND = f'{{ffmpeg_path}} {FFMPEG_OPTIONS} -threads 1 -y -i {{input_video_path}} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {{output_audio_path}}'
VIDEO_CONVERT_FPS_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -strict -2 -filter:v fps=fps={{fps}} {{output_video_path}}' # copies original codecs and metadata (rotation)
VIDEO_SPEED_ALTER_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -filter_complex "[0:v]setpts={{video_speed}}*PTS[v];[0:a]atempo={{audio_speed}}[a]" -map "[v]" -map "[a]" {{output_video_path}}'
VIDEO_REMOVE_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -c copy -an {{output_video_path}}'
VIDEO_ADD_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -i {{input_audio_path}} -strict -2 -c:v copy -c:a aac {{output_video_path}}'
| 32.785425 | 216 | 0.67918 |
064f53cd615575e4bc66f6d26d74337b90be2852 | 621 | py | Python | aflcov/vis.py | axt/afl-cov-vis | 7806fa430113732790563b0f15884a087ebd21ea | [
"BSD-2-Clause"
] | 29 | 2017-11-12T09:35:01.000Z | 2022-02-17T09:29:54.000Z | aflcov/vis.py | usc-isi-bass/afl-cov | 18e305d101443d8a06c46f9ac080dd45ca13d8bb | [
"BSD-2-Clause"
] | 2 | 2017-11-12T09:40:43.000Z | 2018-01-19T10:37:17.000Z | aflcov/vis.py | usc-isi-bass/afl-cov | 18e305d101443d8a06c46f9ac080dd45ca13d8bb | [
"BSD-2-Clause"
] | 6 | 2017-11-12T09:50:20.000Z | 2022-02-22T06:01:17.000Z | from bingraphvis.base import Content
| 31.05 | 130 | 0.481481 |
064faa0fae768ef7598b80938b851b966512e6ab | 3,418 | py | Python | corehq/couchapps/tests/test_all_docs.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/couchapps/tests/test_all_docs.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/couchapps/tests/test_all_docs.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type, get_doc_count_by_domain_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
| 50.264706 | 104 | 0.675834 |
0650fe2b4a8ec05a4f26bd56fccd6338468b7b46 | 913 | py | Python | parc/instruction.py | kevinyuan/pydgin | 9e5dea526a17b23929b2a1d24598154b42323073 | [
"BSD-3-Clause"
] | 159 | 2015-02-12T03:28:25.000Z | 2022-02-24T22:40:35.000Z | parc/instruction.py | kevinyuan/pydgin | 9e5dea526a17b23929b2a1d24598154b42323073 | [
"BSD-3-Clause"
] | 21 | 2015-01-31T23:47:26.000Z | 2020-12-21T12:41:08.000Z | parc/instruction.py | kevinyuan/pydgin | 9e5dea526a17b23929b2a1d24598154b42323073 | [
"BSD-3-Clause"
] | 40 | 2015-01-28T21:31:30.000Z | 2022-01-25T12:50:23.000Z | #=======================================================================
# instruction.py
#=======================================================================
from pydgin.utils import r_uint
| 19.020833 | 72 | 0.496166 |
06517cf6f2e451230d0d6eb3711d313d63aa2c66 | 564 | py | Python | docs/examples/dev/_semantic_text_features.py | yacth/autogoal | a55c1534161e850587e2ca3533aa2fd5ae28569e | [
"MIT"
] | null | null | null | docs/examples/dev/_semantic_text_features.py | yacth/autogoal | a55c1534161e850587e2ca3533aa2fd5ae28569e | [
"MIT"
] | null | null | null | docs/examples/dev/_semantic_text_features.py | yacth/autogoal | a55c1534161e850587e2ca3533aa2fd5ae28569e | [
"MIT"
] | null | null | null | from autogoal.contrib import find_classes
from autogoal.kb import *
from autogoal.kb import build_pipelines, build_pipeline_graph
from autogoal.contrib.spacy import SpacyNLP
from autogoal.contrib._wrappers import FlagsMerger
import logging
logging.basicConfig(level=logging.INFO)
pipeline_space = build_pipeline_graph(
input=List(Sentence()),
output=MatrixContinuousDense(),
registry=find_classes(),
# registry=[SpacyNLP, FlagsMerger],
# max_list_depth=1,
)
for i in range(10):
pipeline = pipeline_space.sample()
print(pipeline)
| 22.56 | 61 | 0.77305 |
0651ab151a7c92bb5c33655beaba51093024c9dc | 341 | py | Python | opytimizer/spaces/__init__.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 528 | 2018-10-01T20:00:09.000Z | 2022-03-27T11:15:31.000Z | opytimizer/spaces/__init__.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 17 | 2019-10-30T00:47:03.000Z | 2022-03-21T11:39:28.000Z | opytimizer/spaces/__init__.py | anukaal/opytimizer | 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9 | [
"Apache-2.0"
] | 35 | 2018-10-01T20:03:23.000Z | 2022-03-20T03:54:15.000Z | """Customizable space module that provides different search spaces
implementations.
"""
from opytimizer.spaces.boolean import BooleanSpace
from opytimizer.spaces.grid import GridSpace
from opytimizer.spaces.hyper_complex import HyperComplexSpace
from opytimizer.spaces.search import SearchSpace
from opytimizer.spaces.tree import TreeSpace
| 34.1 | 66 | 0.859238 |
0652b6080d711fc812aa3a6054f91161bc0d0a8b | 16,913 | py | Python | lattpy/spatial.py | dylanljones/lattpy | 6779ae7755aaf9e844d63a6f63b5036fb64d9f89 | [
"MIT"
] | 11 | 2020-10-29T17:23:02.000Z | 2022-02-28T12:25:41.000Z | lattpy/spatial.py | dylanljones/lattpy | 6779ae7755aaf9e844d63a6f63b5036fb64d9f89 | [
"MIT"
] | 7 | 2021-01-12T13:53:42.000Z | 2022-03-29T11:21:58.000Z | lattpy/spatial.py | dylanljones/lattpy | 6779ae7755aaf9e844d63a6f63b5036fb64d9f89 | [
"MIT"
] | 1 | 2021-10-31T11:15:20.000Z | 2021-10-31T11:15:20.000Z | # coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, Dylan Jones
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""Spatial algorithms and data structures."""
import math
import numpy as np
import itertools
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree, Voronoi
from typing import Iterable, Sequence, Optional, Union
from .utils import ArrayLike, min_dtype, chain
from .plotting import draw_points, draw_vectors, draw_lines, draw_surfaces
__all__ = [
"distance", "interweave", "vindices", "vrange", "cell_size", "cell_volume",
"compute_vectors", "compute_neighbors", "KDTree", "VoronoiTree", "WignerSeitzCell",
"rx", "ry", "rz", "rotate2d", "rotate3d", "build_periodic_translation_vector"
]
def distance(r1: ArrayLike, r2: ArrayLike, decimals: Optional[int] = None) -> float:
""" Calculates the euclidian distance bewteen two points.
Parameters
----------
r1: array_like
First input point.
r2: array_like
Second input point of matching size.
decimals: int, optional
Optional decimals to round distance to.
Returns
-------
distance: float
"""
dist = math.sqrt(np.sum(np.square(r1 - r2)))
if decimals is not None:
dist = round(dist, decimals)
return dist
def interweave(arrays: Sequence[np.ndarray]) -> np.ndarray:
""" Interweaves multiple arrays along the first axis
Example
-------
>>> arr1 = np.array([[1, 1], [3, 3], [5, 5]])
>>> arr2 = np.array([[2, 2], [4, 4], [6, 6]])
>>> interweave([arr1, arr2])
array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]])
Parameters
----------
arrays: (M) Sequence of (N, ...) array_like
The input arrays to interwave. The shape of all arrays must match.
Returns
-------
interweaved: (M*N, ....) np.ndarray
"""
shape = list(arrays[0].shape)
shape[0] = sum(x.shape[0] for x in arrays)
result = np.empty(shape, dtype=arrays[0].dtype)
n = len(arrays)
for i, arr in enumerate(arrays):
result[i::n] = arr
return result
def vindices(limits: Iterable[Sequence[int]], sort_axis: Optional[int] = 0,
dtype: Optional[Union[int, str, np.dtype]] = None) -> np.ndarray:
""" Return an array representing the indices of a d-dimensional grid.
Parameters
----------
limits: (D, 2) array_like
The limits of the indices for each axis.
sort_axis: int, optional
Optional axis that is used to sort indices.
dtype: int or str or np.dtype, optional
Optional data-type for storing the lattice indices. By default the given limits
are checked to determine the smallest possible data-type.
Returns
-------
vectors: (N, D) np.ndarray
"""
if dtype is None:
dtype = min_dtype(limits, signed=True)
limits = np.asarray(limits)
dim = limits.shape[0]
# Create meshgrid reshape grid to array of indices
# version 1:
# axis = np.meshgrid(*(np.arange(*lim, dtype=dtype) for lim in limits))
# nvecs = np.asarray([np.asarray(a).flatten("F") for a in axis]).T
# version 2:
# slices = [slice(lim[0], lim[1], 1) for lim in limits]
# nvecs = np.mgrid[slices].astype(dtype).reshape(dim, -1).T
# version 3:
size = limits[:, 1] - limits[:, 0]
nvecs = np.indices(size, dtype=dtype).reshape(dim, -1).T + limits[:, 0]
# Optionally sort indices along given axis
if sort_axis is not None:
nvecs = nvecs[np.lexsort(nvecs.T[[sort_axis]])]
return nvecs
def vrange(start=None, *args,
dtype: Optional[Union[int, str, np.dtype]] = None,
sort_axis: Optional[int] = 0, **kwargs) -> np.ndarray:
""" Return evenly spaced vectors within a given interval.
Parameters
----------
start: array_like, optional
The starting value of the interval. The interval includes this value.
The default start value is 0.
stop: array_like
The end value of the interval.
step: array_like, optional
Spacing between values. If `start` and `stop` are sequences and the `step`
is a scalar the given step size is used for all dimensions of the vectors.
The default step size is 1.
sort_axis: int, optional
Optional axis that is used to sort indices.
dtype: dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
vectors: (N, D) np.ndarray
"""
# parse arguments
if len(args) == 0:
stop = start
start = np.zeros_like(stop)
step = kwargs.get("step", 1.0)
elif len(args) == 1:
stop = args[0]
step = kwargs.get("step", 1.0)
else:
stop, step = args
start = np.atleast_1d(start)
stop = np.atleast_1d(stop)
if step is None:
step = np.ones_like(start)
elif not hasattr(step, "__len__"):
step = np.ones_like(start) * step
# Create grid and reshape to array of vectors
slices = [slice(i, f, s) for i, f, s in zip(start, stop, step)]
array = np.mgrid[slices].reshape(len(slices), -1).T
# Optionally sort array along given axis
if sort_axis is not None:
array = array[np.lexsort(array.T[[sort_axis]])]
return array if dtype is None else array.astype(dtype)
def cell_size(vectors: ArrayLike) -> np.ndarray:
""" Computes the shape of the box spawned by the given vectors.
Parameters
----------
vectors: array_like
The basis vectors defining the cell.
Returns
-------
size: np.ndarray
"""
max_values = np.max(vectors, axis=0)
min_values = np.min(vectors, axis=0)
min_values[min_values > 0] = 0
return max_values - min_values
def cell_volume(vectors: ArrayLike) -> float:
r""" Computes the volume of the unit cell defined by the primitive vectors.
The volume of the unit-cell in two and three dimensions is defined by
.. math::
V_{2d} = \abs{a_1 \cross a_2}, \quad V_{3d} = a_1 \cdot \abs{a_2 \cross a_3}
For higher dimensions the volume is computed using the determinant:
.. math::
V_{d} = \sqrt{\det{A A^T}}
where .math:`A` is the array of vectors.
Parameters
----------
vectors: array_like
The basis vectors defining the cell.
Returns
-------
vol: float
"""
dim = len(vectors)
if dim == 1:
v = float(vectors)
elif dim == 2:
v = np.cross(vectors[0], vectors[1])
elif dim == 3:
cross = np.cross(vectors[1], vectors[2])
v = np.dot(vectors[0], cross)
else:
v = np.sqrt(np.linalg.det(np.dot(vectors.T, vectors)))
return abs(v)
def compute_vectors(a: float, b: Optional[float] = None, c: Optional[float] = None,
alpha: Optional[float] = None, beta: Optional[float] = None,
gamma: Optional[float] = None,
decimals: Optional[int] = 0) -> np.ndarray:
""" Computes lattice vectors by the lengths and angles. """
if b is None and c is None:
vectors = [a]
elif c is None:
alpha = np.deg2rad(alpha)
ax = a
bx = b * np.cos(alpha)
by = b * np.sin(alpha)
vectors = np.array([
[ax, 0],
[bx, by]
])
else:
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
ax = a
bx = b * np.cos(gamma)
by = b * np.sin(gamma)
cx = c * np.cos(beta)
cy = (abs(c) * abs(b) * np.cos(alpha) - bx * cx) / by
cz = np.sqrt(c ** 2 - cx ** 2 - cy ** 2)
vectors = np.array([
[ax, 0, 0],
[bx, by, 0],
[cx, cy, cz]
])
if decimals:
vectors = np.round(vectors, decimals=decimals)
return vectors
# noinspection PyUnresolvedReferences
def arange(self, steps, offset=0.):
limits = self.limits * (1 + offset)
steps = [steps] * self.dim if not hasattr(steps, "__len__") else steps
return [np.arange(*lims, step=step) for lims, step in zip(limits, steps)]
def linspace(self, nums, offset=0.):
limits = self.limits * (1 + offset)
nums = [nums] * self.dim if not hasattr(nums, "__len__") else nums
return [np.linspace(*lims, num=num) for lims, num in zip(limits, nums)]
def meshgrid(self, nums=None, steps=None, offset=0., check=True):
if nums is not None:
grid = np.array(np.meshgrid(*self.linspace(nums, offset)))
elif steps is not None:
grid = np.array(np.meshgrid(*self.arange(steps, offset)))
else:
raise ValueError("Either the number of points or the step size muste be specified")
if check:
lengths = grid.shape[1:]
dims = range(len(lengths))
for item in itertools.product(*[range(n) for n in lengths]):
point = np.array([grid[d][item] for d in dims])
if not self.check(point):
for d in dims:
grid[d][item] = np.nan
return grid
def symmetry_points(self):
origin = np.zeros((1,))
corners = self.vertices.copy()
face_centers = None
if self.dim == 1:
return origin, corners, None, None
elif self.dim == 2:
edge_centers = np.zeros((len(self.edges), 2))
for i, simplex in enumerate(self.edges):
p1, p2 = self.vertices[simplex]
edge_centers[i] = p1 + (p2 - p1) / 2
elif self.dim == 3:
edge_centers = list()
face_centers = list()
for i, simplex in enumerate(self.edges):
edges = self.vertices[simplex]
# compute face centers
face_centers.append(np.mean(edges, axis=0))
# compute edge centers
for p1, p2 in chain(edges, cycle=True):
edge_centers.append(p1 + (p2 - p1) / 2)
edge_centers = np.asarray(edge_centers)
face_centers = np.asarray(face_centers)
else:
raise NotImplementedError()
return origin, corners, edge_centers, face_centers
def rx(theta: float) -> np.ndarray:
"""X-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[1, 0, 0], [0, cos, -sin], [0, sin, cos]])
def ry(theta: float) -> np.ndarray:
"""Y-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[cos, 0, sin], [0, 1, 0], [-sin, 0, +cos]])
def rz(theta: float) -> np.ndarray:
"""Z-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
def rot(thetax: float = 0., thetay: float = 0., thetaz: float = 0.) -> np.ndarray:
"""General rotation matrix"""
r = np.eye(3)
if thetaz:
r = np.dot(r, rz(thetaz))
if thetay:
r = np.dot(r, ry(thetay))
if thetax:
r = np.dot(r, rz(thetax))
return r
def rotate2d(a, theta):
"""Applies the z-rotation matrix to a 2D point"""
return np.dot(a, rz(theta)[:2, :2])
def rotate3d(a, thetax=0., thetay=0., thetaz=0.):
"""Applies the general rotation matrix to a 3D point"""
return np.dot(a, rot(thetax, thetay, thetaz))
| 32.840777 | 98 | 0.586945 |
0653906f675760bbd82805e22137ded59d26086f | 1,471 | py | Python | test/test_io.py | wiktorn/pyosmium | 0517eae40da1347523c72a6cb097a5b6158b25a8 | [
"BSD-2-Clause"
] | null | null | null | test/test_io.py | wiktorn/pyosmium | 0517eae40da1347523c72a6cb097a5b6158b25a8 | [
"BSD-2-Clause"
] | null | null | null | test/test_io.py | wiktorn/pyosmium | 0517eae40da1347523c72a6cb097a5b6158b25a8 | [
"BSD-2-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD
#
# This file is part of Pyosmium.
#
# Copyright (C) 2022 Sarah Hoffmann.
import pytest
import osmium as o
| 21.955224 | 82 | 0.636302 |
06556055c40c35a9d717e9e1b378737090b2df39 | 302 | py | Python | tg_gui/_platform_displayio_/shared.py | TG-Techie/tg-gui | 11d0eabeffb36fa43fc90f818624053928c37637 | [
"MIT"
] | 1 | 2019-03-20T19:49:14.000Z | 2019-03-20T19:49:14.000Z | tg_gui/_platform_displayio_/shared.py | TG-Techie/tg_gui | 11d0eabeffb36fa43fc90f818624053928c37637 | [
"MIT"
] | 15 | 2019-02-09T12:24:05.000Z | 2021-12-01T17:21:06.000Z | tg_gui/_platform_displayio_/shared.py | TG-Techie/tg-gui | 11d0eabeffb36fa43fc90f818624053928c37637 | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from displayio import Shape, Group, TileGrid, Palette, Bitmap, OnDiskBitmap
NativeElement = Group | Shape | TileGrid | Palette | Bitmap | OnDiskBitmap
NativeContainer = Group
else:
NativeElement = object
NativeContainer = object
| 27.454545 | 79 | 0.748344 |
0655cb7f1c625d52133699a96d2f35fe1b202f17 | 13,279 | py | Python | pyaccords/pysrc/ec2instanceinfo.py | MarouenMechtri/accords-platform-1 | 4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | [
"Apache-2.0"
] | 1 | 2015-02-28T21:25:54.000Z | 2015-02-28T21:25:54.000Z | pyaccords/pysrc/ec2instanceinfo.py | MarouenMechtri/accords-platform-1 | 4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | [
"Apache-2.0"
] | null | null | null | pyaccords/pysrc/ec2instanceinfo.py | MarouenMechtri/accords-platform-1 | 4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
#copyright 2013, Hamid MEDJAHED (hmedjahed@prologue.fr) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
import HTMLParser
data = '''
<table cellspacing="0" class="table table-bordered table-hover table-condensed" id="data">
<thead>
<tr>
<th class="name">Name</th>
<th class="memory">Memory</th>
<th class="computeunits">
<abbr title="One EC2 Compute Unit provides the equivalent CPU capacity of a 1.0-1.2 GHz 2007 Opteron or 2007 Xeon processor.">Compute Units</abbr>
</th>
<th class="storage">Storage</th>
<th class="architecture">Architecture</th>
<th class="ioperf">I/O Performance</th>
<th class="maxips">
<abbr title="Adding additional IPs requires launching the instance in a VPC.">Max IPs</abbr>
</th>
<th class="apiname">API Name</th>
<th class="cost">Linux cost</th>
<th class="cost">Windows cost</th>
</tr>
</thead>
<tbody>
<tr>
<td class="name">M1 Small</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="1">1</span></td>
<td class="storage"><span sort="160">160 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">8</td>
<td class="apiname">m1.small</td>
<td class="cost" hour_cost="0.060">$0.060 per hour</td>
<td class="cost" hour_cost="0.115">$0.115 per hour</td>
</tr>
<tr>
<td class="name">M1 Medium</td>
<td class="memory"><span sort="3.75">3.75 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="410">410 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">m1.medium</td>
<td class="cost" hour_cost="0.12">$0.12 per hour</td>
<td class="cost" hour_cost="0.23">$0.23 per hour</td>
</tr>
<tr>
<td class="name">M1 Large</td>
<td class="memory"><span sort="7.5">7.50 GB</span></td>
<td class="computeunits"><span sort="4">4</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">30</td>
<td class="apiname">m1.large</td>
<td class="cost" hour_cost="0.24">$0.24 per hour</td>
<td class="cost" hour_cost="0.46">$0.46 per hour</td>
</tr>
<tr>
<td class="name">M1 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="8">8</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">60</td>
<td class="apiname">m1.xlarge</td>
<td class="cost" hour_cost="0.48">$0.48 per hour</td>
<td class="cost" hour_cost="0.92">$0.92 per hour</td>
</tr>
<tr>
<td class="name">Micro</td>
<td class="memory"><span sort="0.6">0.60 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="0">Low</span></td>
<td class="maxips">1</td>
<td class="apiname">t1.micro</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Extra Large</td>
<td class="memory"><span sort="17.10">17.10 GB</span></td>
<td class="computeunits"><span sort="6.5">6.5</span></td>
<td class="storage"><span sort="420">420 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m2.xlarge</td>
<td class="cost" hour_cost="0.41">$0.41 per hour</td>
<td class="cost" hour_cost="0.57">$0.57 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Double Extra Large</td>
<td class="memory"><span sort="34.2">34.20 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m2.2xlarge</td>
<td class="cost" hour_cost="0.82">$0.82 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Quadruple Extra Large</td>
<td class="memory"><span sort="68.4">68.40 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">240</td>
<td class="apiname">m2.4xlarge</td>
<td class="cost" hour_cost="1.64">$1.64 per hour</td>
<td class="cost" hour_cost="2.28">$2.28 per hour</td>
</tr>
<tr>
<td class="name">M3 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m3.xlarge</td>
<td class="cost" hour_cost="0.50">$0.50 per hour</td>
<td class="cost" hour_cost="0.98">$0.98 per hour</td>
</tr>
<tr>
<td class="name">M3 Double Extra Large</td>
<td class="memory"><span sort="30">30.00 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m3.2xlarge</td>
<td class="cost" hour_cost="1.00">$1.00 per hour</td>
<td class="cost" hour_cost="1.96">$1.96 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Medium</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="5">5</span></td>
<td class="storage"><span sort="350">350 GB</span></td>
<td class="architecture">32_64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">c1.medium</td>
<td class="cost" hour_cost="0.145">$0.145 per hour</td>
<td class="cost" hour_cost="0.285">$0.285 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Extra Large</td>
<td class="memory"><span sort="7">7.00 GB</span></td>
<td class="computeunits"><span sort="20">20</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">60</td>
<td class="apiname">c1.xlarge</td>
<td class="cost" hour_cost="0.58">$0.58 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Quadruple Extra Large</td>
<td class="memory"><span sort="23">23.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cc1.4xlarge</td>
<td class="cost" hour_cost="1.30">$1.30 per hour</td>
<td class="cost" hour_cost="1.61">$1.61 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Eight Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="3370">3370 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">240</td>
<td class="apiname">cc2.8xlarge</td>
<td class="cost" hour_cost="2.40">$2.40 per hour</td>
<td class="cost" hour_cost="2.97">$2.97 per hour</td>
</tr>
<tr>
<td class="name">Cluster GPU Quadruple Extra Large</td>
<td class="memory"><span sort="22">22.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cg1.4xlarge</td>
<td class="cost" hour_cost="2.10">$2.10 per hour</td>
<td class="cost" hour_cost="2.60">$2.60 per hour</td>
</tr>
<tr>
<td class="name">High I/O Quadruple Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="2048">2048 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hi1.4xlarge</td>
<td class="cost" hour_cost="3.10">$3.10 per hour</td>
<td class="cost" hour_cost="3.58">$3.58 per hour</td>
</tr>
<tr>
<td class="name">High Storage Eight Extra Large</td>
<td class="memory"><span sort="117.00">117.00 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="49152">48 TB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hs1.8xlarge</td>
<td class="cost" hour_cost="4.600">$4.600 per hour</td>
<td class="cost" hour_cost="4.931">$4.931 per hour</td>
</tr>
<tr>
<td class="name">High Memory Cluster Eight Extra Large</td>
<td class="memory"><span sort="244.00">244.00 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="240">240 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cr1.8xlarge</td>
<td class="cost" hour_cost="3.500">$3.500 per hour</td>
<td class="cost" hour_cost="3.831">$3.831 per hour</td>
</tr>
</tbody>
</table> '''
| 48.112319 | 158 | 0.524889 |
06585c3b0c0000d446eb614d1e5895fa37089822 | 1,105 | py | Python | backend/project_requests/admin.py | mnieber/taskboard | 7925342751e2782bd0a0258eb2d43d9ec90ce9d8 | [
"MIT"
] | null | null | null | backend/project_requests/admin.py | mnieber/taskboard | 7925342751e2782bd0a0258eb2d43d9ec90ce9d8 | [
"MIT"
] | null | null | null | backend/project_requests/admin.py | mnieber/taskboard | 7925342751e2782bd0a0258eb2d43d9ec90ce9d8 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.http import HttpResponseRedirect
from django.urls import path
from faker import Faker
from .models import ProjectRequest
from .utils import create_project_request
| 29.864865 | 84 | 0.608145 |
065a10f929ff09c5fb1f252b5c8f9281c467c8ab | 13,477 | py | Python | reinforcement_learning/tabular_RL/algorithms/td_zero.py | EliorBenYosef/reinforcement-learning | c694d07044e12c92e08ca8c2ef06b073ca1704d4 | [
"MIT"
] | 16 | 2019-09-01T14:12:45.000Z | 2022-03-07T03:10:38.000Z | reinforcement_learning/tabular_RL/algorithms/td_zero.py | EliorBenYosef/reinforcement-learning | c694d07044e12c92e08ca8c2ef06b073ca1704d4 | [
"MIT"
] | 5 | 2020-11-13T19:08:40.000Z | 2022-02-10T04:13:29.000Z | reinforcement_learning/tabular_RL/algorithms/td_zero.py | EliorBenYosef/reinforcement-learning | c694d07044e12c92e08ca8c2ef06b073ca1704d4 | [
"MIT"
] | 1 | 2022-03-07T03:10:39.000Z | 2022-03-07T03:10:39.000Z | import numpy as np
from gym import wrappers
from reinforcement_learning.utils.utils import decrement_eps, EPS_DEC_LINEAR, pickle_save
from reinforcement_learning.tabular_RL.utils import init_v, init_q, init_q1_q2, \
max_action_q, max_action_q1_q2, eps_greedy_q, eps_greedy_q1_q2, print_v
| 34.205584 | 119 | 0.549974 |
0660694db2ddc7b0023f6b169f47cbe6fc31c8a7 | 916 | py | Python | topo.py | rahil-g/gpf | 234c22f500283f75454ccba4a12b765be9ddad05 | [
"MIT"
] | null | null | null | topo.py | rahil-g/gpf | 234c22f500283f75454ccba4a12b765be9ddad05 | [
"MIT"
] | null | null | null | topo.py | rahil-g/gpf | 234c22f500283f75454ccba4a12b765be9ddad05 | [
"MIT"
] | null | null | null | #Author: Rahil Gandotra
#This file consists of the custom Mininet topology used for GPF.
from mininet.topo import Topo
topos = { 'mytopo': ( lambda: MyTopo() ) }
| 31.586207 | 75 | 0.622271 |
0661b5f4de7b9d1818fd8ebe0cb07e2e58e19d2a | 10,819 | py | Python | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | jippo015/Sub-Zero.bundle | 734e0f7128c05c0f639e11e7dfc77daa1014064b | [
"MIT"
] | 1,553 | 2015-11-09T02:17:06.000Z | 2022-03-31T20:24:52.000Z | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 691 | 2015-11-05T21:32:26.000Z | 2022-03-17T10:52:45.000Z | Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py | saiterlz/Sub-Zero.bundle | 1a0bb9c3e4be84be35d46672907783363fe5a87b | [
"MIT"
] | 162 | 2015-11-06T19:38:55.000Z | 2022-03-16T02:42:41.000Z | # coding=utf-8
import logging
import rarfile
import os
from subliminal.exceptions import ConfigurationError
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guess_matches, guessit, sanitize, region, type_map, \
raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \
SUBTITLE_EXTENSIONS, language_converters
from subzero.language import Language
logger = logging.getLogger(__name__)
def list_subtitles(self, video, languages):
    """Return subtitles for *video* in any of *languages*.

    Episodes are searched by series title (plus alternative series names)
    with season/episode; movies by title (plus alternative titles). The
    first title that yields any results wins.

    NOTE(review): this is a provider method whose class header is not
    visible in this chunk — presumably a LegendasTVProvider subclass.
    """
    season = episode = None
    if isinstance(video, Episode):
        titles = [video.series] + video.alternative_series
        season = video.season
        episode = video.episode
    else:
        titles = [video.title] + video.alternative_titles

    for title in titles:
        # Query once per (language, title); stop at the first title that
        # produces any subtitles at all.
        subtitles = [s for l in languages for s in
                     self.query(l, title, season=season, episode=episode, year=video.year, imdb_id=video.imdb_id)]
        if subtitles:
            return subtitles

    return []

def download_subtitle(self, subtitle):
    """Download *subtitle* via the base provider implementation."""
    super(LegendasTVProvider, self).download_subtitle(subtitle)
    # Drop the downloaded archive payload after extraction — presumably
    # to release memory held by the rar/zip content; TODO confirm intent.
    subtitle.archive.content = None

def get_archives(self, title_id, language_code, title_type, season, episode):
    """Delegate to the undecorated base ``get_archives`` implementation.

    NOTE(review): ``.original`` suggests the base method is wrapped by a
    cache decorator (dogpile/region) and this calls its raw form —
    confirm against the decorator in use.
    """
    return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type,
                                                                 season, episode)
06631addf22bfb69f24be36f23cfcd2fff2aa5f2 | 1,587 | py | Python | Position.py | bubakazouba/Robinhood-for-Google-Finance | 4e0aa8955e4bc786a8528ea500459f5937f15a96 | [
"MIT"
] | 5 | 2017-11-24T08:13:47.000Z | 2021-05-05T04:48:30.000Z | Position.py | bubakazouba/Robinhood-for-Google-Finance | 4e0aa8955e4bc786a8528ea500459f5937f15a96 | [
"MIT"
] | null | null | null | Position.py | bubakazouba/Robinhood-for-Google-Finance | 4e0aa8955e4bc786a8528ea500459f5937f15a96 | [
"MIT"
] | null | null | null | import re | 44.083333 | 352 | 0.63264 |
066587c08345eadec5ce3298131ac1c2190623fb | 15,789 | py | Python | app_framework/main_window.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 5 | 2016-12-02T08:24:35.000Z | 2021-02-24T08:41:41.000Z | app_framework/main_window.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 53 | 2016-11-14T13:11:41.000Z | 2022-01-13T09:28:11.000Z | app_framework/main_window.py | planktontoolbox/plankton-toolbox | 626930120329983fb9419a9aed94712148bac219 | [
"MIT"
] | 1 | 2020-11-27T01:20:10.000Z | 2020-11-27T01:20:10.000Z | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import codecs
from PyQt5 import QtWidgets
from PyQt5 import QtCore
import plankton_core
import app_framework
import app_activities
import app_tools
import toolbox_utils
| 44.601695 | 104 | 0.611565 |
0665edd3f6dbbe3c348e0f2328bbc74630f84ac0 | 481 | py | Python | Traits/manual/this.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | Traits/manual/this.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | Traits/manual/this.py | marshallmcdonnell/interactive_plotting | 35e9a781fa1a7328679794d27e24e194e35c012b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# this.py --- Example of This predefined trait
from traits.api import HasTraits, This
#---------------------------------------
# Extrat
fred = Employee()
mary = Executive()
# The following is OK, because fred's manager can be an
# instance of Employee or any subclass.
fred.manager = mary
# This is also OK, because mary's manager can be an Employee
mary.manager = fred
| 20.041667 | 60 | 0.665281 |
06669e5cbe5823ce5ec6dea9345b3539ee4591b9 | 1,443 | py | Python | two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py | chariotsolutions/aws-examples | 0c0945966f3e1b118ba5db948d5db3e304bc2ac3 | [
"MIT"
] | 6 | 2020-05-20T13:58:35.000Z | 2022-02-04T13:25:05.000Z | two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py | chariotsolutions/aws-examples | 0c0945966f3e1b118ba5db948d5db3e304bc2ac3 | [
"MIT"
] | 1 | 2021-09-02T21:19:10.000Z | 2021-09-02T21:19:10.000Z | two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py | chariotsolutions/aws-examples | 0c0945966f3e1b118ba5db948d5db3e304bc2ac3 | [
"MIT"
] | 3 | 2019-11-14T21:03:15.000Z | 2022-01-17T19:12:02.000Z | import boto3
import json
import logging
import os
bucket = os.environ['UPLOAD_BUCKET']
role_arn = os.environ['ASSUMED_ROLE_ARN']
sts_client = boto3.client('sts')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
| 27.226415 | 105 | 0.546778 |
066b73292bddecff1f5b7768febdea7bae1b1c84 | 173 | py | Python | tests/src/year2017/test_day05b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 10 | 2017-12-11T17:54:52.000Z | 2021-12-09T20:16:30.000Z | tests/src/year2017/test_day05b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 260 | 2015-12-09T11:03:03.000Z | 2021-12-12T14:32:23.000Z | tests/src/year2017/test_day05b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | null | null | null | """2017 - Day 5 Part 2: A Maze of Twisty Trampolines, All Alike tests."""
from src.year2017.day05b import solve
| 24.714286 | 73 | 0.676301 |
066ba314a42c5ac31373da270a9eb728c97a0653 | 1,374 | py | Python | tests/test_signal.py | etcd/manticore | 87073d9985c4ca445217b7b135a6af0a51044b21 | [
"Apache-2.0"
] | null | null | null | tests/test_signal.py | etcd/manticore | 87073d9985c4ca445217b7b135a6af0a51044b21 | [
"Apache-2.0"
] | null | null | null | tests/test_signal.py | etcd/manticore | 87073d9985c4ca445217b7b135a6af0a51044b21 | [
"Apache-2.0"
] | 1 | 2021-12-26T12:57:01.000Z | 2021-12-26T12:57:01.000Z |
import unittest
from manticore.utils.event import Signal
| 19.628571 | 56 | 0.569869 |
066e5fb8233dc5224d22ebf3b89a9a83782274aa | 745 | py | Python | TITADOweb/web/migrations/0005_passwordresetcodes.py | KomeilParseh/TITA-DO | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 9 | 2020-08-27T10:10:11.000Z | 2021-04-21T04:46:15.000Z | TITADOweb/web/migrations/0005_passwordresetcodes.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T12:09:57.000Z | 2021-01-05T09:29:19.000Z | TITADOweb/web/migrations/0005_passwordresetcodes.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T10:10:18.000Z | 2021-01-01T06:20:20.000Z | # Generated by Django 3.1.4 on 2020-12-27 17:34
from django.db import migrations, models
| 29.8 | 114 | 0.561074 |
0672220769ef18bb8f7d78e648bf612a87c0cd49 | 253 | py | Python | setup.py | SodakDoubleD/dbprime | 76d2824adbe0f10d6ad04a5607a07f36874389c7 | [
"MIT"
] | null | null | null | setup.py | SodakDoubleD/dbprime | 76d2824adbe0f10d6ad04a5607a07f36874389c7 | [
"MIT"
] | null | null | null | setup.py | SodakDoubleD/dbprime | 76d2824adbe0f10d6ad04a5607a07f36874389c7 | [
"MIT"
] | null | null | null | from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="dbprime",
version="0.1dev",
author="Dalton Dirkson",
author_email="sodakdoubled@gmail.com",
packages=["dbprime",],
)
| 19.461538 | 42 | 0.652174 |
0672274e210ffb823f4cb0faec6bba2fb13a9739 | 2,491 | py | Python | 008.py | ThomasB123/Project-Euler | ca6786513f210e79fe55417ed43797ffb24610af | [
"MIT"
] | null | null | null | 008.py | ThomasB123/Project-Euler | ca6786513f210e79fe55417ed43797ffb24610af | [
"MIT"
] | null | null | null | 008.py | ThomasB123/Project-Euler | ca6786513f210e79fe55417ed43797ffb24610af | [
"MIT"
] | null | null | null | # Largest product in a series
'''
The four adjacent digits in the 1000-digit number that have the greatest product are 9 9 8 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
'''
# Answer = 23514624000
number = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'

# Slide a 13-digit window across the 1000-digit number, compute each
# window's digit product, and report the largest one.
window_size = 13
window_products = []
for start in range(len(number) - window_size + 1):
    product = 1
    for digit in number[start:start + window_size]:
        product *= int(digit)
    window_products.append(product)
greatest = max(window_products)
print(greatest)
067270cf798fc12d58fd8f1dd276c3807b8272a4 | 4,102 | py | Python | tfsnippet/utils/misc.py | Feng37/tfsnippet | 70c7dc5c8c8f6314f9d9e44697f90068417db5cd | [
"MIT"
] | null | null | null | tfsnippet/utils/misc.py | Feng37/tfsnippet | 70c7dc5c8c8f6314f9d9e44697f90068417db5cd | [
"MIT"
] | null | null | null | tfsnippet/utils/misc.py | Feng37/tfsnippet | 70c7dc5c8c8f6314f9d9e44697f90068417db5cd | [
"MIT"
] | null | null | null | import os
import re
from contextlib import contextmanager
import numpy as np
import six
__all__ = ['humanize_duration', 'camel_to_underscore', 'NOT_SET',
'cached_property', 'clear_cached_property', 'maybe_close',
'iter_files']
def humanize_duration(seconds):
    """
    Format specified time duration as human readable text.

    Negative durations are rendered as positive values followed by
    " ago"; sub-epsilon durations collapse to "0 sec".

    Args:
        seconds: Number of seconds of the time duration.

    Returns:
        str: The formatted time duration.
    """
    suffix = ''
    if seconds < 0:
        seconds = -seconds
        suffix = ' ago'

    pieces = []
    # Peel off whole days, hours and minutes, largest unit first.
    for unit_size, unit_name in ((86400, 'day'), (3600, 'hr'), (60, 'min')):
        count = int(seconds // unit_size)
        if count > 0:
            plural = 's' if count > 1 else ''
            pieces.append('{:d} {}{}'.format(count, unit_name, plural))
            seconds %= unit_size

    # Whatever remains (possibly fractional) is reported as seconds.
    if seconds > np.finfo(np.float64).eps:
        plural = 's' if seconds > 1 else ''
        pieces.append('{:.4g} sec{}'.format(seconds, plural))
    elif not pieces:
        pieces.append('0 sec')

    return ' '.join(pieces) + suffix
def camel_to_underscore(name):
    """Convert a camel-case name to underscore."""
    # First break capitalized words ("Xyz") off their predecessor, then
    # split remaining lower/digit-to-upper boundaries, and lowercase.
    partially_split = CAMEL_TO_UNDERSCORE_S1.sub(r'\1_\2', name)
    fully_split = CAMEL_TO_UNDERSCORE_S2.sub(r'\1_\2', partially_split)
    return fully_split.lower()


# A capitalized word ("Abc") preceded by anything but an underscore.
CAMEL_TO_UNDERSCORE_S1 = re.compile('([^_])([A-Z][a-z]+)')
# A lowercase letter or digit directly followed by an uppercase letter.
CAMEL_TO_UNDERSCORE_S2 = re.compile('([a-z0-9])([A-Z])')
NOT_SET = NotSet()
def cached_property(cache_key):
    """
    Decorator to cache the return value of an instance property.

    .. code-block:: python

        class MyClass(object):

            @cached_property('_cached_property'):
            def cached_property(self):
                return ...

        # usage
        o = MyClass()
        print(o.cached_property)  # fetch the cached value

    Args:
        cache_key (str): Attribute name to store the cached value.
    """
    # NOTE(review): the inner decorator function ``wrapper`` is never
    # defined in this copy of the file — its definition appears to have
    # been lost in extraction, so as written this raises NameError.
    # Restore the wrapper that checks ``cache_key`` on the instance
    # before computing and storing the property value.
    return wrapper
def clear_cached_property(instance, cache_key):
    """
    Clear the cached values of specified property.

    Deleting is skipped when nothing was cached, making this a no-op for
    properties that were never accessed.

    Args:
        instance: The owner instance of the cached property.
        cache_key (str): Attribute name to store the cached value.
    """
    value_is_cached = hasattr(instance, cache_key)
    if value_is_cached:
        delattr(instance, cache_key)
| 25.962025 | 73 | 0.573623 |
0673b5944cf3b730042b94eae2844b3646f79c99 | 54,598 | py | Python | spaic/Backend/Backend.py | ZhejianglabNCRC/SPAIC | 5a08328adcc5a197316cf353746bae7ab6865337 | [
"Apache-2.0"
] | 3 | 2022-03-01T03:04:25.000Z | 2022-03-01T03:07:15.000Z | spaic/Backend/Backend.py | ZhejianglabNCRC/SPAIC | 5a08328adcc5a197316cf353746bae7ab6865337 | [
"Apache-2.0"
] | null | null | null | spaic/Backend/Backend.py | ZhejianglabNCRC/SPAIC | 5a08328adcc5a197316cf353746bae7ab6865337 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2020/8/6
@project: SPAIC
@filename: Backend
@author: Hong Chaofei
@contact: hongchf@gmail.com
@description:
backend Pytorch, Tensorflow, CUDA, Euler, 2 Runge-Kutta
"""
from abc import abstractmethod, ABC
from collections import OrderedDict
from ..Network.BaseModule import BaseModule, VariableAgent
from ..Network.DelayQueue import DelayQueue
import numpy as np
import torch
backends = dict()
# @abstractmethod
# def euler_update(self):
# pass
#
# @abstractmethod
# def rk2_update(self):
# pass
#
# @abstractmethod
# def reset(self, v, v_reset, u_reset, spike):
# '''
# voltage reset
#
# Parameters
# ----------
# v
# v_reset
# u_reset
# spike
#
# Returns
# -------
# v[spike] = v_reset
# v[spike] += u_reset
# '''
#
# @abstractmethod
# def reset_u(self, u, u_reset, spike):
# '''
# recovery reset
#
# Parameters
# ----------
# u
# _reset
# spike
#
# Returns
# -------
# u[spike] = u+u_reset
# '''
# NotImplementedError()
#
# @abstractmethod
# def next_stage(self, x):
# '''
#
# Parameters
# ----------
# x: list
#
# Returns
# -------
# x[index]
# '''
#
# @abstractmethod
# def izh_v(self, v, u, psp):
# '''
#
# Parameters
# ----------
# v: list
# u: list
# psp: list
#
# Returns
# -------
# V=V+dt*(0.04*V^2+5*V+140-U+PSP)
# '''
# NotImplementedError()
#
# @abstractmethod
# def izh_u(self, a, b, v, u):
# '''
#
# Parameters
# ----------
# a: list
# b: list
# u: list
# v: list
#
# Returns
# -------
# U=U+a*(b*V-U)
# '''
# NotImplementedError()
def exp(self, x):
    '''
    Element-wise exponential.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        exp(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: the original instantiated NotImplementedError() without
    # raising it, so an un-overridden stub silently returned None.
    raise NotImplementedError()

def sin(self, x):
    '''
    Element-wise sine.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        sin(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    # (Docstring also corrected: it previously said "exp(x)".)
    raise NotImplementedError()

def cos(self, x):
    '''
    Element-wise cosine.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        cos(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    raise NotImplementedError()

def tan(self, x):
    '''
    Element-wise tangent.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        tan(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    raise NotImplementedError()

def log(self, x):
    '''
    Element-wise natural logarithm.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        log(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    raise NotImplementedError()

def log2(self, x):
    '''
    Element-wise base-2 logarithm.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        log2(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    raise NotImplementedError()

def log10(self, x):
    '''
    Element-wise base-10 logarithm.

    Args:
        x(tensor): an n-dimensional torch.Tensor

    Returns:
        log10(x)

    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bug fix: raise instead of merely constructing the exception.
    raise NotImplementedError()
# class Darwin_Backend(Backend):
#
# def __init__(self):
# super(Darwin_Backend, self).__init__()
# pass
| 36.447263 | 218 | 0.54502 |
0673b6dfdd8c195674ae3591ed3bb93d152c2801 | 1,257 | py | Python | yuz_egitimi.py | mehdikosaca/yuz_tanima | d2d7828a1f5562d21acde3af8df60ec96a88e7c3 | [
"Apache-2.0"
] | 2 | 2021-12-30T06:38:21.000Z | 2021-12-30T06:39:24.000Z | yuz_egitimi.py | mehdikosaca/yuz_tanima | d2d7828a1f5562d21acde3af8df60ec96a88e7c3 | [
"Apache-2.0"
] | null | null | null | yuz_egitimi.py | mehdikosaca/yuz_tanima | d2d7828a1f5562d21acde3af8df60ec96a88e7c3 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from PIL import Image
import os
#Verilerin yolu
path = "veriseti"
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
#imajlarn alnmas ve etiketlenmesi iin fonksiyon
print("\n [INFO] yzler eitiliyor. Birka saniye bekleyin...")
yuzler, ids = getImageAndLabels(path)
recognizer.train(yuzler,np.array(ids))
#Modeli eitim/eitim dosyasna kaydet
recognizer.write("egitim/egitim.yml") #Dikkat! recognizer.save() Raspberry Pi zerinde almyor
#Eitilen yz saysn gster ve kodu sonlandr
print(f"\n [INFO] {len(np.unique(ids))} yz eitildi. Betik sonlandrlyor...")
print(yuzler)
| 36.970588 | 97 | 0.706444 |
06743547989129e1af7ae30ff01eaf04b4056ad2 | 1,846 | py | Python | hello.py | jferroaq/Tarea7z | 013f1f1e8dc3b631be102d6e5731d2ffdffd3657 | [
"Apache-2.0"
] | null | null | null | hello.py | jferroaq/Tarea7z | 013f1f1e8dc3b631be102d6e5731d2ffdffd3657 | [
"Apache-2.0"
] | null | null | null | hello.py | jferroaq/Tarea7z | 013f1f1e8dc3b631be102d6e5731d2ffdffd3657 | [
"Apache-2.0"
] | null | null | null | import kivy
from kivy.app import App
from kivy.uix.button import Label
from kivy.uix.colorpicker import ColorPicker
from kivy.graphics import Color, Ellipse, Triangle
from kivy.properties import StringProperty, ObjectProperty
if __name__ in ["__main__", "__android__"]:
SaludoApp().run()
| 25.287671 | 65 | 0.62026 |
0674d6e58cd606f3c44fa44647eb41365904b800 | 356 | py | Python | mundo-02/aula13-ex054.py | fabiocoutoaraujo/CursoVideoPython | 7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf | [
"MIT"
] | 1 | 2020-04-18T16:39:23.000Z | 2020-04-18T16:39:23.000Z | mundo-02/aula13-ex054.py | fabiocoutoaraujo/CursoVideoPython | 7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf | [
"MIT"
] | null | null | null | mundo-02/aula13-ex054.py | fabiocoutoaraujo/CursoVideoPython | 7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf | [
"MIT"
] | null | null | null | from datetime import date
maior = menor = 0
atual = date.today().year
for c in range(1, 8):
nascimento = int(input(f'Em que ano a {c} pessoa nasceu? '))
if atual - nascimento > 20:
maior += 1
else:
menor += 1
print(f'Ao todo, temos {maior} pessoas maiores de idade!')
print(f'Ao todo, temos {menor} pessoas menores de idade!')
| 29.666667 | 65 | 0.63764 |
0675b9a64430a3b476aa0125ccfd22711ba0b255 | 6,356 | py | Python | Contents/Code/zdfneo.py | typekitrel/abctestard | 1df43561327694ba155f513ad152aab51c56ef42 | [
"MIT"
] | null | null | null | Contents/Code/zdfneo.py | typekitrel/abctestard | 1df43561327694ba155f513ad152aab51c56ef42 | [
"MIT"
] | null | null | null | Contents/Code/zdfneo.py | typekitrel/abctestard | 1df43561327694ba155f513ad152aab51c56ef42 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# zdfneo.py - Aufruf durch __init__.py/ZDF_get_content
#
# Die Funktionen dienen zur Auswertung der ZDF-Neo-Seiten
#
Neo_Base = 'https://www.neo-magazin-royale.de'
PREFIX = '/video/ardmediathek2016/zdfneo'
####################################################################################################
#-------------------------
#-------------------------
####################################################################################################
# HTML entities in neo; for the character set see
# http://aurelio.net/bin/python/fix-htmldoc-utf8.py
# HTMLParser() fails on these, hence the manual replacements.
def unescape_neo(line):
    """Replace UTF-8 double-encoded umlaut sequences (mojibake) in *line*.

    NOTE(review): several replacement targets appear as empty strings in
    this copy of the file — the intended non-ASCII replacement characters
    (presumably ö/ä/ü/Ö/Ä/Ü/ß) were likely lost in transit; confirm
    against the original source before relying on the output.
    """
    line_ret = (line.replace("ö", "").replace("ä", "").replace("ü", "")
                .replace("Ã\x96", "").replace("Ã\x84", "").replace("Ã\x9c", "")
                .replace("Ã\x9f", ""))
    return line_ret
| 43.834483 | 112 | 0.660321 |
067c09b6f2a552b9aa72b55a71741cdde3c0cbee | 8,732 | py | Python | luna/mol/atom.py | keiserlab/LUNA | ab2a968550f3f91107f8a91ad00da6aa3e2df68b | [
"MIT"
] | 2 | 2022-03-23T13:34:36.000Z | 2022-03-27T22:21:08.000Z | luna/mol/atom.py | keiserlab/LUNA | ab2a968550f3f91107f8a91ad00da6aa3e2df68b | [
"MIT"
] | 1 | 2022-03-22T19:29:50.000Z | 2022-03-22T19:29:50.000Z | luna/mol/atom.py | keiserlab/LUNA | ab2a968550f3f91107f8a91ad00da6aa3e2df68b | [
"MIT"
] | null | null | null | import numpy as np
from openbabel import openbabel as ob
import logging
logger = logging.getLogger()
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return (self.atomic_num == other.atomic_num
and np.all(self._coord == other._coord)
and self.serial_number == other.serial_number)
return False
def __ne__(self, other):
"""Overrides the default implementation"""
return not self.__eq__(other)
def __hash__(self):
"""Overrides the default implementation"""
return hash((self.atomic_num, tuple(self._coord), self.serial_number))
class ExtendedAtom:
"""Extend :class:`~luna.MyBio.PDB.Atom.Atom` with additional properties and methods.
Parameters
----------
atom : :class:`~luna.MyBio.PDB.Atom.Atom`
An atom.
nb_info : iterable of `AtomData`, optional
A sequence of `AtomData` containing information about atoms covalently bound to ``atom``.
atm_grps : iterable of :class:`~luna.groups.AtomGroup`, optional
A sequence of atom groups that contain ``atom``.
invariants : list or tuple, optional
Atomic invariants.
"""
def add_nb_info(self, nb_info):
""" Add `AtomData` objects to ``neighbors_info``."""
self._nb_info = list(set(self._nb_info + list(nb_info)))
def add_atm_grps(self, atm_grps):
""" Add :class:`~luna.groups.AtomGroup` objects to ``atm_grps``."""
self._atm_grps = list(set(self._atm_grps + list(atm_grps)))
def remove_nb_info(self, nb_info):
""" Remove `AtomData` objects from ``neighbors_info``."""
self._nb_info = list(set(self._nb_info) - set(nb_info))
def remove_atm_grps(self, atm_grps):
""" Remove :class:`~luna.groups.AtomGroup` objects from ``atm_grps``."""
self._atm_grps = list(set(self._atm_grps) - set(atm_grps))
def get_neighbor_info(self, atom):
"""Get information from a covalently bound atom."""
for info in self._nb_info:
if atom.serial_number == info.serial_number:
return info
def is_neighbor(self, atom):
"""Check if a given atom is covalently bound to it."""
return atom.serial_number in [i.serial_number for i in self._nb_info]
def as_json(self):
"""Represent the atom as a dict containing the structure id, model id,
chain id, residue name, residue id, and atom name.
The dict is defined as follows:
* ``pdb_id`` (str): structure id;
* ``model`` (str): model id;
* ``chain`` (str): chain id;
* ``res_name`` (str): residue name;
* ``res_id`` (tuple): residue id (hetflag, sequence identifier, insertion code);
* ``name`` (tuple): atom name (atom name, alternate location).
"""
full_id = self.get_full_id()
return {"pdb_id": full_id[0],
"model": full_id[1],
"chain": full_id[2],
"res_name": self.parent.resname,
"res_id": full_id[3],
"name": full_id[4]}
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return self.full_atom_name == other.full_atom_name
return False
def __ne__(self, other):
"""Overrides the default implementation"""
return not self.__eq__(other)
def __hash__(self):
"""Overrides the default implementation"""
return hash(self.full_atom_name)
| 34.928 | 122 | 0.61475 |
067d4e2d3158aba74160b531385178fe32b82215 | 1,379 | py | Python | src/cogs/example_cog.py | Abaan404/MagmaBot | 2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e | [
"MIT"
] | 1 | 2021-10-03T21:05:45.000Z | 2021-10-03T21:05:45.000Z | src/cogs/example_cog.py | Abaan404/MagmaBot | 2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e | [
"MIT"
] | null | null | null | src/cogs/example_cog.py | Abaan404/MagmaBot | 2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e | [
"MIT"
] | null | null | null | import discord, itertools
from discord.ext import commands, tasks
# Lava is not allowed to change the first text
PRESENCE_TEXT = itertools.cycle(["lava is cute", "*pushes you against wall* wanna play fortnite amongus?", "with ur mum", "owo.exe", "dangit jelly", "gewrhgkhewghkhfuckoiyo5uo", "MiEWcWAFT?? OWOWO"])
| 35.358974 | 199 | 0.645395 |
067de1744accb6c29040ae07655261d1c6513b87 | 33,808 | py | Python | Public_accessV3.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | 1 | 2021-03-31T00:35:00.000Z | 2021-03-31T00:35:00.000Z | Public_accessV3.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | null | null | null | Public_accessV3.py | nismod/natural-capital-mapping | f388fee3c2592bf99eb628b4d4743bf9be3d4346 | [
"MIT"
] | 2 | 2020-01-30T09:40:36.000Z | 2021-04-16T09:36:40.000Z | # Aim is to apply a multiplier to the natural capital scores to reflect the degree of public access
# Challenge is that it is difficult to clip or intersect the complex public access layer with the large and
# detailed OSMM-based base map - it takes days to run and then fails.
# So here we extract a subset of the base map that excludes gardens and manmade features, to cut the processing load.
# Create a public access layer from component datasets and set up a multiplier for recreation
# Intersect the public access layer with the subset and merge back into the base map
# A separate multiplier can then be applied to all gardens to reflect their private value if required
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
# Check out the Spatial Analyst extension licence before any geoprocessing runs.
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
# Geoprocessing environment: allow outputs to replace same-named datasets.
arcpy.env.overwriteOutput = True # Overwrites files
arcpy.env.qualifiedFieldNames = False # Joined fields will be exported without the join table name
# Coordinate tolerance applied by geometry operations (metres).
arcpy.env.XYTolerance = "0.001 Meters"
# *** Enter parameters
# --------------------
# region = "Arc"
# region = "Oxon"
region = "NP"
# Choice of method that has been used to generate the input files - this determines location and names of input files
method = "CROME_PHI"
# method = "LERC"
# method = "HLU"
if region == "Oxon" and method == "HLU":
gdbs = [r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"]
region_boundary = "Oxfordshire"
boundary = "Oxfordshire"
base_map = "OSMM_HLU_CR_ALC_Des_GS"
area_tag = "Oxon"
hab_field = "Interpreted_habitat"
# Name of OSMM fields used for interpretation
MakeField = "Make"
DescGroup = "DescriptiveGroup"
DescTerm = "DescriptiveTerm"
delete_1 = True
elif region == "Arc" or region == "NP" or (region == "Oxon" and method == "CROME_PHI"):
if region == "NP":
folder = r"M:\urban_development_natural_capital"
region_boundary = os.path.join(folder, "Data.gdb\NP_boundary")
else:
folder = r"D:\cenv0389\OxCamArc\NatCap_Arc_FreeData"
region_boundary = os.path.join(folder, "Arc_outline.shp")
arcpy.env.workspace = folder
if region == "Arc":
gdbs = arcpy.ListWorkspaces("*", "FileGDB")
# Or comment out previous line and use this format (one row per gdb) if repeating certain gdbs only
# gdbs = []
# gdbs.append(os.path.join(folder, "AylesburyVale.gdb"))
# gdbs.append(os.path.join(folder, "Chiltern.gdb"))
# gdbs.append(os.path.join(folder, "SouthOxfordshire.gdb"))
area_tag = "Arc"
elif region == "NP":
# Remember Leeds not in the list below because already done
# "Allerdale.gdb", "Barnsley.gdb", "Barrow-in-Furness.gdb", "Blackburn with Darwen.gdb", "Blackpool.gdb",
# "Bolton.gdb", "Bradford.gdb", "Burnley.gdb", "Bury.gdb", "Calderdale.gdb", "Carlisle.gdb",
# "Cheshire East.gdb", "Cheshire West and Chester.gdb", "Chorley.gdb", "Copeland.gdb", "County Durham.gdb",
# "Craven.gdb", "Darlington.gdb", "Doncaster.gdb",
# "East Riding of Yorkshire.gdb", "Eden.gdb", "Fylde.gdb", "Gateshead.gdb",
# "Halton.gdb", "Hambleton.gdb", "Harrogate.gdb", "Hartlepool.gdb", "Hyndburn.gdb", "Kirklees.gdb", "Knowsley.gdb",
# "Lancaster.gdb", "Liverpool.gdb", "Manchester.gdb", "Middlesbrough.gdb", "Newcastle upon Tyne.gdb",
# "North East Lincolnshire.gdb", "North Lincolnshire.gdb", "Northumberland.gdb", "North Tyneside.gdb", "Oldham.gdb",
# "Pendle.gdb", "Preston.gdb", "Redcar and Cleveland.gdb", "Ribble Valley.gdb",
# "Richmondshire.gdb", "Rochdale.gdb", "Rossendale.gdb", "Rotherham.gdb", "Ryedale.gdb", "Salford.gdb",
# "Scarborough.gdb", "Sefton.gdb", "Selby.gdb", "Sheffield.gdb", "South Lakeland.gdb", "South Ribble.gdb",
# "South Tyneside.gdb", "St Helens.gdb", "Stockport.gdb", "Stockton-on-Tees.gdb", "Sunderland.gdb",
# "Tameside.gdb", "Trafford.gdb", "Wakefield.gdb", "Warrington.gdb", "West Lancashire.gdb",
# "Wigan.gdb", "Wirral.gdb", "Wyre.gdb", "York.gdb"
gdb_names = ["East Riding of Yorkshire.gdb"]
gdbs = []
for gdb_name in gdb_names:
gdbs.append(os.path.join(r"M:\urban_development_natural_capital\LADs", gdb_name.replace(" ", "")))
area_tag = "NP"
elif region == "Oxon":
gdbs = []
LADs = ["Cherwell.gdb", "Oxford.gdb", "SouthOxfordshire.gdb", "ValeofWhiteHorse.gdb", "WestOxfordshire.gdb"]
for LAD in LADs:
gdbs.append(os.path.join(folder, LAD))
boundary = "boundary"
if method == "LERC":
base_map = "LERC_ALC_Desig_GS"
# Name of OSMM fields used for interpretation
MakeField = "make"
DescGroup = "DescGroup"
DescTerm = "DescTerm"
# Do not tidy up by deleting fields containing the string "_1" as there are lots we want to keep in this dataset!
delete_1 = False
# Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
keep_fcs = ["boundary", "Designations", "LERC", "LERC_ALC", "LERC_ALC_Desig", "LERC_ALC_Desig_GS",
"LERC_ALC_Desig_GS_PA", "OS_Open_GS", "OS_Open_GS_clip", "OSGS", "New_snap_union_sp_delid_elim_del", "Public_access"]
else:
base_map = "OSMM_CR_PHI_ALC_Desig_GS"
# Name of OSMM fields used for interpretation
if region == "NP":
MakeField = "make"
DescGroup = "descriptivegroup"
DescTerm = "descriptiveterm"
else:
MakeField = "Make"
DescGroup = "DescriptiveGroup"
DescTerm = "DescriptiveTerm"
delete_1 = True
# Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
keep_fcs = ["ALC_diss_Union", "boundary", "Designations", "LCM_arable", "LCM_improved_grassland",
"OS_Open_GS", "OS_Open_GS_clip", "OSGS",
"OSMM", "OSMM_CROME", "OSMM_CROME_PHI", "OSMM_CR_PHI_ALC", "OSMM_CR_PHI_ALC_Desig",
"OSMM_CR_PHI_ALC_Desig_GS", "OSMM_CR_PHI_ALC_Desig_GS_PA", "PHI", "Public_access"]
hab_field = "Interpreted_habitat"
# Source of public access data and gdb where public access layer will be created
if region == "Oxon":
data_gdb = r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"
elif region == "Arc":
data_gdb = r"D:\cenv0389\Oxon_GIS\OxCamArc\Data\Public_access.gdb"
elif region == "NP":
data_gdb = r"M:\urban_development_natural_capital\Public_access.gdb"
# Do not delete fid field at end (when all other surplus fields are deleted) as this is now the new name for TOID
protected_fields = ["fid"]
des_list = ['CountryPk', 'NT', 'NNR', 'LNR', 'DoorstepGn', 'MillenGn', 'RSPB']
des_list_expression = "(((CountryPk + NT + NNR + LNR + MillenGn + DoorstepGn + RSPB) = 0) OR " \
"(CountryPk IS NULL AND NT IS NULL AND NNR IS NULL AND LNR IS NULL AND MillenGn IS NULL AND DoorstepGn IS " \
"NULL AND RSPB IS NULL))"
# Table containing info for each input layer - user needs to set it up. Note: we also use OS Greenspace, OS Open Greenspace and
# various designations (e.g. nature reserves), but these are already merged into the base map so do not need to be listed in the info table.
InfoTable = os.path.join(data_gdb, "PublicAccessFiles")
AccessTable_name = "AccessMultipliers"
AccessTable = os.path.join(data_gdb, "AccessMultipliers")
# Buffer distance for paths
buffer_distance = "50 Meters"
# Need to dissolve all paths into a single buffer area if networks are complex, otherwise the process may crash
dissolve_paths = True
# Which stages of the process do we want to run? Useful for debugging or updates
create_access_layer = False
# These four stages will only be run if create_access_layer is True
prep_OSM_paths = True
clip_region = True
buffer_paths = True
merge_paths = True
clip_PA_into_LAD_gdb = True # Do not use this if the public access layer is made in the same gdb
extract_relevant_polygons = True
intersect_access = True
# *** note there is currently a temporary correction in the code here that needs to be removed in due course!
NT_correction = True # CORRECTION for Northern Powerhouse only
sp_and_repair = True
interpret_access = True
tidy_fields = True
# Recommend not using tidy_workspace here but using the separate code Delete_fcs_from_gdb instead - it is safer!
# if method == "CROME_PHI" or method == "LERC":
# tidy_workspace = False # DO NOT USE THIS FOR OXON HLU method!! It is not set up yet.
# else:
# tidy_workspace = False
# *** End of parameter entry
# --------------------------
if create_access_layer:
# Create public access layer by merging multiple input files, reading info from a table
# Linear features (paths, cycle routes) are converted to a 50m buffer zone
# Set up Type, Description and Name field for each file, reading info from InfoTable, and populate by copying existing relevant fields
arcpy.env.workspace = data_gdb
InAreas = []
InPaths = []
ipath = 0
# First loop through to find max length for Name and Description fields
max_NameLen = 0
max_DescLen = 0
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
if dissolve_paths == False or (dissolve_paths == True and row.getValue("Path") == 0):
DescLen = row.getValue("DescLength")
if DescLen > max_DescLen:
max_DescLen = DescLen
NameLen = row.getValue("NameLength")
if NameLen > max_NameLen:
max_NameLen = NameLen
# Deal with paths first.
# If we are dissolving paths, merge all the path input line files first
if dissolve_paths:
if merge_paths:
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
if row.getValue("Path") == 1:
in_file = row.getValue("Filename")
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
if area_tag <> "":
in_file = in_file + "_" + area_tag
InPaths.append(in_file)
print "Merging paths"
arcpy.Merge_management(InPaths, "Paths_merge")
print("Buffering and dissolving merged paths")
arcpy.Buffer_analysis("Paths_merge", "Paths_merge_buffer", buffer_distance, dissolve_option="ALL")
# Add PAType
print("Adding Type field")
MyFunctions.check_and_add_field("Paths_merge_buffer", "PAType", "TEXT", 50)
arcpy.CalculateField_management("Paths_merge_buffer", "PAType", "'Path'", "PYTHON_9.3")
arcpy.MultipartToSinglepart_management("Paths_merge_buffer", "Paths_merge_buffer_sp")
# Now loop through the other areas (and paths if keeping separate) to set up the Type, Description and Name fields
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
exit_flag = False
in_file = row.getValue("Filename")
ShortName = row.getValue("ShortName")
print("Processing " + ShortName)
Type = row.getValue("Type")
Path = row.getValue("Path")
NameField = row.getValue("NameField")
DescField = row.getValue("DescField")
if Path == 1:
if dissolve_paths:
exit_flag = True
else:
exit_flag = False
if exit_flag == False:
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
if area_tag <> "":
in_file = in_file + "_" + area_tag
if Path == 1:
if buffer_paths:
print("Buffering " + in_file)
arcpy.Buffer_analysis(in_file, in_file + "_buffer", buffer_distance, dissolve_option="NONE")
in_file = in_file + "_buffer"
MyFunctions.check_and_repair(in_file)
print("Adding Type field")
MyFunctions.check_and_add_field(in_file, "PAType", "TEXT", 50)
arcpy.CalculateField_management(in_file, "PAType", "'" + Type + "'", "PYTHON_9.3")
if DescField:
if max_DescLen <= 40:
max_DescLen = 40
print("Adding Description field")
MyFunctions.check_and_add_field(in_file, "PADescription", "TEXT", max_DescLen)
arcpy.CalculateField_management(in_file, "PADescription", "!" + DescField + "!", "PYTHON_9.3")
if NameField:
print("Adding Name field")
MyFunctions.check_and_add_field(in_file, "PAName", "TEXT", max_NameLen)
arcpy.CalculateField_management(in_file, "PAName", "!" + NameField + "!", "PYTHON_9.3")
# Delete fields that are not needed
needed_fields = ["PAType", "PADescription", "PAName"]
MyFunctions.delete_fields(in_file, needed_fields, in_file + "_input")
if Path:
# If this is not the first path dataset, erase it from the others and then append. This way we should avoid overlaps,
# provided that paths have been dissolved (as delete_identical method may not work for very large and complex layers
# with lots of overlaps).
if ipath == 1:
arcpy.CopyFeatures_management(in_file + "_input", "Access_paths_merge_1")
elif ipath > 1:
print ("Erasing " + in_file + "_input from merged paths")
try:
arcpy.Erase_analysis("Access_paths_merge_" + str(ipath-1), in_file + "_input", "Access_paths_merge_" + str(ipath))
except:
print("Erase failed - please try manually in ArcMap and then comment out this section and restart")
exit()
print ("Appending " + in_file + "_input to merged paths")
arcpy.Append_management(["Access_paths_merge_1" + str(ipath)], in_file + "_input", "NO_TEST")
else:
# Check for any duplicate polygons
arcpy.FindIdentical_management(in_file + "_input", "Identical_" + in_file, ["Shape"], output_record_option="ONLY_DUPLICATES")
numrows = arcpy.GetCount_management("Identical_" + in_file)
if numrows>0:
print ("Warning - " + str(numrows) + " duplicate polygons found in " + in_file +
"_input. All but one of each shape will be deleted.")
arcpy.DeleteIdentical_management(in_file + "_input", ["Shape"])
InAreas.append(in_file + "_input")
print("Merging areas: " + ', '.join(InAreas))
arcpy.Merge_management(InAreas, "Access_areas_merge")
# Need to convert merged paths to single part otherwise it crashes
print ("Converting merged paths to single part")
if not dissolve_paths:
arcpy.MultipartToSinglepart_management("Access_paths_merge_" + str(ipath), "Paths_merge_buffer_sp")
MyFunctions.check_and_repair("Paths_merge_buffer_sp")
# Erase any paths that are within the accessible areas or private (military) areas, to reduce the complexity of the merged shapes
print ("Erasing paths within areas")
arcpy.Merge_management(["Access_areas_merge", "OSM_military"], "Access_areas_to_erase")
print " Buffering and dissolving areas to erase (to remove internal slivers and simplify shapes)"
arcpy.Buffer_analysis("Access_areas_to_erase", "Access_areas_to_erase_buff_diss", "1 Meters", dissolve_option="ALL")
print " Converting to single part"
arcpy.MultipartToSinglepart_management("Access_areas_to_erase_buff_diss", "Access_areas_to_erase_buff_diss_sp")
MyFunctions.check_and_repair("Access_areas_to_erase_buff_diss_sp")
print " Erasing..."
try:
arcpy.Erase_analysis("Paths_merge_buffer_sp", "Access_areas_to_erase_buff_diss_sp", "Access_paths_erase")
except:
print("Erase failed but will probably work manually in ArcGIS. Please try this and then restart, commenting out previous steps")
exit()
print ("Merging paths and areas")
arcpy.Merge_management(["Access_areas_merge", "Access_paths_erase"], "Access_merge")
print("After merge there are " + str(arcpy.GetCount_management("Access_merge")) + " rows")
print ("Dissolving - retaining type, name and description")
arcpy.Dissolve_management("Access_merge", "Access_merge_diss", ["PAType", "PADescription", "PAName"], multi_part="SINGLE_PART")
print ("Unioning as first step to removing overlaps")
try:
arcpy.Union_analysis([["Access_merge_diss", 1]], "Access_merge_union", "NO_FID")
except:
print ("Union failed. Please do manually then comment out preceding steps and restart.")
exit()
print("After union there are " + str(arcpy.GetCount_management("Access_merge_union")) + " rows")
# If description is blank, fill in with Type
print ("Filling in missing Descriptions")
arcpy.MakeFeatureLayer_management("Access_merge_union", "join_lyr")
arcpy.SelectLayerByAttribute_management("join_lyr", where_clause="PADescription IS NULL OR PADescription = ''")
arcpy.CalculateField_management("join_lyr", "PADescription", "!PAType!", "PYTHON_9.3")
arcpy.Delete_management("join_lyr")
# Set up Access multiplier based on Type and Description (join to Access table then copy over source, type and multiplier)
print ("Joining to access multiplier")
MyFunctions.check_and_add_field("Access_merge_union", "Source", "TEXT", 30)
MyFunctions.check_and_add_field("Access_merge_union", "AccessType", "TEXT", 30)
MyFunctions.check_and_add_field("Access_merge_union", "AccessMult", "FLOAT", 0)
arcpy.MakeFeatureLayer_management("Access_merge_union", "join_lyr2")
print ("Adding join")
arcpy.AddJoin_management("join_lyr2", "PADescription", AccessTable, "Description", "KEEP_ALL")
print("Copying source field")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.Source", "!" + AccessTable_name + ".Source!", "PYTHON_9.3")
print ("Copying access type")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
print ("Copying access multiplier")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("join_lyr2", AccessTable_name)
arcpy.Delete_management("join_lyr2")
print("Sorting " + str(arcpy.GetCount_management("Access_merge_union")) + " rows")
# Sort by access multiplier (descending) so highest multipliers are at the top
arcpy.Sort_management("Access_merge_union", "Access_merge_union_sort", [["AccessMult", "DESCENDING"]])
# Delete identical polygons to remove overlaps but leave the highest access score. For complex path networks this may fail, so
# dissolve paths and then do this step only for areas, not paths
print ("Deleting overlapping polygons, keeping the one with the highest access score")
arcpy.MakeFeatureLayer_management("Access_merge_union_sort", "del_lyr")
if dissolve_paths:
arcpy.SelectLayerByAttribute_management("del_lyr", where_clause="AccessType <> 'Path'")
arcpy.DeleteIdentical_management("del_lyr", ["Shape"])
print("After deleting identical polygons there are " + str(arcpy.GetCount_management("Access_merge_union_sort")) + " rows")
arcpy.Delete_management("del_lyr")
print ("Dissolving")
dissolve_fields = ["PAType", "PADescription", "PAName", "Source", "AccessType", "AccessMult"]
arcpy.Dissolve_management("Access_merge_union_sort","Access_merge_union_sort_diss", dissolve_field=dissolve_fields)
print("After dissolving there are " + str(arcpy.GetCount_management("Access_merge_union_sort_diss")) + " rows")
arcpy.MultipartToSinglepart_management("Access_merge_union_sort_diss", "Public_access")
print("After converting to single part there are " + str(arcpy.GetCount_management("Public_access")) + " rows")
MyFunctions.check_and_repair("Public_access")
for gdb in gdbs:
arcpy.env.workspace = gdb
numrows = arcpy.GetCount_management(os.path.join(gdb, base_map))
print (''.join(["### Started processing ", gdb, " on ", time.ctime(), ": ", str(numrows), " rows"]))
if clip_PA_into_LAD_gdb:
# Use this to clip the master copy of the public access layer into each LAD gdb.
print(" Clipping public access layer")
PA_layer = os.path.join(data_gdb, "Public_access")
arcpy.Clip_analysis(PA_layer, boundary, "Public_access")
if extract_relevant_polygons:
# Select base map polygons that are not 'Manmade' or 'Garden', green space or designated as accessible types, and export to new file
print (" Extracting polygons that are not gardens or manmade and have no relevant greenspace or designation attributes")
arcpy.MakeFeatureLayer_management(base_map, "sel_lyr")
# There was an error here: Amenity grassland had an underscore between the words so would not have been excluded as intended.
# Fixed on 1/10/2020. This will have affected all the work done for Blenheim and EA Arc, and updated Oxon map sent to
# Nick and Mel end Sept 2020. But makes no difference? Because it simply added either Open or Path
# to amenity grassland not in Greenspace (rather than leaving it out), which is later over-ridden to Open for all amenity grassland.
expression = hab_field + " NOT IN ('Garden', 'Amenity grassland') AND " + MakeField + " <> 'Manmade' AND " \
"(GreenSpace IS NULL OR GreenSpace = '') AND " + des_list_expression
arcpy.SelectLayerByAttribute_management("sel_lyr", where_clause=expression)
arcpy.CopyFeatures_management("sel_lyr", "Natural_features")
arcpy.Delete_management("sel_lyr")
if intersect_access:
print (" Erasing and deleting existing greenspace from access layer, to reduce slivers")
arcpy.MakeFeatureLayer_management("Public_access", "del_lyr")
expression = "PADescription = 'country_park' OR PADescription = 'millennium_green' OR PADescription = 'doorstep_green'"
arcpy.SelectLayerByAttribute_management("del_lyr", where_clause=expression)
arcpy.DeleteFeatures_management("del_lyr")
arcpy.Delete_management("del_lyr")
arcpy.MakeFeatureLayer_management(base_map, "sel_lyr2")
expression = "GreenSpace IS NOT NULL AND GreenSpace <> ''"
arcpy.SelectLayerByAttribute_management("sel_lyr2", where_clause=expression)
arcpy.Erase_analysis("Public_access", "sel_lyr2", "Public_access_erase", cluster_tolerance="0.001 Meters")
print (" Deleting slivers")
arcpy.MultipartToSinglepart_management("Public_access_erase", "Public_access_erase_sp")
MyFunctions.delete_by_size("Public_access_erase_sp", 20)
print(" Intersect started on " + time.ctime() )
arcpy.Intersect_analysis(["Natural_features", "Public_access_erase_sp"], base_map + "_isect")
print(" Intersect completed on " + time.ctime())
print (" Erasing and merging back in")
arcpy.Erase_analysis(base_map, base_map + "_isect", base_map + "_isect_erase", cluster_tolerance="0.001 Meters" )
arcpy.Merge_management([base_map + "_isect_erase", base_map + "_isect"], base_map + "_merge")
print(" Merge completed on : " + time.ctime())
# *** TEMPORARY Correction for NP because access field was omitted accidentally when I made the designations layer
# if NT_correction and region == "NP":
# # select NT polygons and spatially join to a dataset containing only the NT access description
# print " Correcting by adding in missing NT access field"
# arcpy.MakeFeatureLayer_management(base_map + "_merge", "NT_lyr")
# arcpy.SelectLayerByAttribute_management("NT_lyr", where_clause="NT = 1")
# arcpy.SpatialJoin_analysis("NT_lyr", os.path.join(data_gdb, "NT_access"), "NT_access")
# # delete the NT features from the original file and then append the new spatially joined rows back in
# arcpy.DeleteFeatures_management("NT_lyr")
# arcpy.Delete_management("NT_lyr")
# MyFunctions.check_and_add_field(base_map + "_merge", "NT_desc", "TEXT", 20)
# arcpy.Append_management("NT_access", base_map + "_merge", "NO_TEST")
if sp_and_repair:
# Sort by shape so it displays faster
print(" Converting to single part and sorting")
arcpy.MultipartToSinglepart_management(base_map + "_merge", base_map + "_merge_sp")
arcpy.Sort_management(base_map + "_merge_sp", base_map + "_PA", [["SHAPE", "ASCENDING"]], "PEANO")
print (" Rows have increased from " + str(numrows) + " to " + str(arcpy.GetCount_management(base_map + "_PA")))
# Check and repair geometry
MyFunctions.check_and_repair(base_map + "_PA")
if interpret_access:
print(" Interpreting accessibility")
# Add interpretation for the remaining types of green space
# Amenity grassland - from habitat and/or OSGS 'Amenity - residential and business' - assume all is accessible.
# Hopefully OSGS amenity excludes most amenity associated with large rural houses but keeps urban green spaces that are usually
# accessible by all. Road verges and 'Amenity - transport' currently excluded as they include roundabouts / motorway embankments.
arcpy.MakeFeatureLayer_management(base_map + "_PA", "amenity_lyr")
expression = hab_field + " = 'Amenity grassland' AND (PAType IS NULL OR PAType = '' OR AccessType = 'Path') " \
"AND " + DescGroup + " NOT LIKE '%Rail%'"
arcpy.SelectLayerByAttribute_management("amenity_lyr", where_clause=expression)
arcpy.CalculateField_management("amenity_lyr", "PAType", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "PADescription", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "Source", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "AccessType", "'Open'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "AccessMult", 1.0, "PYTHON_9.3")
# Designated sites, e.g. country parks, millennium and doorstep greens, local and national nature reserves
for designation in des_list:
arcpy.MakeFeatureLayer_management(base_map + "_PA", "des_lyr")
arcpy.SelectLayerByAttribute_management("des_lyr", where_clause=designation + " = 1")
numrows = arcpy.GetCount_management("des_lyr")
print (" Designation: " + designation + " Rows: " + str(numrows))
if numrows >0:
arcpy.CalculateField_management("des_lyr", "PAType", "'" + designation + "'", "PYTHON_9.3")
# Special case for National Trust where description states degree of access
if designation == "NT":
arcpy.CalculateField_management("des_lyr", "PADescription", "!NT_desc!", "PYTHON_9.3")
else:
arcpy.CalculateField_management("des_lyr", "PADescription", "'" + designation + "'", "PYTHON_9.3")
arcpy.AddJoin_management("des_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("des_lyr", "Source", "'Designations'", "PYTHON_9.3")
arcpy.CalculateField_management("des_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("des_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("des_lyr", AccessTable_name)
arcpy.Delete_management("des_lyr")
# Green spaces (from OS green space and OS open green space) - correct for Rail in OSGS Amenity residential
# Exclude National Trust as that has better information on access, so we don't want to overwrite it
# Also exclude arable land (added 4/10/2020 at end of EA work) otherwise incorrect OSGS 'Amenity' over-rides habitat type
print " Interpreting green space"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "sel_lyr4")
expression = hab_field + " NOT IN ('Arable', 'Arable and scattered trees', 'Arable fields, horticulture and temporary grass') "
expression = expression + "AND GreenSpace IS NOT NULL AND GreenSpace <> '' "
expression = expression + "AND " + DescGroup + " NOT LIKE '%Rail%' AND (NT IS NULL OR NT = 0)"
arcpy.SelectLayerByAttribute_management("sel_lyr4", where_clause=expression)
if arcpy.GetCount_management("sel_lyr4") > 0:
arcpy.CalculateField_management("sel_lyr4", "PAType", "!GreenSpace!", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "PADescription", "!GreenSpace!", "PYTHON_9.3")
arcpy.AddJoin_management("sel_lyr4", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("sel_lyr4", "Source", "'GreenSpace'", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("sel_lyr4", AccessTable_name)
arcpy.Delete_management("sel_lyr4")
# Correction for school grounds from OSGS because playing fields were omitted (this will omit non-urban schools not in OSGS)
print " Interpreting schools"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "school_lyr")
arcpy.SelectLayerByAttribute_management("school_lyr", where_clause="OSGS_priFunc = 'School Grounds'")
if arcpy.GetCount_management("school_lyr") > 0:
arcpy.CalculateField_management("school_lyr", "PAType", "'School Grounds'", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "PADescription", "'School Grounds'", "PYTHON_9.3")
arcpy.AddJoin_management("school_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("school_lyr", "Source", "'OSGS'", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("school_lyr", AccessTable_name)
arcpy.Delete_management("school_lyr")
# Add in full accessibility for rivers, lakes, reservoirs, weirs and canals. Correction made 4 Oct 2020.
print " Interpreting water"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "water_lyr")
expression = DescTerm + " IN ('Watercourse', 'Static Water', 'Canal', 'Weir', 'Reservoir')"
arcpy.SelectLayerByAttribute_management("water_lyr", where_clause=expression)
if arcpy.GetCount_management("water_lyr") > 0:
arcpy.CalculateField_management("water_lyr", "PAType", "'Water'", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "PADescription", "'Water'", "PYTHON_9.3")
arcpy.AddJoin_management("water_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("water_lyr", "Source", "'Water'", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("water_lyr", AccessTable_name)
arcpy.Delete_management("water_lyr")
if tidy_fields:
# CAUTION: this deletes any field containing "_1" (if delete_1 is True) as well as those containing _OBJID,
# FID_, _FID, BaseID_, _Area, _Relationship unless in list of protected fields
print("Tidying up surplus attributes")
MyFunctions.tidy_fields(base_map + "_PA", delete_1, protected_fields)
# Recommend using the separate code Delete_fcs_from_gdb instead - it is safer!
# if tidy_workspace and (method == "CROME_PHI" or method == "LERC"): # Not set up yet for Oxon gdb used for HLU method
# print("Tidying workspace")
# fcs = arcpy.ListFeatureClasses("*")
# delete_fcs = []
# for fc in fcs:
# if fc not in keep_fcs and "NatCap_" not in fc:
# delete_fcs.append (fc)
# # print("Deleting " + fc + " from " + gdb)
# if len(delete_fcs) > 0:
# arcpy.Delete_management (fc)
# if len(delete_fcs) > 0:
# print(" Deleted intermediate feature classes: " + ', '.join(delete_fcs))
print(''.join(["Completed " + gdb + " on : ", time.ctime()]))
exit() | 60.479428 | 141 | 0.664458 |
067f96e0223f4a4ef755767aa7893138791277ee | 3,688 | py | Python | old_site/import/scraper.py | basilleaf/dailyrings | e970ee7771bd1fb60b07f8208b2ffc4866779e88 | [
"MIT"
] | null | null | null | old_site/import/scraper.py | basilleaf/dailyrings | e970ee7771bd1fb60b07f8208b2ffc4866779e88 | [
"MIT"
] | 3 | 2018-04-27T04:11:13.000Z | 2018-04-28T01:26:57.000Z | old_site/import/scraper.py | basilleaf/dailyrings | e970ee7771bd1fb60b07f8208b2ffc4866779e88 | [
"MIT"
] | null | null | null | """
this script parses the pds-rings press release gallery tree at
base_url = "http://pds-rings.seti.org/saturn/cassini/"
if an image already exists in the database it is updated
to get only the most recent month set latest_month_only to True
"""
latest_month_only = True # like I was really going to do this monthly
# Set up the Django Enviroment for running as shell script
from django.core.management import setup_environ
import settings
setup_environ(settings)
# script imports
from stripogram import html2text, html2safehtml
from priod.daily_image.models import Image
from HTMLParser import HTMLParser
from urlparse import urlparse
import exceptions, urllib2, re
base_url = "http://pds-rings.seti.org/saturn/cassini/"
# set to strict imports, ie want to know if an url is too long for field
from django.db import connection
cursor = connection.cursor()
cursor.execute("SET SQL_MODE = 'STRICT_ALL_TABLES'")
# get all the monthly gallery pages
print "scanning " + base_url
homepage = urllib2.urlopen(base_url).read()
list_pages = re.findall("HREF=\"([0-9]+-[0-9]+)\.html", homepage)
# get all the detail pages
detail_pages = []
for page_name in list_pages:
print "scanning gallery page " + page_name
list_page = urllib2.urlopen(base_url + page_name + ".html").read()
detail_pages += re.findall("HREF=\"\./(.*)\.html", list_page)
if latest_month_only: break
# scrape each detail page
errors = []
for page_name in detail_pages:
url = base_url + page_name + '.html'
try:
print "opening " + url
page = urllib2.urlopen(url).read()
except HTTPError:
print "failed at " + url
errors += [url]
print "scraping " + url
try:
name,title = re.search("<title>(.*)</title>", page).group(1).split(':')
name = name.strip()
title = title.strip()
more_info = "http://pds-rings.seti.org/saturn/cassini/" + name
caption = re.search("Original Caption Released with Image:(.*)Image Credit:", page, re.DOTALL | re.UNICODE).group(1).strip()
caption = html2safehtml(caption,valid_tags=("p","a","img","br")).strip()
credit = re.search("Image Credit:(.*)<br>", page, re.DOTALL | re.UNICODE).group(1).strip()
credit = html2safehtml(credit,valid_tags=("p","a","img")).strip()
# find images
image_url = re.search("href\t*=\t*\"(.*)\.tif\"", page).group(1)
image_url = urlparse(image_url).netloc
if not image_url: image_url = base_url
else: image_url = 'http://' + image_url + '/'
jpg = 'jpeg/' + name.strip() + '.jpg'
jpg_mod = 'jpegMod/' + name.strip() + '_modest.jpg'
tif = 'tiff/' + name.strip() + '.tif'
except:
errors += ["could not parse " + url]
print "failed " + url
continue
try:
pub_date=Image.objects.get(pk=name).pub_date
user_ordered=Image.objects.get(pk=name).user_ordered
pub_order=Image.objects.get(pk=name).pub_order
except Image.DoesNotExist:
pub_date = None
user_ordered = False
pub_order = None
# update db
image = Image(name=name,title=title,caption=caption,more_info=more_info,credit=credit,image_url=image_url,jpg=jpg,pub_date=pub_date,user_ordered=user_ordered,pub_order=pub_order)
try:
image.save()
print name + " saved \n"
except:
print "failed " + url
errors += ["could not save to db" + url]
print "finished! "
print ""
if len(errors): print "HTTP Errors could not load the following pages\n"
for e in errors:
print e + "\n"
| 33.225225 | 182 | 0.639371 |
068506b54ed89a62c865b814f0418d72003474e6 | 856 | py | Python | packit_dashboard/api/routes.py | lbarcziova/dashboard | 6ad1141a475d68b081a4fa2ceec5363678ae4e38 | [
"MIT"
] | null | null | null | packit_dashboard/api/routes.py | lbarcziova/dashboard | 6ad1141a475d68b081a4fa2ceec5363678ae4e38 | [
"MIT"
] | null | null | null | packit_dashboard/api/routes.py | lbarcziova/dashboard | 6ad1141a475d68b081a4fa2ceec5363678ae4e38 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, request
from packit_dashboard.utils import return_json
from packit_dashboard.config import API_URL
api = Blueprint("api", __name__)
# The react frontend will request information here instead of fetching directly
# from the main API.
# This is because it will be easier to implement caching API requests here.
# (Flask-Caching etc)
| 31.703704 | 79 | 0.731308 |
0687517d0be83d1b02e62dee6d9d45a4dee1e6e1 | 4,959 | py | Python | auth/util.py | prajask/Find-Your-Doctor | a4fc682a1757d83bd62b9daea7476f97db91ffb8 | [
"MIT"
] | 1 | 2020-09-10T10:52:21.000Z | 2020-09-10T10:52:21.000Z | auth/util.py | prajask/find-your-doctor | a4fc682a1757d83bd62b9daea7476f97db91ffb8 | [
"MIT"
] | null | null | null | auth/util.py | prajask/find-your-doctor | a4fc682a1757d83bd62b9daea7476f97db91ffb8 | [
"MIT"
] | null | null | null | """
Authentication Utility Functions
"""
from flask import session
from models import Patient, Doctor, Degree, database
import hashlib, binascii
from config import SECRET_KEY | 32.201299 | 154 | 0.689655 |
06875fe7ae608373332a8fb7606601d9cb556ce3 | 267 | py | Python | cctbx/eltbx/covalent_radii.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | cctbx/eltbx/covalent_radii.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx/eltbx/covalent_radii.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
import boost.python
ext = boost.python.import_ext("cctbx_eltbx_covalent_radii_ext")
from cctbx_eltbx_covalent_radii_ext import *
boost.python.inject(ext.table_iterator, boost.python.py3_make_iterator)
| 38.142857 | 71 | 0.861423 |
068765a77334bb8b750cf60699365fc4fc30858b | 4,004 | py | Python | player.py | akrabio/NBANormalizedStats | de5fb5243e9e26e4d534c319ea984712a908cadc | [
"MIT"
] | null | null | null | player.py | akrabio/NBANormalizedStats | de5fb5243e9e26e4d534c319ea984712a908cadc | [
"MIT"
] | null | null | null | player.py | akrabio/NBANormalizedStats | de5fb5243e9e26e4d534c319ea984712a908cadc | [
"MIT"
] | null | null | null | from nba_py import player
from numpy import mean
import league
from consts import all_time_stats
import consts
name_to_index = {'season': 1, consts.rebounds: 20, consts.assists: 21, consts.steals: 22, consts.blocks: 23, consts.points: 26}
if __name__ == '__main__':
# player = Player('michael', 'jordan')
player = Player('patrick', 'ewing')
print 'Career stats: {}'.format(player.career_stats)
print 'Normalized career stats: {}'.format(player.normalized_career_stats)
# print player.player_stats
| 43.521739 | 130 | 0.623876 |
0687810d3ca357eb81c8f40b9ee9e277ec90842e | 3,668 | py | Python | examples/mag_wmm2015.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 53 | 2018-11-17T03:29:55.000Z | 2022-03-18T02:36:25.000Z | examples/mag_wmm2015.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 3 | 2018-11-28T11:37:51.000Z | 2019-01-30T01:52:45.000Z | examples/mag_wmm2015.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 35 | 2018-11-17T03:29:57.000Z | 2022-03-23T17:57:06.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 18:34:07 2019
WMM2015WMM.cofpy
@author: chens
"""
import numpy as np
from pathlib import Path
import xarray
import ctypes as ct
import sys
import datetime
from matplotlib.pyplot import figure
#libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\wmm15.dll'))
libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\noaa.dll'))
from geoist.others.scidates import datetime2yeardec
dt = datetime.datetime(2012, 7, 12, 12)
print(datetime2yeardec(dt))
mag = noaa(45.5, 105.6, 0.2, datetime2yeardec(dt), mod='emm')
#print(mag.north.item())
#print(mag.east.item())
#print(mag.down.item())
print("F:",mag.total.item()) #F
print("D:",mag.decl.item()) #D
print("I:",mag.incl.item()) #I
from matplotlib.pyplot import show
lon, lat = np.meshgrid(np.arange(-180, 180+10, 10), np.arange(-90, 90+10, 10))
mag = noaa(lat, lon, 0, 2015)
plotwmm(mag)
show() | 33.045045 | 109 | 0.571156 |
0688619f7ef43b02605de1e45f9fd553d9142b12 | 3,089 | py | Python | test/e2e/tests/test_transit_gateway.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 14 | 2021-08-04T00:21:49.000Z | 2022-03-21T01:06:09.000Z | test/e2e/tests/test_transit_gateway.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 48 | 2021-08-03T19:00:42.000Z | 2022-03-31T22:18:42.000Z | test/e2e/tests/test_transit_gateway.py | timbyr/ec2-controller | d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec | [
"Apache-2.0"
] | 9 | 2021-07-22T15:49:43.000Z | 2022-03-06T22:24:14.000Z | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the Transit Gateway API.
"""
import boto3
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
RESOURCE_PLURAL = "transitgateways"
## The long delete wait is required to make sure the TGW can transition out of its "pending" status.
## TGWs are unable to be deleted while in "pending"
CREATE_WAIT_AFTER_SECONDS = 90
DELETE_WAIT_AFTER_SECONDS = 10
| 30.89 | 100 | 0.697637 |
06897ca4a2ea127df4c4fbdc8e71310f23dfe61f | 2,862 | py | Python | Phase 4/src/search.py | ishaanshah/GameDhaBha | 5ab4f13ec7554ba74739d9a149da1154bb09041a | [
"MIT"
] | null | null | null | Phase 4/src/search.py | ishaanshah/GameDhaBha | 5ab4f13ec7554ba74739d9a149da1154bb09041a | [
"MIT"
] | null | null | null | Phase 4/src/search.py | ishaanshah/GameDhaBha | 5ab4f13ec7554ba74739d9a149da1154bb09041a | [
"MIT"
] | null | null | null | """ Contains all the functions related to the search of enitities in the Database """
from tabulate import tabulate
def SearchPlayerByName(cur, con):
""" Searches for the provided name's similar occurences in the Player's first and last name """
# Take in the input for the search query
search = {}
search["pattern"] = input("Enter the player name that you are looking for: ")
search["pattern"] = "%" + search["pattern"] + "%"
query = """
SELECT *
FROM Players
WHERE FirstName LIKE %(pattern)s
OR LastName LIKE %(pattern)s
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, search)
# Print the output
headers = ["Username", "PlayerID", "FirstName", "LastName", "Winnings",
"Nationality", "DateOfBirth"]
rows = []
while True:
res = cur.fetchone()
if res is None:
break
rows.append([
res["Username"], res["PlayerID"], res["FirstName"], res["LastName"],
res["Winnings"], res["Nationality"], res["DateOfBirth"]
])
print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
print("")
def SearchOrganisationByName(cur, con):
""" Searches for an Organisation by the name given. """
# Take in the input for the search query
search = {}
search["pattern"] = input("Enter the organisation's name that you are looking for: ")
search["pattern"] = "%" + search["pattern"] + "%"
query = """
SELECT *
FROM Organisations
WHERE Name LIKE %(pattern)s
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, search)
# Print the output
headers = ["OrganisationID", "Name", "Headquarters", "Founded", "Earnings"]
rows = []
while True:
res = cur.fetchone()
if res is None:
break
rows.append([
res["OrganisationID"], res["Name"], res["Headquarters"], res["Founded"],
res["Earnings"]
])
print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
print("")
| 25.327434 | 99 | 0.568484 |
068a35a559d65ea89371c4e0284f743170c94d8d | 15,413 | py | Python | machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | 1 | 2021-11-21T19:56:29.000Z | 2021-11-21T19:56:29.000Z | machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | null | null | null | machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2019, Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
#
# Work based on:
# - test_net.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
# - test_fit.py
# Copyright (c) 2013, Google Inc.
#
# Test launching UEFI binaries from FIT images.
"""
Note: This test relies on boardenv_* containing configuration values to define
which network environment is available for testing. Without this, the parts
that rely on network will be automatically skipped.
For example:
# Boolean indicating whether the Ethernet device is attached to USB, and hence
# USB enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_usb = False
# Boolean indicating whether the Ethernet device is attached to PCI, and hence
# PCI enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_pci = True
# True if a DHCP server is attached to the network, and should be tested.
# If DHCP testing is not possible or desired, this variable may be omitted or
# set to False.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. If solely relying on DHCP, this variable may be omitted or set to
# an empty list.
env__net_static_env_vars = [
('ipaddr', '10.0.0.100'),
('netmask', '255.255.255.0'),
('serverip', '10.0.0.1'),
]
# Details regarding a file that may be read from a TFTP server. This variable
# may be omitted or set to None if TFTP testing is not possible or desired.
# Additionally, when the 'size' is not available, the file will be generated
# automatically in the TFTP root directory, as specified by the 'dn' field.
env__efi_fit_tftp_file = {
'fn': 'test-efi-fit.img', # File path relative to TFTP root
'size': 3831, # File size
'crc32': '9fa3f79c', # Checksum using CRC-32 algorithm, optional
'addr': 0x40400000, # Loading address, integer, optional
'dn': 'tftp/root/dir', # TFTP root directory path, optional
}
"""
import os.path
import pytest
import u_boot_utils as util
# Define the parametrized ITS data to be used for FIT images generation.
ITS_DATA = '''
/dts-v1/;
/ {
description = "EFI image with FDT blob";
#address-cells = <1>;
images {
efi {
description = "Test EFI";
data = /incbin/("%(efi-bin)s");
type = "%(kernel-type)s";
arch = "%(sys-arch)s";
os = "efi";
compression = "%(efi-comp)s";
load = <0x0>;
entry = <0x0>;
};
fdt {
description = "Test FDT";
data = /incbin/("%(fdt-bin)s");
type = "flat_dt";
arch = "%(sys-arch)s";
compression = "%(fdt-comp)s";
};
};
configurations {
default = "config-efi-fdt";
config-efi-fdt {
description = "EFI FIT w/ FDT";
kernel = "efi";
fdt = "fdt";
};
config-efi-nofdt {
description = "EFI FIT w/o FDT";
kernel = "efi";
};
};
};
'''
# Define the parametrized FDT data to be used for DTB images generation.
FDT_DATA = '''
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
model = "%(sys-arch)s %(fdt_type)s EFI FIT Boot Test";
compatible = "%(sys-arch)s";
reset@0 {
compatible = "%(sys-arch)s,reset";
reg = <0 4>;
};
};
'''
| 33.579521 | 99 | 0.594693 |
068bed0bd09441343b0ab11a87d3f70ca8cbcf66 | 2,234 | py | Python | data_dictionary/data_dictionary.py | georgetown-analytics/DC-Bikeshare | 9f5a6a3256cff15a29f0dca6e9a9d8098ab2df28 | [
"MIT"
] | 11 | 2018-07-01T16:43:05.000Z | 2020-07-17T19:08:16.000Z | data_dictionary/data_dictionary.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2021-02-08T20:21:12.000Z | 2021-12-13T19:47:04.000Z | data_dictionary/data_dictionary.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2018-10-05T19:54:20.000Z | 2020-10-27T11:54:09.000Z | #!/usr/bin/env python
import report, sys
import psycopg2.extras
parser = report.get_parser(sys.argv[0])
parser.add_argument('--title', '-t', required=False, dest='title', default="Data Dictionary", help='Report Title')
args = parser.parse_args()
conn = report.get_connection(args)
curs = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
tmpl_vars = {
'dictionary': get_dictionary(),
'title': args.title
}
report.generate_report(tmpl_vars, args)
report.generate_csv(tmpl_vars, args)
| 29.394737 | 114 | 0.637422 |
068d0a9c6eb823b33105c8883388612ae4b08f65 | 1,112 | py | Python | LeetCode/InsertionLL.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | 1 | 2021-02-27T06:12:55.000Z | 2021-02-27T06:12:55.000Z | LeetCode/InsertionLL.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | 1 | 2021-02-02T08:52:17.000Z | 2021-02-03T08:19:12.000Z | LeetCode/InsertionLL.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | null | null | null |
head = takeinputLL()
printLL(insertionLL(head)) | 19.508772 | 52 | 0.522482 |
068db78fb9e1cc510a957bc841fd463a0fc7de6a | 2,581 | py | Python | migrations/versions/458a7da0c9da_.py | dmiklic/psiholeks-web | 68dda07228a53790ab1e797336bb236031a544de | [
"MIT"
] | null | null | null | migrations/versions/458a7da0c9da_.py | dmiklic/psiholeks-web | 68dda07228a53790ab1e797336bb236031a544de | [
"MIT"
] | 1 | 2018-05-01T09:15:12.000Z | 2018-05-01T09:25:03.000Z | migrations/versions/458a7da0c9da_.py | dmiklic/psiholeks-web | 68dda07228a53790ab1e797336bb236031a544de | [
"MIT"
] | null | null | null | """empty message
Revision ID: 458a7da0c9da
Revises:
Create Date: 2018-05-01 21:15:27.029811
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '458a7da0c9da'
down_revision = None
branch_labels = None
depends_on = None
| 46.089286 | 109 | 0.720651 |
068fc5e74266b5c9c2303aed1e80240bd5fd0b7c | 573 | py | Python | mimic/modalities/MimicLateral.py | Jimmy2027/MoPoE-MIMIC | d167719b0dc7ba002b7421eb82a83e47d2437795 | [
"MIT"
] | 1 | 2021-09-30T07:56:46.000Z | 2021-09-30T07:56:46.000Z | mimic/modalities/MimicLateral.py | Jimmy2027/MoPoE-MIMIC | d167719b0dc7ba002b7421eb82a83e47d2437795 | [
"MIT"
] | null | null | null | mimic/modalities/MimicLateral.py | Jimmy2027/MoPoE-MIMIC | d167719b0dc7ba002b7421eb82a83e47d2437795 | [
"MIT"
] | null | null | null | import torch
import mimic.modalities.utils
from mimic.modalities.Modality import ModalityIMG
| 31.833333 | 85 | 0.687609 |
06911eef246c1772226f520da651c6e6b9337698 | 40,564 | py | Python | src/rogerthat/rpc/rpc.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/rogerthat/rpc/rpc.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/rogerthat/rpc/rpc.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import json
import logging
import threading
import time
import traceback
import types
import uuid
from copy import deepcopy
from random import choice
from types import NoneType
from concurrent import futures # @UnresolvedImport
from google.appengine.api import urlfetch, memcache
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.api.app_identity.app_identity import get_application_id
from google.appengine.api.taskqueue import TaskRetryOptions
from google.appengine.ext import db, deferred
from hyper import HTTP20Connection
from jose import jwt
from jose.constants import Algorithms
from mcfw.cache import set_cache_key
from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.rpc import arguments, returns, check_function_metadata, get_parameter_types, run, get_parameters, \
get_type_details, serialize_value, parse_parameter
from rogerthat.consts import DEBUG, HIGH_LOAD_WORKER_QUEUE, FAST_QUEUE
from rogerthat.dal.app import get_app_by_id
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.dal.rpc_call import get_rpc_capi_backlog_parent_by_account, get_rpc_capi_backlog_parent_by_mobile
from rogerthat.models import UserProfile
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile, RpcAPIResult, RpcCAPICall, OutStandingFirebaseKick, \
ServiceAPICallback, RpcException
from rogerthat.settings import get_server_settings
from rogerthat.to.push import PushData
from rogerthat.to.system import LogErrorRequestTO
from rogerthat.utils import now, privatize
from rogerthat.utils.cloud_tasks import create_task, schedule_tasks
from rogerthat.utils.crypto import encrypt_for_jabber_cloud, decrypt_from_jabber_cloud
from rogerthat.utils.transactions import on_trans_committed
_CALL_ACTION_RESEND = 1
_CALL_ACTION_MUST_PROCESS = 2
_CALL_ACTION_DO_NOT_PROCESS = 3
BACKLOG_CONCURRENCY_PROTECTION_INTERVAL = 120
MESSAGE_LINGER_INTERVAL = 3600 * 24 * 20 # 20 days
MESSAGE_ALLOWED_FUTURE_TIME_INTERVAL = 3600 * 24
BACKLOG_MESSAGE_RETENTION_INTERVAL = 3600 * 24 + MESSAGE_LINGER_INTERVAL # 21 days
BACKLOG_DUPLICATE_AVOIDANCE_RETENTION_INTERVAL = 3600 * 24 # 1 day
APPENGINE_APP_ID = get_application_id()
DO_NOT_SAVE_RPCCALL_OBJECTS = "DO_NOT_SAVE_RPCCALL_OBJECTS"
PERFORM_CALLBACK_SYNCHRONOUS = "PERFORM_CALLBACK_SYNCHRONOUS"
SKIP_ACCOUNTS = "SKIP_ACCOUNTS"
MOBILE_ACCOUNT = "MOBILE_ACCOUNT"
DEFER_KICK = "DEFER_KICK"
TARGET_MFR = "TARGET_MFR"
API_VERSION = u"av"
API_DIRECT_PATH_KEY = u"ap"
CALL_ID = u"ci"
FUNCTION = u"f"
PARAMETERS = u"a"
STATUS = u"s"
STATUS_SUCCESS = u"success"
STATUS_FAIL = u"fail"
RESULT = u"r"
ERROR = u"e"
CALL_TIMESTAMP = u"t"
CALL_RESEND_TIMEOUT = 120
DEFAULT_RETENTION = 3600 * 24
MANDATORY_CALL_KEYS_SET = {PARAMETERS, API_VERSION, CALL_ID, FUNCTION}
SEND_ACK = 1
IGNORE = 2
PRIORITY_NORMAL = 5
PRIORITY_HIGH = 10
DEFAULT_APPLE_PUSH_MESSAGE = base64.encodestring('{"aps":{"content-available":1}}')
CAPI_KEYWORD_ARG_PRIORITY = "_cka_priority_"
CAPI_KEYWORD_ARG_APPLE_PUSH_MESSAGE = "_cka_apple_push_message_"
CAPI_KEYWORD_PUSH_DATA = '_push_data_'
def create_firebase_request(data, is_gcm=False):
# type: (dict) -> UserRPC
# See https://firebase.google.com/docs/cloud-messaging/http-server-ref
settings = get_server_settings()
rpc_item = urlfetch.create_rpc(5, None)
url = 'https://fcm.googleapis.com/fcm/send'
headers = {
'Content-Type': 'application/json',
'Authorization': 'key=%s' % (settings.gcmKey if is_gcm else settings.firebaseKey)
}
urlfetch.make_fetch_call(rpc_item, url, json.dumps(data), urlfetch.POST, headers)
return rpc_item
def retry_firebase_request(payload, is_gcm=False):
rpc_item = create_firebase_request(payload, is_gcm=is_gcm)
response = rpc_item.get_result() # type: urlfetch._URLFetchResult
if response.status_code != 200:
raise Exception(response.content)
apns_cache = APNSCache()
kicks = JabberRpcCaller("kick")
firebase = FirebaseKicker()
api_callbacks = DirectRpcCaller()
rpc_items = RpcFinisher()
context_threads = ContextFinisher()
# @arguments(alias=unicode, accept_sub_types=bool, priority=int, feature_version=Feature)
def _store_rpc_api_result_deferred(mobile_key, call_id, result_json, now_):
RpcAPIResult(parent=mobile_key, key_name=call_id, result=result_json, timestamp=now_).put()
def _ack_all_deferred(mobile_key, call_ids):
db.delete_async([db.Key.from_path(RpcAPIResult.kind(), call_id, parent=mobile_key) for call_id in call_ids])
def _validate_capi_call(result_f, error_f, target, alias, f, accept_sub_types=False):
check_decorations(result_f)
check_decorations(error_f)
funcs = result_f, error_f
logging.debug(funcs)
from rogerthat.rpc.calls import result_mapping
if any(filter(lambda fn: "mapping" not in fn.meta or fn.meta["mapping"] not in result_mapping, funcs)):
raise ValueError(
"Result and error processing functions must have their mapping declared in rogerthat.rpc.calls.result_mapping!")
if any(filter(lambda fn: fn.meta["return_type"] != NoneType, funcs)):
raise ValueError("Result and error processing functions cannot have return types.")
if any(filter(lambda fn: "context" not in fn.meta["kwarg_types"] or fn.meta["kwarg_types"]["context"] != RpcCAPICall, funcs)):
raise ValueError(
"Result and error processing functions must have a arg 'context' of type rogerthat.rpc.models.RpcCAPICall.")
if any(filter(lambda fn: len(fn.meta["kwarg_types"]) != 2, funcs)):
raise ValueError("Result and error processing functions must have 2 arguments!")
if not accept_sub_types and f.meta["return_type"] != result_f.meta["kwarg_types"]["result"]:
raise ValueError("Return value type and result function result argument types do not match!")
if accept_sub_types and not issubclass(f.meta["return_type"], result_f.meta["kwarg_types"]["result"]):
raise ValueError("Return value type and result function result argument types do not match!")
islist = False
if not isinstance(target, (users.User, NoneType)):
islist = True
if not isinstance(target, (list, set)):
raise_invalid_target()
if any((not isinstance(m, (users.User, NoneType)) for m in target)):
raise_invalid_target()
from rogerthat.rpc.calls import client_mapping
if not alias in client_mapping:
raise ValueError("Function is not present in client_mapping")
if not "error" in error_f.meta["kwarg_types"] or error_f.meta["kwarg_types"]["error"] in (str, unicode):
raise ValueError("Error function must have an error parameter of type string.")
return filter(lambda x: x, target) if islist else ([target] if target else [])
def check_decorations(f):
if not hasattr(f, "meta") or "return_type" not in f.meta or "kwarg_types" not in f.meta:
raise ValueError("Function needs to be decorated with argument and return types")
| 43.852973 | 200 | 0.636377 |
0693b9613a135ff67d5413df7255909db8145fcb | 1,131 | py | Python | setup.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 5 | 2021-06-25T16:44:38.000Z | 2021-12-31T01:29:00.000Z | setup.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | null | null | null | setup.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 1 | 2021-06-25T20:33:47.000Z | 2021-06-25T20:33:47.000Z | import os, sys, shutil
from cx_Freeze import setup, Executable
from pathlib import Path
# Dependencies are automatically detected, but it might need fine tuning.
additional_modules = []
build_exe_options = {
"includes": additional_modules,
"packages": [
"moderngl",
"moderngl_window",
"pyglet",
"moderngl_window.context.pyglet",
"glcontext",
"moderngl_window.loaders.texture",
"moderngl_window.loaders.program",
],
}
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(
name="Catchbase",
version="1.0",
description="Play your fangame",
options={"build_exe": build_exe_options},
executables=[Executable(script="game.py", base=base)],
)
for x in Path("build").glob("*"):
p = x
break
copytree("resources", str(p / "resources"))
| 22.62 | 73 | 0.622458 |
0694fc4578a8a2dc3e2046b27ace2ea005f03139 | 5,332 | py | Python | api/login.py | lachouettecoop/inventory-coop-api | 583866737522ef473cbc05664835fe50787e2146 | [
"MIT"
] | null | null | null | api/login.py | lachouettecoop/inventory-coop-api | 583866737522ef473cbc05664835fe50787e2146 | [
"MIT"
] | 17 | 2020-10-05T19:21:55.000Z | 2022-02-13T09:28:44.000Z | api/login.py | lachouettecoop/inventory-coop | c89488ea177b5616f1fe0bb5f149c61ae4bce2d1 | [
"MIT"
] | null | null | null | import os
import time
import jwt
import ldap
from eve.auth import TokenAuth
from flask import Blueprint, abort, jsonify, request
blueprint = Blueprint("login", __name__)
ADMIN_USERS = os.environ.get("ADMIN_USERS", "papanowel@gmail.com")
JWT_ALGORITHM = os.environ.get("JWT_ALGORITHM", "HS256")
JWT_EXPIRE_OFFSET = os.environ.get("JWT_EXPIRE_OFFSET", 60 * 60 * 12) # 12H
JWT_SECRET = os.environ.get("JWT_SECRET")
LDAP_SERVER = os.environ.get("LDAP_SERVER", "ldap://ldap.lachouettecoop.fr:389")
LDAP_BASE_DN = os.environ.get("LDAP_BASE_DN", "cn=admin,dc=lachouettecoop,dc=fr")
LDAP_SEARCH_DN = os.environ.get("LDAP_SEARCH_DN", "dc=lachouettecoop,dc=fr")
LDAP_USER_DN = os.environ.get(
"LDAP_USER_DN", "cn={},ou=membres,o=lachouettecoop,dc=lachouettecoop,dc=fr"
)
LDAP_ADMIN_PASS = os.environ.get("LDAP_ADMIN_PASS")
LDAP_SCOPE_SUBTREE = 2
| 33.325 | 99 | 0.61853 |
069560330062cf15b22f72ebec9dba45c5f2ba00 | 411 | py | Python | tools/pinyin-completion/setup.py | Vayn/dotfiles | 5a3e0e71669dd1832df7147b14c2943de6746119 | [
"MIT"
] | 4 | 2015-03-25T01:46:39.000Z | 2017-04-30T18:04:46.000Z | tools/pinyin-completion/setup.py | Vayn/dotfiles | 5a3e0e71669dd1832df7147b14c2943de6746119 | [
"MIT"
] | null | null | null | tools/pinyin-completion/setup.py | Vayn/dotfiles | 5a3e0e71669dd1832df7147b14c2943de6746119 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
from distutils.core import setup
setup( name="pinyin-comp",
version="0.1",
description="complete path containing Chinese by pinyin acronym",
author="Jekyll Wu",
author_email="adaptee@gmail.com",
url="http://www.github.com/adaptee/pinyin-comp",
packages=['pinyin'],
scripts=['pinyin-comp'] ,
)
| 27.4 | 73 | 0.622871 |
069859b4e100fade3b9371a57b0661bbf0c77719 | 1,518 | py | Python | DailyCodingProblem/52_Google_LRU.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 5 | 2019-09-07T17:31:17.000Z | 2022-03-05T09:59:46.000Z | DailyCodingProblem/52_Google_LRU.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | null | null | null | DailyCodingProblem/52_Google_LRU.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 2 | 2019-09-07T17:31:24.000Z | 2019-10-28T16:10:52.000Z | """
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Implement an LRU (Least Recently Used) cache.
It should be able to be initialized with a cache size n, and contain the following methods:
set(key, value): sets key to value. If there are already n items in
the cache and we are adding a new item,
then it should also remove the least recently used item.
get(key): gets the value at key. If no such key exists, return null.
Each operation should run in O(1) time.
"""
if __name__ == '__main__':
lru_cache = lru(5)
assert not lru_cache.get(key='a')
lru_cache.set('a', 1)
assert lru_cache.get(key='a') == 1
lru_cache.set('b', 2)
lru_cache.set('c', 3)
lru_cache.set('d', 4)
lru_cache.set('f', 6)
lru_cache.set('e', 5)
assert not lru_cache.get(key='a')
assert lru_cache.get('e') == 5
| 24.885246 | 91 | 0.601449 |
0698ddf547c57339f83986d2ec83f39b1adf80a3 | 233 | py | Python | setup.py | kowalcj0/teeb | 15b87d9857510890fbe417e4de473b8b685b3319 | [
"BSD-3-Clause"
] | null | null | null | setup.py | kowalcj0/teeb | 15b87d9857510890fbe417e4de473b8b685b3319 | [
"BSD-3-Clause"
] | null | null | null | setup.py | kowalcj0/teeb | 15b87d9857510890fbe417e4de473b8b685b3319 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
# Metadata goes in setup.cfg. These are here for GitHub's dependency graph.
setup(
name="Teeb",
install_requires=["chardet==4.0.0", "send2trash==1.5.0", "wand==0.6.5"],
)
| 25.888889 | 76 | 0.656652 |
069a8d7c6fbb5a8f120cebac621c759b5b2c0718 | 233 | py | Python | article_retrieval/__main__.py | aleph-oh/wikigame-solver | 9a7b0a16df41291890e2bbe5903be55b25cef0f4 | [
"MIT"
] | null | null | null | article_retrieval/__main__.py | aleph-oh/wikigame-solver | 9a7b0a16df41291890e2bbe5903be55b25cef0f4 | [
"MIT"
] | null | null | null | article_retrieval/__main__.py | aleph-oh/wikigame-solver | 9a7b0a16df41291890e2bbe5903be55b25cef0f4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Constructs article graph."""
from database import clear_db
from database.constants import engine
from .database_builder import populate_db
if __name__ == "__main__":
clear_db(engine)
populate_db()
| 21.181818 | 41 | 0.759657 |
069d259501ee0ad7f4c234fd4a9336eca8bab60c | 637 | py | Python | playlist_organizer/client/deezer/settings.py | perminovs/playlist_organizer | cc5da2b0a031e29235e9ebe982f1062900602dd4 | [
"MIT"
] | null | null | null | playlist_organizer/client/deezer/settings.py | perminovs/playlist_organizer | cc5da2b0a031e29235e9ebe982f1062900602dd4 | [
"MIT"
] | null | null | null | playlist_organizer/client/deezer/settings.py | perminovs/playlist_organizer | cc5da2b0a031e29235e9ebe982f1062900602dd4 | [
"MIT"
] | null | null | null | from pydantic import BaseSettings
| 25.48 | 61 | 0.66876 |
069dac451eea987083fb0222c0d932e8a5b6741b | 2,462 | py | Python | services/web/project/routes/api.py | sthe0/test-bot-fullstack | 602c876177eb16958748a9e46274533759ff5792 | [
"MIT"
] | null | null | null | services/web/project/routes/api.py | sthe0/test-bot-fullstack | 602c876177eb16958748a9e46274533759ff5792 | [
"MIT"
] | null | null | null | services/web/project/routes/api.py | sthe0/test-bot-fullstack | 602c876177eb16958748a9e46274533759ff5792 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request
from functools import wraps
from sqlalchemy import desc
from project.common import app, db, fb_api
from project.config import ApiConfig
from project.models import Client, Message
api = Blueprint('api', __name__)
| 27.662921 | 89 | 0.675467 |
069dc6e8933ab9b9913ed9fdacb63aac7e39388b | 3,830 | py | Python | c4/test.py | duilio/c4 | 6dcde8316603192b0bc713d1bedb94290d123a9d | [
"MIT"
] | 16 | 2016-03-19T16:34:58.000Z | 2021-11-07T08:59:53.000Z | c4/test.py | duilio/c4 | 6dcde8316603192b0bc713d1bedb94290d123a9d | [
"MIT"
] | 1 | 2017-08-27T10:18:39.000Z | 2018-02-24T20:55:27.000Z | c4/test.py | duilio/c4 | 6dcde8316603192b0bc713d1bedb94290d123a9d | [
"MIT"
] | 9 | 2017-02-23T23:14:17.000Z | 2020-12-25T12:26:47.000Z | import unittest
import numpy as np
from c4.board import Board, PLAYER1
| 42.555556 | 58 | 0.232376 |
069eac769d2ccf455170294707453e0b3dff4035 | 1,134 | py | Python | NLP/3-Advanced-RNN-2/3-1-cnn-rnn-1.py | agarwalanant/ThaparWorkshopANN | fd7858b89ade8a621f30e389721166b222228f02 | [
"MIT"
] | 16 | 2019-06-19T05:43:01.000Z | 2020-12-01T13:24:55.000Z | NLP/3-Advanced-RNN-2/3-1-cnn-rnn-1.py | agarwalanant/ThaparWorkshopANN | fd7858b89ade8a621f30e389721166b222228f02 | [
"MIT"
] | null | null | null | NLP/3-Advanced-RNN-2/3-1-cnn-rnn-1.py | agarwalanant/ThaparWorkshopANN | fd7858b89ade8a621f30e389721166b222228f02 | [
"MIT"
] | 37 | 2019-06-17T11:53:13.000Z | 2020-06-02T13:05:31.000Z | import numpy as np
from sklearn.metrics import accuracy_score
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Input, Activation, Reshape, concatenate
from keras import optimizers
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Conv2D(input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3]), filters = 50, kernel_size = (3,3), strides = (1,1), padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Reshape(target_shape = (16*16, 50)))
model.add(LSTM(50, return_sequences = False))
model.add(Dense(10))
model.add(Activation('softmax'))
adam = optimizers.Adam(lr = 0.001)
model.compile(loss = 'categorical_crossentropy', optimizer = adam, metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 100, batch_size = 100, verbose = 1)
results = model.evaluate(X_test, y_test)
print('Test Accuracy: ', results[1]) | 37.8 | 157 | 0.751323 |
069f831615d2592dffe7e15e0512bd6627e063ca | 1,652 | py | Python | pyage2/lib/actions.py | kachayev/pyage2 | adf87e0deeddaa4301dbcaf4fa7d396a71d129de | [
"Apache-2.0"
] | 13 | 2021-07-09T07:10:42.000Z | 2022-03-16T10:38:59.000Z | pyage2/lib/actions.py | kachayev/pyage2 | adf87e0deeddaa4301dbcaf4fa7d396a71d129de | [
"Apache-2.0"
] | null | null | null | pyage2/lib/actions.py | kachayev/pyage2 | adf87e0deeddaa4301dbcaf4fa7d396a71d129de | [
"Apache-2.0"
] | 1 | 2022-01-16T13:24:32.000Z | 2022-01-16T13:24:32.000Z | # Copyright 2021 PyAge2, Oleksii Kachaiev <kachayev@gmail.com>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import pyage2.expert.action.action_pb2 as action
from pyage2.lib import expert
from pyage2.lib.expert import StrategicNumber, ObjectType, TechType | 37.545455 | 84 | 0.766344 |
069f9b47635b756c567cad2b645af0001f7d8f95 | 4,045 | py | Python | multi_view_ctrl/grid_element_div.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | 2 | 2019-12-12T20:57:37.000Z | 2021-09-29T02:59:19.000Z | multi_view_ctrl/grid_element_div.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | multi_view_ctrl/grid_element_div.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | from libavg import avg
from events.event_dispatcher import EventDispatcher
from multi_view_ctrl.grid_element import GridElement
from multi_view_ctrl.configurations.grid_element_div_configuration import GridElementDivConfigurations
def is_pos_in(self, pos):
"""
Checks if a given pos lies inside in this grid element div.
:param pos: The pos to check for.
:type pos: tuple[float, float]
:return: Is the given pos in this element?
:rtype: bool
"""
return self.pos[0] <= pos[0] <= self.pos[0] + self.size[0] and self.pos[1] <= pos[1] <= self.pos[1] + self.size[1]
def append_child_for_grid(self, node):
"""
Appends the given node. It also sets the size of the node to the size of this grid element div.
:param node: The node to add to this grid element.
:type node: Node
"""
node.size = self._internal_div.size
node.view_id = self.grid_id
self._internal_div.appendChild(node)
self._child_nodes.append(node)
def start_listening(self):
"""
Registers a callback to listen to changes to this grid elemen div. Listeners can register to any number of the provided
events. For the required structure of the callbacks see below.
"""
pass
def stop_listening(self):
"""
Stops listening to an event the listener has registered to previously. The provided callback needs to be the
same that was used to listen to the event in the fist place.
"""
pass
| 38.52381 | 135 | 0.648949 |
06a1854bfcd9896d019df15af2c9bb29dfe25337 | 4,152 | py | Python | relations-finder/src/relations_finder.py | ahmed91abbas/wiki-relations | 9f8d20c512f993cab6065cb2695c996c076b6d13 | [
"MIT"
] | 5 | 2021-03-05T12:19:44.000Z | 2022-01-05T19:28:44.000Z | relations-finder/src/relations_finder.py | ahmed91abbas/wiki-relations | 9f8d20c512f993cab6065cb2695c996c076b6d13 | [
"MIT"
] | null | null | null | relations-finder/src/relations_finder.py | ahmed91abbas/wiki-relations | 9f8d20c512f993cab6065cb2695c996c076b6d13 | [
"MIT"
] | 2 | 2021-03-13T12:07:33.000Z | 2021-03-30T07:58:38.000Z | import en_core_web_sm
from spacy import displacy
| 37.071429 | 98 | 0.575626 |
06a1e865401185ec36f7fc0f3099d2dfb3463d93 | 8,597 | py | Python | app/users/routes.py | Shikhar-SRJ/DL_Model_APIs | ea26cb415477cf58a2ddd925689b62588bf95b13 | [
"MIT"
] | null | null | null | app/users/routes.py | Shikhar-SRJ/DL_Model_APIs | ea26cb415477cf58a2ddd925689b62588bf95b13 | [
"MIT"
] | null | null | null | app/users/routes.py | Shikhar-SRJ/DL_Model_APIs | ea26cb415477cf58a2ddd925689b62588bf95b13 | [
"MIT"
] | null | null | null | from flask import jsonify, request, Blueprint, current_app, send_file, make_response
import tensorflow as tf
import numpy as np
from app.users import utils
import cv2
from app.models import User, Data, Predictions, Coordinates
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
users = Blueprint('users', __name__)
| 41.331731 | 139 | 0.609282 |
06a25915be2cf155b3153cd03aaa095116d1e484 | 352 | py | Python | Curso em Video - Exercicios/ex006.py | JefferMarcelino/Aulas-Python | 3885b1cfa7f27b90b702ee81fc5eab108c029e52 | [
"MIT"
] | 2 | 2021-01-27T19:30:05.000Z | 2022-01-10T20:34:53.000Z | Curso em Video - Exercicios/ex006.py | JefferMarcelino/Aulas-Python | 3885b1cfa7f27b90b702ee81fc5eab108c029e52 | [
"MIT"
] | null | null | null | Curso em Video - Exercicios/ex006.py | JefferMarcelino/Aulas-Python | 3885b1cfa7f27b90b702ee81fc5eab108c029e52 | [
"MIT"
] | null | null | null | """
EXERCCIO 006: Dobro, Triplo, Raiz Quadrada
Crie um algoritmo que leia um nmero e mostre o seu dobro, triplo e raiz quadrada.
"""
n = int(input('Digite um nmero: '))
print('O dobro de {} vale {}.'.format(n, (n * 2)))
print('O triplo de {} vale {}.'.format(n, (n * 3)))
print('A raiz quadrada de {} igual a {:.2f}.'.format(n, pow(n, (1 / 2))))
| 35.2 | 82 | 0.619318 |
06a27f548fc5e06937812be30ed778ed22cfc7a5 | 2,494 | py | Python | bionic/gcs.py | IDl0T/bionic | 8eaa868a2e7af81bb561492c045feb414f7c6326 | [
"Apache-2.0"
] | 98 | 2019-08-29T21:38:44.000Z | 2022-01-26T04:59:57.000Z | bionic/gcs.py | IDl0T/bionic | 8eaa868a2e7af81bb561492c045feb414f7c6326 | [
"Apache-2.0"
] | 143 | 2019-09-11T15:32:17.000Z | 2021-06-08T21:48:30.000Z | bionic/gcs.py | IDl0T/bionic | 8eaa868a2e7af81bb561492c045feb414f7c6326 | [
"Apache-2.0"
] | 20 | 2019-09-13T18:13:03.000Z | 2021-12-03T19:51:01.000Z | """
Utilities for working with Google Cloud Storage.
"""
import logging
import warnings
from .deps.optdep import import_optional_dependency
logger = logging.getLogger(__name__)
_cached_gcs_fs = None
# TODO: Consider using persistence.GcsFilesystem instead of exposing this function.
def upload_to_gcs(path, url):
"""
Copy a local path to GCS URL.
"""
gcs_fs = get_gcs_fs_without_warnings()
if path.is_dir():
gcs_fs.put(str(path), url, recursive=True)
else:
# If the GCS URL is a folder, we want to write the file in the folder.
# There seems to be a bug in fsspec due to which, the file is uploaded
# as the url, instead of inside the folder. What this means is, writing
# a file c.json to gs://a/b/ would result in file gs://a/b instead of
# gs://a/b/c.json.
#
# The `put` API is supposed to write the file inside the folder but it
# strips the ending "/" at the end in fsspec's `_strip_protocol` method.
# See https://github.com/intake/filesystem_spec/issues/448 for more
# details and tracking this issue.
if url.endswith("/"):
url = url + path.name
gcs_fs.put_file(str(path), url)
| 38.369231 | 85 | 0.676423 |
06a3f43967e178259c2fded854053a178b218002 | 208 | py | Python | src/utils/const.py | yizhongw/TagNN-PDTB | 9b944210bcc3851c65cb479ef705acbb1b45b08f | [
"MIT"
] | 14 | 2018-11-19T02:49:34.000Z | 2022-02-18T04:00:31.000Z | src/utils/const.py | lidejian/TreeLSTM-PDTB | 3f048d2a3daf3fb5e803037f9344f515d0e71450 | [
"MIT"
] | null | null | null | src/utils/const.py | lidejian/TreeLSTM-PDTB | 3f048d2a3daf3fb5e803037f9344f515d0e71450 | [
"MIT"
] | 5 | 2017-12-04T13:29:29.000Z | 2018-05-07T08:45:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: yizhong
# created_at: 17-5-2 5:00
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
NUM_WORD = '<num>'
PUNC_TAG = '<punc>'
| 16 | 27 | 0.586538 |
06a458881d352d1e5bc5252e5c9354f711ebe5e6 | 208 | py | Python | src_old/tests/scripts/core/ex7.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | src_old/tests/scripts/core/ex7.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | src_old/tests/scripts/core/ex7.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | #coding: utf-8
a = zeros((10,10), double)
for i in range(0,10):
a[i,i] = 2.0
for i in range(0,9):
a[i,i+1] = -1.0
for i in range(0,9):
a[i,i+1] = -1.0
n = 5
for i in range(0, n):
x = 1
| 10.947368 | 26 | 0.480769 |
06a83d0998f9996abe66240e832c87433d984bc2 | 626 | py | Python | src/learning_language/views.py | gsi-luis/djangolearning | 4cf1e016cfe2910c907a669e518f5233ae04fb12 | [
"MIT"
] | 1 | 2020-07-05T18:33:33.000Z | 2020-07-05T18:33:33.000Z | src/learning_language/views.py | gsi-luis/djangolearning | 4cf1e016cfe2910c907a669e518f5233ae04fb12 | [
"MIT"
] | 2 | 2021-03-30T13:49:58.000Z | 2021-06-10T19:43:27.000Z | src/learning_language/views.py | gsi-luis/djangolearning | 4cf1e016cfe2910c907a669e518f5233ae04fb12 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .forms import LanguageForm
from learning_django import settings
from django.utils import translation
| 27.217391 | 76 | 0.701278 |
06abef330b43336341fe87f19a5bb8dd00ab85db | 252 | py | Python | driver/comtypes_gamry_simulate.py | yul69-cell/HELAO | a39372eb385ee93b711443d9cbd56c5ec737ff70 | [
"CC0-1.0"
] | null | null | null | driver/comtypes_gamry_simulate.py | yul69-cell/HELAO | a39372eb385ee93b711443d9cbd56c5ec737ff70 | [
"CC0-1.0"
] | null | null | null | driver/comtypes_gamry_simulate.py | yul69-cell/HELAO | a39372eb385ee93b711443d9cbd56c5ec737ff70 | [
"CC0-1.0"
] | null | null | null | #create cinet and functions like COMError that simulate Gamry
#dtaq.Cook is defined to return dummy data when called
#import config here and check if a simulation is being run and if so load that simulation .py that overrides functions like dtaq.Cook | 50.4 | 133 | 0.809524 |
06ada35b71f676f14ae2a8fbfcb628afacd0c4d8 | 512 | py | Python | oj2.py | YanshuHu/combinatoricsoj2 | 51fa8cf06042e63642b8407d12de99d22f0e7a3b | [
"Apache-2.0"
] | null | null | null | oj2.py | YanshuHu/combinatoricsoj2 | 51fa8cf06042e63642b8407d12de99d22f0e7a3b | [
"Apache-2.0"
] | null | null | null | oj2.py | YanshuHu/combinatoricsoj2 | 51fa8cf06042e63642b8407d12de99d22f0e7a3b | [
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
main()
| 20.48 | 40 | 0.564453 |
06af2b443d404bade0c4526a7994135505c898f7 | 737 | py | Python | kelte/maths/vector.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | kelte/maths/vector.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | kelte/maths/vector.py | brianbruggeman/rl | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from .point import Point
NONE: Direction = Direction(0, 0)
NORTH: Direction = Direction(0, -1)
SOUTH: Direction = Direction(0, 1)
EAST: Direction = Direction(1, 0)
WEST: Direction = Direction(-1, 0)
NORTH_EAST: Direction = NORTH + EAST
NORTH_WEST: Direction = NORTH + WEST
SOUTH_EAST: Direction = SOUTH + EAST
SOUTH_WEST: Direction = SOUTH + WEST
UP: Direction = Direction(0, -1)
DOWN: Direction = Direction(0, 1)
RIGHT: Direction = Direction(1, 0)
LEFT: Direction = Direction(-1, 0)
UP_RIGHT: Direction = UP + RIGHT
UP_LEFT: Direction = UP + LEFT
DOWN_RIGHT: Direction = DOWN + RIGHT
DOWN_LEFT: Direction = DOWN + LEFT
| 22.333333 | 40 | 0.708277 |
06af865f1a3973785536a7d3858ef8ea324bb911 | 1,437 | py | Python | tests/bugs/core_4158_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_4158_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_4158_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_4158
# title: Regression: LIKE with escape does not work
# decription:
# tracker_id: CORE-4158
# min_versions: ['2.0.7']
# versions: 2.0.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.0.7
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table tab1 (
id int constraint pk_tab1 primary key,
val varchar(30)
);
insert into tab1 (id, val) values (1, 'abcdef');
insert into tab1 (id, val) values (2, 'abc_ef');
insert into tab1 (id, val) values (3, 'abc%ef');
insert into tab1 (id, val) values (4, 'abc&%ef');
insert into tab1 (id, val) values (5, 'abc&_ef');
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
select id, val from tab1 where val like 'abc&%ef' escape '&';
select id, val from tab1 where val like 'abc&_ef' escape '&';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ID 3
VAL abc%ef
ID 2
VAL abc_ef
"""
| 26.127273 | 70 | 0.592902 |
06afc4b209dc7b6ac90802b9ff2ce19d8ee2b910 | 18,430 | py | Python | trustyroles/arpd_update/arpd_update.py | hmcguire1/trustyroles | 5dbe3d65353538f84f12f3ecef6de2a8cc3f731f | [
"MIT"
] | 2 | 2019-12-16T15:10:13.000Z | 2020-02-24T20:13:40.000Z | trustyroles/arpd_update/arpd_update.py | hmcguire1/trustyroles | 5dbe3d65353538f84f12f3ecef6de2a8cc3f731f | [
"MIT"
] | null | null | null | trustyroles/arpd_update/arpd_update.py | hmcguire1/trustyroles | 5dbe3d65353538f84f12f3ecef6de2a8cc3f731f | [
"MIT"
] | 1 | 2019-12-05T01:12:33.000Z | 2019-12-05T01:12:33.000Z | """
arpd_update focuses on easily editing the assume role policy document of a role.
"""
import os
import json
import logging
import argparse
from datetime import datetime
from typing import List, Dict, Optional
import boto3 # type: ignore
from botocore.exceptions import ClientError # type: ignore
LOGGER = logging.getLogger("IAM-ROLE-TRUST-POLICY")
logging.basicConfig(level=logging.WARNING)
PARSER = argparse.ArgumentParser()
def _main():
    """Command-line entry point.

    Parses CLI arguments and dispatches to the requested trust-policy
    operation (get, update, remove, restore) plus the externalId/Sid
    convenience flags. Uses the module-level PARSER.
    """
    PARSER.add_argument(
        "-a",
        "--arn",
        nargs="+",
        required=False,
        help="Add new ARNs to trust policy. Takes a comma-seperated list of ARNS.",
    )
    PARSER.add_argument(
        "-u",
        "--update_role",
        type=str,
        required=True,
        help="Role for updating trust policy. Takes an role friendly name as string.",
    )
    PARSER.add_argument(
        "-m",
        "--method",
        type=str,
        required=False,
        choices=["get", "update", "remove", "restore"],
        help="Takes choice of method to get, update, or remove.",
    )
    PARSER.add_argument(
        "-e",
        "--add_external_id",
        type=str,
        required=False,
        help="Takes an externalId as a string.",
    )
    PARSER.add_argument(
        "--remove_external_id",
        action="store_true",
        required=False,
        help="Method for removing externalId condition. Takes no arguments",
    )
    PARSER.add_argument(
        "--json",
        action="store_true",
        required=False,
        help="Add to print json in get method.",
    )
    PARSER.add_argument(
        "--add_sid",
        type=str,
        required=False,
        help="Add a Sid to trust policy. Takes a string.",
    )
    PARSER.add_argument(
        "--remove_sid",
        action="store_true",
        required=False,
        help="Remove a Sid from a trust policy. Takes no arguments.",
    )
    PARSER.add_argument(
        "--backup_policy",
        type=str,
        required=False,
        help="""Creates a backup of previous policy
                in current directory as <ISO-time>.policy.bk""",
    )
    PARSER.add_argument(
        "--dir_path",
        type=str,
        required=False,
        help="Path to directory for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--file_path",
        type=str,
        required=False,
        help="File for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--bucket",
        type=str,
        required=False,
        help="S3 bucket name for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--key",
        type=str,
        required=False,
        help="S3 key name for restoring S3 policy. Takes a string",
    )

    args = vars(PARSER.parse_args())

    # Resolve backup destination up front. Both names are initialised so
    # they are always bound -- previously, when --backup_policy was not
    # supplied, dir_path/bucket were never assigned and every later code
    # path that forwarded them raised NameError.
    dir_path = None
    bucket = None
    if args["backup_policy"]:
        if args["backup_policy"] == "local":
            dir_path = args["dir_path"] if args["dir_path"] else os.getcwd()
        elif args["backup_policy"] == "s3":
            bucket = args["bucket"]
        else:
            # Unknown backup type: fall back to a local backup in the
            # current working directory.
            dir_path = os.getcwd()
            bucket = ""

    if args["method"] == "update":
        # Role name first, then the ARN list, matching the update_arn
        # signature (these two positional arguments used to be swapped,
        # which passed the ARN list as the role name and broke the call).
        arpd = update_arn(
            args["update_role"],
            args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    elif args["method"] == "remove":
        # Same argument-order fix as the "update" branch above.
        arpd = remove_arn(
            args["update_role"],
            args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    elif args["method"] == "get":
        arpd = get_arpd(args["update_role"])
        if args["json"]:
            print(json.dumps(arpd["Statement"][0], indent=4))
        else:
            print(f"\nARNS:")
            # The Principal may be a single ARN string or a list of ARNs.
            if isinstance(arpd["Statement"][0]["Principal"]["AWS"], list):
                for arn in arpd["Statement"][0]["Principal"]["AWS"]:
                    print(f" {arn}")
            else:
                print(f" {arpd['Statement'][0]['Principal']['AWS']}")
            print(f"Conditions:")
            if arpd["Statement"][0]["Condition"]:
                print(f" {arpd['Statement'][0]['Condition']}")
    elif args["method"] == "restore" and args["backup_policy"]:
        # NOTE(review): restore_from_backup is not defined in this module
        # as shown -- confirm it exists before relying on this code path.
        if args["backup_policy"].lower() == "local" and args["file_path"]:
            arpd = restore_from_backup(
                role_name=args["update_role"],
                location_type="local",
                file_path=args["file_path"],
            )
        elif args["backup_policy"].lower() == "s3":
            arpd = restore_from_backup(
                role_name=args["update_role"],
                location_type="s3",
                file_path="",
                key=args["key"],
                bucket=bucket,
                backup_policy=args["backup_policy"],
            )
        print(json.dumps(arpd["Statement"][0], indent=4))

    if args["add_external_id"]:
        arpd = add_external_id(
            external_id=args["add_external_id"],
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))

    if args["remove_external_id"]:
        arpd = remove_external_id(
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))

    if args["add_sid"]:
        arpd = add_sid(
            role_name=args["update_role"],
            sid=args["add_sid"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))

    if args["remove_sid"]:
        arpd = remove_sid(
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
def get_arpd(role_name: str, session=None, client=None) -> Dict:
    """Fetch the assume-role policy document (trust policy) of an IAM role.

    :param role_name: Friendly name of the IAM role.
    :param session: Optional boto3 session used to build an IAM client.
    :param client: Optional pre-built IAM client (used only when no
        session is supplied).
    :return: The role's AssumeRolePolicyDocument as a dict.
    """
    # Resolve the IAM client: an explicit session wins, then an explicit
    # client, otherwise fall back to the default boto3 client.
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")
    response = iam.get_role(RoleName=role_name)
    return response["Role"]["AssumeRolePolicyDocument"]
def update_arn(
    role_name: str,
    arn_list: List,
    dir_path: Optional[str],
    client=None,
    session=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Append one or more principal ARNs to a role's trust policy.

    :param role_name: Friendly name of the IAM role to update.
    :param arn_list: ARNs to add as trusted principals.
    :param dir_path: Directory used when backing up the policy locally.
    :param client: Optional pre-built IAM client.
    :param session: Optional boto3 session (takes precedence over client).
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty/None skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")

    arpd = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
    principals = arpd["Statement"][0]["Principal"]["AWS"]

    # Optionally snapshot the current policy before touching it.
    if backup_policy:
        backup_kind = backup_policy.lower()
        if backup_kind == "local":
            if dir_path:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local", dir_path=dir_path)
            else:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local")
        elif backup_kind == "s3":
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="s3", bucket=bucket)

    # Normalise a single-ARN principal (bare string) into a list,
    # then extend it with the new ARNs.
    if not isinstance(principals, list):
        principals = [principals]
        arpd["Statement"][0]["Principal"]["AWS"] = principals
    principals.extend(arn_list)

    try:
        iam.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def remove_arn(
    role_name: str,
    arn_list: List,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Remove one or more principal ARNs from a role's trust policy.

    :param role_name: Friendly name of the IAM role to update.
    :param arn_list: ARNs to remove from the trusted principals.
    :param dir_path: Directory used when backing up the policy locally.
    :param session: Optional boto3 session (takes precedence over client).
    :param client: Optional pre-built IAM client.
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty/None skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")

    arpd = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
    current_principals = arpd["Statement"][0]["Principal"]["AWS"]

    # Optionally snapshot the current policy before touching it.
    if backup_policy:
        backup_kind = backup_policy.lower()
        if backup_kind == "local":
            if dir_path:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local", dir_path=dir_path)
            else:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local")
        elif backup_kind == "s3":
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="s3", bucket=bucket)

    # Drop only ARNs that are actually present; unknown ARNs are ignored.
    for candidate in arn_list:
        if candidate in current_principals:
            arpd["Statement"][0]["Principal"]["AWS"].remove(candidate)

    try:
        iam.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def add_external_id(
    role_name: str,
    external_id: str,
    dir_path: Optional[str],
    client=None,
    session=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Set an sts:ExternalId condition on a role's trust policy.

    Any existing Condition on the first statement is replaced.

    :param role_name: Friendly name of the IAM role to update.
    :param external_id: External ID value to require on sts:AssumeRole.
    :param dir_path: Directory used when backing up the policy locally.
    :param client: Optional pre-built IAM client.
    :param session: Optional boto3 session (takes precedence over client).
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty/None skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")

    arpd = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]

    # Optionally snapshot the current policy before touching it.
    if backup_policy:
        backup_kind = backup_policy.lower()
        if backup_kind == "local":
            if dir_path:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local", dir_path=dir_path)
            else:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local")
        elif backup_kind == "s3":
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="s3", bucket=bucket)

    arpd["Statement"][0]["Condition"] = {
        "StringEquals": {"sts:ExternalId": external_id}
    }
    try:
        iam.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def remove_external_id(
    role_name: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Clear the Condition block (externalId) from a role's trust policy.

    :param role_name: Friendly name of the IAM role to update.
    :param dir_path: Directory used when backing up the policy locally.
    :param session: Optional boto3 session (takes precedence over client).
    :param client: Optional pre-built IAM client.
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty/None skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")

    arpd = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]

    # Optionally snapshot the current policy before touching it.
    if backup_policy:
        backup_kind = backup_policy.lower()
        if backup_kind == "local":
            if dir_path:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local", dir_path=dir_path)
            else:
                retain_policy(policy=arpd, role_name=role_name,
                              location_type="local")
        elif backup_kind == "s3":
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="s3", bucket=bucket)

    # Wipe the whole Condition object, not just the externalId key.
    arpd["Statement"][0]["Condition"] = {}
    try:
        iam.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def add_sid(
    role_name: str,
    sid: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: str = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Set a statement ID (Sid) on a role's trust policy.

    :param role_name: Friendly name of the IAM role to update.
    :param sid: Statement ID to set on the first policy statement.
    :param dir_path: Directory used when backing up the policy locally.
    :param session: Optional boto3 session (takes precedence over client).
    :param client: Optional pre-built IAM client.
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty string skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam = session.client("iam")
    else:
        iam = client if client else boto3.client("iam")

    arpd = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]

    # Optionally snapshot the current policy before touching it.
    backup_kind = backup_policy.lower()
    if backup_kind == "local":
        if dir_path:
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="local", dir_path=dir_path)
        else:
            retain_policy(policy=arpd, role_name=role_name,
                          location_type="local")
    elif backup_kind == "s3":
        retain_policy(policy=arpd, role_name=role_name,
                      location_type="s3", bucket=bucket)

    arpd["Statement"][0]["Sid"] = sid
    try:
        iam.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as ex:
        raise ex
    return arpd
def remove_sid(
    role_name: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: str = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Remove the statement ID (Sid) from a role's trust policy.

    A policy without a Sid is left unchanged (no error).

    :param role_name: Friendly name of the IAM role to update.
    :param dir_path: Directory used when backing up the policy locally.
    :param session: Optional boto3 session (takes precedence over client).
    :param client: Optional pre-built IAM client.
    :param backup_policy: "local" or "s3" to back up the current policy
        before modifying it; empty string skips the backup.
    :param bucket: S3 bucket name used when backup_policy is "s3".
    :return: The updated assume-role policy document.
    :raises ClientError: If IAM rejects the policy update.
    """
    if session:
        iam_client = session.client("iam")
    elif client:
        iam_client = client
    else:
        iam_client = boto3.client("iam")

    role = iam_client.get_role(RoleName=role_name)
    arpd = role["Role"]["AssumeRolePolicyDocument"]

    # Optionally snapshot the current policy before touching it.
    if backup_policy.lower() == "local":
        if dir_path:
            retain_policy(
                policy=arpd,
                role_name=role_name,
                location_type="local",
                dir_path=dir_path,
            )
        else:
            retain_policy(policy=arpd, role_name=role_name, location_type="local")
    elif backup_policy.lower() == "s3":
        retain_policy(
            policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
        )

    # Drop the Sid if present. The previous implementation indexed
    # arpd["Statement"][0]["Sid"] directly, which raised KeyError for any
    # policy that had no Sid to begin with.
    arpd["Statement"][0].pop("Sid", None)
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def retain_policy(
    role_name: str,
    policy: Dict,
    session=None,
    client=None,
    location_type: Optional[str] = None,
    dir_path: Optional[str] = None,
    bucket: Optional[str] = None,
) -> None:
    """Persist a backup copy of an assume-role policy document.

    The backup is named ``<UTC ISO time>.<role_name>.bk`` and written
    either to a local directory or to an S3 bucket.

    :param role_name: Friendly name of the role (used in the backup name).
    :param policy: Policy document to back up.
    :param session: Optional boto3 session used to build the S3 client.
    :param client: Optional pre-built S3 client.
    :param location_type: "local" or "s3"; required.
    :param dir_path: Target directory for local backups. Defaults to the
        current working directory at call time (previously os.getcwd()
        was evaluated once at import time, freezing the directory for
        the life of the process).
    :param bucket: Target bucket for S3 backups.
    :raises ClientError: If the S3 upload fails.
    """
    assert location_type, "location_type must be 'local' or 's3'"
    # One name for both targets keeps local and S3 backups consistent.
    file_name = (
        datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + f".{role_name}.bk"
    )
    if location_type.lower() == "local":
        if dir_path is None:
            dir_path = os.getcwd()
        with open(os.path.join(dir_path, file_name), "w") as file:
            json.dump(policy, file, ensure_ascii=False, indent=4)
    elif location_type.lower() == "s3":
        if session:
            s3_client = session.client("s3")
        elif client:
            s3_client = client
        else:
            s3_client = boto3.client("s3")
        try:
            s3_client.put_object(
                Bucket=bucket,
                Key=file_name,
                Body=json.dumps(policy).encode(),
            )
        except ClientError as error:
            raise error
if __name__ == "__main__":
_main()
| 27.507463 | 93 | 0.566522 |
06aff71efc0dec027a46c0058c117887035af9c9 | 7,471 | py | Python | kartingpros/timetrial.py | Vishvak365/Karting-Pros | 1c482cff78e7402c8da8870ff519eea760be4a34 | [
"MIT"
] | 1 | 2021-06-28T21:55:18.000Z | 2021-06-28T21:55:18.000Z | kartingpros/timetrial.py | wboyd600/Karting-Pros | 4db4b9f075b152dfea79c89640c0bac1becce89b | [
"MIT"
] | 17 | 2020-11-27T14:33:39.000Z | 2020-12-08T00:45:18.000Z | kartingpros/timetrial.py | wboyd600/Karting-Pros | 4db4b9f075b152dfea79c89640c0bac1becce89b | [
"MIT"
] | 1 | 2021-06-27T20:27:38.000Z | 2021-06-27T20:27:38.000Z | import pygame
import time
import math
import sys
from kartingpros import track, mainmenu, car, settings, loadimage
from kartingpros.loadimage import _load_image, _load_sound, _load_font
import numpy as np
from numpy import save
from kartingpros.car import Car
from pygame.locals import *
from pygame import mixer
import os
| 33.959091 | 99 | 0.549458 |
06b0e4b7f2071c5642bd956f75e4b9df9624fc3e | 9,079 | py | Python | tests/location/test_location_utility.py | questionlp/wwdtm | f3cf3399c22bf19e369e6e0250e7c72de0be3a90 | [
"Apache-2.0"
] | null | null | null | tests/location/test_location_utility.py | questionlp/wwdtm | f3cf3399c22bf19e369e6e0250e7c72de0be3a90 | [
"Apache-2.0"
] | 1 | 2022-01-17T04:25:49.000Z | 2022-01-17T04:25:49.000Z | tests/location/test_location_utility.py | questionlp/wwdtm | f3cf3399c22bf19e369e6e0250e7c72de0be3a90 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2021 Linh Pham
# wwdtm is released under the terms of the Apache License 2.0
"""Testing for object: :py:class:`wwdtm.location.LocationUtility`
"""
import json
from typing import Any, Dict
import pytest
from wwdtm.location import LocationUtility
| 38.965665 | 88 | 0.702941 |
06b195aef83b65c429bf30fd2c08ed267c6351f6 | 2,204 | py | Python | test/create_cert.py | finsberg/pytest-tornado | 52ba5119310be5385ceed74ef94f4538660e3725 | [
"Apache-2.0"
] | 123 | 2015-03-31T17:25:34.000Z | 2021-12-16T12:14:38.000Z | test/create_cert.py | finsberg/pytest-tornado | 52ba5119310be5385ceed74ef94f4538660e3725 | [
"Apache-2.0"
] | 53 | 2015-02-04T06:02:21.000Z | 2020-11-25T20:04:52.000Z | test/create_cert.py | finsberg/pytest-tornado | 52ba5119310be5385ceed74ef94f4538660e3725 | [
"Apache-2.0"
] | 43 | 2015-02-26T05:02:44.000Z | 2021-12-17T10:08:44.000Z | # -*- coding: utf-8 -*-
"""
Create a cert with pyOpenSSL for tests.
Heavily based on python-opsi's OPSI.Util.Task.Certificate.
Source: https://github.com/opsi-org/python-opsi/blob/stable/OPSI/Util/Task/Certificate.py
"""
import argparse
import os
import random
import socket
from tempfile import NamedTemporaryFile
from OpenSSL import crypto
try:
import secrets
except ImportError:
secrets = None
def createCertificate(path):
    """
    Create a self-signed X.509 certificate for test use and write it to *path*.

    The output PEM file contains the certificate followed by its RSA private
    key; DH parameters are then appended by shelling out to the ``openssl``
    CLI (which must be on PATH).
    """
    cert = crypto.X509()
    # Static subject fields identifying this as a throwaway test certificate.
    cert.get_subject().C = "DE"  # Country
    cert.get_subject().ST = "HE"  # State
    cert.get_subject().L = "Wiesbaden"  # Locality
    cert.get_subject().O = "pytest-tornado"  # Organisation
    cert.get_subject().OU = "Testing Department"  # organisational unit
    cert.get_subject().CN = socket.getfqdn()  # common name
    # As described in RFC5280 this value is required and must be a
    # positive and unique integer.
    # Source: http://tools.ietf.org/html/rfc5280#page-19
    cert.set_serial_number(random.randint(0, pow(2, 16)))
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(60 * 60)  # Valid 1 hour
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 2048)
    # Self-signed: the issuer is the certificate's own subject.
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.set_version(2)  # X.509 v3 is encoded as version number 2
    cert.sign(k, 'sha512')
    # Certificate and private key are concatenated into a single PEM blob.
    certcontext = b"".join(
        (
            crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
            crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
        )
    )
    with open(path, "wt") as certfile:
        certfile.write(certcontext.decode())
    try:
        # Seed file for openssl's RNG; delete=False so the external process
        # can read it after the `with` block closes the handle.
        with NamedTemporaryFile(mode="wb", delete=False) as randfile:
            randfile.write(randomBytes(512))
        # NOTE(review): shell command string built from *path* -- acceptable
        # for a test helper fed from argparse, but would be injectable with
        # untrusted paths; subprocess.run([...]) with explicit stdout
        # redirection would be safer.
        command = u"openssl dhparam -rand {tempfile} 512 >> {target}".format(
            tempfile=randfile.name, target=path
        )
        os.system(command)
    finally:
        # Always remove the temporary seed file, even if openssl fails.
        # NOTE(review): if NamedTemporaryFile itself raised, `randfile` would
        # be unbound here -- confirm that is acceptable for this helper.
        os.remove(randfile.name)
def randomBytes(length):
    """
    Return *length* cryptographically secure random bytes.

    Uses :func:`secrets.token_bytes` when the ``secrets`` module is available
    (Python 3.6+) and falls back to :func:`os.urandom` otherwise.

    :param length: number of random bytes to generate.
    :rtype: bytes
    """
    # Bug fix: the previous implementation ignored `length` and always
    # produced 512 bytes.
    if secrets:
        return secrets.token_bytes(length)
    else:
        return os.urandom(length)
if __name__ == '__main__':
    # CLI entry point: generate a test certificate at the requested path.
    parser = argparse.ArgumentParser(description='Create certificate for testing')
    parser.add_argument('--cert', dest='cert', default="testcert.pem",
                        help='Name of the certificate')
    args = parser.parse_args()
    createCertificate(args.cert)
| 23.956522 | 89 | 0.71824 |
06b1a7bf9e162d2f1a93b478504af2c68a143b23 | 680 | py | Python | positional_args.py | nickaigi/effective_python_tips | 1a68b6eaed2e946b003c0cd0bdea03e79b8e8990 | [
"Unlicense"
] | null | null | null | positional_args.py | nickaigi/effective_python_tips | 1a68b6eaed2e946b003c0cd0bdea03e79b8e8990 | [
"Unlicense"
] | null | null | null | positional_args.py | nickaigi/effective_python_tips | 1a68b6eaed2e946b003c0cd0bdea03e79b8e8990 | [
"Unlicense"
] | null | null | null | def log(message, *values):
""" * operator instructs python to pass items from the sequence as
positional arguments
Remember:
- using the * operator with a generator may cause your program
to run out of memory and crash.
- adding new positional parameters to functions that accept
*args can introduce hard-to-find bugs
"""
if not values:
print(message)
else:
values_str = ', '.join(str(x) for x in values)
print('%s: %s' % (message, values_str))
if __name__ == '__main__':
    # Demo: extra positionals are collected into *values inside log().
    log('My numbers are', 1, 2)
    log('Hi there')
    favorites = [7, 33, 99]
    # The * operator unpacks the list into separate positional arguments.
    log('Favorites colors', *favorites)
06b1dfaeb76409a1c03b523aa234e5fabb549ad9 | 2,942 | py | Python | fastmri_recon/models/subclassed_models/updnet.py | samiulshuvo/fastmri-reproducible-benchmark | 5ac9ba3e7f1ad859dcf74e7019b574a6bf065eac | [
"MIT"
] | 105 | 2019-09-30T06:05:38.000Z | 2022-03-02T09:48:31.000Z | fastmri_recon/models/subclassed_models/updnet.py | samiulshuvo/fastmri-reproducible-benchmark | 5ac9ba3e7f1ad859dcf74e7019b574a6bf065eac | [
"MIT"
] | 103 | 2019-09-18T08:30:23.000Z | 2022-03-16T22:24:14.000Z | fastmri_recon/models/subclassed_models/updnet.py | samiulshuvo/fastmri-reproducible-benchmark | 5ac9ba3e7f1ad859dcf74e7019b574a6bf065eac | [
"MIT"
] | 38 | 2019-09-30T06:05:41.000Z | 2022-02-26T14:07:52.000Z | from .unet import UnetComplex
from .cross_domain import CrossDomainNet
from ..utils.fourier import FFT, IFFT
| 37.240506 | 81 | 0.602311 |
06b2849360054f2d534889fecd3a7de975d603e4 | 4,342 | py | Python | utilities/misc.py | lebionick/stereo-transformer | 6e7df042d917c5ed00d10bd6ddb6f76e90429148 | [
"Apache-2.0"
] | 410 | 2020-11-06T02:10:17.000Z | 2022-03-25T17:12:24.000Z | utilities/misc.py | lppllppl920/stereo-transformer | f07b1ee8ced1c36e10630401688a06e355056e56 | [
"Apache-2.0"
] | 55 | 2020-11-06T10:29:16.000Z | 2022-03-30T02:10:10.000Z | utilities/misc.py | lppllppl920/stereo-transformer | f07b1ee8ced1c36e10630401688a06e355056e56 | [
"Apache-2.0"
] | 72 | 2020-11-06T07:22:39.000Z | 2022-03-19T14:20:38.000Z | # Authors: Zhaoshuo Li, Xingtong Liu, Francis X. Creighton, Russell H. Taylor, and Mathias Unberath
#
# Copyright (c) 2020. Johns Hopkins University - All rights reserved.
import copy
import numpy as np
import torch
import torch.nn as nn
def torch_1d_sample(source, sample_points, mode='linear'):
    """Sample ``source`` along its last dimension at fractional positions.

    Args:
        source: tensor of shape ``[N, D1, D2, ..., Dn]``.
        sample_points: fractional indices into the last axis, of shape
            ``[N, D1, D2, ..., Dn-1, 1]``.
        mode: ``'linear'`` interpolates between the two neighbouring entries;
            ``'sum'`` adds both neighbours (the right neighbour contributes
            only at non-integer locations, so integer points are not counted
            twice).

    Returns:
        Tensor of shape ``[N, D1, D2, ..., Dn-1]``.

    Raises:
        ValueError: if ``mode`` is not ``'linear'`` or ``'sum'``.
    """
    # Indices of the two neighbouring samples, clamped to the valid range of
    # the last axis.
    idx_l = torch.floor(sample_points).long().clamp(0, source.size(-1) - 1)
    idx_r = torch.ceil(sample_points).long().clamp(0, source.size(-1) - 1)

    if mode == 'linear':
        weight_r = sample_points - idx_l
        weight_l = 1 - weight_r
    elif mode == 'sum':
        weight_r = (idx_r != idx_l).int()  # we only sum places of non-integer locations
        weight_l = 1
    else:
        # ValueError (still a subclass of Exception, so existing broad
        # handlers keep working) instead of a bare Exception.
        raise ValueError(f'mode not recognized: {mode!r}')

    out = torch.gather(source, -1, idx_l) * weight_l + torch.gather(source, -1, idx_r) * weight_r
    return out.squeeze(-1)
def find_occ_mask(disp_left, disp_right):
    """Compute left/right occlusion maps from a pair of disparity maps.

    A pixel is marked occluded (True/1) when it either maps outside the
    other view or fails the left-right consistency check (the two views'
    disparities disagree by more than one pixel).

    Args:
        disp_left: left-view disparity map of shape ``[H, W]``, values in ``[0, w]``.
        disp_right: right-view disparity map, same shape and range.

    Returns:
        Tuple ``(occ_mask_l, occ_mask_r)`` of boolean arrays (True = occluded).
    """
    w = disp_left.shape[-1]

    # # left occlusion
    # find corresponding pixels in target image
    coord = np.linspace(0, w - 1, w)[None,]  # 1xW
    right_shifted = coord - disp_left

    # 1. negative locations will be occlusion
    occ_mask_l = right_shifted <= 0

    # 2. wrong matches will be occlusion
    right_shifted[occ_mask_l] = 0  # set negative locations to 0
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the equivalent platform-default integer dtype.
    right_shifted = right_shifted.astype(int)
    disp_right_selected = np.take_along_axis(disp_right, right_shifted,
                                             axis=1)  # find tgt disparity at src-shifted locations
    wrong_matches = np.abs(disp_right_selected - disp_left) > 1  # theoretically, these two should match perfectly
    # Ignore positions where either view has no valid disparity.
    wrong_matches[disp_right_selected <= 0.0] = False
    wrong_matches[disp_left <= 0.0] = False

    # produce final occ
    wrong_matches[occ_mask_l] = True  # apply case 1 occlusion to case 2
    occ_mask_l = wrong_matches

    # # right occlusion
    # find corresponding pixels in target image
    coord = np.linspace(0, w - 1, w)[None,]  # 1xW
    left_shifted = coord + disp_right

    # 1. out-of-bound locations will be occlusion
    occ_mask_r = left_shifted >= w

    # 2. wrong matches will be occlusion
    left_shifted[occ_mask_r] = 0  # set out-of-bound locations to 0
    left_shifted = left_shifted.astype(int)  # same np.int removal fix as above
    disp_left_selected = np.take_along_axis(disp_left, left_shifted,
                                            axis=1)  # find tgt disparity at src-shifted locations
    wrong_matches = np.abs(disp_left_selected - disp_right) > 1  # theoretically, these two should match perfectly
    wrong_matches[disp_left_selected <= 0.0] = False
    wrong_matches[disp_right <= 0.0] = False

    # produce final occ
    wrong_matches[occ_mask_r] = True  # apply case 1 occlusion to case 2
    occ_mask_r = wrong_matches

    return occ_mask_l, occ_mask_r
| 32.893939 | 114 | 0.649931 |
06b29440122743c4d662f5e0b42777454bfb53b1 | 2,600 | py | Python | tfcli/resources/asg.py | leowa/tfcli | 21314feabcb56fe802298a98a66eb4e2a9de8cc7 | [
"MIT"
] | null | null | null | tfcli/resources/asg.py | leowa/tfcli | 21314feabcb56fe802298a98a66eb4e2a9de8cc7 | [
"MIT"
] | null | null | null | tfcli/resources/asg.py | leowa/tfcli | 21314feabcb56fe802298a98a66eb4e2a9de8cc7 | [
"MIT"
] | null | null | null | from .base import BaseResource
class LaunchTemplate(BaseResource):
    """Launch-template resource generator for the current region."""

    def list_all(self):
        """Yield every EC2 launch template visible to this session.

        :return: tuples of ``(terraform_type, name, id)``; the template id is
            used for both the name and the id fields.
        """
        ec2_client = self.session.client("ec2")
        response = ec2_client.describe_launch_templates()
        for template in response["LaunchTemplates"]:
            template_id = template["LaunchTemplateId"]
            yield "aws_launch_template", template_id, template_id
| 31.707317 | 87 | 0.611154 |
06b306a89a539a3cbfca1d1c817821e2aac7c4eb | 28,278 | py | Python | BASS-train.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | 1 | 2019-11-23T12:58:09.000Z | 2019-11-23T12:58:09.000Z | BASS-train.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | null | null | null | BASS-train.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | 3 | 2019-03-27T00:47:08.000Z | 2022-02-05T04:52:48.000Z | import numpy as np
import scipy.io
from sklearn.metrics import confusion_matrix
from random import randint, shuffle
from argparse import ArgumentParser
from helper import getValidDataset
import tensorflow as tf
parser = ArgumentParser()
parser.add_argument('--data', type=str, default='Indian_pines')
parser.add_argument('--patch_size', type=int, default=3)
parser.add_argument('--library', type=str, default='tensorflow')
opt = parser.parse_args()
import os
model_directory = os.path.join(os.getcwd(), 'BASSNET_Trained_model/')
# Load MATLAB pre-processed image data
try:
TRAIN = scipy.io.loadmat("./data/" + opt.data + "_Train_patch_" + str(opt.patch_size) + ".mat")
VALIDATION = scipy.io.loadmat("./data/" + opt.data + "_Val_patch_" + str(opt.patch_size) + ".mat")
TEST = scipy.io.loadmat("./data/" + opt.data + "_Test_patch_" + str(opt.patch_size) + ".mat")
except NameError:
raise print('--data options are: Indian_pines, Salinas, KSC, Botswana')
# Extract data and label from MATLAB file
training_data, training_label = TRAIN['train_patch'], TRAIN['train_labels']
validation_data, validation_label = VALIDATION['val_patch'], VALIDATION['val_labels']
test_data, test_label = TEST['test_patch'], TEST['test_labels']
getValidDataset(test_data, test_label)
print('\nData input shape')
print('training_data shape' + str(training_data.shape))
print('training_label shape' + str(training_label.shape) + '\n')
print('testing_data shape' + str(test_data.shape))
print('testing_label shape' + str(test_label.shape) + '\n')
SIZE = training_data.shape[0]
HEIGHT = training_data.shape[1]
WIDTH = training_data.shape[2]
BANDS = training_data.shape[3]
NUM_PARALLEL_BAND = 10
BAND_SIZE = BANDS / 10
NUM_CLASS = training_label.shape[1]
# Helper Functions
# Define BASSNET archicture
a =8
graph = tf.Graph()
with graph.as_default():
img_entry = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, BANDS], name='img_entry')
img_label = tf.placeholder(tf.uint8, shape=[None, NUM_CLASS], name='img_label')
image_true_class = tf.argmax(img_label, axis=1, name="img_true_label")
prob = tf.placeholder(tf.float32)
model = bassnet(statlieImg=img_entry, prob=prob)
final_layer = model['block3_dense3']
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_layer,
labels=img_label)
cost = tf.reduce_mean(cross_entropy)
# Optimisation function
optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(cost)
predict_class = model['predict_class_number']
correction = tf.equal( predict_class, image_true_class)
accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
writer = tf.summary.FileWriter("BASSNETlogs/", session.graph)
if os.path.isdir(model_directory):
saver.restore(session, 'BASSNET_Trained_model/')
session.run(tf.global_variables_initializer())
total_iterations = 0
train(num_iterations=12000, train_batch_size=200)
saver.save(session, model_directory)
test()
# trainTestSwap(training_data, training_label, test_data, test_label, 1, size=250)
print('End session')
| 47.766892 | 169 | 0.607539 |
ebe8e591a0b9b4b36a891f5a5dfead02a0b973da | 2,300 | py | Python | hungarian/python/code/point.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | hungarian/python/code/point.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | hungarian/python/code/point.py | hrutkabence/tutorials | bd76294860804aee8ecda5e1445464506bf02ee0 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
def PolarP(dist, ang):
    """Convert polar coordinates (distance, bearing) into a rectangular Point."""
    east = dist * math.cos(ang)
    north = dist * math.sin(ang)
    return Point(east, north)
if __name__ == "__main__":
# tests
v = 0.1
A = Point(-100.4627, 52.5957)
B = Point(11.0532, 52.5956)
dist, bea = (B - A).polar()
P1 = A + PolarP(v, bea + math.pi * 3 / 2)
P2 = P1 + PolarP(dist, bea)
P3 = P2 + PolarP(v, bea + math.pi / 2)
P4 = A + PolarP(v, bea +math.pi / 2)
print(P1)
print(P2)
print(P3)
print(P4)
| 25.274725 | 65 | 0.513478 |
ebef4935fe5542a7f33a3a5e4cd173560258a38e | 4,588 | py | Python | mlmodels/model_tf/misc/tfcode2/CNN/alex-net/alexnet.py | gitter-badger/mlmodels | f08cc9b6ec202d4ad25ecdda2f44487da387569d | [
"MIT"
] | 1 | 2022-03-11T07:57:48.000Z | 2022-03-11T07:57:48.000Z | mlmodels/model_tf/misc/tfcode2/CNN/alex-net/alexnet.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | mlmodels/model_tf/misc/tfcode2/CNN/alex-net/alexnet.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from scipy.misc import imresize
from sklearn.cross_validation import train_test_split
import _pickle as cPickle
from train import train
# In[2]:
def unpickle(file):
    """Load one pickled CIFAR-10 batch file and return its dictionary.

    :param file: path to the batch file on disk.
    :return: the unpickled batch dictionary (e.g. ``data``/``labels`` keys).
    """
    with open(file, "rb") as fo:
        # latin1 decoding keeps Python-2-era pickles (the CIFAR batches)
        # loadable under Python 3.
        # Renamed the local from `dict`, which shadowed the builtin.
        batch = cPickle.load(fo, encoding="latin1")
    return batch
unique_name = unpickle("cifar-10-batches-py/batches.meta")["label_names"]
batches = unpickle("cifar-10-batches-py/data_batch_1")
train_X, test_X, train_Y, test_Y = train_test_split(
batches["data"], batches["labels"], test_size=0.2
)
# In[3]:
BATCH_SIZE = 5
# alexnet original
IMG_SIZE = 224
LEARNING_RATE = 0.0001
# In[4]:
sess = tf.InteractiveSession()
model = Alexnet(IMG_SIZE, len(unique_name), LEARNING_RATE)
sess.run(tf.global_variables_initializer())
# In[5]:
RESULTS = train(
sess, model, 20, BATCH_SIZE, len(unique_name), IMG_SIZE, train_X, test_X, train_Y, test_Y
)
# In[13]:
sns.set()
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[0], label="entropy cost")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[1], label="accuracy training")
plt.plot(np.arange(len(RESULTS[0])), RESULTS[2], label="accuracy testing")
plt.legend()
plt.show()
# In[ ]:
| 37 | 99 | 0.649956 |
ebefcab7987e2949070f887144afd954129e8c65 | 4,184 | py | Python | p8_test/test_local/__init__.py | crazynayan/tpf1 | c81a15d88d4d1f3ed2cf043c90782a4b8509ef14 | [
"MIT"
] | 1 | 2020-01-27T10:10:40.000Z | 2020-01-27T10:10:40.000Z | p8_test/test_local/__init__.py | crazynayan/tpf1 | c81a15d88d4d1f3ed2cf043c90782a4b8509ef14 | [
"MIT"
] | 4 | 2019-08-23T05:24:23.000Z | 2021-09-16T10:05:55.000Z | p8_test/test_local/__init__.py | crazynayan/tpf1 | c81a15d88d4d1f3ed2cf043c90782a4b8509ef14 | [
"MIT"
] | null | null | null | import random
import string
import unittest
from typing import List, Union, Dict
from config import config
from p2_assembly.mac2_data_macro import DataMacro
from p3_db.test_data import TestData
from p3_db.test_data_elements import Pnr
from p4_execution.debug import get_debug_loc, add_debug_loc, get_missed_loc
from p4_execution.ex5_execute import TpfServer
| 42.693878 | 119 | 0.633843 |
ebf03515b2af7a1e35e2369f06d9b3087f0c51d9 | 2,893 | py | Python | models/models.py | ahuimanu/vatsimlib | 12f1bcd248b2157349304583cbc1b499d9f39be0 | [
"MIT"
] | null | null | null | models/models.py | ahuimanu/vatsimlib | 12f1bcd248b2157349304583cbc1b499d9f39be0 | [
"MIT"
] | null | null | null | models/models.py | ahuimanu/vatsimlib | 12f1bcd248b2157349304583cbc1b499d9f39be0 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import datetime
from typing import Optional
| 28.087379 | 230 | 0.614587 |
ebf042ca58b471a448c4adbffa95dc9ab6a93834 | 360 | py | Python | temp-api/app/models.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | null | null | null | temp-api/app/models.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | 1 | 2017-11-28T04:26:45.000Z | 2017-11-28T04:57:47.000Z | temp-api/app/models.py | TheLongRunSmoke/birdfeeder | 6f238c9b8c8abdc866aaf042f79b674714fdaa8c | [
"MIT"
] | null | null | null | from app import db
| 24 | 80 | 0.644444 |
ebf1ffe3b522e31d9f44e5d373462af230e2e497 | 3,199 | py | Python | src/GameController.py | salemalex11/Gomoku | e709bc161a945e5521ea3b234ce8db41d3fd5bfe | [
"MIT"
] | null | null | null | src/GameController.py | salemalex11/Gomoku | e709bc161a945e5521ea3b234ce8db41d3fd5bfe | [
"MIT"
] | null | null | null | src/GameController.py | salemalex11/Gomoku | e709bc161a945e5521ea3b234ce8db41d3fd5bfe | [
"MIT"
] | 3 | 2019-02-17T22:15:36.000Z | 2021-01-04T19:13:52.000Z | # Define imports
import pygame
from pygame import *
import sys
import time
| 36.352273 | 131 | 0.449203 |
ebf22c5792152fe6b5cb3d25a3473aad20996bcf | 17,101 | py | Python | silverberg/test/test_client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 1 | 2019-09-22T04:00:56.000Z | 2019-09-22T04:00:56.000Z | silverberg/test/test_client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 14 | 2015-01-22T01:00:50.000Z | 2017-12-06T03:35:46.000Z | silverberg/test/test_client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 4 | 2015-03-31T19:49:05.000Z | 2020-03-03T20:44:32.000Z | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the client."""
import mock
from uuid import UUID
from twisted.internet import defer
from silverberg.client import CQLClient, ConsistencyLevel, TestingCQLClient
from silverberg.cassandra import ttypes, Cassandra
from silverberg.test.util import BaseTestCase
# class FaultTestCase(BaseTestCase):
# def setUp(self):
# self.client = CqlClient(TCP4ClientEndpoint(reactor, '127.0.0.1', 9160), 'blah')
# def test_vers(self):
# d = self.client.describe_version()
# def printR(r):
# print r
# d.addCallback(printR)
# return d
# def test_cql(self):
# d = self.client.execute("SELECT * FROM blah;", {})
# def printQ(r):
# print r
# d.addCallback(printQ)
# return d
| 41.009592 | 103 | 0.626513 |
ebf2bc1d88e8d3404f1439f8fb4400bf3874e4c0 | 3,386 | py | Python | drawer.py | jarekwg/crossword-packer | 88f90c16272c2c2f64475dffe3b0aaeec11c0606 | [
"MIT"
] | null | null | null | drawer.py | jarekwg/crossword-packer | 88f90c16272c2c2f64475dffe3b0aaeec11c0606 | [
"MIT"
] | null | null | null | drawer.py | jarekwg/crossword-packer | 88f90c16272c2c2f64475dffe3b0aaeec11c0606 | [
"MIT"
] | null | null | null | import re
from exceptions import WordPlacementConflict
from constants import ACROSS, DOWN
| 52.092308 | 159 | 0.560543 |
ebf45563a2d56576081e640ac1564e55a2546dba | 4,200 | py | Python | src/analyse/bubble_map.py | timtroendle/geographic-scale | 81ec940e10b8e692429797e6a066a177e1508a89 | [
"MIT"
] | 3 | 2020-08-19T17:56:22.000Z | 2021-08-19T08:52:21.000Z | src/analyse/bubble_map.py | timtroendle/geographic-scale | 81ec940e10b8e692429797e6a066a177e1508a89 | [
"MIT"
] | null | null | null | src/analyse/bubble_map.py | timtroendle/geographic-scale | 81ec940e10b8e692429797e6a066a177e1508a89 | [
"MIT"
] | null | null | null | import numpy as np
import shapely
import geopandas as gpd
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
EPSG_3035_PROJ4 = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs "
GREY = "#C0C0C0"
BLUE = "#4F6DB8"
YELLOW = "#FABC3C"
SUPPLY_TECHS = [
"hydro_reservoir", "hydro_run_of_river", "open_field_pv",
"roof_mounted_pv", "wind_offshore", "wind_onshore_competing",
"wind_onshore_monopoly"
]
DEMAND_TECH = "demand_elec"
MAP_MIN_X = 2200000
MAP_MIN_Y = 1400000
MAP_MAX_X = 6300000
MAP_MAX_Y = 5500000
if __name__ == "__main__":
bubble_map(
path_to_shapes=snakemake.input.shapes,
path_to_continent_shape=snakemake.input.continent_shape,
scenario=snakemake.wildcards.scenario,
colour=snakemake.wildcards.colour,
markersize=snakemake.wildcards.markersize,
resolution_km=snakemake.params.resolution_km,
path_to_results=snakemake.input.results,
path_to_output=snakemake.output[0]
)
| 30.882353 | 113 | 0.662619 |
ebf4ac0d537eab2740d82a66c4418235ab9c1ffc | 885 | py | Python | src/ytbdl/__init__.py | danloveg/dl-album | 20d142a0992f61b9d13beceb6abe9a9086e33e6a | [
"MIT"
] | null | null | null | src/ytbdl/__init__.py | danloveg/dl-album | 20d142a0992f61b9d13beceb6abe9a9086e33e6a | [
"MIT"
] | null | null | null | src/ytbdl/__init__.py | danloveg/dl-album | 20d142a0992f61b9d13beceb6abe9a9086e33e6a | [
"MIT"
] | null | null | null | import os
import confuse
config = confuse.LazyConfig('ytbdl', None)
def get_loaded_config_sources():
    """ Get existing configuration files

    Returns:
        (list): (string) paths of configuration files that are present on
        the file system; empty when no configuration files exist
    """
    config.resolve()
    filenames = (source.filename for source in config.sources)
    return [filename for filename in filenames if os.path.exists(filename)]
def get_main_config_path():
    """ Get the main configuration file path

    Returns:
        (str): path to ``config.yaml`` inside the app's config directory;
        this path may or may not exist
    """
    config_directory = config.config_dir()
    return os.path.join(config_directory, 'config.yaml')
def config_exists():
    """ Determine if one or more configuration files exist.

    Returns:
        (bool): True if at least one config file exists, False otherwise
    """
    return bool(get_loaded_config_sources())
| 26.818182 | 79 | 0.673446 |
ebf5ca4f90a237385342b586d5c1e142847a2572 | 4,875 | py | Python | GUI/my_lib/factory.py | EnviableYapper0/FMachineSchedulerPL | 05ba6a2169ee481062b71b917d1f32d26e240eb8 | [
"MIT"
] | null | null | null | GUI/my_lib/factory.py | EnviableYapper0/FMachineSchedulerPL | 05ba6a2169ee481062b71b917d1f32d26e240eb8 | [
"MIT"
] | null | null | null | GUI/my_lib/factory.py | EnviableYapper0/FMachineSchedulerPL | 05ba6a2169ee481062b71b917d1f32d26e240eb8 | [
"MIT"
] | null | null | null | from . import machine as m
from . import machine_calculator as mc
from . import my_time as mt
| 35.326087 | 106 | 0.633231 |
ebfa57fc6af077b8e484bb5107bce4b51e06f9f3 | 1,898 | py | Python | places/models.py | amureki/lunchtime-with-channels | 7cf6cb15b88ceefbebd53963ff1e194d8df6c25c | [
"MIT"
] | null | null | null | places/models.py | amureki/lunchtime-with-channels | 7cf6cb15b88ceefbebd53963ff1e194d8df6c25c | [
"MIT"
] | null | null | null | places/models.py | amureki/lunchtime-with-channels | 7cf6cb15b88ceefbebd53963ff1e194d8df6c25c | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from stdimage import StdImageField
from stdimage.utils import UploadToUUID
| 29.2 | 67 | 0.615385 |
ebfc9f2828a65b31b16c43b42091b7e322b73651 | 2,363 | py | Python | models/process_dataset.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | null | null | null | models/process_dataset.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | null | null | null | models/process_dataset.py | Aremaki/MscProjectNMR | 5bb8fb129d5fe326aa73b56cb7c5b01a17aebb0d | [
"MIT"
] | 1 | 2021-07-28T11:18:00.000Z | 2021-07-28T11:18:00.000Z | import tensorflow as tf
def shuffle_and_batch_dataset(dataset, batch_size, shuffle_buffer=None):
    """Optionally shuffle a dataset, then batch it.

    A shuffle over ``shuffle_buffer`` elements is applied only when
    ``shuffle_buffer`` is given; batching by ``batch_size`` always happens.
    """
    shuffled = dataset if shuffle_buffer is None else dataset.shuffle(shuffle_buffer)
    return shuffled.batch(batch_size)
def split_dataset(dataset, train_prop=0.8, val_prop=0.2):
    """Split a loaded TFRecordDataset into train/validation/test subsets.

    The test share is implicitly ``1 - (train_prop + val_prop)``. Note that
    the dataset is iterated once up front to count its elements.
    """
    total = sum(1 for _ in dataset)
    n_train = int(train_prop * total)
    n_val = int(val_prop * total)

    train_split = dataset.take(n_train)
    leftover = dataset.skip(n_train)
    return train_split, leftover.take(n_val), leftover.skip(n_val)
def process_dataset(dataset, batch_sizes=None, shuffle_buffers=None, train_prop=0.8, val_prop=0.2):
    """Split, shuffle, batch and prefetch a dataset for training.

    :param dataset: TFRecordDataset object
    :param batch_sizes: list of three batch sizes for the train, validation
        and test sets; defaults to ``[64, 64, 64]``
    :param shuffle_buffers: integer shuffle buffer applied to the train set
        only, or None to skip shuffling
    :param train_prop: the ratio between the full dataset size and the train set size
    :param val_prop: the ratio between the full dataset size and the validation set size
    :return: fully processed train, validation and test TFRecordDataset
    :raises TypeError: if ``shuffle_buffers`` is neither None nor an int
    :raises ValueError: if ``batch_sizes`` does not have exactly 3 entries
    """
    if batch_sizes is None:
        batch_sizes = [64, 64, 64]
    # Bug fix: the old `type(shuffle_buffers) != int` check rejected the
    # documented default of None, and validation failures returned error
    # *strings* instead of raising.
    if shuffle_buffers is not None and not isinstance(shuffle_buffers, int):
        raise TypeError("shuffle_buffers should be an integer or None")
    if len(batch_sizes) != 3:
        raise ValueError("batch_sizes should have a length of 3.")

    train_dataset, val_dataset, test_dataset = split_dataset(dataset, train_prop, val_prop)

    train_dataset = shuffle_and_batch_dataset(train_dataset, batch_sizes[0], shuffle_buffers)
    # AUTOTUNE lets tf.data pick the prefetch depth dynamically.
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)

    val_dataset = val_dataset.batch(batch_sizes[1]).prefetch(tf.data.experimental.AUTOTUNE)
    test_dataset = test_dataset.batch(batch_sizes[2]).prefetch(tf.data.experimental.AUTOTUNE)
    return train_dataset, val_dataset, test_dataset
| 41.45614 | 99 | 0.7427 |
ebff48e4dc84757244340320b746691161bc3959 | 1,080 | py | Python | bdc_collection_builder/celery/worker.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | bdc_collection_builder/celery/worker.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | bdc_collection_builder/celery/worker.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | #
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Defines a structure component to run celery worker."""
# Python Native
import logging
# 3rdparty
from celery.signals import celeryd_after_setup, worker_shutdown
# Builder
from .. import create_app
from ..utils import initialize_factories, finalize_factories
from . import create_celery_app
app = create_app()
celery = create_celery_app(app)
| 25.714286 | 96 | 0.759259 |