hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2300582ed8688ca839e05903662437f7a910f9a9 | 1,648 | py | Python | scratch/eyy/debug/bad_pair_analysis.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-17T02:37:59.000Z | 2022-03-09T16:42:34.000Z | scratch/eyy/debug/bad_pair_analysis.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 446 | 2019-04-10T04:46:20.000Z | 2022-03-15T20:27:57.000Z | scratch/eyy/debug/bad_pair_analysis.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 13 | 2019-02-05T18:02:05.000Z | 2021-03-02T18:41:49.000Z | import numpy as np
import matplotlib.pyplot as plt
import os
f_cutoff = .25
df_cutoff = .05
data_dir = '/data/smurf_data/20181214/1544843999/outputs'
f2, df2 = np.load(os.path.join(data_dir, 'band3_badres.npy'))
f2p, df2p = np.load(os.path.join(data_dir, 'band3_badpair.npy'))
m = np.ravel(np.where(np.logical_or(f2 > f_cutoff, df2 > df_cutoff)))
f2[m] = np.nan
df2[m] = np.nan
f2p[m,0] = np.nan
f2p[m-1,1] = np.nan
df2p[m,0] = np.nan
df2p[m-1,1] = np.nan
n, _ = np.shape(df2p)
xp = np.arange(1,n)
fig, ax = plt.subplots(2, 2, sharex=True, figsize=(8,7))
ax[0,0].plot(f2, color='k')
ax[0,0].plot(f2p[:-1,0])
ax[0,0].plot(xp, f2p[:-1, 1])
ax[0,0].set_title('f')
ax[0,1].plot(df2, color='k', label='Solo')
ax[0,1].plot(df2p[:-1,0], label='R on')
ax[0,1].plot(xp, df2p[:-1,1], label='L on')
ax[0,1].set_title('df')
ax[0,1].legend()
delta_ron_f2 = f2[:-1] - f2p[:-1,0] # right on
delta_lon_f2 = f2[1:] - f2p[:-1,1] # left one
ax[1,0].plot(delta_ron_f2)
ax[1,0].plot(xp, delta_lon_f2)
delta_ron_df2 = df2[:-1] - df2p[:-1,0] # right on
delta_lon_df2 = df2[1:] - df2p[:-1,1] # left one
ax[1,1].plot(delta_ron_df2)
ax[1,1].plot(xp, delta_lon_df2)
ax[1,0].set_xlabel('Res #')
ax[1,1].set_xlabel('Res #')
fig, ax = plt.subplots(1,2, figsize=(8, 4))
bins = np.arange(-.1, 0.06, .01)
hist_mask_r = np.where(~np.isnan(delta_ron_df2))
hist_mask_l = np.where(~np.isnan(delta_lon_df2))
ax[1].hist(delta_ron_df2[hist_mask_r], bins=bins,
histtype='step', label='R on')
ax[1].hist(delta_lon_df2[hist_mask_l], bins=bins,
histtype='step', label='L on')
ax[1].axvline(0, color='k', linestyle=':')
ax[1].legend()
# ax[2,1].hist(delta_lon_df2[]) | 26.15873 | 69 | 0.646238 |
230125cca40653427f41d2b5c28c03de5e593aca | 2,794 | py | Python | examples/pytorch/eager/blendcnn/utils.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | examples/pytorch/eager/blendcnn/utils.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | examples/pytorch/eager/blendcnn/utils.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | # Copyright 2018 Dong-Hyun Lee, Kakao Brain.
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils Functions """
import os
import random
import logging
import json
import numpy as np
import torch
def set_seeds(seed):
"set random seeds"
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_device():
"get device (CPU or GPU)"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("%s (%d GPUs)" % (device, n_gpu))
return device
def split_last(x, shape):
"split the last dimension to given shape"
shape = list(shape)
assert shape.count(-1) <= 1
if -1 in shape:
shape[shape.index(-1)] = int(x.size(-1) / -np.prod(shape))
return x.view(*x.size()[:-1], *shape)
def merge_last(x, n_dims):
"merge the last n_dims to a dimension"
s = x.size()
assert n_dims > 1 and n_dims < len(s)
return x.view(*s[:-n_dims], -1)
def find_sublist(haystack, needle):
"""Return the index at which the sequence needle appears in the
sequence haystack, or -1 if it is not found, using the Boyer-
Moore-Horspool algorithm. The elements of needle and haystack must
be hashable.
https://codereview.stackexchange.com/questions/19627/finding-sub-list
"""
h = len(haystack)
n = len(needle)
skip = {needle[i]: n - i - 1 for i in range(n - 1)}
i = n - 1
while i < h:
for j in range(n):
if haystack[i - j] != needle[-j - 1]:
i += skip.get(haystack[i], n)
break
else:
return i - n + 1
return -1
def get_logger(name, log_path):
"get logger"
logger = logging.getLogger(name)
fomatter = logging.Formatter(
'[ %(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
if not os.path.isfile(log_path):
f = open(log_path, "w+")
fileHandler = logging.FileHandler(log_path)
fileHandler.setFormatter(fomatter)
logger.addHandler(fileHandler)
#streamHandler = logging.StreamHandler()
#streamHandler.setFormatter(fomatter)
#logger.addHandler(streamHandler)
logger.setLevel(logging.DEBUG)
return logger
| 28.222222 | 77 | 0.65927 |
23012fe006d829b36579833bc95d73785791bbf3 | 1,983 | py | Python | models/Nets.py | lorflea/FederatedLearningMLDL2021 | 453d273c14a06eb6d2522c1b9fe877b43212ab76 | [
"MIT"
] | 1 | 2021-11-22T01:20:29.000Z | 2021-11-22T01:20:29.000Z | models/Nets.py | lorflea/FederatedLearningMLDL2021 | 453d273c14a06eb6d2522c1b9fe877b43212ab76 | [
"MIT"
] | null | null | null | models/Nets.py | lorflea/FederatedLearningMLDL2021 | 453d273c14a06eb6d2522c1b9fe877b43212ab76 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
| 30.984375 | 65 | 0.547151 |
2301362f160d7326e050b581a469859de16747d7 | 38,783 | py | Python | carbondesign/tests/test_time_picker.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_time_picker.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_time_picker.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | # pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from .base import compare_template, SimpleTestCase
| 78.98778 | 267 | 0.766083 |
23020e348129d1d194a31757b17377da03a41aa9 | 652 | py | Python | dongtai_agent_python/tests/policy/test_tracking.py | jinghao1/DongTai-agent-python | c06e9dd72c49bde952efe3e3153fc2f5501461ca | [
"Apache-2.0"
] | 17 | 2021-11-13T11:57:10.000Z | 2022-03-26T12:45:30.000Z | dongtai_agent_python/tests/policy/test_tracking.py | Bidaya0/DongTai-agent-python | 4e437b22cb95648e583d1009df821520d7d1d3c3 | [
"Apache-2.0"
] | 2 | 2021-11-08T07:43:38.000Z | 2021-12-09T02:23:46.000Z | dongtai_agent_python/tests/policy/test_tracking.py | Bidaya0/DongTai-agent-python | 4e437b22cb95648e583d1009df821520d7d1d3c3 | [
"Apache-2.0"
] | 17 | 2021-11-02T08:21:57.000Z | 2022-02-19T13:24:36.000Z | import unittest
from dongtai_agent_python.policy import tracking
if __name__ == '__main__':
unittest.main()
| 29.636364 | 98 | 0.653374 |
2302c030895d0f8454025d172c02962a378b1662 | 1,107 | py | Python | setup.py | mansam/validator.py | 22d31a0b78e645cf4ec9694cdfb4612977370c6d | [
"MIT"
] | 87 | 2015-01-29T15:43:44.000Z | 2022-03-09T07:04:16.000Z | setup.py | mansam/validator.py | 22d31a0b78e645cf4ec9694cdfb4612977370c6d | [
"MIT"
] | 25 | 2015-01-05T14:19:53.000Z | 2021-03-05T17:20:03.000Z | setup.py | mansam/validator.py | 22d31a0b78e645cf4ec9694cdfb4612977370c6d | [
"MIT"
] | 32 | 2015-08-20T06:17:33.000Z | 2021-11-09T19:16:38.000Z | from setuptools import setup
setup(
name='validator.py',
version='1.3.0',
author='Samuel "mansam" Lucidi',
author_email="sam@samlucidi.com",
packages=['validator'],
url='https://github.com/mansam/validator.py',
description='A library for appling schemas to data structures.',
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy"
],
license='MIT'
)
| 36.9 | 68 | 0.599819 |
23045d3d5a94dd7bbdb73152afab227894299c52 | 3,137 | py | Python | app.py | jjchshayan/heroku | 7181631b52057a92d751e1756b7b422dfd8825c0 | [
"MIT"
] | null | null | null | app.py | jjchshayan/heroku | 7181631b52057a92d751e1756b7b422dfd8825c0 | [
"MIT"
] | null | null | null | app.py | jjchshayan/heroku | 7181631b52057a92d751e1756b7b422dfd8825c0 | [
"MIT"
] | null | null | null | from telegram.ext import Updater
from telegram import bot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
updater = Updater(token='660812730:AAEGP-xXkMKoplHR6YsUECqXB8diNgvlfbs')
dispatcher = updater.dispatcher
import logging
import requests
state = 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.all, echo)
dispatcher.add_handler(echo_handler)
# def caps(bot, update, args=''):
# text_caps = ' '.join(args).upper()
# bot.send_message(chat_id=update.message.chat_id, text=text_caps)
#
#
# caps_handler = CommandHandler('caps', caps, pass_args=True)
# dispatcher.add_handler(caps_handler)
# from telegram import InlineQueryResultArticle, InputTextMessageContent
#
#
# def inline_caps(bot, update):
# query = update.inline_query.query
# if not query:
# return
# results = list()
# results.append(
# InlineQueryResultArticle(
# id=query.upper(),
# title='Caps',
# input_message_content=InputTextMessageContent(query.upper())
# )
# )
# bot.answer_inline_query(update.inline_query.id, results)
# from telegram.ext import InlineQueryHandler
#
# inline_caps_handler = InlineQueryHandler(inline_caps)
# dispatcher.add_handler(inline_caps_handler)
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
#
# TOKEN = '545193892:AAF-i-kxjJBeEiVXL1PokHCCEGNnQ1sOXFo'
# HOST = 'shayantt.herokuapp.com' # Same FQDN used when generating SSL Cert
# PORT = 8443
# updater.start_webhook(listen="0.0.0.0",
# port=PORT,
# # url_path=TOKEN)
# updater.bot.set_webhook("https://shayantt.herokuapp.com/" + TOKEN)
# updater.idle()
updater.start_polling()
| 29.87619 | 140 | 0.6927 |
2304f8609ba1a32f4e4adb328ecb9521ea5b5b8e | 893 | py | Python | onconet/models/custom_resnet.py | harrivle/Mirai | ea2d4839f1f8b9f881798b819b2192ce2795bd5d | [
"MIT"
] | 37 | 2021-01-28T06:00:34.000Z | 2022-03-29T21:14:12.000Z | onconet/models/custom_resnet.py | NkwamPhilip/Mirai | 70413de690da36c5878e2e6006711476e166bb1d | [
"MIT"
] | null | null | null | onconet/models/custom_resnet.py | NkwamPhilip/Mirai | 70413de690da36c5878e2e6006711476e166bb1d | [
"MIT"
] | 14 | 2021-02-02T09:42:18.000Z | 2022-03-23T00:36:41.000Z | from torch import nn
from onconet.models.factory import RegisterModel, load_pretrained_weights, get_layers
from onconet.models.default_resnets import load_pretrained_model
from onconet.models.resnet_base import ResNet
| 37.208333 | 85 | 0.712206 |
2307774a7abd7ba51f7f8bdccce0f3ce8a1bc5ee | 3,437 | py | Python | cifar_mlp.py | oplatek/ALI | 193b666f62236fa1837613beb807d9dcdf978ce6 | [
"MIT"
] | null | null | null | cifar_mlp.py | oplatek/ALI | 193b666f62236fa1837613beb807d9dcdf978ce6 | [
"MIT"
] | null | null | null | cifar_mlp.py | oplatek/ALI | 193b666f62236fa1837613beb807d9dcdf978ce6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from argparse import ArgumentParser
import theano
from theano import tensor as tt
from blocks.algorithms import GradientDescent, Adam
from blocks.bricks import MLP, Tanh, Softmax
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.initialization import IsotropicGaussian, Constant
from fuel.streams import DataStream
from fuel.transformers import Flatten
from fuel.datasets import CIFAR10
from fuel.schemes import SequentialScheme
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Timing, Printing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
from blocks.main_loop import MainLoop
from blocks.roles import WEIGHT
from customfuel import Cifar10Dataset
from customextensions import LogExtension
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = ArgumentParser("CIFAR10")
parser.add_argument("--num-epochs", type=int, default=100,
help="Number of training epochs to do.")
parser.add_argument("--batch-size", type=int, default=64,
help="Batch size.")
parser.add_argument("save_to", default="cifar.pkl", nargs="?",
help=("Destination to save the state of the training process."))
args = parser.parse_args()
main(args.save_to, args.num_epochs, args.batch_size)
| 39.965116 | 92 | 0.676171 |
23088bb0c48cd2efc5f4f5582dd8f9fb037c941d | 3,682 | py | Python | src/sequel/hierarchical_search/functional.py | simone-campagna/sequel | a96e0f8b5000f8d0174f97f772cca5ac8a140acd | [
"Apache-2.0"
] | null | null | null | src/sequel/hierarchical_search/functional.py | simone-campagna/sequel | a96e0f8b5000f8d0174f97f772cca5ac8a140acd | [
"Apache-2.0"
] | null | null | null | src/sequel/hierarchical_search/functional.py | simone-campagna/sequel | a96e0f8b5000f8d0174f97f772cca5ac8a140acd | [
"Apache-2.0"
] | null | null | null | """
Search integral/derivative algorithm class
"""
from ..items import Items
from ..sequence import integral, derivative, summation, product
from ..utils import sequence_matches
from .base import RecursiveSearchAlgorithm
__all__ = [
"SearchSummation",
"SearchProduct",
"SearchIntegral",
"SearchDerivative",
]
| 33.171171 | 95 | 0.608637 |
230917ff4323cacb93016bdddc8f27e058c7786a | 409 | py | Python | data/plugins/items/camel.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | [
"MIT"
] | 3 | 2019-05-09T16:59:13.000Z | 2019-05-09T18:29:57.000Z | data/plugins/items/camel.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | [
"MIT"
] | null | null | null | data/plugins/items/camel.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | [
"MIT"
] | 7 | 2019-07-11T23:04:40.000Z | 2021-08-02T14:27:13.000Z | from core import ServerConstants | 68.166667 | 201 | 0.777506 |
230ca0bc145d70340fa1510e5f32fb9e40355ade | 1,662 | py | Python | tests/image/segmentation/test_backbones.py | lillekemiker/lightning-flash | a047330ba75486355378f22cbebfd053c3d63c08 | [
"Apache-2.0"
] | null | null | null | tests/image/segmentation/test_backbones.py | lillekemiker/lightning-flash | a047330ba75486355378f22cbebfd053c3d63c08 | [
"Apache-2.0"
] | null | null | null | tests/image/segmentation/test_backbones.py | lillekemiker/lightning-flash | a047330ba75486355378f22cbebfd053c3d63c08 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from pytorch_lightning.utilities import _BOLTS_AVAILABLE, _TORCHVISION_AVAILABLE
from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
| 42.615385 | 118 | 0.760529 |
230cbf98d0fce9a1f8d3eb7ee8c52b62685cd185 | 6,972 | py | Python | src/ExtractData.py | AntoineMeresse/Terminal-chart | eff66c32d78c394849176c7777bf7c203dbac5b3 | [
"MIT"
] | null | null | null | src/ExtractData.py | AntoineMeresse/Terminal-chart | eff66c32d78c394849176c7777bf7c203dbac5b3 | [
"MIT"
] | null | null | null | src/ExtractData.py | AntoineMeresse/Terminal-chart | eff66c32d78c394849176c7777bf7c203dbac5b3 | [
"MIT"
] | null | null | null | import sys
import re
from src.GenGraph import *
| 30.578947 | 270 | 0.49455 |
230cfdf0108fc8a76637145365ade46b19c0f345 | 1,642 | py | Python | catalog/admin.py | iamsaeedfadaei/Library | 45a7b5c421252e20d2e1c0f9b6794b85dfc40eb3 | [
"MIT"
] | null | null | null | catalog/admin.py | iamsaeedfadaei/Library | 45a7b5c421252e20d2e1c0f9b6794b85dfc40eb3 | [
"MIT"
] | null | null | null | catalog/admin.py | iamsaeedfadaei/Library | 45a7b5c421252e20d2e1c0f9b6794b85dfc40eb3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Book, Author, BookInstance, Genre
#creating inline for copy instances in book model by TabularInline.
# we have foreignkey from bookinstances to book and from book to authors --> just the way of foreignkey!
admin.site.register(Genre)
| 31.576923 | 110 | 0.699147 |
230db22fc190c68752be940d1363fe5ecdb2a558 | 169 | py | Python | backend/api/models.py | tuguldurio/fullstack-ecommerce | 06257e704c657b008587aabb4075750899149b1d | [
"MIT"
] | null | null | null | backend/api/models.py | tuguldurio/fullstack-ecommerce | 06257e704c657b008587aabb4075750899149b1d | [
"MIT"
] | null | null | null | backend/api/models.py | tuguldurio/fullstack-ecommerce | 06257e704c657b008587aabb4075750899149b1d | [
"MIT"
] | null | null | null | from api.user.models import User
from api.cart.models import Cart, CartProduct
from api.order.models import Order, OrderProduct
from api.product.models import Product | 42.25 | 49 | 0.822485 |
230de14d7e6fc08a01de2fd55c6b8f3b77dd5b56 | 4,456 | py | Python | chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py | davidson16807/tectonics-approximations | f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b | [
"CC0-1.0"
] | null | null | null | chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py | davidson16807/tectonics-approximations | f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b | [
"CC0-1.0"
] | null | null | null | chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py | davidson16807/tectonics-approximations | f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b | [
"CC0-1.0"
] | null | null | null | from math import *
import csv
import random
import numpy as np
from optimize import genetic_algorithm
with open('pTZ.csv', newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(csvreader, None) # skip header
observations = [( np.array([float(p),float(T)]), float(Z))
for (i,p,Z,T) in csvreader ]
Lout = np.array([Z for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
Lin = np.array([(p,T) for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
Zout = np.array([Z for (p,T), Z in observations])
Zin = np.array([(p, T) for (p,T), Z in observations])
# Lguess = np.array([1.098,0.118,-0.946,0.981,0.954])
Lguess = np.array([1.104, 0.101, -0.924, 1,1]) # best found where exponents are 1
Lsolutions = [Lguess + np.array([random.gauss(0,0.1) for j in range(len(Lguess))]) for i in range(1000000)]
Lsolutions = sorted(Lsolutions, key=Lcost1)[0:50000]
Lsolutions = genetic_algorithm([Lcost1], Ltext, Lsolutions, survival_rate=0.8, mutant_deviation=0.3)
Zguess = np.array([3,3, 1.12, 0.101, -0.928, 1,1, 7.7, -0.84])
# Zguess = np.array([1.098,0.118,-0.946,0.981,0.954, 18.033,-7.974,-24.599,3.465,0.116,9.261])
# Zguess = np.array([0.103,1.245,2.083,1.030,0.994]) # best found for the other model
Zsolutions = [Zguess]+[Zguess + np.random.normal(0, 0.3, len(Zguess)) for i in range(100000)]
Zsolutions = [x for x in Zsolutions if not isnan(Zcost1(x))]
Zsolutions = sorted(Zsolutions, key=Zcost1)[0:50000]
Zsolutions = genetic_algorithm([Zcost1], Ztext, Zsolutions, survival_rate=0.8, mutant_deviation=1)
| 31.380282 | 110 | 0.60772 |
230f2dcf82a79b046dcfaf9af3162a775c4bd915 | 1,198 | py | Python | test/minpwm.py | delijati/ultrabot | 37956187b3ed9a28ef655ab2ed064d11e5f29473 | [
"MIT"
] | 1 | 2016-12-06T01:25:03.000Z | 2016-12-06T01:25:03.000Z | test/minpwm.py | delijati/ultrabot | 37956187b3ed9a28ef655ab2ed064d11e5f29473 | [
"MIT"
] | null | null | null | test/minpwm.py | delijati/ultrabot | 37956187b3ed9a28ef655ab2ed064d11e5f29473 | [
"MIT"
] | null | null | null | import enc
import config
import motor
import threading
import time
enc_t = None
pwm_range = (50, 90)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
enc_t.stop()
pass
| 22.603774 | 62 | 0.537563 |
230f8a70cf89cd6ca954075bdfb7904ee2fe3de0 | 1,364 | py | Python | backend/apps/permissions/constants.py | hovedstyret/indok-web | 598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159 | [
"MIT"
] | 3 | 2021-11-18T09:29:14.000Z | 2022-01-13T20:12:11.000Z | backend/apps/permissions/constants.py | rubberdok/indok-web | 598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159 | [
"MIT"
] | 277 | 2022-01-17T18:16:44.000Z | 2022-03-31T19:44:04.000Z | backend/apps/permissions/constants.py | hovedstyret/indok-web | 598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159 | [
"MIT"
] | null | null | null | from typing import Final, Literal
DefaultPermissionsType = Final[list[tuple[str, str]]]
# Default ResponsibleGroup types
PRIMARY_TYPE: Literal["PRIMARY"] = "PRIMARY"
HR_TYPE: Literal["HR"] = "HR"
ORGANIZATION: Final = "Organization member"
INDOK: Final = "Indk"
REGISTERED_USER: Final = "Registered user"
PRIMARY_GROUP_NAME: Final = "Medlem"
HR_GROUP_NAME: Final = "HR"
DEFAULT_ORGANIZATION_PERMISSIONS: DefaultPermissionsType = [
("events", "add_event"),
("events", "change_event"),
("events", "delete_event"),
("listings", "add_listing"),
("listings", "change_listing"),
("listings", "delete_listing"),
("organizations", "add_membership"),
]
DEFAULT_INDOK_PERMISSIONS: DefaultPermissionsType = [
("listings", "view_listing"),
("events", "add_signup"),
("events", "view_signup"),
("events", "change_signup"),
("organizations", "view_organization"),
("forms", "add_answer"),
("forms", "change_answer"),
("forms", "view_answer"),
("forms", "view_form"),
("forms", "add_response"),
("archive", "view_archivedocument"),
]
DEFAULT_REGISTERED_USER_PERMISSIONS: DefaultPermissionsType = [
("events", "view_event"),
]
DEFAULT_GROUPS = {
ORGANIZATION: DEFAULT_ORGANIZATION_PERMISSIONS,
INDOK: DEFAULT_INDOK_PERMISSIONS,
REGISTERED_USER: DEFAULT_REGISTERED_USER_PERMISSIONS,
}
| 28.416667 | 63 | 0.692082 |
230ffd138e6c0b442e53f396664bbe99fe6ff440 | 1,037 | py | Python | magda/utils/logger/printers/message.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 8 | 2021-02-25T14:00:25.000Z | 2022-03-10T00:32:43.000Z | magda/utils/logger/printers/message.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 22 | 2021-03-24T11:56:47.000Z | 2021-11-02T15:09:50.000Z | magda/utils/logger/printers/message.py | p-mielniczuk/magda | 6359fa5721b4e27bd98f2c6af0e858b476645618 | [
"Apache-2.0"
] | 6 | 2021-04-06T07:26:47.000Z | 2021-12-07T18:55:52.000Z | from __future__ import annotations
from typing import Optional
from colorama import Fore, Style
from magda.utils.logger.parts import LoggerParts
from magda.utils.logger.printers.base import BasePrinter
from magda.utils.logger.printers.shared import with_log_level_colors
| 30.5 | 75 | 0.657666 |
2311235022e84d72f4d0c26645f17bee8edd6070 | 1,615 | py | Python | statzcw/stats.py | xt0fer/Py21-BasicStats | 5e747765e58092d014fb36e66e2c4d623b1dbcba | [
"MIT"
] | null | null | null | statzcw/stats.py | xt0fer/Py21-BasicStats | 5e747765e58092d014fb36e66e2c4d623b1dbcba | [
"MIT"
] | null | null | null | statzcw/stats.py | xt0fer/Py21-BasicStats | 5e747765e58092d014fb36e66e2c4d623b1dbcba | [
"MIT"
] | 1 | 2021-07-11T14:50:21.000Z | 2021-07-11T14:50:21.000Z |
from typing import List
# print("stats test")
# print("zcount should be 5 ==", zcount([1.0,2.0,3.0,4.0,5.0]))
| 23.071429 | 63 | 0.577709 |
2311a4831bf76119b74ab330fe6d74d995c77324 | 106 | py | Python | app/Mixtape.py | mlaude1/masonite_mixtapes | 37cc33bc04af6d626e5b65da9221ac848e996cf0 | [
"MIT"
] | null | null | null | app/Mixtape.py | mlaude1/masonite_mixtapes | 37cc33bc04af6d626e5b65da9221ac848e996cf0 | [
"MIT"
] | null | null | null | app/Mixtape.py | mlaude1/masonite_mixtapes | 37cc33bc04af6d626e5b65da9221ac848e996cf0 | [
"MIT"
] | null | null | null | """Mixtape Model."""
from masoniteorm.models import Model
| 15.142857 | 36 | 0.726415 |
23165b9f50977d462d02641d8468df5aa19bed3f | 10,872 | py | Python | priceprop/propagator.py | felixpatzelt/priceprop | 038832b5e89b8559c6162e39f1b446f4446fe7f2 | [
"MIT"
] | 17 | 2018-01-17T13:19:42.000Z | 2022-01-25T14:02:10.000Z | priceprop/propagator.py | felixpatzelt/priceprop | 038832b5e89b8559c6162e39f1b446f4446fe7f2 | [
"MIT"
] | null | null | null | priceprop/propagator.py | felixpatzelt/priceprop | 038832b5e89b8559c6162e39f1b446f4446fe7f2 | [
"MIT"
] | 7 | 2018-07-14T06:17:05.000Z | 2021-05-16T13:59:47.000Z | import numpy as np
from scipy.linalg import solve_toeplitz, solve
from scipy.signal import fftconvolve
from scipy.interpolate import Rbf
from scorr import xcorr, xcorr_grouped_df, xcorrshift, fftcrop, corr_mat
# Helpers
# =====================================================================
def integrate(x):
"Return lag 1 sum, i.e. price from return, or an integrated kernel."
return np.concatenate([[0], np.cumsum(x[:-1])])
def smooth_tail_rbf(k, l0=3, tau=5, smooth=1, epsilon=1):
"""Smooth tail of array k with radial basis functions"""
# interpolate in log-lags
l = np.log(np.arange(l0,len(k)))
# estimate functions
krbf = Rbf(
l, k[l0:], function='multiquadric', smooth=smooth, epsilon=epsilon
)
# weights to blend with original for short lags
w = np.exp(-np.arange(1,len(k)-l0+1)/ float(tau))
# interpolate
knew = np.empty_like(k)
knew[:l0] = k[:l0]
knew[l0:] = krbf(l) * (1-w) + k[l0:] * w
#done
return knew
def propagate(s, G, sfunc=np.sign):
"""Simulate propagator model from signs and one kernel.
Equivalent to tim1, one of the kernels in tim2 or hdim2.
"""
steps = len(s)
s = sfunc(s[:len(s)])
p = fftconvolve(s, G)[:steps]
return p
# Responses
# =====================================================================
def _return_response(ret, x, maxlag):
"""Helper for response and response_grouped_df."""
# return what?
ret = ret.lower()
res = []
for i in ret:
if i == 'l':
# lags
res.append(np.arange(-maxlag,maxlag+1))
elif i == 's':
res.append(
# differential response
np.concatenate([x[-maxlag:], x[:maxlag+1]])
)
elif i == 'r':
res.append(
# bare response === cumulated differential response
np.concatenate([
-np.cumsum(x[:-maxlag-1:-1])[::-1],
[0],
np.cumsum(x[:maxlag])
])
)
if len(res) > 1:
return tuple(res)
else:
return res[0]
def response(r, s, maxlag=10**4, ret='lsr', subtract_mean=False):
"""Return lag, differential response S, response R.
Note that this commonly used price response is a simple cross correlation
and NOT equivalent to the linear response in systems analysis.
Parameters:
===========
r: array-like
Returns
s: array-like
Order signs
maxlag: int
Longest lag to calculate
ret: str
can include 'l' to return lags, 'r' to return response, and
's' to return differential response (in specified order).
subtract_mean: bool
Subtract means first. Default: False (signal means already zero)
"""
maxlag = min(maxlag, len(r) - 2)
s = s[:len(r)]
# diff. resp.
# xcorr == S(0, 1, ..., maxlag, -maxlag, ... -1)
x = xcorr(r, s, norm='cov', subtract_mean=subtract_mean)
return _return_response(ret, x, maxlag)
def response_grouped_df(
df, cols, nfft='pad', ret='lsr', subtract_mean=False, **kwargs
):
"""Return lag, differential response S, response R calculated daily.
Note that this commonly used price response is a simple cross correlation
and NOT equivalent to the linear response in systems analysis.
Parameters
==========
df: pandas.DataFrame
Dataframe containing order signs and returns
cols: tuple
The columns of interest
nfft:
Length of the fft segments
ret: str
What to return ('l': lags, 'r': response, 's': incremental response).
subtract_mean: bool
Subtract means first. Default: False (signal means already zero)
See also response, spectral.xcorr_grouped_df for more explanations
"""
# diff. resp.
x = xcorr_grouped_df(
df,
cols,
by = 'date',
nfft = nfft,
funcs = (lambda x: x, lambda x: x),
subtract_mean = subtract_mean,
norm = 'cov',
return_df = False,
**kwargs
)[0]
# lag 1 -> element 0, lag 0 -> element -1, ...
#x = x['xcorr'].values[x.index.values-1]
maxlag = len(x) / 2
return _return_response(ret, x, maxlag)
# Analytical power-laws
# =====================================================================
def beta_from_gamma(gamma):
"""Return exponent beta for the (integrated) propagator decay
G(lag) = lag**-beta
that compensates a sign-autocorrelation
C(lag) = lag**-gamma.
"""
return (1-gamma)/2.
def G_pow(steps, beta):
"""Return power-law Propagator kernel G(l). l=0...steps"""
G = np.arange(1,steps)**-beta#+1
G = np.r_[0, G]
return G
def k_pow(steps, beta):
"""Return increment of power-law propagator kernel g. l=0...steps"""
return np.diff(G_pow(steps, beta))
# TIM1 specific
# =====================================================================
def calibrate_tim1(c, Sl, maxlag=10**4):
"""Return empirical estimate TIM1 kernel
Parameters:
===========
c: array-like
Cross-correlation (covariance).
Sl: array-like
Price-response. If the response is differential, so is the returned
kernel.
maxlag: int
length of the kernel.
See also: integrate, g2_empirical, tim1
"""
lS = int(len(Sl) / 2)
g = solve_toeplitz(c[:maxlag], Sl[lS:lS+maxlag])
return g
def tim1(s, G, sfunc=np.sign):
    """Simulate Transient Impact Model 1; return the price or return.

    Passing the bare (integrated) response ``G`` yields the price p;
    passing the differential kernel g, with ``G == numpy.cumsum(g)``,
    yields the one-step-ahead return p(t+1) - p(t).

    Parameters:
    ===========
    s: array-like
        Order signs
    G: array-like
        Kernel
    sfunc: function [optional]
        Function applied to the signs. Default: np.sign.

    See also: calibrate_tim1, integrate, tim2, hdim2.
    """
    # Single-propagator model: one convolution of signs with the kernel.
    return propagate(s, G, sfunc=sfunc)
# TIM2 specific
# =====================================================================
def calibrate_tim2(
    nncorr, cccorr, cncorr, nccorr, Sln, Slc, maxlag=2**10
):
    """
    Return the empirical estimate for both kernels of the TIM2
    (Transient Impact Model with two propagators).

    Parameters:
    ===========
    nncorr: array-like
        Cross-covariance between non-price-changing (n-) orders.
    cccorr: array-like
        Cross-covariance between price-changing (c-) orders.
    cncorr: array-like
        Cross-covariance between c- and n-orders.
    nccorr: array-like
        Cross-covariance between n- and c-orders.
    Sln: array-like
        (incremental) price response for n-orders.
    Slc: array-like
        (incremental) price response for c-orders.
    maxlag: int
        Length of the kernels.

    See also: calibrate_tim1, calibrate_hdim2
    """
    # Stack the non-negative lags of both incremental responses.
    mid_n = len(Sln) // 2
    mid_c = len(Slc) // 2
    S = np.concatenate([
        Sln[mid_n:mid_n + maxlag],
        Slc[mid_c:mid_c + maxlag]
    ])

    # Truncate each covariance to maxlag and assemble the 2x2 block
    # covariance matrix of the joint linear system.
    def truncated(x):
        return corr_mat(x, maxlag=maxlag)

    C = np.bmat([
        [truncated(nncorr), truncated(cncorr)],
        [truncated(nccorr), truncated(cccorr)]
    ])

    # Solve once, then split the stacked solution into the two kernels.
    g = solve(C, S)
    return g[:maxlag], g[maxlag:]
def tim2(s, c, G_n, G_c, sfunc=np.sign):
    """Simulate Transient Impact Model 2.

    Returns prices when integrated kernels are passed as arguments
    or returns for differential kernels.

    Parameters:
    ===========
    s: array
        Trade signs
    c: array
        Trade labels (1 = change; 0 = no change)
    G_n: array
        Kernel for non-price-changing trades
    G_c: array
        Kernel for price-changing trades
    sfunc: function [optional]
        Function to apply to signs. Default: np.sign.

    See also: calibrate_tim2, tim1, hdim2.
    """
    assert c.dtype == bool, "c must be a boolean indicator!"
    # Forward sfunc to both propagations; previously it was accepted
    # but silently ignored, so a caller-supplied sfunc had no effect
    # (tim1 does forward it). Defaults are identical, so behavior for
    # callers using the default is unchanged.
    return (propagate(s * c, G_c, sfunc=sfunc)
            + propagate(s * (~c), G_n, sfunc=sfunc))
# HDIM2 specific
# =====================================================================
def calibrate_hdim2(
    Cnnc, Cccc, Ccnc, Sln, Slc,
    maxlag=None, force_lag_zero=True
):
    """Return empirical estimate for both kernels of the HDIM2
    (History Dependent Impact Model with two propagators).

    Requires three-point correlation matrices between the signs of one
    non-lagged and two differently lagged orders. We distinguish
    between price-changing (c-) and non-price-changing (n-) orders.
    The argument names correspond to the argument order in
    spectral.x3corr.

    Parameters:
    ===========
    Cnnc: 2d-array-like
        Cross-covariance matrix for n-, n-, c- orders.
    Cccc: 2d-array-like
        Cross-covariance matrix for c-, c-, c- orders.
    Ccnc: 2d-array-like
        Cross-covariance matrix for c-, n-, c- orders.
    Sln: array-like
        (incremental) lagged price response for n-orders.
    Slc: array-like
        (incremental) lagged price response for c-orders.
    maxlag: int [optional]
        Length of the kernels. Defaults to
        min(len(Cccc), len(Sln)) // 2.
    force_lag_zero: bool [optional]
        Pin the lag-0 row of the system (C[0,0]=1, rest of row 0).

    See also: hdim2, calibrate_tim2
    """
    # Integer division here: the old ``/ 2`` produced a float under
    # Python 3, which then raised TypeError when used as a slice index
    # below whenever maxlag was left at its default.
    maxlag = maxlag or min(len(Cccc), len(Sln)) // 2
    # incremental response: non-negative lags of both responses
    lSn = int(len(Sln) / 2)
    lSc = int(len(Slc) / 2)
    S = np.concatenate([
        Sln[lSn:lSn+maxlag],
        Slc[lSc:lSc+maxlag]
    ])
    # block covariance matrix; the n-c block is the transpose of c-n
    Cncc = Ccnc.T
    C = np.bmat([
        [Cnnc[:maxlag,:maxlag], Ccnc[:maxlag,:maxlag]],
        [Cncc[:maxlag,:maxlag], Cccc[:maxlag,:maxlag]]
    ])
    if force_lag_zero:
        C[0,0] = 1
        C[0,1:] = 0
    # solve the joint system and split it into the two kernels
    g = solve(C, S)
    gn = g[:maxlag]
    gc = g[maxlag:]
    return gn, gc
def hdim2(s, c, k_n, k_c, sfunc=np.sign):
    """Simulate History Dependent Impact Model 2; return the return.

    Parameters:
    ===========
    s: array
        Trade signs
    c: array
        Trade labels (1 = change; 0 = no change)
    k_n: array
        Differential kernel for non-price-changing trades
    k_c: array
        Differential kernel for price-changing trades
    sfunc: function [optional]
        Function to apply to signs. Default: np.sign.

    See also: calibrate_hdim2, tim2, tim1.
    """
    assert c.dtype == bool, "c must be a boolean indicator!"
    # Forward sfunc to both propagations; previously it was accepted
    # but silently ignored (same bug as tim2, inconsistent with tim1).
    # Defaults are identical, so default-call behavior is unchanged.
    return c * (propagate(s * c, k_c, sfunc=sfunc)
                + propagate(s * (~c), k_n, sfunc=sfunc))
| 30.2 | 79 | 0.545438 |
231680e3bbb8bd90319b6c531c7b915437fa932f | 661 | py | Python | src/code-challenges/codewars/7KYU/longest/test_longest.py | maltewirz/code-challenges | 97777b10963f19bc587ddd984f0526b221c081f8 | [
"MIT"
] | 1 | 2020-08-30T07:52:20.000Z | 2020-08-30T07:52:20.000Z | src/code-challenges/codewars/7KYU/longest/test_longest.py | maltewirz/code-challenges | 97777b10963f19bc587ddd984f0526b221c081f8 | [
"MIT"
] | 6 | 2020-08-12T07:05:04.000Z | 2021-08-23T06:10:10.000Z | src/code-challenges/codewars/7KYU/longest/test_longest.py | maltewirz/code-challenges | 97777b10963f19bc587ddd984f0526b221c081f8 | [
"MIT"
] | null | null | null | from longest import longest
import unittest
if __name__ == "__main__":
unittest.main()
| 26.44 | 79 | 0.673222 |
2316d7baa946659edc0058ea0663bc1e4f77f7ab | 14 | py | Python | getv/__init__.py | FUNNYDMAN/getv | b0c495c9c9b9dea8bff86916aee85ecac4f505ab | [
"MIT"
] | 1 | 2018-08-07T18:50:43.000Z | 2018-08-07T18:50:43.000Z | getv/__init__.py | FUNNYDMAN/getv | b0c495c9c9b9dea8bff86916aee85ecac4f505ab | [
"MIT"
] | null | null | null | getv/__init__.py | FUNNYDMAN/getv | b0c495c9c9b9dea8bff86916aee85ecac4f505ab | [
"MIT"
] | null | null | null | name = "getv"
| 7 | 13 | 0.571429 |
2317503e6a916f16a70dd2104fe9aa18b505c980 | 3,035 | py | Python | 2020/day16/day16.py | Zojka/advent | 0f967bf308ae0502db3656d2e9e8a0d310b00594 | [
"Apache-2.0"
] | 1 | 2020-12-16T20:34:30.000Z | 2020-12-16T20:34:30.000Z | 2020/day16/day16.py | Zojka/adventofcode | 0f967bf308ae0502db3656d2e9e8a0d310b00594 | [
"Apache-2.0"
] | null | null | null | 2020/day16/day16.py | Zojka/adventofcode | 0f967bf308ae0502db3656d2e9e8a0d310b00594 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zparteka
"""
if __name__ == '__main__':
main()
| 27.342342 | 119 | 0.497858 |
231a46a705da24db623316f5754d9f510e7b8d96 | 1,527 | py | Python | api/views/user.py | jcasmer/grow_control_backend- | 6a18a137e0a16138607413925727d7e5f8486777 | [
"BSD-3-Clause"
] | 1 | 2019-05-11T14:45:47.000Z | 2019-05-11T14:45:47.000Z | api/views/user.py | jcasmer/grow_control_backend- | 6a18a137e0a16138607413925727d7e5f8486777 | [
"BSD-3-Clause"
] | 6 | 2021-03-18T20:45:02.000Z | 2021-09-22T17:41:38.000Z | api/views/user.py | jcasmer/grow_control_backend- | 6a18a137e0a16138607413925727d7e5f8486777 | [
"BSD-3-Clause"
] | null | null | null | '''
'''
from django.contrib.auth.models import User, Group
from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework import mixins
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from src.base_view import BaseViewSet
from ..serializers import UserSerializer, UserFullDataSerializer
from ..filters import UserFilter, UserFullDataFilter
| 31.8125 | 102 | 0.728225 |
231aa17295db10591d7e97d44c06178132b509d0 | 2,481 | py | Python | core/characters.py | gnbuck/rpg_game | a0e7a0d2002230d5628f7a811e831a36b0904d2c | [
"Apache-2.0"
] | null | null | null | core/characters.py | gnbuck/rpg_game | a0e7a0d2002230d5628f7a811e831a36b0904d2c | [
"Apache-2.0"
] | null | null | null | core/characters.py | gnbuck/rpg_game | a0e7a0d2002230d5628f7a811e831a36b0904d2c | [
"Apache-2.0"
] | null | null | null | from random import randint
from core.players import Players
| 29.188235 | 128 | 0.583636 |
231b5c3a6ff047a112893a6a6f2da0e0da9bf4d4 | 1,893 | py | Python | raytracerchallenge_python/material.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | [
"MIT"
] | 1 | 2020-05-13T20:54:01.000Z | 2020-05-13T20:54:01.000Z | raytracerchallenge_python/material.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | [
"MIT"
] | null | null | null | raytracerchallenge_python/material.py | toku345/RayTracerChallenge_Python | 40ced097f92cc61b116d24c6d6c4f27d6b13029d | [
"MIT"
] | null | null | null | from raytracerchallenge_python.tuple import Color
from math import pow
| 33.210526 | 77 | 0.56524 |
231c19be88b4ad2d044eaa6cc1261367a03e271b | 673 | py | Python | dawgmon/local.py | anvilventures/dawgmon | 59c28f430d896aa5e7afd9c2f40584113e8d52dc | [
"BSD-3-Clause"
] | 54 | 2017-09-18T21:24:25.000Z | 2021-03-11T00:11:43.000Z | dawgmon/local.py | anvilventures/dawgmon | 59c28f430d896aa5e7afd9c2f40584113e8d52dc | [
"BSD-3-Clause"
] | null | null | null | dawgmon/local.py | anvilventures/dawgmon | 59c28f430d896aa5e7afd9c2f40584113e8d52dc | [
"BSD-3-Clause"
] | 8 | 2017-09-19T09:48:45.000Z | 2020-03-22T01:18:44.000Z | import subprocess, shlex
from dawgmon import commands
| 32.047619 | 86 | 0.738484 |
231f0d4149a6494f0d37247083fc3c9b9526fe29 | 504 | py | Python | graphchart.py | hengloem/py-data-visualization | 181ff5db7ace8111508efc7d5c351839935d652e | [
"MIT"
] | null | null | null | graphchart.py | hengloem/py-data-visualization | 181ff5db7ace8111508efc7d5c351839935d652e | [
"MIT"
] | null | null | null | graphchart.py | hengloem/py-data-visualization | 181ff5db7ace8111508efc7d5c351839935d652e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
years = [1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
pops = [2.5, 2.7, 3, 3.3, 3.6, 4.0, 4.4, 4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
deaths = [1.2, 1.7, 1.8, 2.2, 2.5, 2.7, 2.9, 3, 3.1, 3.3, 3.5, 3.8, 4, 4.3]
plt.plot(years, pops, color=(255/255, 100/255, 100/255))
plt.plot(years, deaths, color=(.6, .6, 1))
plt.title("World Population")
plt.ylabel("Population in billion.")
plt.xlabel("Population growth by year.")
plt.show()
| 26.526316 | 92 | 0.605159 |
231f6aa566919c06850651c755c3b8c14c876a0c | 38,747 | py | Python | py_knots/clasper.py | Chinmaya-Kausik/py_knots | 3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f | [
"MIT"
] | null | null | null | py_knots/clasper.py | Chinmaya-Kausik/py_knots | 3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f | [
"MIT"
] | null | null | null | py_knots/clasper.py | Chinmaya-Kausik/py_knots | 3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from matplotlib.pyplot import close
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
from matplotlib.mathtext import math_to_image
from io import BytesIO
from PIL import ImageTk, Image
from sympy import latex
from math import pi, cos, sin
from sgraph import *
from braid import *
from col_perm import *
from pres_mat import *
from visualization import *
from casson_gordon import *
from typing import List, Tuple, Callable, Dict
from math import log10, floor
font_style = "Calibri"
font_size = 25
# Function for rounding eigenvalues
# Class for main window
# Class for invariants
# Class for strand inputs
# Class for color inputs
# Class for signature inputs
# Class for Casson Gordon inputs
# Executing everything
if __name__ == "__main__":
root = tk.Tk()
root.title("Clasper")
# Get the screen dimension
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# Find the center point
center_x = int(screen_width/2)
center_y = int(screen_height/2)
window_width = screen_width
window_height = screen_height
# Set the position of the window to the center of the screen
root.geometry(f'{window_width}x{window_height}+{center_x}+{0}')
root.state('zoomed')
clasper_canvas = tk.Canvas(root)
hbar = tk.Scrollbar(root, orient='horizontal',
command=clasper_canvas.xview)
scrollbar = tk.Scrollbar(root, orient='vertical',
command=clasper_canvas.yview)
hbar.pack(side="bottom", fill="both")
clasper_canvas.pack(side="left", fill="both", expand=True, padx=10, pady=10)
scrollbar.pack(side="right", fill="both")
clasper_canvas['yscrollcommand'] = scrollbar.set
clasper_canvas['xscrollcommand'] = hbar.set
clasper = Clasper(clasper_canvas)
clasper_canvas.create_window(0, 0,
height=2800,
width=3000,
window=clasper, anchor="nw", tags="frame")
clasper_canvas.bind("<Configure>", onCanvasConfigure)
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
root.bind_all("<MouseWheel>", on_mousewheel)
root.bind_all("<Shift-MouseWheel>", on_shift_mousewheel)
root.bind('<Return>', clasper.compute_with_defaults)
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
finally:
root.mainloop()
# Setting up the entry for strands
"""ttk.Label(
self, text='Number of Strands:',
font=(font_style, font_size)).grid(column=0, row=2, pady=10)
self.strand_str = tk.StringVar()
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for the colour list
"""ttk.Label(self, text='Colours (start from 0, BFD):',
font=(font_style, font_size)).grid(
column=0, row=5, pady=10)
self.colour_list = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_list,
font=(font_style, font_size)).grid(
column=1, row=5, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for orientations of colours
"""ttk.Label(self, text='Orientations (+1/-1, BFD):',
font=(font_style, font_size)).grid(
column=0, row=6, pady=10)
self.colour_signs = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_signs,
font=(font_style, font_size)).grid(
column=1, row=6, padx=0, pady=10, sticky='W', columnspan=3)
"""
# Set up entry for complex tuple
"""ttk.Label(self, text='Signature input,'+
'space sep\n (1/3 means 2*pi/3, BFD):',
font=(font_style, font_size)).grid(
column=0, row=7, pady=10)
self.cplx_tuple = tk.StringVar()
ttk.Entry(self, textvariable=self.cplx_tuple,
font=(font_style, font_size)).grid(
column=1, row=7, padx=0, pady=10, sticky='W', columnspan=2)"""
| 36.901905 | 83 | 0.596666 |
231f9bd0145ee2eafeef11269aea705cc5fa7a87 | 6,136 | py | Python | source/FnAssetAPI/Host.py | IngenuityEngine/ftrack-connect-foundry | a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d | [
"MIT"
] | 1 | 2019-10-22T06:33:08.000Z | 2019-10-22T06:33:08.000Z | source/FnAssetAPI/Host.py | IngenuityEngine/ftrack-connect-foundry | a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d | [
"MIT"
] | null | null | null | source/FnAssetAPI/Host.py | IngenuityEngine/ftrack-connect-foundry | a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d | [
"MIT"
] | null | null | null | from .audit import auditApiCall
from .exceptions import InvalidCommand
__all__ = ['Host']
# def progress(self, decimalProgress, message):
# """
#
# A method to provide alternate progress reporting. If not implemented, then
# the standard logging mechanism will print the progress message to the
# standard logging call with a progress severity.
#
# @see log
# @see python.logging
#
# """
# raise NotImplementedError
| 25.355372 | 82 | 0.699641 |
23203ffa2e49d090e30c618e5403e0af89df7c09 | 17,259 | py | Python | state_graph.py | Lukx19/KR-QR | be90434de57759e077bce208398ee12e8f1ec85a | [
"MIT"
] | null | null | null | state_graph.py | Lukx19/KR-QR | be90434de57759e077bce208398ee12e8f1ec85a | [
"MIT"
] | null | null | null | state_graph.py | Lukx19/KR-QR | be90434de57759e077bce208398ee12e8f1ec85a | [
"MIT"
] | null | null | null | import copy
import queue
import pydot
def stationaryToIntervalChange(state_obj):
for qt in state_obj.quantities:
if qt.isStationary():
return True
return False
def genFlipedInflow(state_obj):
states = []
if state_obj.state['inflow']['der'].getVal() == 0:
states.append(newState(state_obj,[('inflow','der',+1)],
desc="Id+", transition="increase"))
if state_obj.state['inflow']['mag'].getVal() != 0:
states.append(newState(state_obj,[('inflow','der',-1)],
desc="Id-", transition="decrease"))
return states
if (state_obj.state['inflow']['mag'].getVal() == 0
and state_obj.state['inflow']['der'].getVal() == 1):
return states
if (state_obj.state['inflow']['mag'].getVal() == 1
and state_obj.state['outflow']['der'].getVal() == 0
and state_obj.state['outflow']['mag'].getVal() != 2):
return states
if (state_obj.state['inflow']['der'].getVal() == -1
and state_obj.state['outflow']['mag'].getVal() == 2):
return states
if state_obj.state['inflow']['der'].getVal() == -1:
states.append(newState(state_obj,[('inflow','der',+1)],
desc="Id+", transition="increase"))
return states
if state_obj.state['inflow']['der'].getVal() == 1:
states.append(newState(state_obj,[('inflow','der',-1)],
desc="Id-", transition="decrease"))
return states
return states
def newState(state_obj,change =[('inflow','der',0)],desc="", transition=""):
new_state = copy.deepcopy(state_obj)
for ch in change:
if ch[2] == -1:
new_state.state[ch[0]][ch[1]].decrease()
elif ch[2] == 1:
new_state.state[ch[0]][ch[1]].increase()
return {'state': new_state, 'desc':desc, 'transition': transition}
def generateNextStates(state_obj):
state = state_obj.state
new_states = []
# imidiate changes
if state['outflow']['mag'].getVal() == 0 and state['outflow']['der'].getVal() == 1:
new_states.append(newState(state_obj,[('volume','mag',1),('outflow','mag',1)],
desc="Im+->Vd+,Od+", transition="time"))
#new_states[-1]['state'].desc="Positive change in volume/outflow causes increase in magnitude of these quantities."
if state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 1:
changes = [('inflow','mag',1)]
desc = "Id+->Im+. "
state_desc = "Positive change in inflow increases magnitude of inflow."
if state['outflow']['der'].isStationary():
changes.append(('outflow','der',1))
changes.append(('volume','der',1))
state_desc+=" Positive change in inflow magnitude causes to positively increase change of volume and outflow."
new_states.append(newState(state_obj,changes, desc=desc+"Im+->Vd+,Od+", transition="time"))
new_states[-1]['state'].desc=state_desc
if len(new_states) == 0:
new_states = new_states + genFlipedInflow(state_obj)
# Changes which take long time:
# increasing inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 1):
# apply positive Infuence
if state['outflow']['mag'].getVal() != 2:
new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
desc="E+->Vd+,Od+", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Increasing derivation of Volume and Outflow."
if state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == 1:
# go to maximal state
new_states.append(newState(state_obj,[('volume','mag',1),
('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
desc="E+->Om+", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Maximal capacity of container reached."
# rate of changes between inflow and outflow- outflow is faster -> go back to steady
if (state['outflow']['mag'].getVal() == 1
and state['outflow']['der'].getVal() == state['inflow']['der'].getVal()):
new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
desc="Im<Om->Vd-,Od-", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Inflow is increasing slower than Outflow. The volume is in positive steady state."
# steady inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 0):
change = -1* state['outflow']['der'].getVal()
s = '+' if change >0 else '-' if change < 0 else '~'
new_states.append(newState(state_obj,
[('volume','der',change),('outflow','der',change)],
desc="E~->Vd"+s+',Od'+s))
new_states[-1]['state'].desc="Positive steady inflow."
if state['outflow']['der'].getVal() == 1:
new_states.append(newState(state_obj,[('volume','mag',1),
('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
desc="E~->Vm+,Om+", transition="time"))
new_states[-1]['state'].desc="Positive steady inflow. Maximal capacity of container reached."
# decreasing inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == -1):
# apply negative influence
new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
desc="E-->Vd-,Od-", transition="time"))
# extreme no inflow volume left
if state['outflow']['der'].getVal() == -1 and state['outflow']['mag'].getVal() < 2:
new_states.append(newState(state_obj,[('inflow','der',+1),('inflow','mag',-1)],
desc="E-->Id0,Im0", transition="time"))
new_states[-1]['state'].desc="Inflow is empty."
# colapsing from maximum to plus
if state['outflow']['mag'].getVal() == 2 and state['outflow']['der'].getVal() == -1:
new_states.append(newState(state_obj,[('volume','mag',-1),('outflow','mag',-1)],
desc="E-->Vm-,Om-", transition="time"))
new_states[-1]['state'].desc="Inflow is is slowing down what causes increase in outflow rate."
# speed of decrease can be different in inflow and outflow -> go to steady outflow
if (state['outflow']['der'].getVal() == state['inflow']['der'].getVal()
and not state['outflow']['mag'].isStationary()):
new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
desc="E-->Vd-,Od-", transition="time"))
new_states[-1]['state'].desc="Positive steady state"
# no inflow volume
if (state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 0):
if state['outflow']['mag'].getVal() > 0:
new_states.append(newState(state_obj,
[('volume','der',-1),('outflow','der',-1)],
desc="E0->Vd-,Od-", transition="time"))
if (state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == -1):
new_states.append(newState(state_obj,[('volume','der',1),('outflow','der',1),
('volume','mag',-1),('outflow','mag',-1)],
desc="E0->Vd+,Od+", transition="time"))
# print('new states generated: ',len(new_states))
return new_states
def printState(state_obj):
state = state_obj.state
print("State",state_obj.name)
print(state['inflow']['mag'].getName(), state['inflow']['der'].getName())
print(state['volume']['mag'].getName(), state['volume']['der'].getName())
print(state['outflow']['mag'].getName(), state['outflow']['der'].getName())
print('----------------------')
def createEdge(source, target, desc, transition):
return {"explanation": desc,"source": source, "target": target, "transition": transition}
def addNewState(edges, states, source, target, desc, transition):
source.next_states.append(target)
edges.append(createEdge(source,target,desc,transition))
states.append(target)
return edges, states
def existingState(states, state):
for s in states:
if s == state:
return s
return None
#------------------------------------ VISUALIZATION -------------------------------
# returns the values for all variables in text format
# generates a visual (directed) graph of all states
# --------------------------------------- MAIN --------------------------------------
inflow_mag = QSpace('inflow_mag', ZP(), 0)
inflow_der = QSpace('inflow_der', NZP(), 1)
volume_mag = QSpace('volume_mag', ZPM(), 0)
volume_der = QSpace('volume_der', NZP(), 1)
outflow_mag = QSpace('outflow_mag', ZPM(), 0)
outflow_der = QSpace('outflow_der', NZP(), 1)
initial_state = State(
[inflow_mag, inflow_der,
volume_mag, volume_der,
outflow_mag, outflow_der])
states = [initial_state]
edges = []
fringe = queue.Queue()
fringe.put(initial_state)
iteration = 0
print("INTER-STATE TRACE")
dot_graph = None
while not fringe.empty():
curr_state = fringe.get(block=False)
new_states = generateNextStates(curr_state)
for state_dict in new_states:
same_state = existingState(states, state_dict['state'])
if same_state is None:
state_dict['state'].name = str(len(states))
edges, states = addNewState(edges, states,
source=curr_state, target=state_dict['state'],
desc=state_dict['desc'],transition=state_dict['transition'])
fringe.put(state_dict['state'])
printInterstate(curr_state.name,state_dict['state'].name,state_dict['desc'])
elif curr_state != same_state:
curr_state.next_states.append(same_state)
edges.append(createEdge(source=curr_state, target=same_state,
desc=state_dict['desc'], transition=state_dict['transition']))
printInterstate(curr_state.name,same_state.name,state_dict['desc'])
dot_graph = generateGraph(edges)
iteration+=1
# print('************'+str(iteration)+'*****************')
# input("Press Enter to continue...")
dot_graph.write('graph.dot')
dot_graph.write_png('TEST_graph.png')
print("\n")
print("INTRA-STATE TRACE")
for st in states:
printIntraState(st)
print("\n")
| 39.767281 | 143 | 0.576453 |
2321173b6cb9584852d15f26101b77960f964729 | 144 | py | Python | fhwebscrapers/__init__.py | dantas5/FinanceHub | 9691c9e10654c0608d1ca8c8798a5a26c227af87 | [
"MIT"
] | null | null | null | fhwebscrapers/__init__.py | dantas5/FinanceHub | 9691c9e10654c0608d1ca8c8798a5a26c227af87 | [
"MIT"
] | null | null | null | fhwebscrapers/__init__.py | dantas5/FinanceHub | 9691c9e10654c0608d1ca8c8798a5a26c227af87 | [
"MIT"
] | null | null | null | from fhwebscrapers.B3derivatives.curvasb3 import ScraperB3
from fhwebscrapers.CETIP.getcetipdata import CETIP
__all__ = ['ScraperB3', 'CETIP']
| 28.8 | 58 | 0.826389 |
23240f288abf89b78f596d8ce66de1c2719d6da7 | 43 | py | Python | app/data/__init__.py | codenio/cvcam | 4bfb16ae20375abee9dfdf0383c0df0bb5b31db7 | [
"MIT"
] | 2 | 2021-02-12T10:10:41.000Z | 2022-02-01T12:29:34.000Z | app/data/__init__.py | codenio/cvcam | 4bfb16ae20375abee9dfdf0383c0df0bb5b31db7 | [
"MIT"
] | null | null | null | app/data/__init__.py | codenio/cvcam | 4bfb16ae20375abee9dfdf0383c0df0bb5b31db7 | [
"MIT"
] | 1 | 2020-08-08T17:19:05.000Z | 2020-08-08T17:19:05.000Z | from .lite_data_store import LiteDataStore
| 21.5 | 42 | 0.883721 |
2324184f8448361dc8a0618b5d05232be22a8ed2 | 6,040 | py | Python | service/logging.py | IIEG/employment-forecast-jalisco | 83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0 | [
"Apache-2.0"
] | null | null | null | service/logging.py | IIEG/employment-forecast-jalisco | 83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0 | [
"Apache-2.0"
] | 1 | 2021-06-01T22:29:58.000Z | 2021-06-01T22:29:58.000Z | service/logging.py | IIEG/employment-forecast-jalisco | 83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0 | [
"Apache-2.0"
] | null | null | null | from conf import settings
import pandas as pd
import numpy as np
import datetime
import os
| 41.088435 | 108 | 0.654636 |
23271db66f8bb4de60b78338e614df097d3bd2ec | 665 | py | Python | systemtools/test/clearterminaltest.py | hayj/SystemTools | 89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b | [
"MIT"
] | 11 | 2018-08-10T00:55:20.000Z | 2022-02-11T13:34:06.000Z | systemtools/test/clearterminaltest.py | hayj/SystemTools | 89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b | [
"MIT"
] | 5 | 2018-05-01T14:30:37.000Z | 2021-11-18T11:48:28.000Z | systemtools/test/clearterminaltest.py | hayj/SystemTools | 89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b | [
"MIT"
] | 7 | 2019-08-16T13:32:19.000Z | 2022-01-27T10:51:19.000Z |
# print("aaaaaaaaaa bbbbbbbbbb")
# # print(chr(27) + "[2J")
import os
import sys
from enum import Enum
import signal
print(getOutputType())
exit()
# import os
# os.system('cls' if os.name == 'nt' else 'clear')
size = os.get_terminal_size()
print(size[0])
if signal.getsignal(signal.SIGHUP) == signal.SIG_DFL: # default action
print("No SIGHUP handler")
else:
print("In nohup mode")
import time
for x in range (0,5):
b = "Loading" + "." * x
print (b, end="\r")
time.sleep(1)
import sys
print("FAILED...")
sys.stdout.write("\033[F") #back to previous line
time.sleep(1)
sys.stdout.write("\033[K") #clear line
print("SUCCESS!") | 14.777778 | 71 | 0.645113 |
23273537cf14476c6fb5136eab49c7351f22035d | 7,674 | py | Python | polytrack/deep_learning.py | malikaratnayake/Polytrack2.0 | 4ce45f26823c6ac63469112954fa23ed5ffd04bc | [
"MIT"
] | 1 | 2022-03-24T07:06:37.000Z | 2022-03-24T07:06:37.000Z | polytrack/deep_learning.py | malikaratnayake/Polytrack2.0 | 4ce45f26823c6ac63469112954fa23ed5ffd04bc | [
"MIT"
] | null | null | null | polytrack/deep_learning.py | malikaratnayake/Polytrack2.0 | 4ce45f26823c6ac63469112954fa23ed5ffd04bc | [
"MIT"
] | null | null | null | import os
import time
import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
import pytesseract
import core.utils as utils
from core.config import cfg
import re
from PIL import Image
from polytrack.general import cal_dist
import itertools as it
import math
# import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.set_visible_devices(physical_devices[0:1], 'GPU')
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from polytrack.config import pt_cfg
model_weights = './checkpoints/custom-416'
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
saved_model_loaded = tf.saved_model.load(model_weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
#Extract the data from result and calculate the center of gravity of the insect
#Detect insects in frame using Deep Learning
# Calculate the distance between two coordinates
#Verify that there are no duplicate detections (The distance between two CoG are >= 20 pixels)
#Evaluvate the confidence levels in DL and remove the least confidence detections
| 33.365217 | 130 | 0.690253 |
2327a93cda5f2e2914fc9a547155549bead73408 | 765 | py | Python | pypi_uploader/setup.py | p-geon/DockerBonsai | 1b1deafe228438e5ce3b4a41026aef4748f98573 | [
"MIT"
] | 1 | 2021-11-28T13:27:41.000Z | 2021-11-28T13:27:41.000Z | docker-pypi_uploader/setup.py | p-geon/DockerBonsai | 1b1deafe228438e5ce3b4a41026aef4748f98573 | [
"MIT"
] | 8 | 2021-02-19T12:54:22.000Z | 2021-02-25T02:32:23.000Z | pypi_uploader/setup.py | p-geon/DockerBonsai | 1b1deafe228438e5ce3b4a41026aef4748f98573 | [
"MIT"
] | null | null | null | from setuptools import setup
from codecs import open
from os import path
NAME_REPO="imagechain"
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=NAME_REPO,
packages=[NAME_REPO],
version='0.1',
license='MIT',
install_requires=[],
author='p-geon',
author_email='alchemic4s@gmail.com',
url='https://github.com/p-geon/' + NAME_REPO,
description='Image plotting & Image conversion',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='image plot',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
) | 25.5 | 63 | 0.673203 |
232912e4c2ce40a26c2b13a2ea3b4a25afdd40e1 | 3,077 | py | Python | bookstoreapp/forms.py | Timoh97/SMART-READERS | 99ff765d156b3a40698a6d0c9137f8afa03544ac | [
"MIT"
] | null | null | null | bookstoreapp/forms.py | Timoh97/SMART-READERS | 99ff765d156b3a40698a6d0c9137f8afa03544ac | [
"MIT"
] | null | null | null | bookstoreapp/forms.py | Timoh97/SMART-READERS | 99ff765d156b3a40698a6d0c9137f8afa03544ac | [
"MIT"
] | 1 | 2022-01-14T10:26:35.000Z | 2022-01-14T10:26:35.000Z | from django import forms
from .models import *
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.db import transaction
from bookstoreapp.models import *
#ordersystem
from django import forms
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
# Create your forms here.
# NOTE(review): BookForm below is commented-out legacy code — remove once
# confirmed unused, or restore if the model form is still needed.
# class BookForm(forms.ModelForm):
#     class Meta:
#         model = Books
#         fields = ('file', 'image','author',"year_published",'title','price')
# Project user model, resolved via Django's get_user_model().
User = get_user_model()
#Authentication
# class CustomerSignUp(UserCreationForm):
# first_name= forms.CharField(label='First Name' ,error_messages={'required': 'Please enter your first name'})
# last_name= forms.CharField(label='Last Name',error_messages={'required': 'Please enter your last name'})
# email= forms.EmailField(label='Email Address' ,help_text='Format: 123@gmail.com, 456@yahoo.com',error_messages={'required': 'Please enter your email address'})
# class Meta(UserCreationForm.Meta):
# model = User
# fields=['first_name','last_name','username','email','password1','password2']
# @transaction.atomic
# def save(self):
# user = super().save(commit=False)
# user.is_customer=True
# user.save()
# customer = Customer.objects.create(user=user)
# customer.first_name = self.cleaned_data.get('first_name')
# customer.last_name = self.cleaned_data.get('last_name')
# customer.email = self.cleaned_data.get('email')
# return user
# class AuthorSignUp(UserCreationForm):
# first_name= forms.CharField(label='First Name' ,error_messages={'required': 'Please enter your first name'})
# last_name= forms.CharField(label='Last Name',error_messages={'required': 'Please enter your last name'})
# email= forms.EmailField(label='Email Address' ,help_text='Format: 123@gmail.com, 456@yahoo.com',error_messages={'required': 'Please enter your email address'})
# class Meta(UserCreationForm.Meta):
# model = User
# fields=['first_name','last_name','username','email','password1','password2']
# @transaction.atomic
# def save(self):
# user = super().save(commit=False)
# user.is_author=True
# user.save()
# author = Author.objects.create(user=user)
# author.first_name = self.cleaned_data.get('first_name')
# author.last_name = self.cleaned_data.get('last_name')
# author.email = self.cleaned_data.get('email')
# return user
#order system | 39.961039 | 165 | 0.675333 |
23294fabdcf63ba5d2ca1685c4bb3c0849350f0e | 207 | py | Python | game_test.py | jakub530/PyGame-Neural-Net | 6f592ee97d97470cddc6599203c9a5d9759905c4 | [
"MIT"
] | null | null | null | game_test.py | jakub530/PyGame-Neural-Net | 6f592ee97d97470cddc6599203c9a5d9759905c4 | [
"MIT"
] | null | null | null | game_test.py | jakub530/PyGame-Neural-Net | 6f592ee97d97470cddc6599203c9a5d9759905c4 | [
"MIT"
] | null | null | null | import sys, pygame,math
import numpy as np
from pygame import gfxdraw
import pygame_lib, nn_lib
import pygame.freetype
from pygame_lib import color
import random
import copy
import auto_maze
import node_vis | 20.7 | 28 | 0.845411 |
232aa5dcc39387e06484add60fa99039e0f84ed2 | 563 | py | Python | uaa_bot/config.py | cloud-gov/uaa-bot | d2191621d364ce0fe4804283243a5195cfe84c7a | [
"CC0-1.0"
] | 1 | 2021-03-27T21:34:28.000Z | 2021-03-27T21:34:28.000Z | uaa_bot/config.py | cloud-gov/uaa-bot | d2191621d364ce0fe4804283243a5195cfe84c7a | [
"CC0-1.0"
] | 4 | 2021-02-11T18:02:16.000Z | 2022-02-23T18:55:11.000Z | uaa_bot/config.py | cloud-gov/uaa-bot | d2191621d364ce0fe4804283243a5195cfe84c7a | [
"CC0-1.0"
] | null | null | null | import os
# SMTP configuration keys and their default values (None means "no default").
SMTP_KEYS = {
    "SMTP_HOST": "localhost",
    "SMTP_PORT": 25,
    "SMTP_FROM": "no-reply@example.com",
    "SMTP_USER": None,
    "SMTP_PASS": None,
    "SMTP_CERT": None,
}
# UAA (User Account and Authentication) configuration keys and defaults.
UAA_KEYS = {
    "UAA_BASE_URL": "https://uaa.bosh-lite.com",
    "UAA_CLIENT_ID": None,
    "UAA_CLIENT_SECRET": None,
}
# NOTE(review): `parse_config_env` is not defined in this chunk — presumably it
# reads each key from the environment, falling back to the defaults above;
# confirm its definition elsewhere in the module.
smtp = parse_config_env(SMTP_KEYS)
uaa = parse_config_env(UAA_KEYS)
| 18.766667 | 53 | 0.651865 |
232aa8e2e7ba295ede12f5cba7bf5a933e010de8 | 31,253 | py | Python | pytest_docker_registry_fixtures/fixtures.py | crashvb/pytest-docker-registry-fixtures | aab57393f8478982751da140e259eb4bf81869a7 | [
"Apache-2.0"
] | null | null | null | pytest_docker_registry_fixtures/fixtures.py | crashvb/pytest-docker-registry-fixtures | aab57393f8478982751da140e259eb4bf81869a7 | [
"Apache-2.0"
] | 1 | 2021-02-17T04:23:09.000Z | 2021-02-17T04:29:22.000Z | pytest_docker_registry_fixtures/fixtures.py | crashvb/pytest-docker-registry-fixtures | aab57393f8478982751da140e259eb4bf81869a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# pylint: disable=redefined-outer-name,too-many-arguments,too-many-locals
"""The actual fixtures, you found them ;)."""
import logging
import itertools
from base64 import b64encode
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from ssl import create_default_context, SSLContext
from string import Template
from time import sleep, time
from typing import Dict, Generator, List, NamedTuple
import pytest
from docker import DockerClient, from_env
from lovely.pytest.docker.compose import Services
from _pytest.tmpdir import TempPathFactory
from .imagename import ImageName
from .utils import (
check_url_secure,
DOCKER_REGISTRY_SERVICE,
DOCKER_REGISTRY_SERVICE_PATTERN,
generate_cacerts,
generate_htpasswd,
generate_keypair,
get_docker_compose_user_defined,
get_embedded_file,
get_user_defined_file,
replicate_image,
start_service,
)
# Caching is needed, as singular-fixtures and list-fixtures will conflict at scale_factor=1
# This appears to only matter when attempting to start the docker secure registry service
# for the second time.
# Maps a fixture helper's __name__ to its previously computed result list.
CACHE: Dict[str, List] = {}
# Module-level logger shared by all registry fixture helpers.
LOGGER = logging.getLogger(__name__)
# Note: NamedTuple does not support inheritance :(
def _docker_compose_insecure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Yield the docker-compose file location(s) for the insecure registry service.

    A user-provided docker-compose file takes precedence over the embedded
    template.  Results are memoized in the module-level CACHE under this
    function's name so that repeated invocations agree.
    """
    memo_key = _docker_compose_insecure.__name__
    located = CACHE.get(memo_key, [])
    for index in range(len(located), scale_factor):
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", index)
        candidates = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            # TODO: lovely-docker-compose uses the file for teardown ...
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        sentinel = object()
        first = next(iter(candidates), sentinel)
        if first is sentinel:
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            located.append("-unknown-")
        else:
            located.append(first)
    CACHE[memo_key] = located
    yield located
def _docker_compose_secure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Yield the templated docker-compose file location(s) for the secure registry.

    A user-provided docker-compose file takes precedence over the embedded
    template.  Results are memoized in the module-level CACHE under this
    function's name so that repeated invocations agree.
    """
    memo_key = _docker_compose_secure.__name__
    located = CACHE.get(memo_key, [])
    for index in range(len(located), scale_factor):
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", index)
        candidates = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        sentinel = object()
        first = next(iter(candidates), sentinel)
        if first is sentinel:
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            located.append("-unknown-")
        else:
            located.append(first)
    CACHE[memo_key] = located
    yield located
def _docker_registry_auth_header(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    scale_factor: int,
) -> List[Dict[str, str]]:
    """Provide HTTP basic authentication headers for the secure registry service.

    One ``{"Authorization": "Basic <b64>"}`` header is produced per registry
    instance; results are memoized in the module-level CACHE.
    """
    memo_key = _docker_registry_auth_header.__name__
    headers = CACHE.get(memo_key, [])
    for index in range(len(headers), scale_factor):
        credentials = ":".join(
            (docker_registry_username_list[index], docker_registry_password_list[index])
        )
        token = b64encode(credentials.encode("utf-8")).decode("utf-8")
        headers.append({"Authorization": f"Basic {token}"})
    CACHE[memo_key] = headers
    return headers
def _docker_registry_cacerts(
    *,
    docker_registry_certs_list: List[DockerRegistryCerts],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Yield CA trust-store location(s) containing the secure registry certificate.

    A user-defined cacerts file takes precedence over a generated one; results
    are memoized in the module-level CACHE.
    """
    memo_key = _docker_registry_cacerts.__name__
    stores = CACHE.get(memo_key, [])
    for index in range(len(stores), scale_factor):
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "cacerts"),
            generate_cacerts(
                tmp_path_factory,
                certificate=docker_registry_certs_list[index].ca_certificate,
            ),
        )
        sentinel = object()
        first = next(iter(candidates), sentinel)
        if first is sentinel:
            LOGGER.warning("Unable to find or generate cacerts!")
            stores.append("-unknown-")
        else:
            stores.append(first)
    CACHE[memo_key] = stores
    yield stores
def _docker_registry_certs(
    *, scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Yield temporary certificate / private-key file sets for the secure registry.

    One generated keypair set is written to a temp directory per registry
    instance; every file is removed again after the consuming fixture finishes.
    """
    # TODO: Augment to allow for reading certificates from /test ...
    memo_key = _docker_registry_certs.__name__
    cert_sets = CACHE.get(memo_key, [])
    for index in range(len(cert_sets), scale_factor):
        workdir = tmp_path_factory.mktemp(__name__)
        generated = generate_keypair()
        cert_set = DockerRegistryCerts(
            ca_certificate=workdir.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.crt"),
            ca_private_key=workdir.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.key"),
            certificate=workdir.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.crt"),
            private_key=workdir.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.key"),
        )
        cert_set.ca_certificate.write_bytes(generated.ca_certificate)
        cert_set.ca_private_key.write_bytes(generated.ca_private_key)
        cert_set.certificate.write_bytes(generated.certificate)
        cert_set.private_key.write_bytes(generated.private_key)
        cert_sets.append(cert_set)
    CACHE[memo_key] = cert_sets
    yield cert_sets
    # Teardown: remove every file written above.
    for cert_set in cert_sets:
        for keyfile in (
            cert_set.ca_certificate,
            cert_set.ca_private_key,
            cert_set.certificate,
            cert_set.private_key,
        ):
            keyfile.unlink(missing_ok=True)
def _docker_registry_htpasswd(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Yield the htpasswd file location(s) for the secure registry service.

    A user-defined htpasswd file takes precedence over a generated one; results
    are memoized in the module-level CACHE.
    """
    memo_key = _docker_registry_htpasswd.__name__
    located = CACHE.get(memo_key, [])
    for index in range(len(located), scale_factor):
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "htpasswd"),
            generate_htpasswd(
                tmp_path_factory,
                username=docker_registry_username_list[index],
                password=docker_registry_password_list[index],
            ),
        )
        sentinel = object()
        first = next(iter(candidates), sentinel)
        if first is sentinel:
            LOGGER.warning("Unable to find or generate htpasswd!")
            located.append("-unknown-")
        else:
            located.append(first)
    CACHE[memo_key] = located
    yield located
def _docker_registry_insecure(
    *,
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """Provides the endpoint of a local, mutable, insecure, docker registry.

    One registry service is started per index up to ``scale_factor`` (memoized
    in the module-level CACHE); marked images are replicated into the first
    instance only.

    Yields:
        List of DockerRegistryInsecure descriptors, one per service instance.
    """
    cache_key = _docker_registry_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create an insecure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_insecure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    # Note: Needed to correctly populate the embedded, consolidated, service template ...
                    "PATH_CERTIFICATE": "/dev/null",
                    "PATH_HTPASSWD": "/dev/null",
                    "PATH_KEY": "/dev/null",
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting insecure docker registry service [%d] ...", i)
        LOGGER.debug("  docker-compose : %s", path_docker_compose)
        LOGGER.debug("  service name : %s", service_name)
        endpoint = start_service(
            docker_services,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Insecure docker registry endpoint [%d]: %s", i, endpoint)
        images = []
        # Only the first instance receives the replicated images; subsequent
        # instances start empty.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistryInsecure(
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                images=images,
                service_name=service_name,
            )
        )
    CACHE[cache_key] = result
    yield result
def _docker_registry_password(*, scale_factor: int) -> List[str]:
    """Provides unique password(s) for authentication to the secure registry service.

    Args:
        scale_factor: number of secure registry instances, i.e. passwords needed.

    Returns:
        A cached list of ``scale_factor`` unique password strings.
    """
    # uuid4 guarantees uniqueness without blocking on the wall clock; the
    # previous time()-suffix + sleep(0.05) approach was slow and could still
    # collide across concurrent test processes.
    from uuid import uuid4

    cache_key = _docker_registry_password.__name__
    result = CACHE.get(cache_key, [])
    for _ in range(len(result), scale_factor):
        result.append(f"pytest.password.{uuid4().hex}")
    CACHE[cache_key] = result
    return result
def _docker_registry_secure(
    *,
    docker_client: DockerClient,
    docker_compose_secure_list: List[Path],
    docker_registry_auth_header_list: List[Dict[str, str]],
    docker_registry_cacerts_list: List[Path],
    docker_registry_certs_list: List[DockerRegistryCerts],
    docker_registry_htpasswd_list: List[Path],
    docker_registry_password_list: List[str],
    docker_registry_ssl_context_list: List[SSLContext],
    docker_registry_username_list: List[str],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistrySecure], None, None]:
    """Provides the endpoint of a local, mutable, secure, docker registry.

    One TLS + basic-auth registry service is started per index up to
    ``scale_factor`` (memoized in the module-level CACHE).  The docker client
    is patched with the generated credentials, and marked images are
    replicated into the first instance only.

    Yields:
        List of DockerRegistrySecure descriptors, one per service instance.
    """
    cache_key = _docker_registry_secure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create a secure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_secure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    "PATH_CERTIFICATE": docker_registry_certs_list[i].certificate,
                    "PATH_HTPASSWD": docker_registry_htpasswd_list[i],
                    "PATH_KEY": docker_registry_certs_list[i].private_key,
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting secure docker registry service [%d] ...", i)
        LOGGER.debug("  docker-compose : %s", path_docker_compose)
        LOGGER.debug(
            "  ca certificate : %s", docker_registry_certs_list[i].ca_certificate
        )
        LOGGER.debug("  certificate    : %s", docker_registry_certs_list[i].certificate)
        LOGGER.debug("  htpasswd       : %s", docker_registry_htpasswd_list[i])
        LOGGER.debug("  private key    : %s", docker_registry_certs_list[i].private_key)
        LOGGER.debug("  password       : %s", docker_registry_password_list[i])
        LOGGER.debug("  service name   : %s", service_name)
        LOGGER.debug("  username       : %s", docker_registry_username_list[i])
        # Readiness probe: the service is "up" once the endpoint answers over
        # TLS with the expected basic-auth credentials.
        check_server = partial(
            check_url_secure,
            auth_header=docker_registry_auth_header_list[i],
            ssl_context=docker_registry_ssl_context_list[i],
        )
        endpoint = start_service(
            docker_services,
            check_server=check_server,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Secure docker registry endpoint [%d]: %s", i, endpoint)
        # DUCK PUNCH: Inject the secure docker registry credentials into the docker client ...
        docker_client.api._auth_configs.add_auth(  # pylint: disable=protected-access
            endpoint,
            {
                "password": docker_registry_password_list[i],
                "username": docker_registry_username_list[i],
            },
        )
        images = []
        # Only the first instance receives the replicated images; subsequent
        # instances start empty.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistrySecure(
                auth_header=docker_registry_auth_header_list[i],
                cacerts=docker_registry_cacerts_list[i],
                certs=docker_registry_certs_list[i],
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                htpasswd=docker_registry_htpasswd_list[i],
                password=docker_registry_password_list[i],
                images=images,
                service_name=service_name,
                ssl_context=docker_registry_ssl_context_list[i],
                username=docker_registry_username_list[i],
            )
        )
    CACHE[cache_key] = result
    yield result
def _docker_registry_ssl_context(
    *, docker_registry_cacerts_list: List[Path], scale_factor: int
) -> List[SSLContext]:
    """Provide SSLContext object(s) trusting the secure registry certificate.

    Each context is built from the corresponding temporary CA trust store;
    results are memoized in the module-level CACHE.
    """
    memo_key = _docker_registry_ssl_context.__name__
    contexts = CACHE.get(memo_key, [])
    for index in range(len(contexts), scale_factor):
        cafile = str(docker_registry_cacerts_list[index])
        contexts.append(create_default_context(cafile=cafile))
    CACHE[memo_key] = contexts
    return contexts
def _docker_registry_username(*, scale_factor: int) -> List[str]:
    """Provides unique username(s) for authentication to the secure registry service.

    Args:
        scale_factor: number of secure registry instances, i.e. usernames needed.

    Returns:
        A cached list of ``scale_factor`` unique username strings.
    """
    # uuid4 guarantees uniqueness without blocking on the wall clock; the
    # previous time()-suffix + sleep(0.05) approach was slow and could still
    # collide across concurrent test processes.
    from uuid import uuid4

    cache_key = _docker_registry_username.__name__
    result = CACHE.get(cache_key, [])
    for _ in range(len(result), scale_factor):
        result.append(f"pytest.username.{uuid4().hex}")
    CACHE[cache_key] = result
    return result
def _replicate_images(
    docker_client: DockerClient, endpoint: str, request
) -> List[ImageName]:
    """
    Replicates all marked images to a docker registry service at a given endpoint.

    Args:
        docker_client: Docker client with which to replicate the marked images.
        endpoint: The endpoint of the docker registry service.
        request: The pytest requests object from which to retrieve the marks.

    Returns: The list of images that were replicated.
    """

    def _to_bool(value) -> bool:
        # Replacement for distutils.util.strtobool: distutils is deprecated
        # (PEP 632) and removed from the standard library in Python 3.12.
        normalized = str(value).strip().lower()
        if normalized in ("y", "yes", "t", "true", "on", "1"):
            return True
        if normalized in ("n", "no", "f", "false", "off", "0"):
            return False
        raise ValueError(f"invalid truth value {value!r}")

    always_pull = _to_bool(request.config.getoption("--always-pull", True))
    images = request.config.getoption("--push-image", [])
    # images.extend(request.node.get_closest_marker("push_image", []))
    # * Split ',' separated lists
    # * Remove duplicates - see conftest.py::pytest_collection_modifyitems()
    images = [image for i in images for image in i.split(",")]
    images = [ImageName.parse(image) for image in set(images)]
    for image in images:
        LOGGER.debug("- %s", image)
        try:
            replicate_image(docker_client, image, endpoint, always_pull=always_pull)
        except Exception as exception:  # pylint: disable=broad-except
            # Replication is best-effort: log the failure and continue with
            # the remaining images.
            LOGGER.warning(
                "Unable to replicate image '%s': %s", image, exception, exc_info=True
            )
    return images
| 35.964327 | 118 | 0.696381 |
232ab34c654fc84b1b9af2251151c7a436bd3f09 | 1,346 | py | Python | TcpServer.py | WinHtut/BootCampPython-1 | c784a23d73304f328b8d6a1e29a1c43e6b6c44c7 | [
"MIT"
] | null | null | null | TcpServer.py | WinHtut/BootCampPython-1 | c784a23d73304f328b8d6a1e29a1c43e6b6c44c7 | [
"MIT"
] | null | null | null | TcpServer.py | WinHtut/BootCampPython-1 | c784a23d73304f328b8d6a1e29a1c43e6b6c44c7 | [
"MIT"
] | 1 | 2021-12-04T16:08:17.000Z | 2021-12-04T16:08:17.000Z | import socket
import threading
import FetchData
if __name__ == "__main__":
    # Run the TCP server forever, constructing a fresh server per iteration.
    # NOTE(review): `TCPserver` is not defined in this chunk — presumably
    # declared earlier in the file; confirm the class name and casing.
    while True:
        server =TCPserver()
server.main() | 32.829268 | 90 | 0.616642 |
232aee5e5c70b6ac013e320c3a04f48e6af0f6b1 | 11,122 | py | Python | Jump_Trend_labeling/Trend/jump.py | anakinanakin/neural-network-on-finance-data | 1842606294ca3d5dafa7387d6db95a1c21d323eb | [
"MIT"
] | 1 | 2021-05-11T09:11:53.000Z | 2021-05-11T09:11:53.000Z | Jump_Trend_labeling/Trend/jump.py | anakinanakin/neural-network-on-finance-data | 1842606294ca3d5dafa7387d6db95a1c21d323eb | [
"MIT"
] | null | null | null | Jump_Trend_labeling/Trend/jump.py | anakinanakin/neural-network-on-finance-data | 1842606294ca3d5dafa7387d6db95a1c21d323eb | [
"MIT"
] | 1 | 2020-07-28T03:59:31.000Z | 2020-07-28T03:59:31.000Z | #source code: https://github.com/alvarobartt/trendet
import psycopg2, psycopg2.extras
import os
import glob
import csv
import time
import datetime
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import patches
from matplotlib.pyplot import figure
from datetime import timedelta, date
from math import ceil, sqrt
from statistics import mean
from unidecode import unidecode
# transform array to rectangle shape
def identify_df_trends(df, column, window_size=5, identify='both'):
    """
    Identify up/down trends in ``df[column]`` and label them in the dataframe.

    A run of more than ``window_size`` consecutive moves in one direction is
    considered a trend.  When ``identify='both'``, an up/down trend overlapped
    by a longer opposite trend is discarded, and a trend nested inside a longer
    trend of the same direction is discarded.  Surviving trends are labeled by
    writing single characters (drawn from ``string.printable``) into new
    'Up Trend' / 'Down Trend' columns over the rows each trend covers.

    Args:
        df (pandas.DataFrame): dataframe containing the data to be analysed.
        column (str): name of the numeric column from where trends are going
            to be identified.
        window_size (int, optional): minimum run length that constitutes a
            trend; must be an `int` >= 3.
        identify (str, optional): which trends to identify: 'both', 'up' or
            'down'.

    Returns:
        pandas.DataFrame: the input dataframe with 'Up Trend' and/or
        'Down Trend' label columns filled in for the identified date ranges.

    Raises:
        ValueError: raised if any of the introduced arguments errored.
    """
    # --- argument validation -------------------------------------------------
    if df is None:
        raise ValueError("df argument is mandatory and needs to be a `pandas.DataFrame`.")
    if not isinstance(df, pd.DataFrame):
        raise ValueError("df argument is mandatory and needs to be a `pandas.DataFrame`.")
    if column is None:
        raise ValueError("column parameter is mandatory and must be a valid column name.")
    if column and not isinstance(column, str):
        raise ValueError("column argument needs to be a `str`.")
    if column not in df.columns:
        raise ValueError("introduced column does not match any column from the specified `pandas.DataFrame`.")
    if df[column].dtype not in ['int64', 'float64']:
        raise ValueError("supported values are just `int` or `float`, and the specified column of the "
                         "introduced `pandas.DataFrame` is " + str(df[column].dtype))
    if not isinstance(window_size, int):
        raise ValueError('window_size must be an `int`')
    if isinstance(window_size, int) and window_size < 3:
        raise ValueError('window_size must be an `int` equal or higher than 3!')
    if not isinstance(identify, str):
        raise ValueError('identify should be a `str` contained in [both, up, down]!')
    if isinstance(identify, str) and identify not in ['both', 'up', 'down']:
        raise ValueError('identify should be a `str` contained in [both, up, down]!')

    # BUG FIX: trends were previously always detected on df['close'] regardless
    # of the `column` argument (which was validated but then ignored); use the
    # requested column instead.
    objs = list()
    up_trend = {
        'name': 'Up Trend',
        # Negated so that an up trend shows up as a decreasing run below.
        'element': np.negative(df[column])
    }
    down_trend = {
        'name': 'Down Trend',
        'element': df[column]
    }
    if identify == 'both':
        objs.append(up_trend)
        objs.append(down_trend)
    elif identify == 'up':
        objs.append(up_trend)
    elif identify == 'down':
        objs.append(down_trend)

    # --- trend detection -----------------------------------------------------
    # Track a running mean of the current decreasing run; a run longer than
    # window_size that then reverses is recorded as a trend ending at the run's
    # minimum value.
    results = dict()
    for obj in objs:
        mov_avg = None
        values = list()
        trends = list()
        from_trend = 0
        for index, value in enumerate(obj['element'], 0):
            if mov_avg and mov_avg > value:
                # Run continues downward: extend it and refresh the mean.
                values.append(value)
                mov_avg = mean(values)
            elif mov_avg and mov_avg < value:
                # Reversal: record the run as a trend if it was long enough.
                if len(values) > window_size:
                    min_value = min(values)
                    for counter, item in enumerate(values, 0):
                        if item == min_value:
                            break
                    to_trend = from_trend + counter
                    trends.append({
                        'from': df.index.tolist()[from_trend],
                        'to': df.index.tolist()[to_trend],
                    })
                mov_avg = None
                values = list()
            else:
                # Start (or restart) a candidate run at the current position.
                from_trend = index
                values.append(value)
                mov_avg = mean(values)
        results[obj['name']] = trends

    # --- overlap resolution and labeling -------------------------------------
    if identify == 'both':
        # Keep an up trend only if no overlapping down trend is at least as
        # long, and no longer up trend contains it.
        up_trends = list()
        for up in results['Up Trend']:
            flag = True
            for down in results['Down Trend']:
                if (down['from'] <= up['from'] <= down['to']) or (down['from'] <= up['to'] <= down['to']):
                    if (up['to'] - up['from']) <= (down['to'] - down['from']):
                        flag = False
            for other_up in results['Up Trend']:
                if (other_up['from'] < up['from'] < other_up['to']) or (other_up['from'] < up['to'] < other_up['to']):
                    if (up['to'] - up['from']) < (other_up['to'] - other_up['from']):
                        flag = False
            if flag is True:
                up_trends.append(up)
        labels = [letter for letter in string.printable[:len(up_trends)]]
        for up_trend, label in zip(up_trends, labels):
            for index, row in df[up_trend['from']:up_trend['to']].iterrows():
                df.loc[index, 'Up Trend'] = label
        # Symmetric filtering for down trends (note >= vs <= above: on an
        # equal-length overlap, both trends are dropped).
        down_trends = list()
        for down in results['Down Trend']:
            flag = True
            for up in results['Up Trend']:
                if (up['from'] <= down['from'] <= up['to']) or (up['from'] <= down['to'] <= up['to']):
                    if (up['to'] - up['from']) >= (down['to'] - down['from']):
                        flag = False
            for other_down in results['Down Trend']:
                if (other_down['from'] < down['from'] < other_down['to']) or (other_down['from'] < down['to'] < other_down['to']):
                    if (other_down['to'] - other_down['from']) > (down['to'] - down['from']):
                        flag = False
            if flag is True:
                down_trends.append(down)
        labels = [letter for letter in string.printable[:len(down_trends)]]
        for down_trend, label in zip(down_trends, labels):
            for index, row in df[down_trend['from']:down_trend['to']].iterrows():
                df.loc[index, 'Down Trend'] = label
        return df
    elif identify == 'up':
        up_trends = results['Up Trend']
        up_labels = [letter for letter in string.printable[:len(up_trends)]]
        for up_trend, up_label in zip(up_trends, up_labels):
            for index, row in df[up_trend['from']:up_trend['to']].iterrows():
                df.loc[index, 'Up Trend'] = up_label
        return df
    elif identify == 'down':
        down_trends = results['Down Trend']
        down_labels = [letter for letter in string.printable[:len(down_trends)]]
        for down_trend, down_label in zip(down_trends, down_labels):
            for index, row in df[down_trend['from']:down_trend['to']].iterrows():
                df.loc[index, 'Down Trend'] = down_label
        return df
# Label per-day market-index data with up/down trends and save one annotated
# plot per trading day.
# SECURITY(review): eval() of a local auth file builds the DB credentials —
# fine only if auth.txt is trusted and never user-supplied.
conn = psycopg2.connect(**eval(open('auth.txt').read()))
cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
start_date = date(2010, 3, 25)
end_date = date(2010, 3, 26)
# sampling window
window_size = 5
# NOTE(review): `date_range` is not defined in this chunk — presumably a
# helper generator defined earlier in the file; confirm.
for single_date in date_range(start_date, end_date):
    #smp no volume
    #cmd.execute('select * from market_index where mid = 3 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
    #smp with volume
    cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
    recs = cmd.fetchall()
    if recs == []:
        continue;
    df = pd.DataFrame(recs, columns = recs[0].keys())
    # NOTE(review): sort_values without inplace=True/reassignment discards
    # the sorted result — the dataframe keeps its original row order.
    df.sort_values(by='dt')
    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    #     print(df)
    close_price = df['close'].values
    maxprice = max(close_price)
    minprice = min(close_price)
    # prevent from equal to 0
    # Min-max normalize close prices into (0.01, 1.01].
    df['close'] = (df['close']-minprice)/(maxprice - minprice)+0.01
    close_price = df['close'].values
    # close_price = close_price.tolist()
    # df_trend = df.copy()
    # df_trend['Up Trend'] = np.nan
    # df_trend['Down Trend'] = np.nan
    df_trend = identify_df_trends(df, 'close', window_size=window_size, identify='both')
    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    #     print(df_trend)
    df.reset_index(inplace=True)
    figure(num=None, figsize=(48, 10), dpi=180, facecolor='w', edgecolor='k')
    ax = sns.lineplot(x=df.index, y=df['close'])
    ax.set(xlabel='minute')
    a=0
    b=0
    # NOTE(review): bare except hides unrelated errors; a missing 'Up Trend'
    # column (no trends found) is the expected case being handled here.
    try:
        labels = df_trend['Up Trend'].dropna().unique().tolist()
    except:
        df_trend['Up Trend'] = np.nan
        a=1
    if a == 0:
        # Shade each detected up-trend span in red.
        for label in labels:
            ax.axvspan(df[df['Up Trend'] == label].index[0], df[df['Up Trend'] == label].index[-1], alpha=0.2, color='red')
    try:
        labels = df_trend['Down Trend'].dropna().unique().tolist()
    except:
        df_trend['Down Trend'] = np.nan
        b=1
    if b == 0:
        # Shade each detected down-trend span in green.
        for label in labels:
            ax.axvspan(df[df['Down Trend'] == label].index[0], df[df['Down Trend'] == label].index[-1], alpha=0.2, color='green')
    plt.savefig('date='+single_date.strftime("%m-%d-%Y")+'_window={}.png'.format(window_size))
| 31.68661 | 130 | 0.573368 |
232d44b9e301f131b81fce59b6e44322f7b61b53 | 978 | py | Python | dmatrix.py | sanchitcop19/redHackProject | 16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d | [
"Apache-2.0"
] | null | null | null | dmatrix.py | sanchitcop19/redHackProject | 16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d | [
"Apache-2.0"
] | 1 | 2021-06-02T00:26:30.000Z | 2021-06-02T00:26:30.000Z | dmatrix.py | sanchitcop19/redHackProject | 16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d | [
"Apache-2.0"
] | 1 | 2019-09-22T08:46:11.000Z | 2019-09-22T08:46:11.000Z | import requests
import json
# Build a pairwise driving-distance matrix for the locations listed in
# scored_output.json via the Google Distance Matrix API, then write the
# index->location mapping and the matrix to dmatrix.json.
content = None
with open("scored_output.json") as file:
    content = json.load(file)
# n x n matrix; entries stay 0 for skipped pairs and failed lookups.
matrix = [[0 for i in range(len(content))] for j in range(len(content))]
mapping = {}
for i, origin in enumerate(content):
    mapping[i] = origin
    for j, destination in enumerate(content):
        print(i, j)  # progress indicator; note this makes O(n^2) API calls
        # Skip entries starting with ',' and pairs whose last two characters
        # differ or are not 'CA' (presumably state suffixes — confirm format).
        if origin[0] == ',' or destination[0] == ',' or origin[-2:] != destination[-2:] or origin[-2:] != 'CA':
            continue
        # SECURITY(review): hard-coded Google API key committed to source —
        # rotate this key and load it from the environment instead.
        response = requests.get("https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=" + origin + "&destinations=" + destination + "&key=" + "AIzaSyA3kdX2kwoRQpkmui8GtloGvGQB-rn1tMU")
        try:
            matrix[i][j] = json.loads(response.content)["rows"][0]["elements"][0]["distance"]["value"]
        except:
            # NOTE(review): bare except silently leaves 0 for any failed
            # lookup (network error, quota, no route) — consider logging.
            continue
data = {
    'mapping': mapping,
    'matrix': matrix
}
with open("dmatrix.json", "w") as file:
    json.dump(data, file)
| 30.5625 | 211 | 0.603272 |
232d65d107c7ac95d64e3240caf376ce0bbcff3f | 2,416 | py | Python | src/SetExpan/util.py | jmshen1994/SetExpan | d725bb9896c45478217294d188fafaea56660858 | [
"Apache-2.0"
] | 36 | 2017-11-08T01:54:43.000Z | 2021-08-04T08:26:54.000Z | src/SetExpan/util.py | mickeystroller/SetExpan | d725bb9896c45478217294d188fafaea56660858 | [
"Apache-2.0"
] | 4 | 2017-10-30T19:47:14.000Z | 2018-11-22T02:51:55.000Z | src/SetExpan/util.py | mickeystroller/SetExpan | d725bb9896c45478217294d188fafaea56660858 | [
"Apache-2.0"
] | 10 | 2017-11-10T03:50:54.000Z | 2020-12-16T19:52:29.000Z | '''
__author__: Ellen Wu (modified by Jiaming Shen)
__description__: A bunch of utility functions
__latest_update__: 08/31/2017
'''
from collections import defaultdict
import set_expan
import eid_pair_TFIDF_selection
import extract_seed_edges
import extract_entity_pair_skipgrams
def loadWeightByEidAndFeatureMap(filename, idx = -1):
  """Load a TSV file mapping (eid, feature) pairs to a strength value.

  Each line is expected to look like ``eid<TAB>feature<TAB>...<TAB>weight``.

  :param filename: path of the tab-separated input file
  :param idx: column index holding the weight; default is the last column
  :return: dict mapping ``(int(eid), feature)`` tuples to a float strength
  """
  strength_by_key = {}
  with open(filename, 'r') as handle:
    for raw_line in handle:
      fields = raw_line.strip('\r\n').split('\t')
      strength_by_key[(int(fields[0]), fields[1])] = float(fields[idx])
  return strength_by_key
def loadWeightByEidPairAndFeatureMap(filename, idx = -1):
  """Load a TSV file mapping ((eid1, eid2), feature) keys to a strength value.

  Each line is expected to look like
  ``eid1<TAB>eid2<TAB>feature<TAB>...<TAB>weight``.

  :param filename: path of the tab-separated input file
  :param idx: column index holding the weight; default is the last column
  :return: dict mapping ``((int(eid1), int(eid2)), feature)`` to a float
  """
  weightByEidPairAndFeatureMap = {}
  with open(filename, 'r') as handle:
    for raw_line in handle:
      fields = raw_line.strip('\r\n').split('\t')
      pair_key = ((int(fields[0]), int(fields[1])), fields[2])
      weightByEidPairAndFeatureMap[pair_key] = float(fields[idx])
return weightByEidPairAndFeatureMap | 30.974359 | 68 | 0.68005 |
232e28fbfd431f5f262b4d4fadc8f82e257b7c68 | 534 | py | Python | solutions/container-generator.py | hydrargyrum/python-exercises | f99889d18179dce45956ce68382e37a987c8f460 | [
"Unlicense"
] | null | null | null | solutions/container-generator.py | hydrargyrum/python-exercises | f99889d18179dce45956ce68382e37a987c8f460 | [
"Unlicense"
] | null | null | null | solutions/container-generator.py | hydrargyrum/python-exercises | f99889d18179dce45956ce68382e37a987c8f460 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env pytest-3
import pytest
# Exercice: iter
# test
def test_iter():
    """Check multiples_of yields k, 2k, ... and resumes where a break left off."""
    gen = multiples_of(3)
    position = 0
    for value in gen:
        assert value == position * 3
        if position >= 100:
            break
        position += 1
    # Iterating the same generator again continues after the 101 items
    # consumed above, so values now start at 101 * 3.
    position = 0
    for value in gen:
        assert value == (position + 101) * 3
        if position >= 100:
            break
        position += 1
    # A fresh generator with a different base starts again from zero.
    gen = multiples_of(4)
    position = 0
    for value in gen:
        assert value == position * 4
        if position >= 100:
            break
        position += 1
| 16.181818 | 36 | 0.488764 |
23300efdd697b2575e312f7edd92461f467cdc9c | 161 | py | Python | src/onegov/gis/forms/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/gis/forms/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/gis/forms/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from onegov.gis.forms.fields import CoordinatesField
from onegov.gis.forms.widgets import CoordinatesWidget
__all__ = ['CoordinatesField', 'CoordinatesWidget']
| 32.2 | 54 | 0.832298 |
2330a75a4af76c6269b983247c9bbf1f53e9a024 | 8,468 | py | Python | pds_github_util/plan/plan.py | NASA-PDS/pds-github-util | 155f60532a02bcbc7a9664b8a170a2e7ab0463d1 | [
"Apache-2.0"
] | null | null | null | pds_github_util/plan/plan.py | NASA-PDS/pds-github-util | 155f60532a02bcbc7a9664b8a170a2e7ab0463d1 | [
"Apache-2.0"
] | 42 | 2020-09-17T17:30:40.000Z | 2022-03-31T21:09:19.000Z | pds_github_util/plan/plan.py | NASA-PDS/pds-github-util | 155f60532a02bcbc7a9664b8a170a2e7ab0463d1 | [
"Apache-2.0"
] | 3 | 2020-08-12T23:02:40.000Z | 2021-09-30T11:57:59.000Z | """Release Planning."""
import argparse
import github3
import logging
import os
import sys
import traceback
from pds_github_util.issues.utils import get_labels, is_theme
from pds_github_util.zenhub.zenhub import Zenhub
from pds_github_util.utils import GithubConnection, addStandardArguments
from pkg_resources import resource_string
from jinja2 import Template
from yaml import FullLoader, load
# PDS Github Org
GITHUB_ORG = 'NASA-PDS'
REPO_INFO = ('\n--------\n\n'
'{}\n'
'{}\n\n'
'*{}*\n\n'
'.. list-table:: \n'
' :widths: 15 15 15 15 15 15\n\n'
' * - `User Guide <{}>`_\n'
' - `Github Repo <{}>`_\n'
' - `Issue Tracking <{}/issues>`_ \n'
' - `Backlog <{}/issues?q=is%3Aopen+is%3Aissue+label%3Abacklog>`_ \n'
' - `Stable Release <{}/releases/latest>`_ \n'
' - `Dev Release <{}/releases>`_ \n\n')
# Quiet github3 logging
logger = logging.getLogger('github3')
logger.setLevel(level=logging.WARNING)
# Enable logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 36.978166 | 164 | 0.535782 |
23341f5ed2859fb2d6684316810220212f51ba71 | 612 | py | Python | users/models.py | nimbustan/Otozi | 69d2ff734da05ffdf87936b44a86f4ca00f1ca7a | [
"MIT"
] | null | null | null | users/models.py | nimbustan/Otozi | 69d2ff734da05ffdf87936b44a86f4ca00f1ca7a | [
"MIT"
] | null | null | null | users/models.py | nimbustan/Otozi | 69d2ff734da05ffdf87936b44a86f4ca00f1ca7a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here. | 32.210526 | 110 | 0.727124 |
233559cbbce20a6e666ce90f9a2459c195da1807 | 19,404 | py | Python | Veerappan_bnfo601_exam2/Veerappan_BLAST_prot.py | aravindvrm/bnfo | a6d33b197626fdb753e256b7c38bd923c9c6ae99 | [
"MIT"
] | null | null | null | Veerappan_bnfo601_exam2/Veerappan_BLAST_prot.py | aravindvrm/bnfo | a6d33b197626fdb753e256b7c38bd923c9c6ae99 | [
"MIT"
] | null | null | null | Veerappan_bnfo601_exam2/Veerappan_BLAST_prot.py | aravindvrm/bnfo | a6d33b197626fdb753e256b7c38bd923c9c6ae99 | [
"MIT"
] | null | null | null | """
Aravind Veerappan
BNFO 601 - Exam 2
Question 2. Protein BLAST
"""
import math
from PAM import PAM
# MAIN PROGRAM
numbat = 'LVSMLESYVAAPDLILLDIMMPGMDGLELGGMDGGKPILT'
quoll = 'DDMEVIGTAYNPDVLVLDIIMPHLDGLAVAAMEAGRPLIS'
# calculate PAM120 matrix
A = PAM(N=120)
PAM1 = A.Build_PAMN()
B = BLAST(numbat, quoll, PAM=PAM1)
print B.score()
| 48.148883 | 121 | 0.619408 |
233829e027347a91a2e7a94f36a3b2dffcb111ee | 68 | py | Python | Mundo01/Python/aula06b.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Python/aula06b.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Python/aula06b.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | n = input('Digite um algo: ')
print(n.isalpha())
print(n.isupper())
| 17 | 29 | 0.647059 |
2338e51f497f2917867ef18cfad79cfe5635f3ea | 717 | py | Python | setup.py | DigiKlausur/ilias2nbgrader | ef6b14969ce73f8203aa125175915f76f07c8e43 | [
"MIT"
] | 4 | 2020-01-17T08:39:00.000Z | 2021-12-13T13:54:14.000Z | setup.py | DigiKlausur/ilias2nbgrader | ef6b14969ce73f8203aa125175915f76f07c8e43 | [
"MIT"
] | 12 | 2020-01-24T14:52:35.000Z | 2020-05-26T15:34:20.000Z | setup.py | DigiKlausur/ilias2nbgrader | ef6b14969ce73f8203aa125175915f76f07c8e43 | [
"MIT"
] | 1 | 2020-03-23T17:16:06.000Z | 2020-03-23T17:16:06.000Z | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='ilias2nbgrader',
version='0.4.3',
license='MIT',
url='https://github.com/DigiKlausur/ilias2nbgrader',
description='Exchange submissions and feedbacks between ILIAS and nbgrader',
long_description=readme,
long_description_content_type="text/markdown",
author='Tim Metzler',
author_email='tim.metzler@h-brs.de',
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
"rapidfuzz",
"nbformat"
],
include_package_data = True,
zip_safe=False,
test_suite='tests',
tests_require=['pytest-cov']
)
| 26.555556 | 80 | 0.668061 |
23395cc50637ff5b0993e2601b07c4a0ab09d8ac | 2,343 | py | Python | citrees/utils.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | citrees/utils.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | citrees/utils.py | m0hashi/citrees | e7d4866109ce357d5d67cffa450604567f7b469e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, print_function
from numba import jit
import numpy as np
# from externals.six.moves import range
def bayes_boot_probs(n):
    """Bayesian bootstrap sampling for case weights

    Draws n i.i.d. unit-rate exponential variates and normalizes them to
    sum to one (equivalent to a flat Dirichlet draw over n categories).

    Parameters
    ----------
    n : int
        Number of Bayesian bootstrap samples

    Returns
    -------
    p : 1d array-like
        Array of sampling probabilities summing to 1
    """
    weights = np.random.exponential(scale=1.0, size=n)
    total = weights.sum()
    return weights / total
def logger(name, message):
    """Prints messages with style "[NAME] message"

    Parameters
    ----------
    name : str
        Short title of message, for example, train or test; printed
        upper-cased inside square brackets
    message : str
        Main description to be displayed in terminal

    Returns
    -------
    None
    """
    print('[%s] %s' % (name.upper(), message))
def estimate_margin(y_probs, y_true):
    """Estimates margin function of forest ensemble

    Note : This function is similar to margin in R's randomForest package.
    The margin of a sample is the probability assigned to its true class
    minus the largest probability assigned to any incorrect class.

    Parameters
    ----------
    y_probs : 2d array-like
        Predicted probabilities where each row represents predicted
        class distribution for sample and each column corresponds to
        estimated class probability
    y_true : 1d array-like
        Array of true class labels

    Returns
    -------
    margin : 1d array-like
        Estimated margin for each sample
    """
    n_samples, n_classes = y_probs.shape

    # Probability assigned to the true class of each sample
    row_index = np.arange(n_samples, dtype=int)
    true_probs = y_probs[row_index, y_true]

    # Largest probability assigned to any *incorrect* class: mask out the
    # true class of each row before taking the argmax
    best_wrong = np.zeros(n_samples)
    for i in range(n_samples):
        hide_true = np.zeros(n_classes, dtype=bool)
        hide_true[y_true[i]] = True
        wrong_idx = np.ma.array(y_probs[i, :], mask=hide_true).argmax()
        best_wrong[i] = y_probs[i, wrong_idx]

    # Margin is P(y == j) - max(P(y != j))
    return true_probs - best_wrong
| 24.154639 | 74 | 0.599659 |
233b1c9f4e244ac8cb55094347c4c0772dd724da | 4,820 | py | Python | blog/views.py | arascch/Django_blog | 091a5a4974534fbe37560bd8e451716a3b1bdcbf | [
"Apache-2.0"
] | 1 | 2019-03-04T15:02:03.000Z | 2019-03-04T15:02:03.000Z | blog/views.py | arascch/Django_blog | 091a5a4974534fbe37560bd8e451716a3b1bdcbf | [
"Apache-2.0"
] | null | null | null | blog/views.py | arascch/Django_blog | 091a5a4974534fbe37560bd8e451716a3b1bdcbf | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import ListView
from .models import Post , Comment
from .forms import EmailPostForm , CommentForm , SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import SearchVector , SearchQuery , SearchRank , TrigramSimilarity
| 40.504202 | 122 | 0.567427 |
233d6f3fd59520be733341519e2ee7dc3d18d10a | 2,424 | py | Python | StudentAssociation/tasks.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | StudentAssociation/tasks.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | 15 | 2020-03-09T11:56:13.000Z | 2022-02-10T15:03:01.000Z | StudentAssociation/tasks.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | from django.utils import timezone
from django.db.models import Q
from celery.decorators import task, periodic_task
from celery.utils.log import get_task_logger
from celery.task.schedules import crontab
from accounts.models.user_profile import ClubUserProfile
from management.models.activity_apply import ActivityApplication
from accounts.models.messages import Messages
from StudentAssociation.utils import message_service
from .utils import send_email
logger = get_task_logger(__name__)
| 39.737705 | 108 | 0.667904 |
233dd3a1892a3e39ce7f0e1314827e36c01fc57e | 433 | py | Python | streaming/take_picture.py | jsse-2017-ph23/rpi-streaming | a701e6bc818b24b880a409db65b43a43e78259f8 | [
"MIT"
] | 1 | 2017-08-25T08:31:01.000Z | 2017-08-25T08:31:01.000Z | streaming/take_picture.py | jsse-2017-ph23/rpi-streaming | a701e6bc818b24b880a409db65b43a43e78259f8 | [
"MIT"
] | null | null | null | streaming/take_picture.py | jsse-2017-ph23/rpi-streaming | a701e6bc818b24b880a409db65b43a43e78259f8 | [
"MIT"
] | null | null | null | import threading
from datetime import datetime
from io import BytesIO
capture_lock = threading.Lock()
| 22.789474 | 71 | 0.678984 |
233e938c1235975c31635e57391932a8a3358fab | 692 | py | Python | tests/tf_tests/functional/test_tf_inference.py | Deeplite/deeplite-profiler | 2b21c0dc5948606c47377f786b605baf4fa31bee | [
"Apache-2.0"
] | 17 | 2021-04-13T06:09:52.000Z | 2021-11-24T06:39:41.000Z | tests/tf_tests/functional/test_tf_inference.py | Deeplite/deeplite-profiler | 2b21c0dc5948606c47377f786b605baf4fa31bee | [
"Apache-2.0"
] | 14 | 2021-04-14T13:46:42.000Z | 2021-12-20T21:10:25.000Z | tests/tf_tests/functional/test_tf_inference.py | Deeplite/deeplite-profiler | 2b21c0dc5948606c47377f786b605baf4fa31bee | [
"Apache-2.0"
] | 7 | 2021-04-09T16:47:56.000Z | 2022-03-05T11:04:30.000Z | import pytest
from tests.tf_tests.functional import BaseFunctionalTest, TENSORFLOW_SUPPORTED, TENSORFLOW_AVAILABLE, MODEL, DATA
| 36.421053 | 113 | 0.728324 |
233ed42bf0115e5edc1f5fad0d0fd1255e0ee7ed | 24,372 | py | Python | model.py | jgasthaus/gpu_python | ae044d616e22cfa10479bd5717148e91cdca5bb5 | [
"BSD-2-Clause"
] | 1 | 2016-01-27T21:52:54.000Z | 2016-01-27T21:52:54.000Z | model.py | jgasthaus/gpu_python | ae044d616e22cfa10479bd5717148e91cdca5bb5 | [
"BSD-2-Clause"
] | null | null | null | model.py | jgasthaus/gpu_python | ae044d616e22cfa10479bd5717148e91cdca5bb5 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2008-2011, Jan Gasthaus
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy.random as R
from collections import deque
from utils import *
from numpy import *
| 38.260597 | 81 | 0.575127 |
233ff6c005185d4e5c1c1893c6042a130f890b7d | 343 | py | Python | nadlogar/documents/migrations/0007_remove_documentsort_html_fragment.py | ul-fmf/nadlogar | 4b3eb4dd0be7dba20a075b2e4bd425ffc64756e3 | [
"MIT"
] | 9 | 2019-12-19T12:11:58.000Z | 2022-02-01T15:00:16.000Z | nadlogar/documents/migrations/0007_remove_documentsort_html_fragment.py | ul-fmf/nadlogar | 4b3eb4dd0be7dba20a075b2e4bd425ffc64756e3 | [
"MIT"
] | 58 | 2019-12-18T15:07:17.000Z | 2022-01-04T12:21:44.000Z | nadlogar/documents/migrations/0007_remove_documentsort_html_fragment.py | ul-fmf/nadlogar | 4b3eb4dd0be7dba20a075b2e4bd425ffc64756e3 | [
"MIT"
] | 7 | 2019-12-18T13:29:37.000Z | 2021-07-17T13:01:30.000Z | # Generated by Django 3.2.6 on 2021-09-24 07:53
from django.db import migrations
| 19.055556 | 49 | 0.609329 |
2340ff27f70c0f25fa92baa0c7cf6b801391d2c6 | 8,061 | py | Python | src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py | rb560u/airship-shipyard | 01b6960c1f80b44d1db31c081139649c40b82308 | [
"Apache-2.0"
] | 12 | 2018-05-18T18:59:23.000Z | 2019-05-10T12:31:44.000Z | src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py | rb560u/airship-shipyard | 01b6960c1f80b44d1db31c081139649c40b82308 | [
"Apache-2.0"
] | 4 | 2021-07-28T14:36:57.000Z | 2022-03-22T16:39:23.000Z | src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py | rb560u/airship-shipyard | 01b6960c1f80b44d1db31c081139649c40b82308 | [
"Apache-2.0"
] | 9 | 2018-05-18T16:42:41.000Z | 2019-04-18T20:12:14.000Z | # Copyright 2019 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import yaml
from airflow import AirflowException
from airflow.plugins_manager import AirflowPlugin
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import kubernetes
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_config_map import V1ConfigMap
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from shipyard_airflow.conf import config
from shipyard_airflow.control.helpers.action_helper import \
get_deployment_status
from shipyard_airflow.plugins.xcom_puller import XcomPuller
from shipyard_airflow.common.document_validators.document_validation_utils \
import DocumentValidationUtils
from shipyard_airflow.plugins.deckhand_client_factory import \
DeckhandClientFactory
from shipyard_airflow.common.document_validators.errors import \
DocumentNotFoundError
LOG = logging.getLogger(__name__)
# Variable to hold details about how the Kubernetes ConfigMap is stored
CONFIG_MAP_DETAILS = {
'api_version': 'v1',
'kind': 'ConfigMap',
'pretty': 'true'
}
| 36.977064 | 79 | 0.677832 |
2343415fb0bf26dd085e1bfe9473a5a15110089a | 2,162 | py | Python | utils/split_evids_by_cluster.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | utils/split_evids_by_cluster.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | utils/split_evids_by_cluster.py | davmre/sigvisa | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from optparse import OptionParser
from sigvisa.treegp.gp import GP, GPCov
from sigvisa import Sigvisa
from sigvisa.source.event import get_event
from sigvisa.treegp.cover_tree import VectorTree
import pyublas
if __name__ == "__main__":
main()
| 38.607143 | 147 | 0.676226 |
234480e438bd2c85ca7ae34d5da8cd88c72c878b | 1,048 | py | Python | fias/fiAS_track.py | cristina-mt/fias | ce264754997e14a403a9a1d3c5c6c0af646d4463 | [
"BSD-3-Clause"
] | null | null | null | fias/fiAS_track.py | cristina-mt/fias | ce264754997e14a403a9a1d3c5c6c0af646d4463 | [
"BSD-3-Clause"
] | null | null | null | fias/fiAS_track.py | cristina-mt/fias | ce264754997e14a403a9a1d3c5c6c0af646d4463 | [
"BSD-3-Clause"
] | null | null | null | # =================================================
# GUI program to analyse STEM images of filamentous structures: TRACKING
# -----------------------------------------------------------------------------
# Version 1.0
# Created: November 7th, 2017
# Last modification: January 8th, 2019
# author: @Cristina_MT
# =================================================
from sys import platform as sys_pf
import tkinter as tk
from tkinter import ttk, filedialog
import time
import numpy as np
from PIL import Image
if sys_pf == 'darwin':
import matplotlib
matplotlib.use('TkAgg')
from wintrack import WindowTracking
app = fiAS()
app.master.title('fiAS Tracking v1.0 (January 2019)')
if fiAS.controlanchor == 0: app.master.geometry('800x600+50+50')
elif fiAS.controlanchor == 1: app.master.geometry('900x550+50+50')
app.mainloop()
| 27.578947 | 80 | 0.592557 |
23464d407345ae66d94f69478f5a3d5337be637f | 946 | py | Python | golf-sim/golf-sim.py | cbarrese/katas | 655b07562c06bb8b532ca141705ff127fb7e9e12 | [
"MIT"
] | null | null | null | golf-sim/golf-sim.py | cbarrese/katas | 655b07562c06bb8b532ca141705ff127fb7e9e12 | [
"MIT"
] | null | null | null | golf-sim/golf-sim.py | cbarrese/katas | 655b07562c06bb8b532ca141705ff127fb7e9e12 | [
"MIT"
] | null | null | null | import random
# Par for each of the 18 holes.
p = [4, 3, 4, 4, 5, 3, 5, 4, 4, 5, 4, 4, 3, 4, 5, 4, 3, 4]
# Display markers indexed by (score - par + 1): 'b' = birdie (one under),
# 0 = at par (the raw stroke count is shown instead), 'B' = bogey (one over).
b = ['b', 0, 'B']
# f[round][player] -> [total strokes, per-hole display list], 3 rounds x 4 players.
f = [{i: [0, 0] for i in range(4)} for z in range(3)]
# w holds the previous round's winner index (None before round 1).
w = None
for r in range(3):
	c = True
	a = [0, 1, 2, 3]
	m = None
	# Re-simulate the whole round until the lowest total is unique
	# (i.e. no tie for first place).
	while c:
		# Per-hole strokes for each of 4 players: par plus -1/0/+1 at random.
		# NOTE(review): Python 2 code -- map() returns a list here; under
		# Python 3 this would yield exhausted map objects. Confirm target version.
		t = [map(lambda x: random.randint(x-1, x+1), p) for i in range(4)]
		# Total strokes per player.
		s = [sum(i) for i in t]
		# Display form per hole: the stroke count when at par, else 'b'/'B'.
		g = [[l if b[l-p[i]+1] == 0 else b[l-p[i]+1] for i, l in enumerate(l)] for l in t]
		m = min(s)
		if s.count(m) == 1:
			c = False
	if w is not None:
		# Apparently hands the previous round's winner the worst simulated
		# score, then removes that entry so the rest map to the other players.
		l = max(s)
		i = s.index(l)
		f[r][w] = [l, g[i]]
		del s[i]
		del g[i]
		a.remove(w)
	for i in range(len(a)):
		f[r][a[i]] = [s[i], g[i]]
	# NOTE(review): after the deletions above, this is an index into the
	# shortened s list, not a player id from a -- in rounds 2-3 it may tag
	# the wrong player as winner; confirm intended behaviour.
	w = s.index(min(s))
# Print a per-round leaderboard sorted by total strokes (ascending).
# Note: this loop rebinds p (player id), shadowing the par list, which is
# no longer needed at this point.
for r in f:
	print "Round %d" % (f.index(r)+1)
	for p, q in sorted(r.iteritems(), key=lambda (x, y): y[0]):
		print "Player %d: %s - %d" % ((p+1), reduce(lambda x, y: '{} {}'.format(x, y), q[1]), q[0])
| 30.516129 | 99 | 0.428118 |
2346b7d4b689aedf70be90e22366c7d461f0ff5d | 1,479 | py | Python | mupub/tests/test_utils.py | MutopiaProject/mupub | 8c59ae15ea13af14139570fcccfef850e1363548 | [
"MIT"
] | null | null | null | mupub/tests/test_utils.py | MutopiaProject/mupub | 8c59ae15ea13af14139570fcccfef850e1363548 | [
"MIT"
] | 1 | 2017-02-22T17:33:23.000Z | 2017-02-23T10:02:48.000Z | mupub/tests/test_utils.py | MutopiaProject/mupub | 8c59ae15ea13af14139570fcccfef850e1363548 | [
"MIT"
] | null | null | null | """Util module tests
"""
import os.path
from unittest import TestCase
import mupub
from clint.textui.validators import ValidationError
from .tutils import PREFIX
_SIMPLE_PATH = os.path.join(PREFIX, 'SorF', 'O77', 'sorf-o77-01',)
_LYS_PATH = os.path.join(PREFIX, 'PaganiniN', 'O1', 'Caprice_1',)
| 30.183673 | 67 | 0.593644 |
23479c6aeea396d6cdcce0a007d798ea7a728144 | 2,736 | py | Python | routemaster/cli.py | thread/routemaster | 1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41 | [
"MIT"
] | 13 | 2018-01-16T14:26:27.000Z | 2022-03-19T12:43:17.000Z | routemaster/cli.py | thread/routemaster | 1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41 | [
"MIT"
] | 86 | 2018-01-03T17:00:56.000Z | 2021-12-06T12:58:06.000Z | routemaster/cli.py | thread/routemaster | 1fd997a3bcee5e6760e9f7a60cb54323c3dfdc41 | [
"MIT"
] | 3 | 2018-02-21T23:13:45.000Z | 2022-03-19T12:43:23.000Z | """CLI handling for `routemaster`."""
import logging
import yaml
import click
import layer_loader
from routemaster.app import App
from routemaster.cron import CronThread
from routemaster.config import ConfigError, load_config
from routemaster.server import server
from routemaster.middleware import wrap_application
from routemaster.validation import ValidationError, validate_config
from routemaster.gunicorn_application import GunicornWSGIApplication
logger = logging.getLogger(__name__)
def _validate_config(app: App):
    """Validate the app's loaded config, exiting the CLI with status 1 on failure."""
    try:
        validate_config(app, app.config)
    except ValidationError as error:
        # Log the full traceback, then terminate the click invocation.
        logger.exception(f"Validation Error: {error}")
        click.get_current_context().exit(1)
| 23.186441 | 76 | 0.665205 |
2347b9234fc5c7c0d69316595f595a34f0ab7e85 | 2,988 | py | Python | app/test/test_s3.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 322 | 2019-04-15T01:59:57.000Z | 2022-03-09T00:06:55.000Z | app/test/test_s3.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 70 | 2019-04-15T01:27:21.000Z | 2022-03-02T00:39:29.000Z | app/test/test_s3.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 49 | 2019-04-15T06:36:42.000Z | 2022-01-17T11:37:32.000Z | import datetime
import logging
import moto
import pytest
from .. import s3_cleanup
| 28.730769 | 78 | 0.558568 |
2348e1dd77f2ba0e869197de55900d212aa3c556 | 965 | py | Python | grid_sticky_example_3.py | crazcalm/learn_tkinter_canvas | b798a6f2217a478e9222bb6eaa2afec3d28a2758 | [
"MIT"
] | null | null | null | grid_sticky_example_3.py | crazcalm/learn_tkinter_canvas | b798a6f2217a478e9222bb6eaa2afec3d28a2758 | [
"MIT"
] | 2 | 2020-02-14T02:14:26.000Z | 2020-02-14T02:15:58.000Z | grid_sticky_example_3.py | crazcalm/learn_tkinter_canvas | b798a6f2217a478e9222bb6eaa2afec3d28a2758 | [
"MIT"
] | 1 | 2021-11-24T13:00:34.000Z | 2021-11-24T13:00:34.000Z | """
When a widget is positioned with sticky,
the size of the widget itself is just big
enough to contain any text and other
contents inside of it. It won't fill the
entire grid cell. In order to fill the
grid, you can specify "ns" to force the
widget to fill the cell in the vertical
direction, or "ew" to fill the cell in the
horizontal direction. To fill the entire
cell, set sticky to "nsew". The following
example illustrates each of these options:
"""
import tkinter as tk

window = tk.Tk()
# Fix the cell sizes so the stretching is visible: one 50px-tall row and
# four 50px-wide columns.
window.rowconfigure(0, minsize=50)
window.columnconfigure([0, 1, 2, 3], minsize=50)
label1 = tk.Label(text="1", bg="black", fg="white")
label2 = tk.Label(text="2", bg="black", fg="white")
label3 = tk.Label(text="3", bg="black", fg="white")
label4 = tk.Label(text="4", bg="black", fg="white")
label1.grid(row=0, column=0)                 # no sticky: natural size, centered
label2.grid(row=0, column=1, sticky="ew")    # stretch horizontally (east-west)
label3.grid(row=0, column=2, sticky="ns")    # stretch vertically (north-south)
label4.grid(row=0, column=3, sticky="nsew")  # fill the whole cell
window.mainloop()
234e687b4c2d9a30aa4b74e5c45d432bddf763ca | 27,529 | py | Python | server/graphManager.py | zhanghuijun-hello/Detangler | 255c8f82fbdaa36365db1bb86fd1bf42483f9d29 | [
"MIT",
"X11",
"Unlicense"
] | 5 | 2015-07-29T22:19:09.000Z | 2021-09-26T09:57:59.000Z | server/graphManager.py | zhanghuijun-hello/Detangler | 255c8f82fbdaa36365db1bb86fd1bf42483f9d29 | [
"MIT",
"X11",
"Unlicense"
] | null | null | null | server/graphManager.py | zhanghuijun-hello/Detangler | 255c8f82fbdaa36365db1bb86fd1bf42483f9d29 | [
"MIT",
"X11",
"Unlicense"
] | 5 | 2015-12-02T14:59:38.000Z | 2020-02-15T17:57:07.000Z | #!/usr/bin/env python
'''
**************************************************************************
* This class performs most of the graph manipulations.
* @authors Benjamin Renoust, Guy Melancon
* @created May 2012
**************************************************************************
'''
import json
import sys
from tulip import *
import entanglementAnalysisLgt
import entanglementSynchronization
import harmonizedLayout
'''
This class stores the graphs, and performs the manipulations on it.
I guess we want in the future to propose only one graph per session, and maybe store different graphs.
'''
| 44.689935 | 209 | 0.518471 |
234e920fdc139ffec693a188e6071590ea84ef74 | 20,151 | py | Python | praatio/pitch_and_intensity.py | timmahrt/praatIO | 000d0477fffb033b63d54311fac5c913157a59a6 | [
"MIT"
] | 208 | 2016-04-20T12:42:05.000Z | 2022-03-25T13:44:03.000Z | praatio/pitch_and_intensity.py | timmahrt/praatIO | 000d0477fffb033b63d54311fac5c913157a59a6 | [
"MIT"
] | 37 | 2017-10-31T15:22:59.000Z | 2022-01-02T02:55:46.000Z | praatio/pitch_and_intensity.py | timmahrt/praatIO | 000d0477fffb033b63d54311fac5c913157a59a6 | [
"MIT"
] | 33 | 2016-05-09T07:34:22.000Z | 2022-03-30T09:00:58.000Z | # coding: utf-8
"""
Functions for working with pitch data
This file depends on the praat script get_pitch_and_intensity.praat
(which depends on praat) to extract pitch and intensity values from
audio data. Once the data is extracted, there are functions for
data normalization and calculating various measures from the time
stamped output of the praat script (ie **generatePIMeasures()**)
For brevity, 'pitch_and_intensity' is referred to as 'PI'
see **examples/get_pitch_and_formants.py**
"""
import os
from os.path import join
import io
import math
from typing import List, Tuple, Optional, cast
from praatio import data_points
from praatio import praatio_scripts
from praatio import textgrid
from praatio.utilities import errors
from praatio.utilities import my_math
from praatio.utilities import utils
from praatio.utilities.constants import Point
HERTZ = "Hertz"
UNSPECIFIED = "unspecified"
_PITCH_ERROR_TIER_NAME = "pitch errors"
def _extractPIPiecewise(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    tgFN: str,
    tierName: str,
    tmpOutputPath: str,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and intensity from each labeled interval in a textgrid

    This has the benefit of being faster than using _extractPIFile if only
    labeled regions need to have their pitch values sampled, particularly
    for longer files.

    Returns the result as a list. Will load the serialized result
    if this has already been called on the appropriate files before

    Raises:
        errors.ArgumentError: if the audio file inputFN does not exist
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    windowSize = medianFilterWindowSize

    if not os.path.exists(inputFN):
        # Message fixed: inputFN is a file, and the stray 'f' that used to
        # appear before the placeholder ("...: f{inputFN}") was a typo
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        utils.makeDir(tmpOutputPath)
        splitAudioList = praatio_scripts.splitAudioOnTier(
            inputFN, tgFN, tierName, tmpOutputPath, False
        )

        # Run pitch/intensity extraction on each piece, then shift the
        # piece-local timestamps back onto the full file's timeline
        allPIList: List[Tuple[str, str, str]] = []
        for start, _, fn in splitAudioList:
            tmpTrackName = os.path.splitext(fn)[0] + ".txt"
            piList = _extractPIFile(
                join(tmpOutputPath, fn),
                join(tmpOutputPath, tmpTrackName),
                praatEXE,
                minPitch,
                maxPitch,
                sampleStep,
                silenceThreshold,
                pitchUnit,
                forceRegenerate=True,
                medianFilterWindowSize=windowSize,
                pitchQuadInterp=pitchQuadInterp,
            )
            convertedPiList = [
                ("%0.3f" % (float(time) + start), str(pV), str(iV))
                for time, pV, iV in piList
            ]
            allPIList.extend(convertedPiList)

        # Serialize as comma-separated rows of (time, pitch, intensity)
        outputData = [",".join(row) for row in allPIList]
        with open(outputFN, "w") as fd:
            fd.write("\n".join(outputData) + "\n")

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def _extractPIFile(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and intensity values from an audio file

    Returns the result as a list. Will load the serialized result
    if this has already been called on the appropriate files before

    Raises:
        errors.ArgumentError: if the audio file inputFN does not exist
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    if not os.path.exists(inputFN):
        # Message fixed: inputFN is a file, and the stray 'f' that used to
        # appear before the placeholder ("...: f{inputFN}") was a typo
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # The praat script uses append mode, so we need to clear any prior
        # result
        if os.path.exists(outputFN):
            os.remove(outputFN)

        # The praat script takes numeric arguments, so encode the boolean
        # interpolation flag as 0/1
        if pitchQuadInterp is True:
            doInterpolation = 1
        else:
            doInterpolation = 0

        argList = [
            inputFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            pitchUnit,
            -1,
            -1,
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitch_and_intensity.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractIntensity(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    sampleStep: float = 0.01,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
) -> List[Tuple[float, ...]]:
    """
    Extract the intensity for an audio file

    Returns the result as a list. Will load the serialized result
    if this has already been called on the appropriate files before

    Calculates intensity using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html

    Raises:
        errors.ArgumentError: if the audio file inputFN does not exist
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    if not os.path.exists(inputFN):
        # Message fixed: inputFN is a file, and the stray 'f' that used to
        # appear before the placeholder ("...: f{inputFN}") was a typo
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # The praat script uses append mode, so we need to clear any prior
        # result
        if os.path.exists(outputFN):
            os.remove(outputFN)

        argList = [inputFN, outputFN, sampleStep, minPitch, -1, -1]
        scriptName = "get_intensity.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPitchTier(
    wavFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    forceRegenerate: bool = True,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> data_points.PointObject2D:
    """
    Extract pitch at regular intervals from the input wav file

    Data is output to a text file and then returned as a
    data_points.PointObject2D.

    sampleStep - the frequency to sample pitch at
    silenceThreshold - segments with lower intensity won't be analyzed
        for pitch
    forceRegenerate - if running this function for the same file, if False
        just read in the existing pitch file
    pitchQuadInterp - if True, quadratically interpolate pitch

    Calculates pitch using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html

    Raises:
        errors.ArgumentError: if the audio file wavFN does not exist
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    # The praat script takes numeric arguments, so encode the boolean
    # interpolation flag as 0/1
    if pitchQuadInterp is True:
        doInterpolation = 1
    else:
        doInterpolation = 0

    if not os.path.exists(wavFN):
        # Message fixed: a stray 'f' used to be rendered literally before
        # the placeholder ("...: f{wavFN}")
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # Clear any stale result before regenerating
        if os.path.exists(outputFN):
            os.remove(outputFN)

        argList = [
            wavFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitchtier.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return data_points.open2DPointObject(outputFN)
def extractPitch(
    wavFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    forceRegenerate: bool = True,
    undefinedValue: float = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extract pitch at regular intervals from the input wav file

    Data is output to a text file and then returned in a list in the form
    [(timeV1, pitchV1), (timeV2, pitchV2), ...]

    sampleStep - the frequency to sample pitch at
    silenceThreshold - segments with lower intensity won't be analyzed
                       for pitch
    forceRegenerate - if running this function for the same file, if False
                      just read in the existing pitch file
    undefinedValue - if None remove from the dataset, otherwise set to
                     undefinedValue
    medianFilterWindowSize - window size used for median filtering the pitch
                             track (0 disables filtering)
    pitchQuadInterp - if True, quadratically interpolate pitch

    Raises ArgumentError if wavFN does not exist.

    Calculates pitch using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    # The praat script expects an integer flag rather than a Python boolean
    if pitchQuadInterp is True:
        doInterpolation = 1
    else:
        doInterpolation = 0

    if not os.path.exists(wavFN):
        # BUG FIX: the message previously rendered as "...exist: f<path>"
        # because of a stray 'f' character inside the f-string
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # The praat script uses append mode, so clear any prior result
        if os.path.exists(outputFN):
            os.remove(outputFN)
        argList = [
            wavFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            -1,  # -1 sentinels; presumably disable optional script
            -1,  # parameters -- confirm against get_pitch.praat
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitch.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPI(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    tgFN: str = None,
    tierName: str = None,
    tmpOutputPath: str = None,
    undefinedValue: float = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and intensity from a file wholesale or piecewise

    If the parameters for a tg are passed in, this will only extract labeled
    segments in a tier of the tg.  Otherwise, pitch will be extracted from
    the entire file.

    male: minPitch=50; maxPitch=350
    female: minPitch=75; maxPitch=450
    pitchUnit: "Hertz", "semitones re 100 Hz", etc

    Calculates pitch and intensity using the following praat commands:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
    """
    outputPath = os.path.split(outputFN)[0]

    # Piecewise extraction requires both a textgrid and a tier name;
    # otherwise fall back to whole-file extraction.
    usePiecewise = tgFN is not None and tierName is not None

    if usePiecewise:
        if tmpOutputPath is None:
            tmpOutputPath = join(outputPath, "piecewise_output")
        return _extractPIPiecewise(
            inputFN,
            outputFN,
            praatEXE,
            minPitch,
            maxPitch,
            tgFN,
            tierName,
            tmpOutputPath,
            sampleStep,
            silenceThreshold,
            pitchUnit,
            forceRegenerate,
            undefinedValue=undefinedValue,
            medianFilterWindowSize=medianFilterWindowSize,
            pitchQuadInterp=pitchQuadInterp,
        )

    return _extractPIFile(
        inputFN,
        outputFN,
        praatEXE,
        minPitch,
        maxPitch,
        sampleStep,
        silenceThreshold,
        pitchUnit,
        forceRegenerate,
        undefinedValue=undefinedValue,
        medianFilterWindowSize=medianFilterWindowSize,
        pitchQuadInterp=pitchQuadInterp,
    )
def loadTimeSeriesData(
    fn: str, undefinedValue: float = None
) -> List[Tuple[float, ...]]:
    """
    For reading the output of get_pitch_and_intensity or get_intensity

    Data should be of the form
    [(time1, value1a, value1b, ...),
     (time2, value2a, value2b, ...), ]

    fn - a comma-separated file; the first column is time, the rest are
         numeric values (praat marks undefined samples with '--undefined--')
    undefinedValue - if None, rows containing an undefined marker are
                     dropped; otherwise the marker is replaced with this
                     value

    Re-raises IOError if the file cannot be opened (after logging a notice).
    Returns an empty list for an empty file.
    """
    name = os.path.splitext(os.path.split(fn)[1])[0]

    try:
        with io.open(fn, "r", encoding="utf-8") as fd:
            data = fd.read()
    except IOError:
        print(f"No pitch track for: {name}")
        raise

    dataList = [row.split(",") for row in data.splitlines() if row != ""]

    # BUG FIX: an empty file previously raised IndexError on the header check
    if not dataList:
        return []

    # The new praat script includes a header
    if dataList[0][0] == "time":
        dataList = dataList[1:]

    newDataList = []
    for row in dataList:
        time = float(row.pop(0))
        entry = [
            time,
        ]
        doSkip = False
        for value in row:
            if "--" in value:
                # '--' marks praat's '--undefined--' sentinel
                if undefinedValue is not None:
                    appendValue = undefinedValue
                else:
                    doSkip = True
                    break
            else:
                appendValue = float(value)

            entry.append(appendValue)

        if doSkip is True:
            continue

        newDataList.append(tuple(entry))

    return newDataList
def generatePIMeasures(
    dataList: List[Tuple[float, float, float]],
    tgFN: str,
    tierName: str,
    doPitch: bool,
    medianFilterWindowSize: int = None,
    globalZNormalization: bool = False,
    localZNormalizationWindowSize: int = 0,
) -> List[Tuple[float, ...]]:
    """
    Generates processed values for the labeled intervals in a textgrid

    Each row of dataList is (time, pitchValue, intensityValue).

    nullLabelList - labels to ignore in the textgrid. Defaults to ["",]

    if 'doPitch'=true get pitch measures; if =false get rms intensity
    medianFilterWindowSize: if none, no filtering is done
    globalZNormalization: if True, values are normalized with the mean
                          and stdDev of the data in dataList
    localZNormalization: if greater than 1, values are normalized with the mean
                         and stdDev of the local context (for a window of 5, it
                         would consider the current value, 2 values before and 2
                         values after)

    Returns one tuple per labeled interval: six pitch measures when doPitch
    is True, or a single rms-intensity value otherwise.

    Raises NormalizationException when both normalization modes are
    requested, and IncompatibleTierError when the named tier is not an
    IntervalTier.
    """
    # Warn user that normalizing a second time nullifies the first normalization
    if globalZNormalization is True and localZNormalizationWindowSize > 0:
        raise errors.NormalizationException()

    castDataList = cast(List[Tuple[float, ...]], dataList)
    if globalZNormalization is True:
        # Column 1 of each row holds pitch, column 2 holds intensity
        if doPitch:
            castDataList = my_math.znormalizeSpeakerData(castDataList, 1, True)
        else:
            castDataList = my_math.znormalizeSpeakerData(castDataList, 2, True)

    # Raw values should have 0 filtered; normalized values are centered around 0, so don't filter
    filterZeroFlag = not globalZNormalization

    tg = textgrid.openTextgrid(tgFN, False)
    if not isinstance(tg.tierDict[tierName], textgrid.IntervalTier):
        raise errors.IncompatibleTierError(tg.tierDict[tierName])

    tier = cast(textgrid.IntervalTier, tg.tierDict[tierName])
    # Group the time-series rows by the labeled interval that contains them
    piData = tier.getValuesInIntervals(castDataList)
    outputList: List[List[float]] = []
    for interval, entryList in piData:
        label = interval[0]
        if doPitch:
            # Rows are (time, f0, intensity); keep just the f0 column
            tmpValList = [f0Val for _, f0Val, _ in entryList]
            f0Measures = getPitchMeasures(
                tmpValList, tgFN, label, medianFilterWindowSize, filterZeroFlag
            )
            outputList.append(list(f0Measures))
        else:
            tmpValList = [intensityVal for _, _, intensityVal in entryList]

            if filterZeroFlag:
                # Zero intensity marks frames that were not analyzed; drop them
                tmpValList = [
                    intensityVal for intensityVal in tmpValList if intensityVal != 0.0
                ]

            rmsIntensity = 0.0
            if len(tmpValList) != 0:
                rmsIntensity = my_math.rms(tmpValList)
            outputList.append(
                [
                    rmsIntensity,
                ]
            )

    # Locally normalize the output
    if localZNormalizationWindowSize > 0 and len(outputList) > 0:
        # Normalize each measure (column) independently across intervals
        for colI in range(len(outputList[0])):
            featValList = [row[colI] for row in outputList]

            featValList = my_math.znormWindowFilter(
                featValList, localZNormalizationWindowSize, True, True
            )
            if len(featValList) != len(outputList):  # This should hopefully not happen
                raise errors.UnexpectedError(
                    "Lists must be of the same length but are not: "
                    f"({len(featValList)}), ({len(outputList)})"
                )

            for i, val in enumerate(featValList):
                outputList[i][colI] = val

    return [tuple(row) for row in outputList]
def getPitchMeasures(
    f0Values: List[float],
    name: str = None,
    label: str = None,
    medianFilterWindowSize: int = None,
    filterZeroFlag: bool = False,
) -> Tuple[float, float, float, float, float, float]:
    """
    Get various measures (min, max, etc) for the passed in list of pitch values

    name is the name of the file.  Label is the label of the current interval.
    Both of these labels are only used for debugging and can be ignored if
    desired.

    medianFilterWindowSize: None -> no median filtering
    filterZeroFlag: True -> zero values are removed

    Returns (mean, max, min, range, variance, std); all zeros if no pitch
    data remains after filtering.
    """
    name = UNSPECIFIED if name is None else name
    label = UNSPECIFIED if label is None else label

    if medianFilterWindowSize is not None:
        f0Values = my_math.medianFilter(
            f0Values, medianFilterWindowSize, useEdgePadding=True
        )

    if filterZeroFlag:
        # Zero pitch marks unvoiced/unanalyzed frames
        f0Values = [pitchValue for pitchValue in f0Values if int(pitchValue) != 0]

    if not f0Values:
        warningStr = f"No pitch data for file: {name}, label: {label}"
        print(warningStr.encode("ascii", "replace"))
        return (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

    sampleCount = float(len(f0Values))
    meanF0 = sum(f0Values) / sampleCount
    maxF0 = max(f0Values)
    minF0 = min(f0Values)
    rangeF0 = maxF0 - minF0
    variance = sum((pitchValue - meanF0) ** 2 for pitchValue in f0Values) / sampleCount
    std = math.sqrt(variance)

    return (meanF0, maxF0, minF0, rangeF0, variance, std)
def detectPitchErrors(
    pitchList: List[Tuple[float, float]],
    maxJumpThreshold: float = 0.70,
    tgToMark: Optional[textgrid.Textgrid] = None,
) -> Tuple[List[Point], Optional[textgrid.Textgrid]]:
    """
    Detect pitch halving and doubling errors.

    pitchList holds (time, pitch) pairs.  A point is flagged when the pitch
    jumps by more than the allowed ratio between consecutive samples.
    If a textgrid is passed in, it adds the markings to the textgrid.
    """
    if maxJumpThreshold < 0 or maxJumpThreshold > 1:
        raise errors.ArgumentError(
            f"'maxJumpThreshold' must be between 0 and 1. Was given ({maxJumpThreshold})"
        )

    tierName = _PITCH_ERROR_TIER_NAME
    if tgToMark is not None and tierName in tgToMark.tierNameList:
        raise errors.ArgumentError(
            f"Tier name '{tierName}' is already in provided textgrid"
        )

    errorList = []
    # Walk consecutive sample pairs; flag the later sample of any jump
    # NOTE(review): a zero previous pitch would divide by zero here -- confirm
    # callers filter unvoiced (0 Hz) samples first
    for (_, previousPitch), (currentTime, currentPitch) in zip(
        pitchList, pitchList[1:]
    ):
        ceilingCutoff = currentPitch / maxJumpThreshold
        floorCutoff = currentPitch * maxJumpThreshold
        if previousPitch <= floorCutoff or previousPitch >= ceilingCutoff:
            errorList.append(Point(currentTime, str(currentPitch / previousPitch)))

    if tgToMark is not None:
        pointTier = textgrid.PointTier(
            tierName, errorList, tgToMark.minTimestamp, tgToMark.maxTimestamp
        )
        tgToMark.addTier(pointTier)

    return errorList, tgToMark
| 31.193498 | 97 | 0.626966 |
234efbd93d84cd1c579cc2b9b03be2e426d9604e | 1,488 | py | Python | keras_classifier.py | 03pie/SMPCUP2017 | 956f97fce8620b3b0c35e6b3757347ede30c64ba | [
"MIT"
] | 25 | 2017-11-08T08:56:45.000Z | 2021-11-24T20:24:37.000Z | keras_classifier.py | 03pie/SMPCUP2017 | 956f97fce8620b3b0c35e6b3757347ede30c64ba | [
"MIT"
] | null | null | null | keras_classifier.py | 03pie/SMPCUP2017 | 956f97fce8620b3b0c35e6b3757347ede30c64ba | [
"MIT"
] | 13 | 2017-12-11T05:47:52.000Z | 2021-03-04T13:53:41.000Z | import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
# return the best three results
# basic neural network model
if __name__ == '__main__':
X = pd.read_csv('./data/triple_train_x_mean.txt', header=None, encoding='utf-8')
Y = pd.read_csv('./data/triple_train_y.txt', header=None, encoding='utf-8')
X_test = pd.read_csv('./data/triple_test_x_mean.txt', header=None, encoding='utf-8')
matrix_y = np_utils.to_categorical(Y,42)
# KerasClassifier analysis
classifier = KerasClassifier(build_fn=basic_model, nb_epoch=10, batch_size=500)
classifier.fit(X, Y)
pred_prob = classifier.predict_proba(X_test)
with open('./model/task2_label_space.txt', encoding='utf-8') as flabel:
label_map = flabel.read().split()
pd.DataFrame(top_n(pred_prob, label_map)).to_csv('./data/task2_ans_int_index.txt', index=None, header=None, encoding='utf-8')
| 40.216216 | 126 | 0.755376 |
234f3d49dc75338604b163336e34c3247e009fb7 | 2,012 | py | Python | greening/get_tiles_from_google_maps.py | uchr/Hackathon-Urbaton | 83362fec9777054050c858eda87905c8b512372a | [
"MIT"
] | null | null | null | greening/get_tiles_from_google_maps.py | uchr/Hackathon-Urbaton | 83362fec9777054050c858eda87905c8b512372a | [
"MIT"
] | null | null | null | greening/get_tiles_from_google_maps.py | uchr/Hackathon-Urbaton | 83362fec9777054050c858eda87905c8b512372a | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import os
import time
import requests
import shutil
def main():
"""
https://api.openstreetmap.org/api/0.6/map?bbox=82.54715,54.839455,83.182984,55.103517
https://sat02.maps.yandex.net/tiles?l=sat&v=3.465.0&x=2989&y=1297&z=12&lang=ru_RU
"""
city_min_x = 5975
city_max_x = 5989
city_min_y = 2582
city_max_y = 2597
all_x = city_max_x - city_min_x + 1
all_y = city_max_y - city_min_y + 1
path = './google_tiles_' + str(13) + '/'
for x_index in range(5975, 5990):
for y_index in range(2582, 2598):
file_name = os.path.join(path, "_".join(map(str, [x_index, y_index])) + '.png')
get_route_tile(x_index, y_index, file_name)
time.sleep(0.1)
final_image = union(all_x, all_y, path)
if __name__ == '__main__':
main() | 30.029851 | 89 | 0.614811 |
23549dd532a597635dde1ce83730aec62792e9bd | 200 | py | Python | waymo_open_dataset/latency/examples/tensorflow/multiframe/wod_latency_submission/__init__.py | mirtaheri/waymo-open-dataset | 16c6a1a98fa8bb005fdfe798d27e6f3edf98c356 | [
"Apache-2.0"
] | 1,814 | 2019-08-20T18:30:38.000Z | 2022-03-31T04:14:51.000Z | waymo_open_dataset/latency/examples/tensorflow/multiframe/wod_latency_submission/__init__.py | mirtaheri/waymo-open-dataset | 16c6a1a98fa8bb005fdfe798d27e6f3edf98c356 | [
"Apache-2.0"
] | 418 | 2019-08-20T22:38:02.000Z | 2022-03-31T07:51:15.000Z | waymo_open_dataset/latency/examples/tensorflow/multiframe/wod_latency_submission/__init__.py | mirtaheri/waymo-open-dataset | 16c6a1a98fa8bb005fdfe798d27e6f3edf98c356 | [
"Apache-2.0"
] | 420 | 2019-08-21T10:59:06.000Z | 2022-03-31T08:31:44.000Z | """Example __init__.py to wrap the wod_latency_submission module imports."""
from . import model
initialize_model = model.initialize_model
run_model = model.run_model
DATA_FIELDS = model.DATA_FIELDS
| 28.571429 | 76 | 0.815 |
23549ec1228d9e42823643453e7b9895b370ca45 | 1,933 | py | Python | reVX/utilities/cluster_methods.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | 7 | 2020-04-06T00:29:55.000Z | 2022-01-23T20:00:14.000Z | reVX/utilities/cluster_methods.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | 67 | 2020-02-28T20:15:35.000Z | 2022-03-31T21:34:52.000Z | reVX/utilities/cluster_methods.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Clustering Methods
"""
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
| 27.225352 | 77 | 0.562338 |
2354fdf8dad70153d9baf4c5be2ae3e5d8f5ea68 | 47 | py | Python | lotoes/secciones/sorteosLnac/__init__.py | vidddd/lotoes | caf5fe71006e00e590549f921052f110c4bbb75f | [
"MIT"
] | null | null | null | lotoes/secciones/sorteosLnac/__init__.py | vidddd/lotoes | caf5fe71006e00e590549f921052f110c4bbb75f | [
"MIT"
] | null | null | null | lotoes/secciones/sorteosLnac/__init__.py | vidddd/lotoes | caf5fe71006e00e590549f921052f110c4bbb75f | [
"MIT"
] | null | null | null | from .controller_sorteosLnac import sorteosLnac | 47 | 47 | 0.914894 |
23563f75e2c3a54101ad242b1632d00ca9727d80 | 87 | py | Python | trainloops/__init__.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | [
"MIT"
] | null | null | null | trainloops/__init__.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | [
"MIT"
] | null | null | null | trainloops/__init__.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | [
"MIT"
] | null | null | null | """
This folder contains training loops and accompanying loggers and listeners
"""
| 21.75 | 78 | 0.747126 |
23568ef84806142d79d34cfa3458b41993b9107e | 3,902 | py | Python | python/researchDev/boot.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 112 | 2017-07-26T00:30:29.000Z | 2021-11-09T14:02:12.000Z | python/researchDev/boot.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | null | null | null | python/researchDev/boot.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 38 | 2017-07-28T03:09:01.000Z | 2021-05-07T03:21:32.000Z | import os
print (' _____ _____ _____ _____ _____ _______ _____ ')
print (' /\ \ /\ \ /\ \ /\ \ /\ \ /::\ \ /\ \ ')
print (' /::\ \ /::\____\ /::\ \ /::\ \ /::\ \ /::::\ \ /::\___ \ ')
print (' /::::\ \ /:::/ / \:::\ \ /::::\ \ \:::\ \ /::::::\ \ /::::| | ')
print (' /::::::\ \ /:::/ / \:::\ \ /::::::\ \ \:::\ \ /::::::::\ \ /:::::| | ')
print (' /:::/\:::\ \ /:::/ / \:::\ \ /:::/\:::\ \ \:::\ \ /:::/~~\:::\ \ /::::::| | ')
print (' /:::/__\:::\ \ /:::/____/ \:::\ \ /:::/__\:::\ \ \:::\ \ /:::/ \:::\ \ /:::/|::| | ')
print (' /::::\ \:::\ \ /::::\ \ /::::\ \ /::::\ \:::\ \ /::::\ \ /:::/ / \:::\ \ /:::/ |::| | ')
print (' /::::::\ \:::\ \ /::::::\____\________ /::::::\ \ /::::::\ \:::\ \ ____ /::::::\ \ /:::/____/ \:::\____\ /:::/ |::| | _____ ')
print (' /:::/\:::\ \:::\ \ /:::/\:::::::::::\ \ /:::/\:::\ \ /:::/\:::\ \:::\ \ /\ \ /:::/\:::\ \ |:::| | |:::| | /:::/ |::| |/\ \ ')
print ('/:::/ \:::\ \:::\____\/:::/ |:::::::::::\____\ /:::/ \:::\____\/:::/ \:::\ \:::\____\/::\ \/:::/ \:::\____\|:::|____| |:::| |/:: / |::| /::\___ \ ')
print ('\::/ \:::\ /:::/ /\::/ |::|~~~|~~~~~ /:::/ \::/ /\::/ \:::\ /:::/ /\:::\ /:::/ \::/ / \:::\ \ /:::/ / \::/ /|::| /:::/ / ')
print (' \/____/ \:::\/:::/ / \/____|::| | /:::/ / \/____/ \/____/ \:::\/:::/ / \:::\/:::/ / \/____/ \:::\ \ /:::/ / \/____/ |::| /:::/ / ')
print (' \::::::/ / |::| | /:::/ / \::::::/ / \::::::/ / \:::\ /:::/ / |::|/:::/ / ')
print (' \::::/ / |::| | /:::/ / \::::/ / \::::/____/ \:::\__/:::/ / |::::::/ / ')
print (' /:::/ / |::| | \::/ / /:::/ / \:::\ \ \::::::::/ / |:::::/ / ')
print (' /:::/ / |::| | \/____/ /:::/ / \:::\ \ \::::::/ / |::::/ / ')
print (' /:::/ / |::| | /:::/ / \:::\ \ \::::/ / /:::/ / ')
print (' /:::/ / \::| | /:::/ / \:::\____\ \::/____/ /:::/ / ')
print (' \::/ / \:| | \::/ / \::/ / ~~ \::/ / ')
print (' \/____/ \|___| \/____/ \/____/ \/____/ ')
#try:
# input ('Press enter to continue:')
#except NameError:
# pass
os.system('read -s -n 1 -p "Press any key to continue..."')
print | 121.9375 | 182 | 0.098155 |
23585aa3fd91ad92d3f8755c7797b9e71281a6bc | 918 | py | Python | Unit3/Lesson7.py | szhua/PythonLearn | 12eaf7cc74a0310bb23e21773f3c83deb91d0362 | [
"Apache-2.0"
] | null | null | null | Unit3/Lesson7.py | szhua/PythonLearn | 12eaf7cc74a0310bb23e21773f3c83deb91d0362 | [
"Apache-2.0"
] | null | null | null | Unit3/Lesson7.py | szhua/PythonLearn | 12eaf7cc74a0310bb23e21773f3c83deb91d0362 | [
"Apache-2.0"
] | null | null | null |
#Pythonitertools
import itertools
#10
naturals =itertools.count(10)
from collections import Iterator
#naturals
print(isinstance(naturals,Iterator))
for x in naturals:
if x>70:
break
print(x)
#cycle()
cycles =itertools.cycle("szhualeilei")
print(isinstance(cycles,Iterator))
n =0
for x in cycles :
#print(x)
n+=1
if n >100:
break
#repeat
repeats =itertools.repeat("szhua",10)
for x in repeats:
print(x)
inter =(x**2 for x in range(100) if x%2==0and x%3==0)
#take whileIterrator
ns =itertools.takewhile(lambda x :x<1000,inter)
print(list(ns))
#chain()
#chain()
print(list(itertools.chain("fjksjdfk","abcdefghijklmn")))
#groupby()
#groupby()
for key ,value in itertools.groupby("aaajjjfdsfkkkfffff"):
print(str(key).upper(),list(value))
| 14.123077 | 58 | 0.704793 |
23593360ab941b0e68d201d7be4b82afc1cc2f9c | 8,536 | py | Python | flaskr/databaseCURD.py | Ln-Yangzl/yukiyu-webpage | f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7 | [
"Apache-2.0"
] | null | null | null | flaskr/databaseCURD.py | Ln-Yangzl/yukiyu-webpage | f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7 | [
"Apache-2.0"
] | null | null | null | flaskr/databaseCURD.py | Ln-Yangzl/yukiyu-webpage | f9aaf71dca18067ecbe43faccb74a7f8d4cf56b7 | [
"Apache-2.0"
] | 2 | 2021-03-23T12:22:04.000Z | 2021-05-24T13:56:26.000Z | # CURD
#
#
import traceback
import pymysql
from userManage import commmitChangeToUserlist, privilegeOfUser, ifManage
global db
# TODO: improve the robustness
# this function call updataItem, insertItem, deleteItem
# according to the oldInfo and newInfo
# if oldInfo is None, call insert
# if newInfo is None, call delete
# else, call updata
#
# OK code: return 1
# error code:
# 0 : sql run time error
# -1 : invalid target table
# -2 : user is None
# -3 : user has not target privilege
# -4 : manager's privilege is not 'YYYY'
# -5 : user name chongfu
# shuffle : ((a,),(b,),(c,)) --> (a, b, c)
# shuffle datetime.date to str: 2021-02-20
# get all tables, including table names and data
# return the string: key1=value1 seperate key2=valuue2...
# return the string: value1 seperate value2...
# if strlization is True, when the data[i] is str, the value will be: 'value' | 28.740741 | 115 | 0.600633 |
235b2d901b1bea2fa217606a67dfa81205191041 | 23 | py | Python | sensu_plugins_aws_subnet/__init__.py | supernova106/sensu_plugins_aws_subnet | 07edd3b414def15809c331b7269ecdafd3faf762 | [
"MIT"
] | 12 | 2021-08-15T04:38:25.000Z | 2021-08-16T18:17:25.000Z | sensu_plugins_aws_subnet/__init__.py | supernova106/sensu_plugins_aws_subnet | 07edd3b414def15809c331b7269ecdafd3faf762 | [
"MIT"
] | 1 | 2020-12-05T18:35:55.000Z | 2020-12-05T18:35:55.000Z | sensu_plugins_aws_subnet/__init__.py | supernova106/sensu_plugins_aws_subnet | 07edd3b414def15809c331b7269ecdafd3faf762 | [
"MIT"
] | 2 | 2021-08-15T09:29:43.000Z | 2021-11-17T05:41:41.000Z | from __main__ import *
| 11.5 | 22 | 0.782609 |
235cde5e9828e617c08855acd10392c015a0948e | 121 | py | Python | scripts/02750.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null | scripts/02750.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null | scripts/02750.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null | N = int(input())
nums = []
for _ in range(N):
nums.append(int(input()))
nums.sort()
for num in nums:
print(num) | 13.444444 | 29 | 0.586777 |
235d6ef789a7fcfed4e828ec3bd555a9f55c0dc4 | 1,207 | py | Python | notebooks/beaconrunner2050/ASAPValidator.py | casparschwa/beaconrunner | d5430e08b120462beea19f65a4cf335ec9eb9134 | [
"MIT"
] | 11 | 2020-07-06T12:36:17.000Z | 2021-04-22T11:00:18.000Z | notebooks/beaconrunner2050/ASAPValidator.py | casparschwa/beaconrunner | d5430e08b120462beea19f65a4cf335ec9eb9134 | [
"MIT"
] | 3 | 2021-09-22T16:04:35.000Z | 2021-09-22T16:05:25.000Z | notebooks/beaconrunner2050/ASAPValidator.py | casparschwa/beaconrunner | d5430e08b120462beea19f65a4cf335ec9eb9134 | [
"MIT"
] | 12 | 2021-05-24T15:21:04.000Z | 2022-03-28T17:50:37.000Z | from typing import Optional
import specs
import validatorlib as vlib | 32.621622 | 89 | 0.62966 |
236087aea9a609e4effde96065112e3417f806cd | 3,864 | py | Python | src/imreg_dft/show.py | GCBallesteros/imreg_dft | 3eb7137403dd0689711ff1dae78200b0fbdcedfb | [
"BSD-3-Clause"
] | 167 | 2015-02-28T19:14:52.000Z | 2022-03-30T03:42:33.000Z | src/imreg_dft/show.py | GCBallesteros/imreg_dft | 3eb7137403dd0689711ff1dae78200b0fbdcedfb | [
"BSD-3-Clause"
] | 40 | 2015-01-18T23:58:41.000Z | 2021-08-02T13:36:48.000Z | src/imreg_dft/show.py | GCBallesteros/imreg_dft | 3eb7137403dd0689711ff1dae78200b0fbdcedfb | [
"BSD-3-Clause"
] | 51 | 2015-02-27T21:19:55.000Z | 2022-03-24T12:28:45.000Z | # -*- coding: utf-8 -*-
# show.py
# Copyright (c) 2016-?, Matj T
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse as ap
from imreg_dft import cli
from imreg_dft import reporting
TOSHOW = (
"filtered input (I)mages",
"filtered input images (S)pectra",
"spectra (L)ogpolar transform",
"(1) angle-scale phase correlation",
"angle-scale transform (A)pplied",
"(2) translation phase correlation",
"(T)ile info",
)
TOSHOW_ABBR = "isl1a2t"
if __name__ == "__main__":
main()
| 35.449541 | 77 | 0.694358 |
23624728d154d6219d7807790fecc8aef8e482f2 | 9,897 | py | Python | src/regenerate_distributions.py | Rumperuu/Threat-Intelligence-Service | c72e312c9b2ad7acc0f3b564f735944b437c298b | [
"CNRI-Python"
] | null | null | null | src/regenerate_distributions.py | Rumperuu/Threat-Intelligence-Service | c72e312c9b2ad7acc0f3b564f735944b437c298b | [
"CNRI-Python"
] | null | null | null | src/regenerate_distributions.py | Rumperuu/Threat-Intelligence-Service | c72e312c9b2ad7acc0f3b564f735944b437c298b | [
"CNRI-Python"
] | null | null | null | """
Distributions (Re)generation Script
This script generates likelihood and cost distributions based on threat
intelligence data stored in a connected Neo4j graph database. It attempts to
do so for every possible permutation of (size, industry) values.
These are then consumed by `montecarlo.py`, which runs a Monte Carlo
simulation based on these figures.
Acknowledgements: Dr Dan Prince & Dr Chris Sherlock
"""
import os
import sys
import argparse
import warnings
import logging as log
from typing import Tuple
import itertools
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from graph import GraphInterface as gi
# Used for logging, equivalent to `logging.WARNING` + 1.
SUCCESS = 31
# The arbitrary maximum number of incidents that an organisation can experience
# in a year.
MAX_ANNUAL_INCIDENTS = 8000
# Quantifies the quantitative boundaries for human-readable incident frequencies,
# which many sources (e.g., the CSBS 2020) use to present their results.
#
# 'None' = 0
# 'Annually' = 1
# 'Less than monthly' = 27
# 'Monthly' = 817
# 'Weekly' = 1879
# 'Daily' = 80399
# 'More than daily' = 4008000
BOUNDARIES = {
"None": 0,
"Once per year": 1,
"Less than once a month": 2,
"Once a month": 8,
"Once a week": 18,
"Once a day": 80,
"Several times a day": 400,
"MAX": MAX_ANNUAL_INCIDENTS,
}
OUTPUT_DIR = None
IMAGES = None
# pylint: disable=invalid-name,anomalous-backslash-in-string
def _generate_new_incident_frequency_distribution(pairing: Tuple = (None, None)) -> int:
    """
    Generates a new incident frequency distribution.

    Returns 1 when a distribution was generated and stored, 0 when the graph
    database has no probability data for the pairing.

    Notes
    -----
    (Re)generates the incident frequency distribution for a
    :math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
    a Neo4j graph database.

    Currently this only produces log-normal distributions. Additional types of
    distribution can be implemented by overloading this method (by importing the
    `multipledispatch` package) and returning the values required for defining
    that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
    and :math:`b`).
    """
    # pylint: enable=anomalous-backslash-in-string
    log.info("Generating new incident frequency distribution for '%s'...", str(pairing))

    # Attempts to get the incident probabilities for the pairing from the graph
    # database
    incident_frequency_probabilities = gi.get_incident_frequency_probabilities(
        list(BOUNDARIES.values())[:-1], pairing
    )
    if incident_frequency_probabilities is None:
        log.info(
            "No incident frequency distribution generated for '%s'.",
            str(pairing),
        )
        return 0
    log.debug(
        "Returned values are: incident frequency probabilities = %s",
        str(incident_frequency_probabilities),
    )

    # If values are found, generate a distribution.
    # Fit ln(1 - F(x)) = a*ln(b) - a*ln(x), i.e. a straight line through the
    # log-survival function evaluated at the BOUNDARIES cut points.
    # NOTE(review): this linear log-survival form is the Pareto shape, not
    # log-normal as the docstring states -- confirm intended model.
    Fs = np.cumsum(incident_frequency_probabilities)
    xs = np.log(list(BOUNDARIES.values())[1:])
    ys = np.log(1 - Fs)
    data = pd.DataFrame(xs, ys)

    # pylint: disable=line-too-long
    # See <https://www.statsmodels.org/stable/_modules/statsmodels/stats/stattools.html#omni_normtest> for explanation
    # pylint: enable=line-too-long
    # CAUTION: `data` has no columns named 'xs'/'ys' (ys is passed as the
    # *index*), so patsy resolves the formula terms from this function's local
    # scope -- renaming the `xs`/`ys` locals would break the fit.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fit = smf.ols(formula="ys ~ xs", data=data).fit()
    log.debug(fit.summary())

    # Get the parameters for the generated distribution and store them in the
    # graph database. Intercept = a*ln(b), slope = -a.
    alogb = fit.params[0]
    a = -fit.params[1]
    b = np.exp(alogb / a)

    gi.create_incident_frequency_distribution_node(pairing, a, b)

    log.log(
        SUCCESS,
        "New incident frequency distribution successfully generated for '%s'.",
        str(pairing),
    )
    return 1
# pylint: enable=invalid-name
# pylint: disable=anomalous-backslash-in-string
def _generate_new_incident_costs_distribution(pairing: Tuple = (None, None)) -> int:
    """
    (Re)generates the incident cost distribution for a
    :math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
    a Neo4j graph database.

    Returns 1 when a distribution was generated and stored, 0 when the graph
    database has no cost data for the pairing.

    Currently this only produces log-normal distributions. Additional types of
    distribution can be implemented by overloading this method (by importing the
    `multipledispatch` package) and returning the values required for defining
    that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
    and :math:`b`).
    """
    # pylint: enable=anomalous-backslash-in-string
    # Plots the distribution for the average cost of incident(s) over 12 months
    log.info("Generating new incident cost distribution for '%s'...", str(pairing))
    incident_mean_cost, incident_median_cost = gi.get_incident_cost_averages(pairing)
    if incident_mean_cost is None or incident_median_cost is None:
        log.info(
            "No incident costs distribution generated for '%s'.",
            str(pairing),
        )
        return 0
    log.debug(
        "Returned values are: mean = %s, median = %s",
        str(incident_mean_cost),
        str(incident_median_cost),
    )

    # For a log-normal distribution mean = exp(mu + sigma^2/2) and
    # median = exp(mu), hence sigma = sqrt(2 * (ln(mean) - ln(median))).
    # BUG FIX: the previous single-expression form parsed as
    # `(ln(mean) - 0) if median == 0 else ln(median)`, silently discarding
    # the mean whenever the median was non-zero.
    log_median = 0 if incident_median_cost == 0 else np.log(incident_median_cost)
    log_stddev = np.sqrt(2 * (np.log(incident_mean_cost) - log_median))
    # e**sigma is the multiplicative (geometric) standard deviation
    stddev = np.exp(1) ** log_stddev

    _label_plot(
        "Average annual incident-with-outcome cost distribution", "Cost ()", "Density"
    )
    # NOTE(review): lognorm.pdf's positional parameters are (x, s, loc);
    # passing ln(mean) as the shape and ln(median) as the location looks
    # unconventional -- confirm intended parameterization.
    plt.plot(
        [
            lognorm.pdf(
                np.log(i),
                np.log(incident_mean_cost),
                np.log(incident_median_cost) if incident_median_cost > 0 else 0,
            )
            for i in range(1, 2500)
        ]
    )
    _save_plot("3 - cost dist")

    gi.create_incident_costs_distribution_node(pairing, incident_mean_cost, stddev)

    log.log(
        SUCCESS,
        "New incident costs distribution successfully generated for '%s'.",
        str(pairing),
    )
    return 1
def _generate_new_distributions(pairing: Tuple = (None, None)) -> Tuple:
    """(Re)generates the cost and likelihood distributions.

    Any unspecified half of the (size, industry) pairing is expanded to all
    known values, and distributions are regenerated for every combination.
    Returns (number of frequency distributions, number of cost distributions)
    successfully generated.
    """
    gi.__init__()
    log.info("Existing distributions deleted: %s", bool(gi.delete_distributions()))

    # Expand an unspecified size/industry to every value known to the graph
    size, industry = pairing
    sizes = [size] if size is not None else gi.get_sizes()
    industries = [industry] if industry is not None else gi.get_industries()

    frequency_count = 0
    cost_count = 0
    for pair in itertools.product(sizes, industries):
        frequency_count += _generate_new_incident_frequency_distribution(pair)
        cost_count += _generate_new_incident_costs_distribution(pair)

    return frequency_count, cost_count
def main():
    """Parse command-line options and (re)generate the distributions.

    Configures logging, ensures the output directory exists, regenerates the
    incident frequency/cost distributions for the requested (size, industry)
    pairing, then exits the process with status 0.
    """
    # pylint: disable=global-statement
    global OUTPUT_DIR, IMAGES
    # pylint: enable=global-statement
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--size",
        help="Specify the org. size (default: None)",
        choices=["micro", "small", "medium", "large"],
        type=str,
        default=None,
    )
    parser.add_argument(
        "-i",
        "--industry",
        help="Specify the org. industry SIC code (top-level only, e.g. C for "
        "Manufacturing) (default: None)",
        # chr(65)..chr(85) == "A".."U": the accepted top-level SIC letters.
        choices=list(map(chr, range(65, 86))),
        # BUG FIX: was `type=chr`.  argparse passes the raw argument *string*
        # to the type callable and chr() requires an int, so every use of -i
        # failed with "invalid chr value".  The argument is already a
        # one-letter string, so `str` is the correct converter.
        type=str,
        default=None,
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Specify the output directory (default: ./output/)",
        type=str,
        default=os.path.join(os.path.dirname(__file__), "output/"),
        metavar="DIRECTORY",
    )
    parser.add_argument(
        "-p",
        "--images",
        help="Output images at each step of the script (default: false, just "
        "output the final LEC image)",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Verbose console output (default: false)",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-d",
        "--debug",
        help="Show debug console output (default: false)",
        action="store_true",
        default=False,
    )
    args = parser.parse_args()
    OUTPUT_DIR = args.output
    IMAGES = args.images
    size = args.size
    industry = args.industry
    # Configure logging verbosity; --debug takes precedence over --verbose.
    if args.debug:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
        log.info("Debug output.")
    elif args.verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.INFO)
        log.info("Verbose output.")
    else:
        log.basicConfig(format="%(levelname)s: %(message)s")
    # Create the output directory up front so plot saving cannot fail on it.
    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    incidents_dists, costs_dists = _generate_new_distributions((size, industry))
    log.log(
        SUCCESS,
        "Successfully generated %s incident frequency distributions and %s "
        "incident costs distributions!",
        str(incidents_dists),
        str(costs_dists),
    )
    sys.exit(0)
def _label_plot(title="Untitled Plot", xlabel="x axis", ylabel="y axis") -> None:
    """Set the title and both axis labels of the current matplotlib figure."""
    labellers = ((plt.title, title), (plt.xlabel, xlabel), (plt.ylabel, ylabel))
    for apply_label, text in labellers:
        apply_label(text)
def _save_plot(filename="untitled") -> None:
    """Save the current figure (when image output is enabled) and clear it.

    :param filename: base name of the output file; ".png" is appended.
    """
    if IMAGES:
        # os.path.join is robust to OUTPUT_DIR values that lack a trailing
        # separator; the previous string concatenation silently produced a
        # wrong path in that case.  `os` is imported at module level.
        plt.savefig(os.path.join(OUTPUT_DIR, filename + ".png"))
    plt.clf()
# Entry point when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 29.720721 | 118 | 0.651106 |
23658b032c06956a00496d7055711bc9d8118a63 | 26 | py | Python | hello_world.py | fordjango/new_profiles_rest_api | b4086ad4211e5e278b2a8bcf3624f48925ea6040 | [
"MIT"
] | null | null | null | hello_world.py | fordjango/new_profiles_rest_api | b4086ad4211e5e278b2a8bcf3624f48925ea6040 | [
"MIT"
] | null | null | null | hello_world.py | fordjango/new_profiles_rest_api | b4086ad4211e5e278b2a8bcf3624f48925ea6040 | [
"MIT"
] | null | null | null | print("hello from santa")
| 13 | 25 | 0.730769 |
236634d05aadb9d36762574305057814f7a3b99e | 3,939 | py | Python | tests/unit/transport/pecan/models/response/test_health.py | jqxin2006/poppy | 10636e6255c7370172422afece4a5c3d95c1e937 | [
"Apache-2.0"
] | null | null | null | tests/unit/transport/pecan/models/response/test_health.py | jqxin2006/poppy | 10636e6255c7370172422afece4a5c3d95c1e937 | [
"Apache-2.0"
] | null | null | null | tests/unit/transport/pecan/models/response/test_health.py | jqxin2006/poppy | 10636e6255c7370172422afece4a5c3d95c1e937 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
from poppy.common import util
from poppy.transport.pecan.models.response import health
from tests.unit import base
| 35.809091 | 75 | 0.68393 |
23677a38847faaff345e3e57ebe5e7c34aeac4a3 | 1,476 | py | Python | weeabot/info.py | Anonymousey/bongbot | 3498d379ef28206f3325691e340347baa14c2c97 | [
"MIT"
] | null | null | null | weeabot/info.py | Anonymousey/bongbot | 3498d379ef28206f3325691e340347baa14c2c97 | [
"MIT"
] | null | null | null | weeabot/info.py | Anonymousey/bongbot | 3498d379ef28206f3325691e340347baa14c2c97 | [
"MIT"
] | null | null | null | # vim: set ts=2 expandtab:
# -*- coding: utf-8 -*-
"""
Module: info.py
Desc: print current stream info
Author: on_three
Email: on.three.email@gmail.com
DATE: Sat, Oct 4th 2014
This could become very elaborate, showing stream status (up/down)
and number of viewers, etc, but at present i'm just going to
display stream URL in it for reference.
"""
import string
import re
#from pytz import timezone
#from datetime import datetime
#import locale
#import time
from twisted.python import log
import credentials
COMMAND_REGEX_STR = ur'^(?P<command>\.i|\.info|\.streaminfo)( (?P<data>\S+)$)?'
COMMAND_REGEX = re.compile(COMMAND_REGEX_STR, re.UNICODE)
| 22.707692 | 79 | 0.674119 |
236931ea9461223fe34c99e295340ff93405cc67 | 229 | py | Python | Src/Squar-root/squar-root.py | MadushikaPerera/Python | b7919b252c02b5e1017273a65dd022ac9d13f3e4 | [
"MIT"
] | null | null | null | Src/Squar-root/squar-root.py | MadushikaPerera/Python | b7919b252c02b5e1017273a65dd022ac9d13f3e4 | [
"MIT"
] | null | null | null | Src/Squar-root/squar-root.py | MadushikaPerera/Python | b7919b252c02b5e1017273a65dd022ac9d13f3e4 | [
"MIT"
] | null | null | null | #1
# Ask the user for an integer whose square root should be computed;
# non-numeric input will raise ValueError from int().
value = int(input("Enter a number to find the square root : "))
# Negative numbers have no real square root, so reject them.
if value < 0:
    print("Please enter a valid number.")
else:
    # Raising to the power 0.5 computes the square root.
    root = value ** 0.5
    print("Square root of {} is {} ".format(value, root))
| 20.818182 | 64 | 0.624454 |
2369a4c986708b3067b08b2725a7bdc63e4b378b | 12,141 | py | Python | Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 6 | 2021-07-05T16:09:39.000Z | 2022-03-06T22:44:42.000Z | Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | Tools/resultsdbpy/resultsdbpy/model/mock_model_factory.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | null | null | null | # Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import io
import time
import calendar
from resultsdbpy.controller.configuration import Configuration
from resultsdbpy.model.configuration_context_unittest import ConfigurationContextTest
from resultsdbpy.model.mock_repository import MockStashRepository, MockSVNRepository
from resultsdbpy.model.model import Model
| 61.318182 | 205 | 0.730335 |
2369b489eab801857e1ed7ae2c6d1938141cf46b | 8,530 | py | Python | tests/test_rpc.py | thrau/pymq | 7b924d475af8efb1e67e48a323d3f715a589a116 | [
"MIT"
] | 9 | 2019-08-20T20:31:56.000Z | 2022-03-13T23:17:05.000Z | tests/test_rpc.py | thrau/pymq | 7b924d475af8efb1e67e48a323d3f715a589a116 | [
"MIT"
] | 9 | 2019-08-20T21:13:23.000Z | 2020-10-20T11:48:21.000Z | tests/test_rpc.py | thrau/pymq | 7b924d475af8efb1e67e48a323d3f715a589a116 | [
"MIT"
] | null | null | null | import logging
import time
from typing import List
import pytest
import pymq
from pymq import NoSuchRemoteError
from pymq.exceptions import RemoteInvocationError
from pymq.typing import deep_from_dict, deep_to_dict
logger = logging.getLogger(__name__)
def void_function() -> None:
    """No-op helper taking no arguments and producing no result."""
    return None
def delaying_function() -> None:
    """Block the caller for 1.5 seconds, returning nothing.

    NOTE(review): presumably used to exercise slow/long-running remote
    calls — confirm against the tests that invoke it.
    """
    delay_seconds = 1.5
    time.sleep(delay_seconds)
def simple_remote_function(param) -> str:
    """Return a greeting string that embeds *param*."""
    return "Hello {}!".format(param)
def simple_multiple_param_function(p1: int, p2: int) -> int:
    """Return the product of the two positional parameters."""
    product = p1 * p2
    return product
def simple_multiple_param_default_function(p1: int, p2: int = 3) -> int:
    """Return ``p1 * p2``; ``p2`` defaults to 3 when omitted."""
    result = p1 * p2
    return result
def simple_list_param_function(ls: List[int]) -> int:
    """Return the sum of all values in *ls* (0 for an empty list)."""
    total = 0
    for value in ls:
        total += value
    return total
def echo_command_function(cmd: EchoCommand) -> str:
    """Return a greeting built from the command's ``param`` attribute."""
    return f"Hello {cmd.param}!"
def echo_command_response_function(cmd: EchoCommand) -> EchoResponse:
    """Wrap the greeting for ``cmd.param`` in an ``EchoResponse``."""
    greeting = "Hello {}!".format(cmd.param)
    return EchoResponse(greeting)
def error_function():
    """Unconditionally raise ``ValueError("oh noes")``."""
    failure_message = "oh noes"
    raise ValueError(failure_message)
# noinspection PyUnresolvedReferences
| 28.817568 | 100 | 0.661782 |
236c5f0d3ad9eba2bd8b973cfc71aa175670c211 | 2,178 | py | Python | game/textIntro.py | guluc3m/ElJuegoDePusles | 45b5ac281c3ac8ec1f144556e588346a7a015021 | [
"MIT"
] | 1 | 2018-10-21T11:42:31.000Z | 2018-10-21T11:42:31.000Z | game/textIntro.py | guluc3m/ElJuegoDePusles | 45b5ac281c3ac8ec1f144556e588346a7a015021 | [
"MIT"
] | null | null | null | game/textIntro.py | guluc3m/ElJuegoDePusles | 45b5ac281c3ac8ec1f144556e588346a7a015021 | [
"MIT"
] | 2 | 2018-12-30T14:04:50.000Z | 2019-10-27T03:32:41.000Z | #Intro
Intro_Tavern_Elrick = u"Qu os parece? Trabajaremos juntos?"
Intro_Tavern_Alida = u"Pero si todos superamos las pruebas, quien se casara con la princesa?"
Harek = u"Supongo que la princesa se casar con quin ms le guste"
Elrick = u"Sera lo ms inteligente. Utilizar las pruebas para conocernos y ver nuestras virtudes, especialmente las mias, para elegir depus quien ms le guste."
Alida = u"Te veo muy seguro de tus posibilidades"
Elrick = u"Seguro que la princesa busca un pretendiente inteligente que sepa tomar buenas decisiones. Y si adems es chico guapo, mejor"
Harek = u"Puede que yo no sea muy guapo, pero si la princesa Melisenda me escogiera a m me devivira por hacerla sentir como una reina"
Alida = u"Eso es realmente hermoso, Harek. Pero cuando Melisenda regente Sapiensa necesitar a su lado alguien que se preocupe por el reino y sepa hacerlo prosperar"
Sullx = u"Elrick ser un buen soberano, ya dirigimos hace aos un ejercito de zombis"
Alida = u"Sapiensa no es un reino blico! Hace tiempo que estamos en paz y no creo que nadie quiera que eso cambie."
Elrick = u"Sullx habla del pasado, yo tampoco quiero que se acabe la paz en Sapiensa"
Harek = u"Nada de guerra en Sapiensa"
Alida = u"Bien, parece que empezamos a entendernos. Que hacemos para superar las pruebas?"
Elrick = u"..."
Harek = u"..."
Sullx = u"Yo conozco a una tarotixta, lo mismo ella nos da alguna pista"
Harek = u"Eso es una buena idea! Adems seguro que tiene un gato negro. Me encantan los gatos."
Alida = u"Los gatos son ms tpicos de la brujas."
Elrick = u"Pero creo que ya no sigue adivinando, que ahora se dedica a la respostera"
Harek = u"Oh! Es la mujer que vende galletitas de la fortuna?"
Elrick = u"Esa! Habes probado las bambas? Yo cuando llevo bien la lnea me permito desayunar alguna. Estn de muerte!"
# transicin para dialogo de puzles
Puzles = u"Seguramente os preguntares como alguien tan prometedor como yo a acabado ayudando a un brbaro amante de los gatos, a un nigromante con delirios de grandeza y a una cazadora disfrazada de hombrea conseguir el corazn de una princesa."
Puzles = u"Mi nombre es HADOKEN Puzles y esta es mi historia." | 87.12 | 246 | 0.772268 |
236f461f8b6d07d3beef17a23e616ee5fd033b61 | 3,488 | py | Python | 02_Flask_REST/04_MongoDB_REST/app/main.py | CrispenGari/python-flask | 3e7896f401920b8dd045d807212ec24b8353a75a | [
"Apache-2.0"
] | 2 | 2021-11-08T07:37:18.000Z | 2021-11-13T09:23:46.000Z | 02_Flask_REST/04_MongoDB_REST/app/main.py | CrispenGari/Flask | 3e7896f401920b8dd045d807212ec24b8353a75a | [
"Apache-2.0"
] | null | null | null | 02_Flask_REST/04_MongoDB_REST/app/main.py | CrispenGari/Flask | 3e7896f401920b8dd045d807212ec24b8353a75a | [
"Apache-2.0"
] | null | null | null | from keys.keys import pwd
import pymongo
from flask import Flask, request, abort
from flask_restful import Resource, Api, reqparse, marshal_with, fields
"""
DATABASE CONFIGURATION
"""
# Logical database name; interpolated into the Atlas connection string below.
databaseName = "students"
# NOTE(review): credentials live in the URI (the password is imported as
# `pwd` from keys.keys at the top of this file) — keep that module out of VCS.
connection_url = f'mongodb+srv://crispen:{pwd}@cluster0.3zay8.mongodb.net/{databaseName}?retryWrites=true&w=majority'
client = pymongo.MongoClient(connection_url)
# Lists the cluster's database names (also forces an initial connection).
cursor = client.list_database_names()
# NOTE(review): the handle used here is `client.blob`, not the "students"
# database named in the URI above — confirm which database is intended.
db = client.blob
"""
Student post args
"""
# Request parser for POST /student: all five fields are mandatory.
# NOTE(review): the help texts on "course" and "mark" look copy-pasted from
# "name"/"surname" — confirm and correct the messages.
student_post_args = reqparse.RequestParser()
student_post_args.add_argument("name", type=str, help="name required", required=True)
student_post_args.add_argument("surname", type=str, help="surname required", required=True)
student_post_args.add_argument("student_number", type=int, help="student number required", required=True)
student_post_args.add_argument("course", type=str, help="name required", required=True)
student_post_args.add_argument("mark", type=int, help="surname required", required=True)
"""
Student patch args
* We want to be able only to update student course and mark
"""
"""
Resource Fields
"""
# Serialisation schema consumed by marshal_with on the resources below.
resource_fields = {
    '_id': fields.String,
    'name': fields.String,
    'surname': fields.String,
    'course': fields.String,
    'mark': fields.Integer,
    "student_number":fields.Integer,
}
app = Flask(__name__)
app.config["ENV"] = "development"
api = Api(app)
# Resource classes (PostStudent / GetPatchDeleteStudent) are defined
# elsewhere in this module.
api.add_resource(PostStudent, '/student')
api.add_resource(GetPatchDeleteStudent, '/student/<int:id>')
if __name__ == "__main__":
app.run(debug=True) | 33.219048 | 117 | 0.641628 |
236fe878b484e34a105ad050281a3bd06899f1d7 | 4,703 | py | Python | data/validate_possession.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | 1 | 2021-11-02T20:34:22.000Z | 2021-11-02T20:34:22.000Z | data/validate_possession.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | null | null | null | data/validate_possession.py | lpraat/scep2019 | f120ee20397648e708cce41a7949c70b523b6e56 | [
"MIT"
] | 1 | 2021-11-02T20:34:29.000Z | 2021-11-02T20:34:29.000Z | import csv
import math
import datetime
| 26.874286 | 119 | 0.584095 |
2370cb70aa4ccbe33c76c9f8fc510ffbcf707f15 | 6,065 | py | Python | directory_components/context_processors.py | uktrade/directory-components | f5f52ceeecd2975bff07d1bd3afa7a84046fdd50 | [
"MIT"
] | 2 | 2019-06-24T20:22:23.000Z | 2019-07-26T12:51:31.000Z | directory_components/context_processors.py | uktrade/directory-components | f5f52ceeecd2975bff07d1bd3afa7a84046fdd50 | [
"MIT"
] | 278 | 2018-02-21T11:49:46.000Z | 2021-09-16T08:27:54.000Z | directory_components/context_processors.py | uktrade/directory-components | f5f52ceeecd2975bff07d1bd3afa7a84046fdd50 | [
"MIT"
] | 3 | 2019-05-02T15:26:26.000Z | 2020-02-18T17:47:57.000Z | from directory_constants import urls
from django.conf import settings
from django.utils import translation
from directory_components import helpers
| 41.541096 | 120 | 0.711459 |
237138c111b7235bbb0b60fb326edee46f57fa80 | 1,962 | py | Python | src/leetcodepython/string/remove_duplicate_letters_316.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 90 | 2018-12-25T06:01:30.000Z | 2022-01-03T14:01:26.000Z | src/leetcodepython/string/remove_duplicate_letters_316.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 1 | 2020-08-27T09:53:49.000Z | 2020-08-28T08:57:49.000Z | src/leetcodepython/string/remove_duplicate_letters_316.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 27 | 2019-01-02T01:41:32.000Z | 2022-01-03T14:01:30.000Z | # encoding='utf-8'
'''
Solution file for LeetCode problem No.1081 "Smallest Subsequence of Distinct
Characters" (equivalent to No.316 "Remove Duplicate Letters"):
https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters

Given a string, return the lexicographically smallest subsequence that
contains every distinct character of the input exactly once.

Example 1:
    Input:  "cdadabcc"
    Output: "adbc"

Example 2:
    Input:  "abcd"
    Output: "abcd"

@author zhangyu (zhangyuyu417@gmail.com)
'''
# Smoke test: run the solver on the sample input when executed directly.
# NOTE(review): the Solution class is defined elsewhere in this file.
if __name__ == '__main__':
    s = 'cdadabcc'
    solution = Solution()
    result = solution.remove_duplicate_letters(s)
    # Expected smallest distinct-character subsequence of "cdadabcc".
    assert result == 'adbc'
| 25.480519 | 109 | 0.469929 |
23723b37428721d547ab23434d036479e7a2836c | 1,055 | py | Python | setup.py | julienvaslet/interactive-shell | 9ae800f2d9bb3365b5e68b2beef577fb39264f10 | [
"MIT"
] | null | null | null | setup.py | julienvaslet/interactive-shell | 9ae800f2d9bb3365b5e68b2beef577fb39264f10 | [
"MIT"
] | null | null | null | setup.py | julienvaslet/interactive-shell | 9ae800f2d9bb3365b5e68b2beef577fb39264f10 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from setuptools import setup
# Directory containing this setup script; VERSION and README.rst live here.
current_directory = os.path.abspath(os.path.dirname(__file__))
def _read_file(name):
    """Return the text content of *name*, resolved next to this script."""
    path = os.path.join(current_directory, name)
    with open(path, "r", encoding="utf-8") as handle:
        return handle.read()
version = _read_file("VERSION")
long_description = _read_file("README.rst")
setup(
    name="interactive-shell",
    version=version,
    description="Interactive shell classes to easily integrate a terminal in application.",
    long_description=long_description,
    license="MIT License",
    author="Julien Vaslet",
    author_email="julien.vaslet@gmail.com",
    url="https://github.com/julienvaslet/interactive-shell",
    packages=["interactive_shell"],
    install_requires=[],
    scripts=[],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development",
        "Topic :: Terminals"
    ]
)
| 31.029412 | 91 | 0.660664 |
2372e5c093d241cc7faa942820756f058a038286 | 4,530 | py | Python | spacetrading/create_svg/generate_planet_market.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | 1 | 2020-11-08T12:32:36.000Z | 2020-11-08T12:32:36.000Z | spacetrading/create_svg/generate_planet_market.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | 1 | 2021-06-04T22:23:30.000Z | 2021-06-04T22:23:30.000Z | spacetrading/create_svg/generate_planet_market.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
generate the planet market
"""
from spacetrading.create_svg.svg_commands import Svg
from spacetrading.create_svg import generate_svg_symbols
def draw_planet(svg, planet, name, fill_colour):
    """
    Draw one planet's market box into *svg*.

    The box shows a 1-7 price scale, "Demand"/"Supply" labels in ellipses
    above and below it, and two rows of six symbol slots.  The slot whose
    price matches the planet's current demand price (row 0) or supply price
    (row 1) shows the resource's symbol; every other slot shows the
    placeholder symbol '0'.

    svg         -- drawing helper exposing create_path/create_text/
                   create_ellipse/use_symbol
    planet      -- provides planet_demand_resource, planet_supply_resource
                   and the matching *_price attributes
    name        -- unique string used to build the SVG element ids
    fill_colour -- background colour of the market box
    """
    # Layout constants: column width, per-band vertical offsets, font sizes.
    x_shift = 30
    y_shift = [0, 30, 80]
    x_offset = [1.5*x_shift, 1.5*x_shift, x_shift/2]
    y_offset = 30
    scale_factor = 3/2
    font_size_price = 12
    font_size_header = 11
    # Bounding box of the market panel.
    left = x_offset[2]/2
    right = 1.5*x_offset[2] + 7*x_shift
    top = y_offset - 10
    bottom = y_offset + y_shift[1] + y_shift[2] + 10
    vertical_middle = y_offset + y_shift[1] + (y_shift[1]-y_shift[0]) + \
        (y_shift[2] - (2*y_shift[1]-y_shift[0]))/2
    horizontal_middle = 120
    # Box outline: straight left/right edges, bowed top/bottom cubic curves.
    svg.create_path(
        (
            "M {left},{top} V {bottom} " +
            "C {left_2},{bottom_2} {right_2},{bottom_2} {right},{bottom} " +
            "V {top} C {right_2},{top_2} {left_2},{top_2} {left},{top}").format(
                left=left, right=right, top=top, bottom=bottom,
                left_2=left+20, right_2=right-20, bottom_2=bottom+20, top_2=top-20
        ),
        stroke_colour="black",
        fill_colour=fill_colour,
        id_name="box_{}".format(name)
    )
    # Price scale 1..7, centred in the middle band of the box.
    for i in range(1, 8):
        svg.create_text(
            "{}_pricetext_{}".format(name, i),
            [x_offset[2] + (i-0.5)*x_shift, vertical_middle + font_size_price/2],
            str(i),
            font_size=font_size_price,
            text_align="center",
            text_anchor="middle",
            font_weight="bold"
        )
    size_ellipse = [80, 10]
    offset_border_ellipse = 9
    # "Demand" label in an ellipse above the box.
    svg.create_ellipse(
        size_ellipse,
        [horizontal_middle, top - offset_border_ellipse],
        "black",
        "ellipse_top_{}".format(name),
        fill="white",
        stroke_width="1",
        stroke_opacity="1",
        opacity="1"
    )
    svg.create_text(
        "demand_text_{}".format(name),
        [horizontal_middle, top - offset_border_ellipse + font_size_header/2],
        "Demand",
        font_size=font_size_header,
        text_align="center",
        text_anchor="middle",
        font_weight="bold"
    )
    # "Supply" label in an ellipse below the box.
    svg.create_ellipse(
        size_ellipse,
        [horizontal_middle, bottom + offset_border_ellipse],
        "black",
        "ellipse_bottom_{}".format(name),
        fill="white",
        stroke_width="1",
        stroke_opacity="1",
        opacity="1"
    )
    svg.create_text(
        "supply_text_{}".format(name),
        [horizontal_middle, bottom + offset_border_ellipse + font_size_header/2],
        "Supply",
        font_size=font_size_header,
        text_align="center",
        text_anchor="middle",
        font_weight="bold"
    )
    # Row 0 holds demand data, row 1 supply data.
    resources = [planet.planet_demand_resource, planet.planet_supply_resource]
    prices = [
        planet.planet_demand_resource_price,
        planet.planet_supply_resource_price
    ]
    for row in range(2):
        for column in range(6):
            # Demand slots cover prices 2..7, supply slots cover 1..6.
            if row == 1:
                price = column + 1
            else:
                price = column + 2
            # BUG FIX: compare with '==' instead of 'is'.  'is' tests object
            # identity and only matched here by accident through CPython's
            # small-integer caching.
            if price == prices[row]:
                symbolname = generate_svg_symbols.get_symbol_name(resources[row])
            else:
                symbolname = generate_svg_symbols.get_symbol_name('0')
            svg.use_symbol(
                symbolname,
                "{}_name_{}_row_{}_column".format(name, row, column),
                position=[(x_offset[row + 1] + column*x_shift)/scale_factor,
                          (y_offset + y_shift[row + 1])/scale_factor],
                additional_arguments={
                    "transform": f"scale({scale_factor})"
                }
            )
if __name__ == '__main__':
pass
| 31.901408 | 87 | 0.593377 |
2373ab8962f73ef7abf5effc552053ed5c20e4ab | 146 | py | Python | ex28_sh/sh_spike.py | techieguy007/learn-more-python-the-hard-way-solutions | 7886c860f69d69739a41d6490b8dc3fa777f227b | [
"Zed",
"Unlicense"
] | 466 | 2016-11-01T19:40:59.000Z | 2022-03-23T16:34:13.000Z | ex28_sh/sh_spike.py | Desperaaado/learn-more-python-the-hard-way-solutions | 7886c860f69d69739a41d6490b8dc3fa777f227b | [
"Zed",
"Unlicense"
] | 2 | 2017-09-20T09:01:53.000Z | 2017-09-21T15:03:56.000Z | ex28_sh/sh_spike.py | Desperaaado/learn-more-python-the-hard-way-solutions | 7886c860f69d69739a41d6490b8dc3fa777f227b | [
"Zed",
"Unlicense"
] | 241 | 2017-06-17T08:02:26.000Z | 2022-03-30T09:09:39.000Z | import subprocess
import sys
import os
# Minimal shell REPL: read a command line and run it until interrupted.
while True:
    # Prompt and read one line; EOF (Ctrl-D) raises EOFError and ends the loop.
    line = input('> ')
    # BUG FIX: split() with no separator collapses runs of whitespace and
    # returns [] for a blank line, unlike split(' ') which produced [''] and
    # made subprocess.run crash with FileNotFoundError.  Also renamed the
    # variable: `exec` shadowed the built-in of the same name.
    argv = line.split()
    if not argv:
        continue
    # Run the command; `status` is a CompletedProcess carrying the returncode.
    status = subprocess.run(argv)
| 13.272727 | 34 | 0.630137 |
2375cf7ad137352d0c5065ecc52c2afbf6c29b7b | 1,858 | py | Python | src/parser.py | Nanoteck137/Clockwoot | a2b039b2095834d4ad0a03ab030492a70ac097f5 | [
"MIT"
] | 1 | 2019-06-07T00:23:06.000Z | 2019-06-07T00:23:06.000Z | src/parser.py | Nanoteck137/Clockwoot | a2b039b2095834d4ad0a03ab030492a70ac097f5 | [
"MIT"
] | null | null | null | src/parser.py | Nanoteck137/Clockwoot | a2b039b2095834d4ad0a03ab030492a70ac097f5 | [
"MIT"
] | null | null | null | from sly import Lexer, Parser
import vm
| 23.820513 | 59 | 0.577503 |