hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11a8090bef6d5fb982bc2e421b4aadbc73c27dfc | 3,861 | py | Python | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | 1 | 2020-05-22T15:45:22.000Z | 2020-05-22T15:45:22.000Z | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | null | null | null | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | 1 | 2020-05-31T03:01:42.000Z | 2020-05-31T03:01:42.000Z | from settings import constants
from game import bet_sizing, card_tools, card_to_string
from base import Node
import torch
tree_builder = PokerTreeBuilder()
| 38.227723 | 98 | 0.644911 |
11ab85dad8fb08a5c5eee01b9be2f4e803d8712c | 50,062 | py | Python | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.tr.bind`
=========================
This module implements the binding process.
"""
from ..util import maybe, listof, tupleof, similar
from ..adapter import Adapter, Protocol, adapt, adapt_many
from ..domain import (Domain, BooleanDomain, IntegerDomain, DecimalDomain,
FloatDomain, UntypedDomain, EntityDomain, RecordDomain, ListDomain,
IdentityDomain, VoidDomain)
from ..classify import normalize
from ..error import Error, translate_guard, choices_guard, point
from ..syn.syntax import (Syntax, CollectSyntax, SelectSyntax, ApplySyntax,
FunctionSyntax, PipeSyntax, OperatorSyntax, PrefixSyntax,
ProjectSyntax, FilterSyntax, LinkSyntax, DetachSyntax, AttachSyntax,
AssignSyntax, ComposeSyntax, LocateSyntax, IdentitySyntax, GroupSyntax,
IdentifierSyntax, UnpackSyntax, ReferenceSyntax, LiftSyntax,
StringSyntax, LabelSyntax, NumberSyntax, RecordSyntax, DirectSyntax)
from .binding import (Binding, WrappingBinding, CollectBinding, RootBinding,
HomeBinding, TableBinding, ChainBinding, ColumnBinding,
QuotientBinding, KernelBinding, ComplementBinding, LocateBinding,
SieveBinding, AttachBinding, SortBinding, CastBinding, IdentityBinding,
ImplicitCastBinding, RescopingBinding, AssignmentBinding,
DefineBinding, DefineReferenceBinding, DefineCollectionBinding,
DefineLiftBinding, SelectionBinding, WildSelectionBinding,
DirectionBinding, TitleBinding, RerouteBinding,
ReferenceRerouteBinding, AliasBinding, LiteralBinding, FormulaBinding,
VoidBinding, Recipe, LiteralRecipe, SelectionRecipe, FreeTableRecipe,
AttachedTableRecipe, ColumnRecipe, KernelRecipe, ComplementRecipe,
IdentityRecipe, ChainRecipe, SubstitutionRecipe, BindingRecipe,
ClosedRecipe, PinnedRecipe, AmbiguousRecipe)
from .lookup import (lookup_attribute, lookup_reference, lookup_complement,
lookup_attribute_set, lookup_reference_set, expand, direct, guess_tag,
identify, unwrap)
from .signature import IsEqualSig, AndSig
from .coerce import coerce
from .decorate import decorate
def hint_choices(choices):
# Generate a hint from a list of choices.
assert isinstance(choices, listof(unicode))
if not choices:
return None
chunks = ["did you mean:"]
if len(choices) == 1:
chunks.append("'%s'" % choices[0].encode('utf-8'))
else:
chunks.append(", ".join("'%s'" % choice.encode('utf-8')
for choice in choices[:-1]))
chunks.append("or")
chunks.append("'%s'" % choices[-1].encode('utf-8'))
return " ".join(chunks)
def __call__(self):
# The default implementation; override in subclasses.
# Generate a hint with a list of alternative names.
model = self.name.lower()
arity = None
if self.arguments is not None:
arity = len(self.arguments)
attributes = lookup_attribute_set(self.state.scope)
global_attributes = set()
for component_name in BindByName.__catalogue__():
component_arity = -1
if isinstance(component_name, tuple):
component_name, component_arity = component_name
if isinstance(component_name, str):
component_name = component_name.decode('utf-8')
component_name = component_name.lower()
global_attributes.add((component_name, component_arity))
all_attributes = sorted(attributes|global_attributes)
choices = []
if not choices and arity is None:
names = lookup_reference_set(self.state.scope)
if model in names:
choices = ["a reference '$%s'" % model.encode('utf-8')]
if not choices and arity is None:
if any(model == sample
for sample, sample_arity in all_attributes
if sample_arity is not None):
choices = ["a function '%s'" % model.encode('utf-8')]
if not choices and arity is None:
choices = [sample
for sample, sample_arity in all_attributes
if sample_arity is None and sample != model
and similar(model, sample)]
if not choices and arity is not None \
and not isinstance(self.syntax, OperatorSyntax):
arities = [sample_arity
for sample, sample_arity in all_attributes
if sample == model and
sample_arity not in [None, -1, arity]]
if arities:
required_arity = []
arities.sort()
if len(arities) == 1:
required_arity.append(str(arities[0]))
else:
required_arity.append(", ".join(str(sample_arity)
for sample_arity in arities[:-1]))
required_arity.append("or")
required_arity.append(str(arities[-1]))
if required_arity[-1] == "1":
required_arity.append("argument")
else:
required_arity.append("arguments")
required_arity = " ".join(required_arity)
raise Error("Function '%s' requires %s; got %s"
% (self.syntax.identifier,
required_arity, arity))
if not choices and arity is not None:
if any(model == sample
for sample, sample_arity in all_attributes
if sample_arity is None):
choices = ["an attribute '%s'" % model.encode('utf-8')]
if not choices and arity is not None:
choices = [sample
for sample, sample_arity in all_attributes
if sample_arity in [-1, arity] and sample != model
and similar(model, sample)]
scope_name = guess_tag(self.state.scope)
if scope_name is not None:
scope_name = scope_name.encode('utf-8')
with choices_guard(choices):
if isinstance(self.syntax, (FunctionSyntax, PipeSyntax)):
raise Error("Found unknown function",
self.syntax.identifier)
if isinstance(self.syntax, OperatorSyntax):
raise Error("Found unknown operator",
self.syntax.symbol)
if isinstance(self.syntax, PrefixSyntax):
raise Error("Found unknown unary operator",
self.syntax.symbol)
if isinstance(self.syntax, IdentifierSyntax):
raise Error("Found unknown attribute",
"%s.%s" % (scope_name, self.syntax)
if scope_name is not None else str(self.syntax))
class BindByRecipe(Adapter):
"""
Applies a recipe to generate a binding node.
This is an abstract adapter that generates new binding nodes
from binding recipes. The :class:`BindByRecipe` interface
has the following signature::
BindByRecipe: (Recipe, Syntax, BindingState) -> Binding
The adapter is polymorphic by the first argument.
`recipe` (:class:`htsql.core.tr.binding.Recipe`)
A recipe to apply.
`syntax` (:class:`htsql.core.tr.syntax.Syntax`)
The syntax node associated with the recipe.
`state` (:class:`BindingState`)
The current binding state.
"""
adapt(Recipe)
class BindByLiteral(BindByRecipe):
adapt(LiteralRecipe)
class BindBySelection(BindByRecipe):
adapt(SelectionRecipe)
class BindByFreeTable(BindByRecipe):
adapt(FreeTableRecipe)
class BindByAttachedTable(BindByRecipe):
adapt(AttachedTableRecipe)
class BindByColumn(BindByRecipe):
adapt(ColumnRecipe)
class BindByKernel(BindByRecipe):
adapt(KernelRecipe)
class BindByComplement(BindByRecipe):
adapt(ComplementRecipe)
class BindByIdentity(BindByRecipe):
adapt(IdentityRecipe)
class BindBySubstitution(BindByRecipe):
adapt(SubstitutionRecipe)
class BindByBinding(BindByRecipe):
adapt(BindingRecipe)
class BindByClosed(BindByRecipe):
adapt(ClosedRecipe)
class BindByChain(BindByRecipe):
adapt(ChainRecipe)
class BindByPinned(BindByRecipe):
adapt(PinnedRecipe)
class BindByAmbiguous(BindByRecipe):
adapt(AmbiguousRecipe)
def bind(syntax, environment=None):
recipes = []
if environment is not None:
for name in sorted(environment):
value = environment[name]
if value.data is None:
recipe = LiteralRecipe(value.data, value.domain)
elif isinstance(value.domain, ListDomain):
item_recipes = [LiteralRecipe(item,
value.domain.item_domain)
for item in value.data]
recipe = SelectionRecipe(item_recipes)
elif isinstance(value.domain, RecordDomain):
item_recipes = [LiteralRecipe(item, profile.domain)
for item, profile in
zip(value.data, value.domain.fields)]
recipe = SelectionRecipe(item_recipes)
elif isinstance(value.domain, IdentityDomain):
recipe = convert(value.domain, value.data)
else:
recipe = LiteralRecipe(value.data, value.domain)
recipes.append((name, recipe))
root = RootBinding(syntax)
state = BindingState(root, recipes)
if isinstance(syntax, AssignSyntax):
specifier = syntax.larm
with translate_guard(specifier):
if specifier.identifier is None:
raise Error("Expected an identifier")
identifier = specifier.larms[0]
binding = state.bind(syntax.rarm)
binding = Select.__invoke__(binding, state)
binding = TitleBinding(binding, identifier, binding.syntax)
else:
binding = state.bind(syntax)
binding = Select.__invoke__(binding, state)
return binding
| 37.028107 | 80 | 0.583716 |
11ad8fe6bba3193be56826f292aa054b4c5199e3 | 2,226 | py | Python | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | null | null | null | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | 14 | 2021-01-01T17:16:23.000Z | 2022-02-28T19:37:28.000Z | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | null | null | null | import os
import random
from django.db.models import signals
from django.utils import timezone
import factory
from factory.django import DjangoModelFactory
from locuszoom_plotting_service.users.tests.factories import UserFactory
from .. import constants as lz_constants
from .. import models as lz_models
| 29.68 | 108 | 0.709344 |
11aed6db8dec1d89d1561ef9163cbf9b2aff8920 | 761 | py | Python | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | 1 | 2020-01-31T16:29:15.000Z | 2020-01-31T16:29:15.000Z | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | null | null | null | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | null | null | null | import requests
from os import getenv
from typing import List, Dict
_API_URL = getenv('API_URL')
| 21.742857 | 78 | 0.697766 |
11b20ebad8eab479fb6fed2be3f7940e76f88665 | 22,860 | py | Python | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | null | null | null | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | 1 | 2021-03-01T09:16:15.000Z | 2021-03-01T09:34:49.000Z | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import math
import copy
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from core.config import cfg
import utils.net as net_utils
from deform.torch_deform_conv.layers import ConvOffset2D
model_urls = {
'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',
'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',
}
# ---------------------------------------------------------------------------- #
# Helper functions
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# Bits for specific architectures (ResNet50, ResNet101, ...)
# ---------------------------------------------------------------------------- #
def ResNet50_conv4_body(pretrained=True, model_path=None):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS if model_path is None else model_path
model = ResNet_convX_body((3, 4, 6, 3), 4)
if pretrained:
if model_path:
print("Loading pretrained weights from %s" %(model_path))
state_dict = torch.load(model_path)
state_dict = state_dict['state_dict']
state_dict_v2 = copy.deepcopy(state_dict)
for key in state_dict:
pre, post = key.split('module.')
state_dict_v2[post] = state_dict_v2.pop(key)
state_dict_v2 = weight_mapping(state_dict_v2)
else:
state_dict = model_zoo.load_url(model_urls['resnet50'])
state_dict_v2 = weight_mapping(state_dict)
model.load_state_dict(state_dict_v2, strict=False)
return model
def ResNet50_conv5_body(pretrained=True, model_path=None):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS if model_path is None else model_path
model = ResNet_convX_body((3, 4, 6, 3), 5)
if pretrained:
if model_path:
print("Loading pretrained weights from %s" %(model_path))
state_dict = torch.load(model_path)
state_dict = state_dict['state_dict']
state_dict_v2 = copy.deepcopy(state_dict)
for key in state_dict:
pre, post = key.split('module.')
state_dict_v2[post] = state_dict_v2.pop(key)
state_dict_v2 = weight_mapping(state_dict_v2)
else:
state_dict = model_zoo.load_url(model_urls['resnet50'])
state_dict_v2 = weight_mapping(state_dict)
model.load_state_dict(state_dict_v2, strict=False)
return model
def ResNet101_conv4_body(pretrained=True, model_path = None):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS if model_path is None else model_path
model = ResNet_convX_body((3, 4, 23, 3), 4)
if pretrained:
if model_path:
print("Loading pretrained weights from %s" %(model_path))
state_dict = torch.load(model_path)
state_dict = state_dict['state_dict']
state_dict_v2 = copy.deepcopy(state_dict)
for key in state_dict:
pre, post = key.split('module.')
state_dict_v2[post] = state_dict_v2.pop(key)
state_dict_v2 = weight_mapping(state_dict_v2)
else:
state_dict = model_zoo.load_url(model_urls['resnet101'])
state_dict_v2 = weight_mapping(state_dict)
model.load_state_dict(state_dict_v2, strict=False)
return model
def ResNet101_conv5_body(pretrained=True, model_path = None):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS if model_path is None else model_path
model = ResNet_convX_body((3, 4, 23, 3), 5)
if pretrained:
if model_path:
print("Loading pretrained weights from %s" %(model_path))
state_dict = torch.load(model_path)
state_dict = state_dict['state_dict']
state_dict_v2 = copy.deepcopy(state_dict)
for key in state_dict:
pre, post = key.split('module.')
state_dict_v2[post] = state_dict_v2.pop(key)
state_dict_v2 = weight_mapping(state_dict_v2)
else:
state_dict = model_zoo.load_url(model_urls['resnet101'])
state_dict_v2 = weight_mapping(state_dict)
model.load_state_dict(state_dict_v2, strict=False)
return model
def ResNet152_conv5_body(pretrained=True, model_path=None):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS if model_path is None else model_path
model = ResNet_convX_body((3, 8, 36, 3), 5)
if pretrained:
if model_path:
print("Loading pretrained weights from %s" %(model_path))
state_dict = torch.load(model_path)
state_dict = state_dict['state_dict']
state_dict_v2 = copy.deepcopy(state_dict)
for key in state_dict:
pre, post = key.split('module.')
state_dict_v2[post] = state_dict_v2.pop(key)
state_dict_v2 = weight_mapping(state_dict_v2)
else:
state_dict = model_zoo.load_url(model_urls['resnet152'])
state_dict_v2 = weight_mapping(state_dict)
model.load_state_dict(state_dict_v2, strict=False)
return model
# ---------------------------------------------------------------------------- #
# Generic ResNet components
# ---------------------------------------------------------------------------- #
def add_stage(inplanes, outplanes, innerplanes, nblocks, dilation=1, stride_init=2, deform=False):
"""Make a stage consist of `nblocks` residual blocks.
Returns:
- stage module: an nn.Sequentail module of residual blocks
- final output dimension
"""
res_blocks = []
stride = stride_init
for _ in range(nblocks):
res_blocks.append(add_residual_block(
inplanes, outplanes, innerplanes, dilation, stride, deform=deform)
)
inplanes = outplanes
stride = 1
return nn.Sequential(*res_blocks), outplanes
def add_residual_block(inplanes, outplanes, innerplanes, dilation, stride, deform=False):
"""Return a residual block module, including residual connection, """
if stride != 1 or inplanes != outplanes:
shortcut_func = globals()[cfg.RESNETS.SHORTCUT_FUNC]
downsample = shortcut_func(inplanes, outplanes, stride)
else:
downsample = None
trans_func = globals()[cfg.RESNETS.TRANS_FUNC]
res_block = trans_func(
inplanes, outplanes, innerplanes, stride,
dilation=dilation, group=cfg.RESNETS.NUM_GROUPS,
downsample=downsample, deform=deform)
return res_block
# ------------------------------------------------------------------------------
# various downsample shortcuts (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# various stems (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# various transformations (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
def freeze_params(m):
"""Freeze all the weights by setting requires_grad to False
"""
for p in m.parameters():
p.requires_grad = False | 38.484848 | 104 | 0.580971 |
11b5aaf2858fc133d106e1faff9a6c588ffce900 | 1,530 | py | Python | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from master_msgs.msg import traction_Orders, imu_Speed, imu_Magnetism, pots, current, rpm, arm_Orders, goal,connection
if __name__ == '__main__':
try:
node_Interface()
except rospy.ROSInterruptException:
pass
| 29.423077 | 118 | 0.760784 |
11b627ad398f9ae3625b734210d1a5d1347b9bf2 | 1,700 | py | Python | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | 1 | 2018-06-09T22:20:00.000Z | 2018-06-09T22:20:00.000Z | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | 4 | 2020-02-11T22:01:16.000Z | 2021-06-10T17:38:56.000Z | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | null | null | null | from pantofola_search.models import *
from pantofola_search.tools.imdb_fetcher import ImdbFetcher
| 36.956522 | 78 | 0.648824 |
11b673d3e56e187a96e8ce75c9577f8cea8df161 | 200 | py | Python | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 152 | 2020-06-03T02:34:11.000Z | 2022-03-30T04:16:45.000Z | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 139 | 2019-05-29T00:37:09.000Z | 2020-05-17T16:49:26.000Z | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 22 | 2020-05-18T13:42:05.000Z | 2022-03-11T08:37:51.000Z | """Expose structural RTLIR generation pass.
PyMTL user should only interact with the passes exposed here.
"""
from .StructuralRTLIRGenL4Pass import StructuralRTLIRGenL4Pass as StructuralRTLIRGenPass
| 33.333333 | 88 | 0.84 |
11b6a22d0d9d730ae6441343ec296d67f55adf10 | 7,663 | py | Python | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | null | null | null | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | 4 | 2020-07-17T18:11:54.000Z | 2020-07-26T12:34:57.000Z | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | null | null | null | import json
import re
import datetime
import os
import arcpy
regex_flag_dict = {
# 'ASCII' re.A, # this is py3 only so wont work in arcgis desktop
'IGNORECASE': re.I,
'LOCALE': re.L,
"MULTILINE": re.M,
"DOTMATCH": re.S,
"UNICODE": re.U,
"VERBOSE": re.X,
}
def _arc_process(rule_data, feature):
"""
impure function as i am modifying the rule_data
input = {
"Rules": rule_dict,
"Fields": field_dict,
"Groups": group_dict
}
returns dictionary of the rules"""
fields = [field for field in rule_data['Fields']]
with arcpy.da.SearchCursor(feature, ["OID@"] + fields) as sc:
for row in sc:
_id = row[0]
for ix, value in enumerate(row[1:]):
field_rules = rule_data['Fields'][fields[ix]]
# append ID to each rule if they test = False
[rule['result'].append(_id) for rule in field_rules if rule['rule'](value)]
for group_name in rule_data['Groups']:
group = rule_data['Groups'][group_name]
group_func = any if group.get('match') == 'any' else all
group_result = group_func([True if _id in r['result'] else False for r in group['rules']])
if group_result == True:
group['result'].append(_id)
return rule_data
# region Linters
# region builders
def _compile_global_rules(json_obj):
"""
returns
rule name is either global_RULENAME for global or fieldname_RULENAME for field specific ones
{
rule_name: rule_function > str: function
}
"""
rule_dict = {}
for rule in json_obj.get('globalRules', []):
rule_name = rule.get('ruleName', '').upper()
nm = 'global_{}'.format(rule_name)
f = _parse_rule(rule)
rule_dict[nm] = f
return rule_dict
def _compile_field_rules(json_obj, rule_dict):
"""
returns:
{
FieldName > str: {
'result': [] > str: list,
'ruleName': ruleName > str: str,
'rule': rule_dict[fieldname_rule_name] > str: function,
}
}
"""
field_dict = {}
for field in json_obj.get('fields', []):
field_rules = []
field_name = field.get('fieldName')
for rule in field.get('rules', []):
rule_name = rule.get('ruleName', '').upper()
rule_type = rule.get('type')
output_rule = rule.get('output', True)
nm = None
if rule_type is None and 'global_{}'.format(rule_name) in rule_dict:
nm = 'global_{}'.format(rule_name)
else:
nm = '{}_{}'.format(field_name, rule_name)
rule_dict[nm] = _parse_rule(rule)
field_rules.append({
'result': [],
'ruleName': rule_name,
'rule': rule_dict[nm],
'output': output_rule
})
field_dict[field_name] = field_rules
return field_dict
def _compile_group_rules(json_obj, field_dict):
"""
rules are the address to the rule from the field dictionary. when updating the result in the field results, should be available here
returns
{
group_name: {
"result": [], # array of ids with errors,
"match": "all" or "any", # type of match to test for
"rules": [group_rules], # array of the rules for this group
}
}
"""
group_dict = {}
for group in json_obj.get("ruleGroups", []):
group_name = group.get("groupName", "")
match_type = group.get("match", "")
group_rules = []
for rule in group.get("rules", []):
f = rule.get("fieldName")
rn = rule.get("ruleName","").upper()
group_rules += [r for r in field_dict[f] if r['ruleName']==rn]
group_dict[group_name] = {
"result": [],
"match": match_type,
"rules": group_rules,
"description": group.get('description', '')
}
return group_dict
# region parse rules
if __name__ == "__main__":
feat = r"C:\Users\scody\Desktop\ArcPro Model\AllPipes2020\Data\ModelNetwork.gdb\facility_junction"
main('facil_jct.json', feat)
| 27.66426 | 136 | 0.588542 |
11b7cee72a017b56ab9f447f74c1610717cfe52e | 8,784 | py | Python | tests/st/ops/gpu/test_scatter_nd_func_op.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 1 | 2021-12-27T13:42:29.000Z | 2021-12-27T13:42:29.000Z | tests/st/ops/gpu/test_scatter_nd_func_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | null | null | null | tests/st/ops/gpu/test_scatter_nd_func_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
import mindspore.common.dtype as mstype
import mindspore.ops as ops
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
func_map = {
"update": ops.ScatterNdUpdate,
"add": ops.ScatterNdAdd,
"sub": ops.ScatterNdSub,
}
| 37.378723 | 86 | 0.612136 |
11b7d8f84ea9074863867abdbc15c4a61c060614 | 1,710 | py | Python | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | from persona import Persona
from logger_base import log
from cursor import Cursor
| 38 | 92 | 0.612281 |
11b95e0f9e7afe8543bf0c3e7be151865cf4b771 | 5,394 | py | Python | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 25 | 2020-05-14T13:25:42.000Z | 2021-11-09T10:09:27.000Z | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 19 | 2020-05-05T19:47:41.000Z | 2021-02-05T17:06:53.000Z | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 6 | 2020-05-16T10:02:48.000Z | 2021-10-04T08:03:49.000Z | import json
import pytest
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from hmt.serve.mock.log import Log
from hmt.serve.mock.scope import Scope
from hmt.serve.mock.specs import load_specs
from hmt.serve.utils.routing import HeaderRouting
ACCOUNTS_HEADERS = {
"Host": "sandbox.apis.op-palvelut.fi",
"x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
"x-request-id": "12345",
"x-session-id": "12345",
"authorization": "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
"x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
#
PAYMENTS_HEADERS = {
"Host": "sandbox.apis.op-palvelut.fi",
"x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
"x-request-id": "12345",
"x-session-id": "12345",
# 'authorization': "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
"x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
"""
def get_accounts(http_client: AsyncHTTPClient, base_url: str):
req = HTTPRequest(base_url+'/accounts/v3/accounts', headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
return json.loads(ret.body)['accounts']
"""
"""
def init_payment(payer_iban, receiver_iban, amount, http_client, base_url):
body = {
"amount": amount,
"subject": "Client Test",
"currency": "EUR",
"payerIban": payer_iban,
"valueDate": "2020-01-27T22:59:34Z",
"receiverBic": "string",
"receiverIban": receiver_iban,
"receiverName": "string"
}
url = base_url + '/v1/payments/initiate'
req = HTTPRequest(url, method='POST', headers=PAYMENTS_HEADERS, body=json.dumps(body))
res = yield http_client.fetch(req)
return json.loads(res.body)
"""
"""
def confirm_payment(payment_id, http_client: AsyncHTTPClient, base_url: str):
body = {
'paymentId': payment_id
}
url = base_url + '/v1/payments/confirm'
req = HTTPRequest(url, headers=PAYMENTS_HEADERS, body=json.dumps(body))
response = yield http_client.fetch(req)
return json.loads(response)
"""
| 32.493976 | 90 | 0.670931 |
11ba755db1dbc0aa52b8605bc8949960f9ba11a9 | 346 | py | Python | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | # Find the maximum negative element in the array.
# Display its value and position in the array.
import random
arr = [random.randint(-50, 50) for _ in range(10)]
print(arr)
num = -50
position = 0
for i in arr:
if i < 0 and i > num:
num = i
print ('The maximum negative element {}, its position: {}'.format(num, arr.index(num)))
| 19.222222 | 87 | 0.656069 |
11bc0b3cb2807ff10941fab0ad8b5ff296d80b41 | 253 | py | Python | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | import collections
from numpy.core.defchararray import lower
import streamlit as st
import numpy as np
import pandas as pd
from pages import utils
| 25.3 | 44 | 0.754941 |
11c058f314fcdf27f630e4e67e934c957629b5a4 | 1,000 | py | Python | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | 1 | 2021-04-08T12:46:21.000Z | 2021-04-08T12:46:21.000Z | """
Tool to convert 9ML files between different supported formats (e.g. XML_,
JSON_, YAML_) and 9ML versions.
"""
from argparse import ArgumentParser
from pype9.utils.arguments import nineml_document
from pype9.utils.logging import logger
| 33.333333 | 77 | 0.664 |
11c2627f43e4b6eeb9e8f2281dbb147804505bde | 85 | py | Python | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | 1 | 2018-09-12T07:13:53.000Z | 2018-09-12T07:13:53.000Z | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | null | null | null | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | null | null | null | i=0
# Pre-size the list so that s[i] = i is a valid in-place write for every
# loop index (the original one-element list raised IndexError at i = 1).
s = [50] * 10
for i in range(0, 10):
    print("w%dwww" % i)
    s[i] = i
# The original final line was `print(s[i]` -- the missing closing
# parenthesis made the whole script a SyntaxError.
print(s[i])
| 7.727273 | 21 | 0.470588 |
11c29c94c567a27034de5cc0c60d69d3debbcc00 | 871 | py | Python | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | #PYTHON UN POCO MAS AVANZADO METODO DE ABRIR EN CONSOLA
print("PYTHON MAS AVANZADO")
# Basic variables (the Spanish strings below are printed at runtime, so
# they are left untouched).
texto = "TEXTO DE PRUEBA"
nombre = "FREYDER"
altura = "2 metros"
year = 2021
#print(f"{texto}--{nombre}--{altura}--{str(year)}")
print(texto + " " +nombre + " "+ altura +" "+ str(year))
# Keyboard input
sitio = input("CUAL ES TU NOMBRE: ")
print(sitio)
# Conditionals (example kept disabled inside the string literal below)
"""
altura = int(input("Cual es tu altura?: " ))
if altura > 190:
    print("ALT@")
else:
    print("BAJO")
"""
# Functions example, also disabled (it would call input() if enabled).
"""
#FUNCIONES
var_altura = int(input("Cual es tu altura?: " ))
def mostrarAltura(estatura):
    resultado = ""
    if estatura > 190:
        resultado = ("ALT@")
    else:
        resultado = ("BAJO")
    return resultado
print(mostrarAltura(var_altura))
"""
# Lists
personas = ["PACHO", "HUGO", "PEIPEI"]
print(personas[2])
for persona in personas:
    print(persona)
| 17.078431 | 56 | 0.629162 |
11c365d4ccc71a94837656d754364a0fe60f8958 | 3,615 | py | Python | Tools/MakeHDF.py | Kadantte/VideoSuperResolution | 4c86e49d81c7a9bea1fe0780d651afc126768df3 | [
"MIT"
] | 1,447 | 2018-06-04T08:44:07.000Z | 2022-03-29T06:19:10.000Z | Tools/MakeHDF.py | Evergreengyq/VideoSuperResolution | 1d0c54fafaf7a02f0d69408502f90c55f0f76536 | [
"MIT"
] | 96 | 2018-08-29T01:02:45.000Z | 2022-01-12T06:00:01.000Z | Tools/MakeHDF.py | Evergreengyq/VideoSuperResolution | 1d0c54fafaf7a02f0d69408502f90c55f0f76536 | [
"MIT"
] | 307 | 2018-06-26T13:35:54.000Z | 2022-01-21T09:01:54.000Z | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 5:03
import argparse
import time
from pathlib import Path
import h5py
import numpy as np
import tqdm
from PIL import Image
__all__ = ["gather_videos_vqp", "gather_videos", "print_dataset"]
# Command-line interface. FLAGS is parsed at import time and read as a
# module-level global by the gather_* task functions below.
parser = argparse.ArgumentParser(description="Make HDF5 datasets")
parser.add_argument("input_dir", help="path of the input root folder.")
parser.add_argument("-o", "--output", help="output hdf file path.")
parser.add_argument("-a", "--append", action='store_true')
parser.add_argument("-t", "--task_name", choices=__all__, help="task name")
parser.add_argument("--compression", type=int, default=None)
parser.add_argument("--glob", help="glob pattern to gather files inside input."
                    "For recursively glob, use **/*.")
parser.add_argument("--data_format",
                    choices=('channels_first', 'channels_last'),
                    default='channels_first', help="data format (default: CHW)")
FLAGS, args = parser.parse_known_args()
def gather_videos_vqp(fd: h5py.File):
  """Gather VQP image sequences into *fd*, stacking repeated runs per clip.

  Every directory under ``FLAGS.input_dir`` that directly contains matched
  files is read as one image sequence. The HDF5 key is the folder path with
  the ``_<suffix>`` of the folder name stripped, so repeated runs of the
  same clip share one dataset; each extra run is appended along axis 0
  (resizable up to 52 entries -- presumably one per QP value, TODO confirm).
  """
  root = Path(FLAGS.input_dir)
  glob = FLAGS.glob or '*'
  inputs = sorted(root.glob(glob))
  # Directories that directly contain at least one matched file.
  candidates = {i.parent for i in inputs if i.is_file()}
  frames_info = {}  # NOTE(review): populated but never read here -- confirm
  for p in tqdm.tqdm(candidates):
    seq = [Image.open(f) for f in sorted(p.rglob('*')) if f.is_file()]
    cube = np.stack(seq)  # (frames, H, W, C)
    if FLAGS.data_format == 'channels_first':
      cube = cube.transpose([0, 3, 1, 2])  # -> (frames, C, H, W)
    cube = np.expand_dims(cube, 0)  # leading axis indexes repeated runs
    path = p.relative_to(root)
    # ugly: drop the run suffix ("name_x" -> "name") so runs share one key
    path = path.parent / path.stem.split('_')[0]
    key = str(path.as_posix())
    if key not in fd:
      fd.create_dataset(key, data=cube,
                        maxshape=(52,) + cube.shape[1:],
                        compression=FLAGS.compression)
      frames_info[key] = len(seq)
    else:
      # Append this run: grow axis 0 by one and write into the new slot.
      d = fd[key]
      cnt = d.shape[0] + 1
      d.resize(cnt, 0)
      d[-1] = cube
    del cube
def gather_videos(fd: h5py.File):
  """Gather videos. Video is defined in a folder containing sequential images."""
  root = Path(FLAGS.input_dir)
  glob = FLAGS.glob or '*'
  inputs = sorted(root.glob(glob))
  # Directories that directly contain at least one matched file; each such
  # directory is treated as one video.
  candidates = set(i.parent for i in filter(lambda f: f.is_file(), inputs))
  frames_info = {}
  for p in tqdm.tqdm(candidates):
    # All files under the folder (recursively), in name order, as PIL images.
    seq = [Image.open(f) for f in
           filter(lambda f: f.is_file(), sorted(p.rglob('*')))]
    cube = np.stack(seq)  # (frames, H, W, C)
    if FLAGS.data_format == 'channels_first':
      cube = cube.transpose([0, 3, 1, 2])  # -> (frames, C, H, W)
    path = p.relative_to(root)
    key = str(path.as_posix())
    fd.create_dataset(key, data=cube, compression=FLAGS.compression)
    frames_info[key] = len(seq)
    del cube
  # Persist per-video frame counts as a file attribute for readers.
  fd.attrs['frames_info'] = list(frames_info.items())
if __name__ == '__main__':
main()
| 30.125 | 81 | 0.634302 |
11c45856fc39f00ce8b427bda4629a69a7f9c3b7 | 1,480 | py | Python | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | """
Process 'com.duckduckgo.mobile.android/app_webview/Cookies'
"""
import os
import sqlite3
from modules.helpers.ddg_path_handler import process_directory_paths
query_cookies = """
SELECT
host_key,
path,
name,
value,
creation_utc,
last_access_utc,
expires_utc,
secure,
httponly,
persistent,
encrypted_value
FROM cookies;
"""
cookies_template = """--
Host: %s
Path: %s
Cookie Name: %s
Cookie Value: %s
Cookie Creation: %s
Cookie Expiration: %s
"""
def process_appwv_cookies(duckduckgo_path, output_path):
    """Dump DDG 'app_webview/Cookies' rows into appwv_cookies_output.txt.

    Opens the SQLite cookie database under *duckduckgo_path* and writes one
    formatted block per cookie into <output_path>/appwv_cookies_output.txt.
    Returns None in every case; failures are reported into the output file.

    NOTE(review): *duckduckgo_path* is assumed to end with a path separator
    (it is joined by plain string concatenation) -- confirm with callers.
    """
    with open(os.path.join(output_path, 'appwv_cookies_output.txt'), 'w') as o:
        o.write("Processed: 'com.duckduckgo.mobile.android/app_webview/Cookies'\n")
        try:
            conn = sqlite3.connect(duckduckgo_path + 'app_webview/Cookies')
            try:
                answer = conn.execute(query_cookies).fetchall()
            finally:
                # Always release the handle; the original leaked it when
                # execute() raised after a successful connect().
                conn.close()
        except sqlite3.OperationalError as e:
            o.write("Error: %s" % str(e))
            return None
        if len(answer) == 0:
            o.write("No Cookies Found in app_webview/Cookies")
            return None
        for result in answer:
            o.write(cookies_template % (result[0], result[1], result[2], result[3], result[4], result[5]))
if __name__ == '__main__':
# Set DDG application data path for testing
ddg_path, out_path = process_directory_paths()
# Process artifacts
process_appwv_cookies(ddg_path, out_path)
| 25.964912 | 107 | 0.663514 |
11c4b04fb594071b02b7ee34e2b0b343fa536a12 | 3,382 | py | Python | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | null | null | null | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | 1 | 2021-06-01T23:42:14.000Z | 2021-06-01T23:42:14.000Z | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | null | null | null | """
A simple and short Redis performance test.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/8/16'
import argparse
import logging
import os
import redis
import subprocess
import sys
import time
_log = logging.getLogger(__name__)
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter('%(asctime)s %(levelname)10s - %(message)s'))
_log.addHandler(_h)
def verbose_add(parser):
    """Attach a repeatable -v/--verbose flag (stored as ``vb``) to *parser*."""
    parser.add_argument(
        "-v",
        "--verbose",
        dest="vb",
        default=0,
        action="count",
    )
def verbose_set_log(vb, log):
    """Map a -v count onto a logging level and apply it to *log*."""
    level = logging.WARN
    if vb >= 2:
        level = logging.DEBUG
    elif vb >= 1:
        level = logging.INFO
    log.setLevel(level)
if __name__ == '__main__':
sys.exit(main())
| 28.661017 | 104 | 0.59521 |
11c756cc812aa8aa64b2f69c97b3ae507b530f8b | 1,323 | py | Python | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | print("Importing packages... ", end="")
##############################################################################
import wandb
import numpy as np
from keras.datasets import fashion_mnist
import matplotlib.pyplot as plt
wandb.init(project="trail-1")
print("Done!")
##############################################################################
print("Loading data... ", end="")
# Load the dataset
[(x_train, y_train), (x_test, y_test)] = fashion_mnist.load_data()
# Get the number of classes and their name mappings
num_classes = 10
class_mapping = {0: "T-shirt/top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat", 5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle boot"}
print("Done!")
##############################################################################
# Plotting a figure from each class
plt.figure(figsize=[12, 5])
img_list = []
class_list = []
for i in range(num_classes):
    # argmax over the boolean mask returns the index of the first training
    # sample whose label equals i.
    position = np.argmax(y_train==i)
    image = x_train[position,:,:]
    plt.subplot(2, 5, i+1)
    plt.imshow(image)
    plt.title(class_mapping[i])
    img_list.append(image)
    class_list.append(class_mapping[i])
# Log all ten example images (one per class) to Weights & Biases.
wandb.log({"Question 1": [wandb.Image(img, caption=caption) for img, caption in zip(img_list, class_list)]})
##############################################################################
| 35.756757 | 152 | 0.517763 |
11c77e0e125890c44783034eeeb3c9b9a0ff0a7d | 1,386 | py | Python | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : icode_flask_be
# @Package : task
# @Author : jackeroo
# @Time : 2019/11/29 5:25
# @File : task.py
# @Contact :
# @Software : PyCharm
# @Desc :
from app.extensions import celery
from flask_jwt_extended import jwt_required
from app.helper.HttpHelper import HttpHelper
from app.libs.redprint import RedPrint
api = RedPrint('task')
| 27.72 | 69 | 0.554113 |
11c82b11914ac9b51ec458c369a7893fadc1d1d2 | 1,851 | bzl | Python | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | 1 | 2021-09-16T11:57:35.000Z | 2021-09-16T11:57:35.000Z | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | null | null | null | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The LabTools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" defines dependencies for building a buildkite deployment """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("@io_bazel_rules_docker//container:container.bzl", "container_pull")
def buildkite_deps():
    """ download buildkite deps """
    # tini: minimal init process, pinned to v0.19.0 and fetched as a single
    # executable file.
    http_file(
        name = "com_github_krallin_tini",
        downloaded_file_path = "tini",
        urls = ["https://github.com/krallin/tini/releases/download/v0.19.0/tini"],
        sha256 = "93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c",
        executable = True,
    )
    # Buildkite agent binary (v3.31.0, linux-amd64); only the agent binary
    # and its default config are exported from the archive.
    http_archive(
        name = "com_github_buildkite_agent",
        url = "https://github.com/buildkite/agent/releases/download/v3.31.0/buildkite-agent-linux-amd64-3.31.0.tar.gz",
        build_file_content = "exports_files([\"buildkite-agent\", \"buildkite-agent.cfg\"])",
        sha256 = "f8b3b59d1c27e7e2ccc46819e4cafedb6d58ee1fdbfd006b22f34950558e4a27",
    )
    # Base container image, pinned by digest for reproducible builds.
    container_pull(
        name = "bazel_ubuntu_2004",
        registry = "gcr.io",
        repository = "bazel-public/ubuntu2004-nojava",
        digest = "sha256:4fceaeb1734849aa3d08168e1845165c98b3acfc69901cd4bf097f7512764d8f",
    )
| 43.046512 | 119 | 0.688817 |
11cb6ae52142719479c56b52ce2a6eeaa8a094de | 3,855 | py | Python | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | 1 | 2021-09-02T14:21:33.000Z | 2021-09-02T14:21:33.000Z | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | ###
# Copyright (c) 2012-2020, OpenGeoSys Community (http://www.opengeosys.org)
# Distributed under a Modified BSD License.
# See accompanying file LICENSE.txt or
# http://www.opengeosys.org/project/license
###
# Execute this file to generate TESPy network csv files
from tespy import cmp, con, nwk, hlp
from tespy import nwkr
import numpy as np
import pandas as pd
# %% network
btes = nwk.network(fluids=['water'],
                   T_unit='K',
                   p_unit='bar',
                   h_unit='kJ / kg',
                   T_range=[273.25, 373.15],
                   p_range=[1, 20],
                   h_range=[1, 1000])
# components
fc_in = cmp.source('from consumer inflow')
fc_out = cmp.sink('from consumer outflow')
pu = cmp.pump('pump')
sp = cmp.splitter('splitter', num_out=3)
# bhe:
# NOTE(review): the asserts below compare a literal against itself and are
# always true; they only document the required naming convention.
bhe_name = 'BHE1'
assert 'BHE1' in bhe_name, "BHE should be named with 'BHE1'"
bhe1 = cmp.heat_exchanger_simple(bhe_name)
bhe_name = 'BHE2'
assert 'BHE2' in bhe_name, "BHE should be named with 'BHE2'"
bhe2 = cmp.heat_exchanger_simple(bhe_name)
bhe_name = 'BHE3'
assert 'BHE3' in bhe_name, "BHE should be named with 'BHE3'"
bhe3 = cmp.heat_exchanger_simple(bhe_name)
mg = cmp.merge('merge', num_in=3)
cons = cmp.heat_exchanger_simple('consumer')
# connections
# inlet
fc_pu = con.connection(fc_in, 'out1', pu, 'in1')
pu_sp = con.connection(pu, 'out1', sp, 'in1')
sp_bhe1 = con.connection(sp, 'out1', bhe1, 'in1')
sp_bhe2 = con.connection(sp, 'out2', bhe2, 'in1')
sp_bhe3 = con.connection(sp, 'out3', bhe3, 'in1')
bhe1_mg = con.connection(bhe1, 'out1', mg, 'in1')
bhe2_mg = con.connection(bhe2, 'out1', mg, 'in2')
bhe3_mg = con.connection(bhe3, 'out1', mg, 'in3')
mg_cons = con.connection(mg, 'out1', cons, 'in1')
cons_fc = con.connection(cons, 'out1', fc_out, 'in1')
btes.add_conns(fc_pu, pu_sp, sp_bhe1, sp_bhe2, sp_bhe3, bhe1_mg, bhe2_mg,
               bhe3_mg, mg_cons, cons_fc)
# busses
heat = con.bus('consumer heat demand')
heat.add_comps({'c': cons, 'p': 'P'})
btes.add_busses(heat)
# flow_char
# provide volumetric flow in m^3 / s
x = np.array([
    0.00, 0.00001952885971862, 0.00390577194372, 0.005858657915586,
    0.007811543887448, 0.00976442985931, 0.011717315831173, 0.013670201803035,
    0.015623087774897, 0.017575973746759, 0.019528859718621, 0.021481745690483,
    0.023434631662345, 0.025387517634207, 0.027340403606069, 0.029293289577931,
    0.031246175549793, 0.033199061521655, 0.035151947493517, 0.037104833465379,
    0.039057719437241, 0.041010605409104, 0.042963491380966, 0.044916377352828,
    0.04686926332469, 0.048822149296552, 0.050775035268414, 0.052727921240276,
    0.054680807212138, 0.056633693184
])
# provide head in Pa
y = np.array([
    0.47782539, 0.47725723, 0.47555274, 0.47271192, 0.46873478, 0.46362130,
    0.45737151, 0.44998538, 0.44146293, 0.43180416, 0.4220905, 0.40907762,
    0.39600986, 0.38180578, 0.36646537, 0.34998863, 0.33237557, 0.31362618,
    0.29374046, 0.27271841, 0.25056004, 0.22726535, 0.20283432, 0.17726697,
    0.15056329, 0.12272329, 0.09374696, 0.06363430, 0.03238531, 0.00000000
]) * 1e5
# Pump flow characteristic built from the head-vs-flow pairs above.
f = hlp.dc_cc(x=x, y=y, is_set=True)
pu.set_attr(flow_char=f)
# components parameterization
# system inlet
inflow_head = 2 # bar
fc_pu.set_attr(p=inflow_head, m=0.6, fluid={'water': 1})
# pump
pu.set_attr(eta_s=0.90)
# bhes
bhe1.set_attr(D=0.02733, L=100, ks=0.00001)
bhe2.set_attr(D=0.02733, L=100, ks=0.00001)
bhe3.set_attr(D=0.02733, L=100, ks=0.00001)
# consumer
cons.set_attr(D=0.2, L=20, ks=0.00001)
# connection parametrization
# Tin: inlet enthalpy is tied to the consumer outlet enthalpy
pu_sp.set_attr(h=con.ref(cons_fc, 1, 0))
# for BHEs:
# Tout:
bhe1_mg.set_attr(T=303.15)
bhe2_mg.set_attr(T=303.15)
bhe3_mg.set_attr(T=303.15)
# consumer heat demand
heat.set_attr(P=-3000) # W
# solve
btes.set_printoptions(print_level='info')
btes.solve('design')
# save to csv:
btes.save('tespy_nw', structure=True)
| 29.204545 | 79 | 0.692866 |
11cc4762ea46108968ee8aa2c98fc1627da5eca3 | 981 | py | Python | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | import py
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
from pypy.rpython.lltypesystem import lltype
from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests, FUNC, FUNC2
from ctypes import cast, c_int, c_void_p, CFUNCTYPE
from pypy.jit.codegen.ppc import instruction as insn
# for the individual tests see
# ====> ../../test/rgenop_tests.py
| 29.727273 | 79 | 0.755352 |
11cd2cba6c6fa6a758300d6008e0f69f4e32d609 | 996 | py | Python | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .models import TestModel
import json
import redis
import time
redis_cli = redis.Redis(host='127.0.0.1', port=6379, db=0)
| 29.294118 | 73 | 0.638554 |
11cf52ea9a3f1fafc7387cfc0418073a1858bb56 | 1,114 | py | Python | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 1 | 2021-01-20T00:37:31.000Z | 2021-01-20T00:37:31.000Z | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 17 | 2021-03-03T14:48:54.000Z | 2021-09-08T15:52:50.000Z | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 2 | 2021-03-05T12:23:20.000Z | 2021-03-29T11:49:53.000Z | #%% [markdown]
# # Comparing approaches to feedforwardness ordering
# For evaluating feedforwardness, we have:
# - 4 networks
# - Axo-dendritic (AD)
# - Axo-axonic (AA)
# - Dendro-dendritic (DD)
# - Dendro-axonic (DA)
# - 4+ algorithms for finding an ordering
# - Signal flow (SF)
# - Spring rank (SR)
# - Graph match flow (GMF)
# - Linear regression flow (LRF)
# - Others...
# - SyncRank
# - SVD based, introduced in SyncRank paper, and a few regularized follow ups
# - 1+ test statistic for feedforwardness
# - Proportion of edges in upper triangle after sorting ($p_{upper}$)
# - Others...
# - Spring rank energy
#
# This notebook compares the performance of the different ordering algorithms on the
# same data, as well as the ordering for each of the 4 networks predicted by a single
# algorithm.
#%% [markdown]
# ## Different algorithms, same dataset
#%% [markdown]
# ### Plot pairsplots of the ranks from each algorithm
#%% [markdown]
# ## Different datasets, same algorithm
#%% [markdown]
# ## Plot the adjacency matrices sorted by each algorithm
| 29.315789 | 85 | 0.672352 |
11d1192c076a5c79df7f15736899d5d72fa6cb5f | 1,401 | py | Python | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | import logging
import json
import os
# Initialize logger
logger = logging.getLogger(__name__)
if __name__ == '__main__':
blockprocessedconfig = './config/blockprocessed.json'
bm = BlockManager(blockprocessedconfig,300000)
bm.set_processedblock(242443)
print(bm.get_processedblock())
| 31.133333 | 72 | 0.660243 |
11d3d683bc5376ecd600cfbd620489e72ca787ca | 5,299 | py | Python | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | 2 | 2017-06-16T19:18:53.000Z | 2019-04-18T02:11:45.000Z | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | null | null | null | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 20:35:49 2017
@author: wrightad
"""
import numpy as N
import matplotlib.pyplot as plt
def rmse(v1,v2):
    '''
    rmse(v1,v2) - Root-mean-square error between two equal-shape vectors

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        - Computes sqrt(sum((v1 - v2)**2) / len(v1))
        - On a shape mismatch a warning is printed and None is returned

    Inputs:
        v1 - Numpy 1-dimensional array of arbitrary length
        v2 - Numpy 1-dimensional array with the same shape as v1
    Output:
        rms, the RMSE value, or None when the shapes differ
    '''
    if v1.shape != v2.shape:
        print('Dimension Mismatch: v1.shape ~= v2.shape!')
        return None
    residual = v1 - v2
    mean_square = N.sum(residual ** 2) / v1.shape[0]
    return N.sqrt(mean_square)
def sid(v1,v2):
    '''
    sid(v1,v2) - Spectral Information Divergence (SID) between two vectors

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        - Normalises each vector into a discrete probability distribution
          and returns the symmetric sum of relative entropies
          D(p||q) + D(q||p). Vectors must be the same length.

    Reference:
        Chang, C.-I. (2000), An information-theoretic approach to spectral
        variability, similarity, and discrimination for hyperspectral image
        analysis, Inf. Theory, IEEE Trans., 46(5), 1927-1932.

    Inputs:
        v1 - Numpy 1-dimensional array of arbitrary length
        v2 - Numpy 1-dimensional array with a length equal to that of v1
    Output:
        The SID value for the comparison of the two vectors
    '''
    # Turn both spectra into probability distributions.
    p = v1 / N.sum(v1)
    q = v2 / N.sum(v2)
    # Symmetric divergence: D(p||q) + D(q||p).
    return N.sum(p * N.log(p / q)) + N.sum(q * N.log(q / p))
def scattering_fit(data, function, sigma = 1e-9):
    '''
    Linear least-squares fit for a function of the form y = a * f(x)

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        Brackets the amplitude "a" minimizing sum((a * function - data)**2)
        and iteratively narrows the bracket until the improvement between
        iterations drops below sigma.

    Inputs:
        data, the y data that the function is to be fit to. Should be a
            vector (N,) or a 2D array with one single dimension.
        function, the function to be scaled with a linear factor to fit the
            data, as a row vector (1, N) of the same length as data.
        OPTIONAL:
        sigma, the small value that determines when iteration stops
    Output:
        result, the best-fit curve a * function, squeezed to 1-D
        (note: the fitted curve is returned, not the scalar "a" itself)
    '''
    # Initialize: any change larger than sigma enters the loop; the cost of
    # a = 1 serves as the starting minimum.
    change = 100
    minval = N.sum((data - function) ** 2)
    # The amplitude cannot exceed the largest pointwise ratio data/function.
    Amin = 0
    Amax = (data / function).max()
    while change > sigma:
        # 100 candidate amplitudes spanning the current bracket.
        Arr = N.linspace(Amin, Amax, 100)
        Test = N.matmul(N.reshape(Arr, (-1, 1)), function)  # (100, N) fits
        # Broadcasting replaces the former N.matlib.repmat call: numpy.matlib
        # was never imported here (AttributeError at runtime) and is
        # deprecated upstream.
        diff = Test - N.reshape(data, (1, -1))
        # Squared error of each candidate; track the best and the improvement.
        val = N.sum(diff ** 2, axis=1)
        vali = N.argmin(val)
        change = minval - val.min()
        minval = val.min()
        # Zoom the bracket around the current best amplitude.
        Amin = Arr[max(vali - 2, 0)]
        Amax = Arr[min(vali + 2, len(Arr) - 1)]
    result = N.squeeze(Arr[vali] * function)
    return result
def bodhaine(wvl):
    '''
    bodhaine(wvl) - Rayleigh optical depth via the Bodhaine et al. (1999) fit

    Author: Logan Wright, logan.wright@colorado.edu

    Reference:
        Bodhaine, B. A., N. B. Wood, E. G. Dutton, and J. R. Slusser (1999),
        On Rayleigh optical depth calculations, J. Atmos. Ocean. Technol.,
        16(11 PART 2), 1854-1861.

    Inputs:
        wvl - wavelength(s) in MICROMETERS at which to evaluate the fit
    Output:
        tr - Rayleigh optical depth at each input wavelength
    '''
    # Fit coefficients from Bodhaine et al. (1999).
    scale = 0.0021520
    inv_sq = wvl ** -2
    sq = wvl ** 2
    numerator = 1.0455996 - 341.29061 * inv_sq - 0.90230850 * sq
    denominator = 1 + 0.0027059889 * inv_sq - 85.968563 * sq
    return scale * numerator / denominator
11d4d6356bac4be3d9c387ca7446a41aec22d1ea | 89 | py | Python | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 |
11d5570c1f5104f2732b1bf852cd1144b65ea155 | 61 | py | Python | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 12 | 2020-09-20T17:03:48.000Z | 2022-03-16T06:51:52.000Z | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 5 | 2020-10-24T20:43:45.000Z | 2022-02-25T19:40:47.000Z | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 2 | 2020-10-14T05:18:55.000Z | 2022-02-21T07:34:14.000Z | from .fast_ism import FastISM
from .ism_base import NaiveISM
| 20.333333 | 30 | 0.836066 |
11d7cc28fca1672a8acd01df3e20ebc2577f0edc | 3,127 | py | Python | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 3 | 2015-07-31T20:43:18.000Z | 2019-07-26T13:58:07.000Z | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 9 | 2015-05-13T17:44:42.000Z | 2018-05-27T20:09:55.000Z | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 3 | 2016-08-05T22:43:16.000Z | 2017-06-23T18:35:13.000Z | import numpy as np
from numpy.testing import assert_array_equal
from ..odf import OdfFit, OdfModel, gfa
from dipy.core.triangle_subdivide import (create_half_unit_sphere,
disperse_charges)
from nose.tools import (assert_almost_equal, assert_equal, assert_raises,
assert_true)
| 37.674699 | 73 | 0.688839 |
11d8b360dafd771af3d50fb23f126c256bc27cc5 | 423 | py | Python | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 50007))
s.listen(1)
while True:
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(1024)
if not data:
break
print('data: {}, add: {}'.format(data, addr))
conn.sendall(b'Recieved: ' + data)
| 28.2 | 61 | 0.486998 |
11dc5601e32f2a14e2e6dbd6c443d6cb0fdbc322 | 4,503 | py | Python | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | 1 | 2021-04-08T04:17:04.000Z | 2021-04-08T04:17:04.000Z | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | null | null | null | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
#!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
from tqdm import tqdm
import numpy as np
import cv2
import os
import random
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def props_to_onehot(props):
    """Convert per-row probabilities into one-hot rows (argmax wins)."""
    if isinstance(props, list):
        props = np.array(props)
    winners = np.argmax(props, axis=1)
    n_rows = len(winners)
    onehot = np.zeros((n_rows, props.shape[1]))
    onehot[np.arange(n_rows), winners] = 1
    return onehot
def onehot_to_num(onehot):
    """Map each one-hot row to its class index.

    Returns a float column vector of shape (n_rows, 1) holding, for every
    row, the column position of the single 1 entry. Rows are expected to be
    valid one-hot vectors.
    """
    if isinstance(onehot, list):
        onehot = np.array(onehot)
    # argmax finds the position of the 1 per row in one vectorized pass,
    # replacing the per-row np.argwhere loop that assigned size-1 arrays
    # into scalar slots (and broke on rows without exactly one 1).
    indices = np.argmax(onehot, axis=1)
    return indices.reshape(-1, 1).astype(float)
def draw(preds, x_train_batch,x_label,ax):
    """Plot predicted classes (red) against a 2-cluster k-means baseline (green) on *ax*.

    All three data inputs are detached and moved to CPU numpy arrays first.
    Cluster-0 samples are drawn at height 0, cluster-1 samples at height
    x_label; red points sit at their predicted class index.
    NOTE(review): exact tensor shapes are not visible here (a commented
    print suggests (2000, 6)) -- confirm with callers.
    """
    predsnp = preds.cpu().detach().numpy()
    x_train_batchnp = x_train_batch.cpu().detach().numpy()
    x_label = x_label.cpu().detach().numpy()
    # print(predsnp.shape, x_train_batchnp.shape) # (2000,6)
    # Collapse class probabilities to a single class index per sample.
    predsnp = props_to_onehot(predsnp)
    # print(predsnp)
    predsnp = onehot_to_num(predsnp)
    # print(max(predsnp))
    # kmeans: unsupervised 2-way split of the raw batch as a reference
    estimator = KMeans(n_clusters=2) # build the clusterer
    estimator.fit(x_train_batchnp) # run clustering
    label_pred = estimator.labels_ # per-sample cluster ids
    # Align k-means labels so the first sample always lands in cluster 0.
    if label_pred[0]==1:
        label_pred = 1-label_pred
    # plt.plot(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label,'go-')
    # plt.plot(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1))) * x_label,'go-')
    ax.scatter(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label, c="green", marker='o',s = 10, label='kmeans')
    ax.scatter(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1)))*x_label, c="green", marker='o',s = 10, label='kmeans')
    # One red row per predicted class index.
    for i in range(int(max(predsnp))+1):
        x= np.argwhere(predsnp == i)[:,0]
        y = np.ones(len(x))*i
        # plt.plot(x, y, c = "red")
        ax.scatter(x, y, c = "red", marker='.', label='pred',s = 5)
| 27.457317 | 143 | 0.584055 |
11dc7d7484bc78800544b03df7488f722be7a5ea | 2,729 | py | Python | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | 1 | 2020-11-13T10:53:27.000Z | 2020-11-13T10:53:27.000Z | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | 2 | 2020-06-28T18:17:59.000Z | 2020-12-18T14:11:29.000Z | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
import random
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input FASTQ Directory")
parser.add_argument("-n", "--number", type=int, help="number of reads to sample")
args = parser.parse_args()
random.seed(12)
if not args.number:
print("No sample size specified. Defaulting to five million reads.")
args.number = 5000000
# LIST FILES TO BE DOWN-SAMPLED
fastq_files = os.listdir(args.input)
if int(len(fastq_files)) <= 0:
print("No files in listed directory")
exit()
# CREATE OUTPUT DIRECTORY
output_dir = "subset_"+args.input
os.mkdir(output_dir)
for fastq in fastq_files:
print("\tcounting records....")
with open(args.input+"/"+fastq) as inRead:
num_lines = sum([1 for line in inRead])
print("Num lines:" + str(num_lines) )
if int(num_lines % 4) != 0:
print("FILE " + fastq + " CORRUPTED: Number of lines in FASTQ file not divisible by 4. Is file decompressed?")
exit()
total_records = int(num_lines / 4)
number_to_sample = args.number
print("\tsampling " + str(number_to_sample) + " out of " + str(total_records) + " records")
try:
records_to_keep = set(random.sample(range(total_records), number_to_sample))
record_number = 0
with open(args.input+"/"+fastq) as inFile:
with open(output_dir+"/"+"subset_"+fastq, "w") as output:
for tag in inFile:
bases = next(inFile)
sign = next(inFile)
quality = next(inFile)
if record_number in records_to_keep:
output.write(tag)
output.write(bases)
output.write(sign)
output.write(quality)
record_number += 1
except ValueError as e:
if str(e) == "Sample larger than population or is negative":
print("Desired number of reads is greater than number of reads in original file.")
print("No down-sampling is necessary.")
elif str(e) == "sample larger than population":
print("Desired number of reads is greater than number of reads in original file.")
print("No down-sampling is necessary.")
else:
raise
print("Compressing downsampled reads")
os.system("COPYFILE_DISABLE=1 tar cvfz compressed_reads.tgz "+output_dir)
if os.path.getsize("compressed_reads.tgz") >= 4000000000:
print("WARNING: Your archive contains too many FASTQ files. Max size is 4GB.")
else:
print("Archive file size is ~"+str(os.path.getsize("compressed_reads.tgz")/1000000000)+"GB")
| 35.907895 | 118 | 0.629901 |
11dce67a3e4c4459fb478df3826b5f61db5fbe5f | 777 | py | Python | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from areas.models import Area
from areas.serializers import AreaSerializer, SubAreaSerializer
| 22.2 | 70 | 0.6139 |
11dd785ec5b8ae06d2da8209269f7b856ed4f908 | 173 | py | Python | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | # par ou impar ( condicional simples)
x = int(input('Digite um valor inteiro: '))
if (x % 2 == 0):
print('O numero par!')
if(x % 2 == 1):
print('O numero impar') | 28.833333 | 43 | 0.583815 |
11ded52efac2b1e7adb5a0379b064cebcf41d701 | 900 | py | Python | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | 2 | 2022-03-02T04:05:07.000Z | 2022-03-10T11:49:37.000Z | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | null | null | null | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | null | null | null | import math
from .exceptions import OutOfBoundsException
| 26.470588 | 94 | 0.684444 |
11deda09dc4cd77f3a703e78c0ad5fb515e8de96 | 3,507 | py | Python | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | null | null | null | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | 3 | 2020-02-11T23:59:34.000Z | 2021-06-10T21:19:16.000Z | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
import sys, json, random, hashlib, calendar,time, datetime, os, random
import ast
from cryptography.fernet import Fernet
from django.shortcuts import redirect
from django.http import Http404, HttpResponse
import json
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption,load_pem_private_key
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import serialization,hashes
from cryptography.x509 import load_pem_x509_csr
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.backends import default_backend
| 34.722772 | 115 | 0.741374 |
11df93a40b853400f38b4c489077ebc7674cd549 | 51,584 | py | Python | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | null | null | null | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 6 | 2018-10-30T00:37:20.000Z | 2019-07-23T00:23:18.000Z | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 1 | 2019-06-06T00:54:13.000Z | 2019-06-06T00:54:13.000Z | # UCTP Main Methods
import objects
import ioData
import random
# Set '1' to allow, during the run, the print on terminal of some steps
printSteps = 0
#==============================================================================================================
# Create the first generation of solutions
#-------------------------------------------------------
# Create new Candidate Full-Random
#==============================================================================================================
# Extracts info about what Subj appears in which Prof PrefList
#==============================================================================================================
# Separation of solutions into 2 populations
#==============================================================================================================
# Detect the violation of a Restriction into a candidate
#==============================================================================================================
# Calculate the Fitness of the candidate
#==============================================================================================================
# Calculate Fitness of Infeasible Candidates
#-------------------------------------------------------
# i1: penalty to how many Professors does not have at least one relation with a Subject
#-------------------------------------------------------
# i2: penalty to how many Subjects, related to the same Professor, are teach in the same day, hour and quadri
# i3: penalty to how many Subjects, related to the same Professor, are teach in the same day and quadri but in different campus
#==============================================================================================================
# Calculate Fitness of Feasible Candidates
#-------------------------------------------------------
# f1: how balanced is the distribution of Subjects, considering the "Charge" of each Professor and its Subj related
#-------------------------------------------------------
# f2: how many and which Subjects are the professors preference, considering "prefSubj..." Lists
#-------------------------------------------------------
# f3: how many Subjects are teach in a "Quadri" that is not the same of Professors 'quadriSabbath'
#-------------------------------------------------------
# f4: how many Subjects are teach in the same "Period" of the Professor preference "pPeriod"
#-------------------------------------------------------
# f5: how many Subjects are teach in the same "Campus" of the Professor preference "prefCampus"
#-------------------------------------------------------
# f6: average of relations between profs
#-------------------------------------------------------
# f7: quality of relations (subj appears in some list of pref or/and same quadri)
#==============================================================================================================
# Generate new solutions from the current Infeasible population
#==============================================================================================================
# Generate new solutions from the current Feasible population
#==============================================================================================================
# Make a mutation into a infeasible candidate
#==============================================================================================================
# Make a mutation into a feasible candidate
#==============================================================================================================
# Make a selection of the solutions from all Infeasible Pop.('infPool' and 'solutionsI')
#==============================================================================================================
# Make a Selection of the best solutions from Feasible Pop.
#==============================================================================================================
# Make a rand mutation into a solution
#==============================================================================================================
# Make some deterministic type of adjustment changing some 'bad' relation
#==============================================================================================================
# Make a crossover between two solutions
#==============================================================================================================
# Selection by elitism
#==============================================================================================================
# Make selection of objects by Roulette Wheel
#==============================================================================================================
# Detect the stop condition
#==============================================================================================================
| 53.015416 | 165 | 0.615462 |
11e06d5dd0202783c3e0b55b6bc21794e4419ef3 | 840 | py | Python | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | null | null | null | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | 2 | 2016-11-23T00:36:34.000Z | 2016-11-23T00:39:08.000Z | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | null | null | null |
from pylegos import LogFactory
if __name__ == '__main__':
main() | 25.454545 | 90 | 0.703571 |
11e388ebd565f092940b5ad2ddba87b868dac5de | 3,171 | py | Python | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | microsoft/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | 1 | 2020-01-16T08:45:59.000Z | 2020-01-16T08:45:59.000Z | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | null | null | null | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | 1 | 2021-08-03T00:22:40.000Z | 2021-08-03T00:22:40.000Z | #!/usr/bin/env python
import sys
import os
import time
import test_class
import subprocess
| 32.030303 | 82 | 0.561337 |
11e3f9c5f47a0f678f4c4be381a8ca3e9eaec6d2 | 16,809 | py | Python | LDDMM_Python/lddmm_python/lib/plotly/colors.py | tt6746690/lddmm-ot | 98e45d44969221b0fc8206560d9b7a655ef7e137 | [
"MIT"
] | 48 | 2017-08-04T03:30:22.000Z | 2022-03-09T03:24:11.000Z | LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | [
"MIT"
] | null | null | null | LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | [
"MIT"
] | 15 | 2017-09-30T18:55:48.000Z | 2021-04-27T18:27:55.000Z | """
colors
=====
Functions that manipulate colors and arrays of colors
There are three basic types of color types: rgb, hex and tuple:
rgb - An rgb color is a string of the form 'rgb(a,b,c)' where a, b and c are
floats between 0 and 255 inclusive.
hex - A hex color is a string of the form '#xxxxxx' where each x is a
character that belongs to the set [0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f]. This is
just the list of characters used in the hexadecimal numeric system.
tuple - A tuple color is a 3-tuple of the form (a,b,c) where a, b and c are
floats between 0 and 1 inclusive.
"""
from __future__ import absolute_import
from plotly import exceptions
from numbers import Number
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
PLOTLY_SCALES = {
'Greys': [
[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']
],
'YlGnBu': [
[0, 'rgb(8,29,88)'], [0.125, 'rgb(37,52,148)'],
[0.25, 'rgb(34,94,168)'], [0.375, 'rgb(29,145,192)'],
[0.5, 'rgb(65,182,196)'], [0.625, 'rgb(127,205,187)'],
[0.75, 'rgb(199,233,180)'], [0.875, 'rgb(237,248,217)'],
[1, 'rgb(255,255,217)']
],
'Greens': [
[0, 'rgb(0,68,27)'], [0.125, 'rgb(0,109,44)'],
[0.25, 'rgb(35,139,69)'], [0.375, 'rgb(65,171,93)'],
[0.5, 'rgb(116,196,118)'], [0.625, 'rgb(161,217,155)'],
[0.75, 'rgb(199,233,192)'], [0.875, 'rgb(229,245,224)'],
[1, 'rgb(247,252,245)']
],
'YlOrRd': [
[0, 'rgb(128,0,38)'], [0.125, 'rgb(189,0,38)'],
[0.25, 'rgb(227,26,28)'], [0.375, 'rgb(252,78,42)'],
[0.5, 'rgb(253,141,60)'], [0.625, 'rgb(254,178,76)'],
[0.75, 'rgb(254,217,118)'], [0.875, 'rgb(255,237,160)'],
[1, 'rgb(255,255,204)']
],
'Bluered': [
[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']
],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
'RdBu': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'], [0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'], [1, 'rgb(178,10,28)']
],
# Scale for non-negative numeric values
'Reds': [
[0, 'rgb(220,220,220)'], [0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'], [1, 'rgb(178,10,28)']
],
# Scale for non-positive numeric values
'Blues': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'], [0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'], [1, 'rgb(220,220,220)']
],
'Picnic': [
[0, 'rgb(0,0,255)'], [0.1, 'rgb(51,153,255)'],
[0.2, 'rgb(102,204,255)'], [0.3, 'rgb(153,204,255)'],
[0.4, 'rgb(204,204,255)'], [0.5, 'rgb(255,255,255)'],
[0.6, 'rgb(255,204,255)'], [0.7, 'rgb(255,153,255)'],
[0.8, 'rgb(255,102,204)'], [0.9, 'rgb(255,102,102)'],
[1, 'rgb(255,0,0)']
],
'Rainbow': [
[0, 'rgb(150,0,90)'], [0.125, 'rgb(0,0,200)'],
[0.25, 'rgb(0,25,255)'], [0.375, 'rgb(0,152,255)'],
[0.5, 'rgb(44,255,150)'], [0.625, 'rgb(151,255,0)'],
[0.75, 'rgb(255,234,0)'], [0.875, 'rgb(255,111,0)'],
[1, 'rgb(255,0,0)']
],
'Portland': [
[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'],
[0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'],
[1, 'rgb(217,30,30)']
],
'Jet': [
[0, 'rgb(0,0,131)'], [0.125, 'rgb(0,60,170)'],
[0.375, 'rgb(5,255,255)'], [0.625, 'rgb(255,255,0)'],
[0.875, 'rgb(250,0,0)'], [1, 'rgb(128,0,0)']
],
'Hot': [
[0, 'rgb(0,0,0)'], [0.3, 'rgb(230,0,0)'],
[0.6, 'rgb(255,210,0)'], [1, 'rgb(255,255,255)']
],
'Blackbody': [
[0, 'rgb(0,0,0)'], [0.2, 'rgb(230,0,0)'],
[0.4, 'rgb(230,210,0)'], [0.7, 'rgb(255,255,255)'],
[1, 'rgb(160,200,255)']
],
'Earth': [
[0, 'rgb(0,0,130)'], [0.1, 'rgb(0,180,180)'],
[0.2, 'rgb(40,210,40)'], [0.4, 'rgb(230,230,50)'],
[0.6, 'rgb(120,70,20)'], [1, 'rgb(255,255,255)']
],
'Electric': [
[0, 'rgb(0,0,0)'], [0.15, 'rgb(30,0,100)'],
[0.4, 'rgb(120,0,100)'], [0.6, 'rgb(160,90,0)'],
[0.8, 'rgb(230,200,0)'], [1, 'rgb(255,250,220)']
],
'Viridis': [
[0, '#440154'], [0.06274509803921569, '#48186a'],
[0.12549019607843137, '#472d7b'], [0.18823529411764706, '#424086'],
[0.25098039215686274, '#3b528b'], [0.3137254901960784, '#33638d'],
[0.3764705882352941, '#2c728e'], [0.4392156862745098, '#26828e'],
[0.5019607843137255, '#21918c'], [0.5647058823529412, '#1fa088'],
[0.6274509803921569, '#28ae80'], [0.6901960784313725, '#3fbc73'],
[0.7529411764705882, '#5ec962'], [0.8156862745098039, '#84d44b'],
[0.8784313725490196, '#addc30'], [0.9411764705882353, '#d8e219'],
[1, '#fde725']
]
}
def color_parser(colors, function):
"""
Takes color(s) and a function and applies the function on the color(s)
In particular, this function identifies whether the given color object
is an iterable or not and applies the given color-parsing function to
the color or iterable of colors. If given an iterable, it will only be
able to work with it if all items in the iterable are of the same type
- rgb string, hex string or tuple
"""
if isinstance(colors, str):
return function(colors)
if isinstance(colors, tuple) and isinstance(colors[0], Number):
return function(colors)
if hasattr(colors, '__iter__'):
if isinstance(colors, tuple):
new_color_tuple = tuple(function(item) for item in colors)
return new_color_tuple
else:
new_color_list = [function(item) for item in colors]
return new_color_list
def validate_colors(colors):
"""
Validates color(s) and returns an error for invalid colors
"""
colors_list = []
if isinstance(colors, str):
if colors in PLOTLY_SCALES:
return
elif 'rgb' in colors or '#' in colors:
colors_list = [colors]
else:
raise exceptions.PlotlyError(
"If your colors variable is a string, it must be a "
"Plotly scale, an rgb color or a hex color."
)
elif isinstance(colors, tuple):
if isinstance(colors[0], Number):
colors_list = [colors]
else:
colors_list = list(colors)
if isinstance(colors, dict):
colors_list.extend(colors.values())
elif isinstance(colors, list):
colors_list = colors
# Validate colors in colors_list
for j, each_color in enumerate(colors_list):
if 'rgb' in each_color:
each_color = color_parser(
each_color, unlabel_rgb
)
for value in each_color:
if value > 255.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your rgb colors "
"tuples cannot exceed 255.0."
)
elif '#' in each_color:
each_color = color_parser(
each_color, hex_to_rgb
)
elif isinstance(each_color, tuple):
for value in each_color:
if value > 1.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your colors tuples "
"cannot exceed 1.0."
)
return colors
def convert_colors_to_same_type(colors, colortype='rgb'):
"""
Converts color(s) to the specified color type
Takes a single color or an iterable of colors and outputs a list of the
color(s) converted all to an rgb or tuple color type. If colors is a
Plotly Scale name then the cooresponding colorscale will be outputted and
colortype will not be applicable
"""
colors_list = []
if isinstance(colors, str):
if colors in PLOTLY_SCALES:
return PLOTLY_SCALES[colors]
elif 'rgb' in colors or '#' in colors:
colors_list = [colors]
else:
raise exceptions.PlotlyError(
"If your colors variable is a string, it must be a Plotly "
"scale, an rgb color or a hex color.")
elif isinstance(colors, tuple):
if isinstance(colors[0], Number):
colors_list = [colors]
else:
colors_list = list(colors)
elif isinstance(colors, list):
colors_list = colors
# convert all colors to rgb
for j, each_color in enumerate(colors_list):
if '#' in each_color:
each_color = color_parser(
each_color, hex_to_rgb
)
each_color = color_parser(
each_color, label_rgb
)
colors_list[j] = each_color
elif isinstance(each_color, tuple):
each_color = color_parser(
each_color, convert_to_RGB_255
)
each_color = color_parser(
each_color, label_rgb
)
colors_list[j] = each_color
if colortype == 'rgb':
return colors_list
elif colortype == 'tuple':
for j, each_color in enumerate(colors_list):
each_color = color_parser(
each_color, unlabel_rgb
)
each_color = color_parser(
each_color, unconvert_from_RGB_255
)
colors_list[j] = each_color
return colors_list
else:
raise exceptions.PlotlyError("You must select either rgb or tuple "
"for your colortype variable.")
def convert_dict_colors_to_same_type(colors, colortype='rgb'):
"""
Converts color(s) to the specified color type
Takes a single color or an iterable of colors and outputs a list of the
color(s) converted all to an rgb or tuple color type. If colors is a
Plotly Scale name then the cooresponding colorscale will be outputted
"""
for key in colors:
if '#' in colors[key]:
colors[key] = color_parser(
colors[key], hex_to_rgb
)
colors[key] = color_parser(
colors[key], label_rgb
)
elif isinstance(colors[key], tuple):
colors[key] = color_parser(
colors[key], convert_to_RGB_255
)
colors[key] = color_parser(
colors[key], label_rgb
)
if colortype == 'rgb':
return colors
elif colortype == 'tuple':
for key in colors:
colors[key] = color_parser(
colors[key], unlabel_rgb
)
colors[key] = color_parser(
colors[key], unconvert_from_RGB_255
)
return colors
else:
raise exceptions.PlotlyError("You must select either rgb or tuple "
"for your colortype variable.")
def make_colorscale(colors, scale=None):
"""
Makes a colorscale from a list of colors and a scale
Takes a list of colors and scales and constructs a colorscale based
on the colors in sequential order. If 'scale' is left empty, a linear-
interpolated colorscale will be generated. If 'scale' is a specificed
list, it must be the same legnth as colors and must contain all floats
For documentation regarding to the form of the output, see
https://plot.ly/python/reference/#mesh3d-colorscale
"""
colorscale = []
# validate minimum colors length of 2
if len(colors) < 2:
raise exceptions.PlotlyError("You must input a list of colors that "
"has at least two colors.")
if not scale:
scale_incr = 1./(len(colors) - 1)
return [[i * scale_incr, color] for i, color in enumerate(colors)]
else:
# validate scale
if len(colors) != len(scale):
raise exceptions.PlotlyError("The length of colors and scale "
"must be the same.")
if (scale[0] != 0) or (scale[-1] != 1):
raise exceptions.PlotlyError(
"The first and last number in scale must be 0.0 and 1.0 "
"respectively."
)
for j in range(1, len(scale)):
if scale[j] <= scale[j-1]:
raise exceptions.PlotlyError(
"'scale' must be a list that contains an increasing "
"sequence of numbers where the first and last number are"
"0.0 and 1.0 respectively."
)
colorscale = [list(tup) for tup in zip(scale, colors)]
return colorscale
def find_intermediate_color(lowcolor, highcolor, intermed):
"""
Returns the color at a given distance between two colors
This function takes two color tuples, where each element is between 0
and 1, along with a value 0 < intermed < 1 and returns a color that is
intermed-percent from lowcolor to highcolor
"""
diff_0 = float(highcolor[0] - lowcolor[0])
diff_1 = float(highcolor[1] - lowcolor[1])
diff_2 = float(highcolor[2] - lowcolor[2])
inter_colors = (lowcolor[0] + intermed * diff_0,
lowcolor[1] + intermed * diff_1,
lowcolor[2] + intermed * diff_2)
return inter_colors
def unconvert_from_RGB_255(colors):
"""
Return a tuple where each element gets divided by 255
Takes a (list of) color tuple(s) where each element is between 0 and
255. Returns the same tuples where each tuple element is normalized to
a value between 0 and 1
"""
un_rgb_color = (colors[0]/(255.0),
colors[1]/(255.0),
colors[2]/(255.0))
return un_rgb_color
def convert_to_RGB_255(colors):
"""
Multiplies each element of a triplet by 255
"""
return (colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)
def n_colors(lowcolor, highcolor, n_colors):
"""
Splits a low and high color into a list of n_colors colors in it
Accepts two color tuples and returns a list of n_colors colors
which form the intermediate colors between lowcolor and highcolor
from linearly interpolating through RGB space
"""
diff_0 = float(highcolor[0] - lowcolor[0])
incr_0 = diff_0/(n_colors - 1)
diff_1 = float(highcolor[1] - lowcolor[1])
incr_1 = diff_1/(n_colors - 1)
diff_2 = float(highcolor[2] - lowcolor[2])
incr_2 = diff_2/(n_colors - 1)
color_tuples = []
for index in range(n_colors):
new_tuple = (lowcolor[0] + (index * incr_0),
lowcolor[1] + (index * incr_1),
lowcolor[2] + (index * incr_2))
color_tuples.append(new_tuple)
return color_tuples
def label_rgb(colors):
"""
Takes tuple (a, b, c) and returns an rgb color 'rgb(a, b, c)'
"""
return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))
def unlabel_rgb(colors):
"""
Takes rgb color(s) 'rgb(a, b, c)' and returns tuple(s) (a, b, c)
This function takes either an 'rgb(a, b, c)' color or a list of
such colors and returns the color tuples in tuple(s) (a, b, c)
"""
str_vals = ''
for index in range(len(colors)):
try:
float(colors[index])
str_vals = str_vals + colors[index]
except ValueError:
if colors[index] == ',' or colors[index] == '.':
str_vals = str_vals + colors[index]
str_vals = str_vals + ','
numbers = []
str_num = ''
for char in str_vals:
if char != ',':
str_num = str_num + char
else:
numbers.append(float(str_num))
str_num = ''
return (numbers[0], numbers[1], numbers[2])
def hex_to_rgb(value):
"""
Calculates rgb values from a hex color code.
:param (string) value: Hex color string
:rtype (tuple) (r_value, g_value, b_value): tuple of rgb values
"""
value = value.lstrip('#')
hex_total_length = len(value)
rgb_section_length = hex_total_length // 3
return tuple(int(value[i:i + rgb_section_length], 16)
for i in range(0, hex_total_length, rgb_section_length))
def colorscale_to_colors(colorscale):
"""
Converts a colorscale into a list of colors
"""
color_list = []
for color in colorscale:
color_list.append(color[1])
return color_list
| 32.638835 | 77 | 0.55268 |
11e3feaa8eddda799c32e0dc2f9c36ee4b41ba9c | 420 | py | Python | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | 1 | 2022-01-26T12:52:33.000Z | 2022-01-26T12:52:33.000Z | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | null | null | null | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | null | null | null | # used by Matcher
RECEIVE_KEY = "_receive_{id}"
LAST_RECEIVE_KEY = "_last_receive"
ARG_KEY = "{key}"
REJECT_TARGET = "_current_target"
REJECT_CACHE_TARGET = "_next_target"
# used by Rule
PREFIX_KEY = "_prefix"
CMD_KEY = "command"
RAW_CMD_KEY = "raw_command"
CMD_ARG_KEY = "command_arg"
SHELL_ARGS = "_args"
SHELL_ARGV = "_argv"
REGEX_MATCHED = "_matched"
REGEX_GROUP = "_matched_groups"
REGEX_DICT = "_matched_dict"
| 20 | 36 | 0.757143 |
11e42e8d7b995de0658689f4a01d37ca6d28aa0b | 2,124 | py | Python | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.contrib.auth import authenticate, login, logout
from .models import TodoModel
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from django.urls import reverse_lazy
def signupview(request):
if request.method == 'POST':
username_data = request.POST['username_data']
password_data = request.POST['password_data']
try:
user = User.objects.create_user(username_data, '', password_data)
except IntegrityError:
return render(request, 'signup.html', {'error': ''})
else:
return render(request, 'signup.html', {})
return render(request, 'signup.html', {})
def loginview(request):
if request.method == 'POST':
username_data = request.POST['username_data']
password_data = request.POST['password_data']
user = authenticate(request, username=username_data,
password=password_data)
if user is not None:
login(request, user)
return redirect('list')
else:
return redirect('login')
return render(request, 'login.html')
def logoutview(request):
logout(request)
return redirect('login')
| 29.5 | 89 | 0.664313 |
11e827caf9a2f6b79a2d0287af4086e1ef14f2b8 | 269 | py | Python | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 4,297 | 2019-05-04T01:29:14.000Z | 2022-03-31T19:28:10.000Z | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 968 | 2019-05-05T16:13:03.000Z | 2022-03-30T13:11:31.000Z | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 1,082 | 2019-05-04T15:55:24.000Z | 2022-03-30T16:27:53.000Z | from ._version import version_info, __version__
from .keplergl import *
| 22.416667 | 47 | 0.624535 |
11ed16385a989b7c743480e1ee477feb796f62cc | 9,845 | py | Python | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | from django.test import tag
from django.core.files import File
from unittest import mock
from iaso import models as m
from iaso.test import APITestCase
| 40.020325 | 159 | 0.622854 |
11f08a8bd257b57737ab450a04da370a5b819540 | 302 | py | Python | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | import world
import api.captcha.captcha_phone
from api.token.jwt_token import JWTToken
"""
Django shortcuts.py
"""
world_instance = world.World.instance()
redis_server = world_instance.redis
captcha_manager = api.captcha.captcha_phone.CaptchaPhone(redis_server)
jwt_cli = JWTToken()
import django.db | 25.166667 | 70 | 0.817881 |
11f229b9297d3ad1a65bef9c394df841a9ccc992 | 6,552 | py | Python | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | null | null | null | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | 3 | 2017-09-15T18:58:21.000Z | 2020-03-24T19:11:16.000Z | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import time
import datetime
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('dl')
NOW = datetime.datetime.now()
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DOWNLOAD_ROOT = os.getcwd()
VERSION = '5.22-61.0'
PANTHER_VERSION = '11.1'
xunit = XUnitReportBuilder('interpro_installer')
if __name__ == '__main__':
try:
interpro()
except Exception:
pass
finally:
# Write out the report
with open(sys.argv[1], 'w') as handle:
handle.write(xunit.serialize())
| 36.808989 | 144 | 0.6308 |
11f2ee6d545351fbf6460813569b0d154e97b751 | 2,572 | py | Python | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 cole Polytechnique Fdrale de Lausanne. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.ops import data_flow_ops, string_ops
from ..common.service import Service
from common.parse import numeric_min_checker, path_exists_checker
import tensorflow as tf
persona_ops = tf.contrib.persona.persona_ops()
from tensorflow.contrib.persona import queues, pipeline
| 35.232877 | 83 | 0.651633 |
11f3026c5b723ebaca4c3ade5e133a02d8fccef0 | 6,423 | py | Python | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | import kivy
from kivy.app import App
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
Builder.load_string("""
""")
if __name__ == '__main__':
Proforma().run()
| 47.932836 | 156 | 0.717422 |
11f30bdb0ea58245a57190b0de64ce5ae30b036d | 1,943 | py | Python | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | import itertools
from io import StringIO
from queue import LifoQueue
inputs = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"  # sample input from the puzzle statement
#data = [int(v) for v in StringIO(inputs).read().split(' ')]
# Real puzzle input: a flat list of space-separated integers describing the tree.
data = [int(v) for v in open("day8.input").read().split(' ')]
# Part 1 (kept for reference):
# idata = iter(data)
# q = LifoQueue()
# tc_metadata = parse_packet(idata, q, 0)
# print(tc_metadata)
# pt2
# Part 2: parse_packet_pt2 (defined elsewhere in this file) consumes the
# integer stream and returns the root node's value as its first element.
idata = iter(data)
q = LifoQueue()
tc_metadata = parse_packet_pt2(idata, q)[0]
print(tc_metadata)
| 30.359375 | 66 | 0.5965 |
11f3952caf0eac585e166a957bfe31975eafdc39 | 2,971 | py | Python | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 12 | 2021-04-06T00:50:41.000Z | 2022-03-23T03:27:02.000Z | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 7 | 2021-07-13T12:47:41.000Z | 2022-03-05T15:08:51.000Z | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 4 | 2021-07-15T12:22:06.000Z | 2022-03-01T03:12:36.000Z | import json
import os
import cv2
import numpy as np
from dataset_utils.geometry import computeCameraCalibration
if __name__ == '__main__':
vid_dir = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/dataset/session5_left'
result_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/results/session5_left/system_SochorCVIU_Edgelets_BBScale_Reg.json'
get_pts(vid_dir, result_path)
| 32.648352 | 126 | 0.582969 |
11f661d7ecc4156688dc11d7e9f3988ffd85ee03 | 1,292 | py | Python | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | null | null | null | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | null | null | null | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | 1 | 2019-08-20T13:19:16.000Z | 2019-08-20T13:19:16.000Z | #!/usr/bin/python2
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| 23.490909 | 127 | 0.69195 |
11f7ea214def9b4195dd57f26ec40b4d4be26bb2 | 972 | py | Python | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 7 | 2019-10-15T09:16:41.000Z | 2021-09-24T11:28:45.000Z | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 3 | 2020-10-22T14:27:22.000Z | 2021-11-15T17:46:49.000Z | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 6 | 2019-07-22T05:47:10.000Z | 2021-10-24T02:06:26.000Z | """@package modified_cholesky
Function to perform the modified Cholesky decomposition.
"""
import numpy as np
import numpy.linalg as la
def modified_cholesky(a):
    """Return a positive definite version of the symmetric matrix A.

    If A is already positive definite it is returned unchanged together
    with a shift factor of 0. Otherwise an increasing multiple of the
    identity is added until a Cholesky factorization succeeds, or the
    iteration cap is reached.

    :param np.array a: (n, n) The symmetric matrix, A.
    :return list: [np.array (n, n), float] Positive definite matrix, and the factor required to do so.

    See Bierlaire (2015) Alg. 11.7, pg. 278.
    """
    iteration = 0
    maximum_iterations = 10
    identity = np.identity(len(a))
    a_mod = a * 1.0  # copy so the caller's matrix is never mutated
    identity_factor = 0.
    successful = False
    while not successful and iteration < maximum_iterations:
        # Bug fix: the counter was never advanced, so the maximum_iterations
        # guard was dead code and the loop was bounded only by factor growth.
        iteration += 1
        try:
            la.cholesky(a_mod)
            successful = True
        except la.LinAlgError:
            # Seed the shift from the Frobenius norm, then double it on each
            # failed attempt (Bierlaire Alg. 11.7).
            identity_factor = np.max([2 * identity_factor, 0.5 * la.norm(a, 'fro')])
            a_mod = a + identity_factor * identity
    return [a_mod, identity_factor]
| 31.354839 | 106 | 0.653292 |
11f88b21b7c293777ac5db6fccf25f3653b3095f | 1,528 | py | Python | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python Slack Bot docker parser class for use with the HB Bot
"""
import os
import re
# Docker top-level actions this bot is allowed to relay.
DOCKER_SUPPORTED = ["image", "container", "help"]
SUBCOMMAND_SUPPORTED = ["ls",]
def parse_command(incoming_text):
    """
    incoming_text: A text string to parse for docker commands
    returns: a fully validated docker command, or the usage message when
             no supported docker action is found
    """
    docker_action = ''
    # Grab the first word that follows "docker " in the incoming text.
    parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
    match_obj = parse1.search(incoming_text)
    if match_obj:
        docker_action = match_obj.group()
        print("Got docker action %s" % (docker_action,))
    if docker_action and docker_action in DOCKER_SUPPORTED:
        # Use this type of code if we want to limit the docker commands
        #parse2 = re.compile(r"(?<=\b%s\s)(\w+)" % docker_action)
        #match_obj = parse2.search(incoming_text)
        #if match_obj:
        #    docker_subcommand = match_obj.group()
        #    if docker_subcommand in SUBCOMMAND_SUPPORTED:
        #        return "docker %s %s" % (docker_action, docker_subcommand)
        # Otherwise let it fly and return help if it pumps mud.
        # Bug fix: this was a Python 2 print statement, which is a
        # SyntaxError under Python 3 (the rest of the file uses print()).
        print("returning docker %s%s" % (docker_action, incoming_text[match_obj.end():]))
        return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
    return docker_usage_message()
| 35.534884 | 102 | 0.650524 |
11f9627891295b2fef341d114f820b8acfae0f4d | 1,713 | py | Python | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | # Entregar arquivo com o cdigo da funo teste_cartela
#
# Verificador de cartela de bingo
#
# CRIAR UMA FUNO DO TIPO:
#
# def teste_cartela(numeros_bilhete,numeros_sorteados): #numeros_bilhete e numeros_sorteados tipo lista com valores inteiros
#
# ...
#
# return([bingo,n_acertos,p_acertos,[numeros_acertados],[numeros_faltantes]]) #retorno tipo lista
#
# ps: a funo deve suportar qualquer tamanho das listas
#
# exemplo1:
#
# bilhete=[1,2,3,4,6]
#
# sorteados=[1,2,3,4,5,6,7,8,9,10]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [true,5,100.0,[1,2,3,4,6],[]]
#
# print(x[1])
#
# 5
#
# exemplo2:
# bilhete=[1,4,7,13,20,22]
#
# sorteados=[11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [False,3,50.0,[13,20,22],[1,4,7]]
#
# print(x[3])
#
# [13,20,22]
bilhete1 = [1, 2, 3, 4, 6]
sorteados1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
bilhete2 = [1, 4, 7, 13, 20, 22]
sorteados2 = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
print(teste_cartela(bilhete1, sorteados1))
| 23.148649 | 124 | 0.669002 |
11fbeaa0cdadcae10084a5b3b7d7792a3d86cf42 | 103 | py | Python | scripts/pymarkovchain_dynamic/__init__.py | jfahrg/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | 3 | 2017-10-16T14:05:47.000Z | 2017-10-23T07:18:46.000Z | scripts/pymarkovchain_dynamic/__init__.py | jfahrg/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | 1 | 2017-10-24T18:11:14.000Z | 2017-10-24T18:11:14.000Z | scripts/pymarkovchain_dynamic/__init__.py | jfde/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | null | null | null | from pymarkovchain_dynamic.MarkovChain import *
from pymarkovchain_dynamic.DynamicMarkovChain import *
| 34.333333 | 54 | 0.883495 |
11fc76302eb18d7762bad32d8a7fb8d4acc13c44 | 3,033 | py | Python | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | from transformers import GPTNeoForCausalLM, GPT2Tokenizer
from fastapi import FastAPI
import re
import json
from pydantic import BaseModel
from typing import Optional
import torch
app = FastAPI()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
morph_path = './Model'
morph_tokenizer = GPT2Tokenizer.from_pretrained(morph_path)
special_tokens = {'bos_token': '<|startoftext|>', 'pad_token': '<PAD>', 'additional_special_tokens':['<DEF>', '<SYLLABLES>', '<NULL>', '<ETY>', '<MORPH>']}
morph_tokenizer.add_special_tokens(special_tokens)
morph_model = GPTNeoForCausalLM.from_pretrained(morph_path).to(device)
# returning WikiMorph output
def get_etymology(ety_txt):
"""Parses text to return a list of dict containing the etymology compound and definitions"""
etys = re.findall('<ETY>.+?(?=<ETY>|$)', ety_txt)
for ety in etys:
compound = re.findall("<ETY>(.+?)(?=<DEF>|$)", ety)[0].strip()
if "<NULL>" not in compound:
ety_dict = {
"Etymology Compound": re.findall("<ETY>(.+?)(?=<DEF>)", ety)[0].strip(),
"Compound Meaning": re.findall("<DEF>(.+)", ety)[0].strip()
}
yield ety_dict
else:
yield {"Etymology Compound": None, "Compound Meaning": None}
def to_dict(generated_txt):
"""Returns a dictionary containing desired items"""
return {
"Word": re.findall('<\|startoftext\|> (.+?)(?= \w )', generated_txt)[0].strip().replace(' ', ''),
"Definition": re.findall("<DEF>(.+?)(?=<SYLLABLES>)", generated_txt)[0].strip(),
"Syllables": re.findall("<SYLLABLES> (.+?)(?=<MORPH>)", generated_txt)[0].strip().split(),
"Morphemes": list(parse_morphemes(re.findall("(<MORPH>.+?)(?=<\|endoftext\|>)", generated_txt)[0].strip()))
}
def get_morpheme_output(word, definition):
"""Calls the GPT-based model to generated morphemes"""
split_word = ' '.join(word)
if definition:
word_def = f'<|startoftext|> {word} {split_word} <DEF> {definition} <SYLLABLES>'
else:
word_def = f'<|startoftext|> {word} {split_word} <DEF> '
tokenized_string = morph_tokenizer.encode(word_def, return_tensors='pt').to(device)
output = morph_model.generate(tokenized_string, max_length=400)
generated_txt = morph_tokenizer.decode(output[0])
return to_dict(generated_txt)
| 40.986486 | 155 | 0.636334 |
11fe5c633fd36a2c77c71b22b430bb0c40ce5ec0 | 504 | py | Python | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | 1 | 2021-04-01T18:42:36.000Z | 2021-04-01T18:42:36.000Z | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | 1 | 2021-02-14T22:36:59.000Z | 2021-02-24T23:33:24.000Z | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | null | null | null | (enrollment)=
# Enrollment and Waitlist
For Dartmouth students, you can track the enrollment status using the ORC timetable.
The course is capped at 20 students to facilitate small group collaboration for the final data science project. Please fill out this Google form (while logged in via your Dartmouth email) if you are interested in joining the waiting list for spots freeing up! [Waitlist form](https://docs.google.com/forms/d/e/1FAIpQLScxTwR9A8gZ1_uvlEzCtsVFaoQnXmaYQq3kNdfG5Tv3ECUrcA/viewform) | 72 | 376 | 0.821429 |
11ffbc12ee29d6ded59501a82368db14e943d2d0 | 1,683 | py | Python | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | import sys, getopt
from zlib import adler32
from PIL import Image
from rnd import Generator, sample
Usage = """
python decode.py <password> <input image file> <output file>
"""
password, image_file, out_file = sys.argv[1:]
password = password.encode("utf-8")
image = Image.open(image_file)
dimx, dimy = image.size
npixels = dimx*dimy
gen = Generator(password)
pixel_list = list(range(npixels))
length_inx = sample(gen, pixel_list, 32)
for inx in length_inx:
pixel_list.remove(inx)
length_bits = read_bits(image, length_inx)
length = frombin(length_bits)
#print ("length:", length_bits, length)
text_inx = sample(gen, pixel_list, length*8)
#print("text_inx:", text_inx[:20])
text_bits = read_bits(image, text_inx)
#print("bits:", text_bits[:20])
open(out_file, "wb").write(bitstotext(text_bits))
| 19.569767 | 60 | 0.565657 |
f504c2cb47e19abd70638d4564e9477e15e1315f | 379 | py | Python | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Member
def index(request):
    """Render the team page, split into advisors and regular members."""
    page_context = {
        'mobile_title_page': 'Equipe',
        'advisors': Member.objects.filter(advisor=True),
        'members': Member.objects.filter(advisor=False),
    }
    return render(request, 'member/index.html', page_context)
| 29.153846 | 56 | 0.683377 |
f506803cc0725d8f77786e4264a390f804bf912b | 447 | py | Python | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import requests
# Command-line arguments: two server endpoints and the initial word to send.
ping = sys.argv[1]
pong = sys.argv[2]
word = sys.argv[3]
# Ensure both endpoints carry a scheme so requests accepts them.
if not ping.startswith('http'):
    ping = 'http://' + ping
if not pong.startswith('http'):
    pong = 'http://' + pong
# Bounce the word between the two servers until one replies without
# "serving"; that final reply is printed as the answer.
while True:
    r = requests.post(ping, data={'food': word})
    answer = r.text
    if 'serving' not in answer:
        print(answer, end='')
        break
    word = answer.split()[2]  # assumes the reply's third token is the next word — TODO confirm protocol
    ping, pong = pong, ping  # alternate which server is hit next
| 17.88 | 48 | 0.592841 |
f506a97a368ef7e32d2a9750ae1f1a3c19762e70 | 437 | py | Python | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | null | null | null | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | 3 | 2021-09-22T18:44:30.000Z | 2022-03-12T00:58:02.000Z | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | null | null | null | from django import forms
from .models import Comment, Rating, RatingStar
| 19.863636 | 47 | 0.631579 |
f50709f23a7db10987ca6be48b2058d6a849444a | 527 | py | Python | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 8 | 2019-02-18T07:56:39.000Z | 2022-03-19T01:14:48.000Z | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 42 | 2018-11-30T14:40:35.000Z | 2022-03-29T11:43:45.000Z | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 4 | 2019-01-09T13:45:53.000Z | 2021-07-06T14:06:52.000Z | from textwrap import dedent
import numpy as np
import subprocess
import sys
import pytest
| 23.954545 | 105 | 0.643264 |
ee921704bb61e5ef659b3c250a5774e67e1fc9fd | 3,433 | py | Python | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from aquilon.consistency.checker import ConsistencyChecker
from aquilon.aqdb.model.branch import Branch
from aquilon.worker.processes import run_git
from aquilon.worker.dbwrappers.branch import merge_into_trash
| 42.9125 | 84 | 0.637635 |
ee92be80023074621572bda99d5be62e1b63d427 | 1,418 | py | Python | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | 7 | 2018-02-05T03:14:08.000Z | 2019-07-28T18:49:41.000Z | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | null | null | null | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | 3 | 2019-05-21T08:58:32.000Z | 2019-12-26T17:03:07.000Z | import json
import os
from extra import MainStart
import threading
import moment
from jinja2 import Environment, PackageLoader
from sanic import Sanic, response
from sanic.log import logger
from termcolor import colored
from conf import config
from spider import bot
env = Environment(loader=PackageLoader(__name__, './template'))
app = Sanic(__name__)
app.static('static_path',config.static)
def run_bot():
    """Create the spider bot and start it (used as a thread target)."""
    spider = bot()
    spider.start()
if __name__ == '__main__':
    # Run the spider on a background thread while Sanic serves requests in
    # the foreground. NOTE(review): MainStart presumably reschedules run_bot
    # using config.delay — confirm against extra.MainStart.
    SPY = threading.Thread(target=MainStart, args=(run_bot, None, config.delay))
    SPY.start()
    app.run(host=config.host,port=config.port)
| 26.754717 | 81 | 0.648096 |
ee969271d5aeb101a427f273a5ac443c35b1fd94 | 1,891 | py | Python | build/lib/pubsubsql/net/testheader.py | pubsubsql/python | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | 1 | 2016-03-17T15:16:16.000Z | 2016-03-17T15:16:16.000Z | src/pubsubsql/net/testheader.py | pubsubsql/pss-py | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | null | null | null | src/pubsubsql/net/testheader.py | pubsubsql/pss-py | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | 1 | 2015-04-24T10:24:42.000Z | 2015-04-24T10:24:42.000Z | #! /usr/bin/env python
"""
Copyright (C) 2014 CompleteDB LLC.
This program is free software: you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0 http://www.apache.org/licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
import unittest
from header import Header as NetHeader
if __name__ == "__main__":
unittest.main()
| 30.5 | 85 | 0.565838 |
ee97351f4698d9e63dc5fa142d72abe0e05ecfef | 5,189 | py | Python | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | 1 | 2020-04-12T17:03:37.000Z | 2020-04-12T17:03:37.000Z | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | null | null | null | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | 1 | 2021-09-21T20:11:50.000Z | 2021-09-21T20:11:50.000Z |
from flask import render_template, flash, send_from_directory, send_file
from app.giturl_class.url_form import UrlForm
from app.giturl_class.download_form import DownloadButton
from app.giturl_class import bp
import json
import os
USE_TEST_FILE = False
if(os.getenv('SM2KG_TEST_MODE') == 'TRUE'):
USE_TEST_FILE = True
print('SM2KG in Test Mode')
else:
from somef import cli
#from somef import cli
dirname = os.path.dirname(__file__)
#class names in json to be added in the header section. Can either add classes, or rearrange the order of these to change the display
headerClassNames = [
"topics",
"languages",
"license",
"forks_url",
"readme_url"
]
#class names in json to be added to body of metadata section, similar to headerClassNames
bodyClassNames = [
"citation",
"installation",
"invocation",
"description"
]
#this is a defaultHeaderClassBreak value, could change it for different formatting
headerClassBreak = int(len(headerClassNames)/2)
#helper function to display array of data in string format
#checks if string starts with https:
| 31.448485 | 163 | 0.582386 |
ee98e5cd0e12c0ac4700f16fd1175dbaba124f1a | 681 | py | Python | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | null | null | null | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | 1 | 2020-09-17T07:46:58.000Z | 2020-09-17T07:46:58.000Z | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | null | null | null | import platform
from enum import Enum, unique
| 17.025 | 55 | 0.50514 |
ee9a90e09df8676533abaa0b7de5176954a8137e | 3,542 | py | Python | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.core.exceptions import PermissionDenied
from django.http import Http404
from .models import Course
from ..mod.models import Moderator
from ..files.models import File
from ..decorators import login
# Create your views here. | 31.625 | 87 | 0.634952 |
ee9c2a66660c6fef43012d8c38ea0b3de96ca075 | 1,134 | py | Python | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 14:26
from __future__ import unicode_literals
from django.db import migrations, models
| 36.580645 | 178 | 0.574956 |
ee9c514425fe52fb6f66f62ee9d6108d08382363 | 5,332 | py | Python | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
def reduce_leftovers(leftovers, reactions):
    """Shrink ``leftovers`` in place by running reactions in reverse.

    Whenever a chemical other than ORE is left over in at least the
    quantity its reaction produces, that many whole reaction batches are
    undone and the reaction's inputs are credited back to ``leftovers``.
    This repeats until no reaction can be reversed any further.

    ``reactions`` maps a chemical name to
    ``(produced_qty, [(qty, ingredient), ...])``.
    """
    reducible = True
    while reducible:
        reducible = False
        refunds = {}
        for chem in leftovers:
            if chem == "ORE":
                continue  # ORE is raw material: nothing to reverse
            batch_size = reactions[chem][0]
            if leftovers[chem] >= batch_size:
                batches = leftovers[chem] // batch_size
                reducible = True
                leftovers[chem] -= batches * batch_size
                for qty, ingredient in reactions[chem][1]:
                    refunds[ingredient] = refunds.get(ingredient, 0) + batches * qty
        # Apply the refunds only after the scan, mirroring a single
        # simultaneous round of reverse reactions.
        for ingredient, qty in refunds.items():
            leftovers[ingredient] = leftovers.get(ingredient, 0) + qty
if __name__ == "__main__":
input = read_input("input/14.txt")
leftovers = {}
required_resources = get_amount("FUEL", 1, input, leftovers)
while need_reaction(required_resources):
i = 0
while required_resources[i][1] == "ORE":
i += 1
required_resources += get_amount(required_resources[i][1], required_resources[i][0], input, leftovers)
required_resources.pop(i)
required_ore = 0
for r in required_resources:
required_ore += r[0]
print("Solution1")
print(required_ore)
max_ore = 1000000000000
without_problems = int(max_ore/required_ore)
leftovers2 = {k:without_problems*leftovers[k] for k in leftovers.keys()}
ore = required_ore*without_problems
fuel = without_problems
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
leftovers2["ORE"] = 0
while without_problems > 0:
without_problems = int((max_ore-ore)/required_ore)
for key, value in leftovers.items():
leftovers2[key] = leftovers2.get(key, 0) + value*without_problems
ore += required_ore*without_problems
fuel += without_problems
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
leftovers2["ORE"] = 0
leftovers2["FUEL"] = 1
reduce_leftovers(leftovers2, input)
ore -= leftovers2.get("ORE", 0)
if ore<=max_ore:
fuel += 1
print("Solution 2")
print(fuel)
| 45.186441 | 693 | 0.564891 |
ee9daa8c3f24ee0e5956c82c505b318b5493b1d6 | 471 | py | Python | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | import time
from actions import Action, action
| 23.55 | 83 | 0.673036 |
ee9fab028e33102060e656a46df7bd6afed90358 | 1,262 | py | Python | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | 28 | 2020-06-23T19:00:58.000Z | 2021-03-26T22:13:07.000Z | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | from ..utils.kfrozendict import kfrozendict
from ..utils.kfrozendict import kassertfrozen
| 28.044444 | 59 | 0.585578 |
ee9ff38e8ac3eaab8a58f8de6b4ed70735c17d0f | 3,878 | py | Python | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | null | null | null | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | 1 | 2022-03-26T17:27:30.000Z | 2022-03-26T17:27:30.000Z | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | null | null | null | '''
Author: Marcel Miljak
Klasse: 5aHEL - HTL Anichstrae
Diplomarbeit: Entwicklung eines Hamster Roboters
Jahrgang: 2021/22
'''
import time
from time import sleep
import RPi.GPIO as GPIO
DIR_2 = 18   # direction pin of the second stepper driver module
DIR_1 = 24   # direction pin of the first stepper driver module
STEP_1 = 25  # step pin of the first stepper driver module
STEP_2 = 23  # step pin of the second stepper driver module
CW = 1   # clockwise rotation
CCW = 0  # counterclockwise rotation
SENS_TRIG = 6  # trigger pin of the HC-SR04 ultrasonic sensor
SENS_ECHO = 5  # echo pin of the HC-SR04 ultrasonic sensor
whole_cycle = 300  # steps per full revolution (original note: "360 / 7.5, which is wrong though")
cycle_left = 548   # steps per quarter turn
delay = 0.005  # seconds between step-pin toggles
def vor():
    '''
    Drive the hamster forward by one full motor revolution (360 deg).
    Before each of the three forward bursts the ultrasonic sensor is
    read; if an obstacle is closer than 20 cm the robot stops, turns
    left once and aborts the forward movement.
    '''
    setup()
    # Both motors forward (clockwise).
    GPIO.output(DIR_1, CW)
    GPIO.output(DIR_2, CW)
    print("Vorwrts...")
    for i in range(3):
        dist = vornFrei()  # free distance ahead in cm
        if dist < 20.0:
            # Obstacle ahead: halt, then reorient to the left.
            print("Achtung - Hinderniss voraus!")
            stop()
            time.sleep(delay)
            linksUm()
            time.sleep(delay)
            break
        else:
            # Pulse both step pins 100 times to advance.
            # NOTE(review): the inner loop reuses the outer loop variable `i`.
            for i in range (100):
                GPIO.output(STEP_1, GPIO.HIGH)
                GPIO.output(STEP_2, GPIO.HIGH)
                sleep(delay)
                GPIO.output(STEP_1, GPIO.LOW)
                GPIO.output(STEP_2, GPIO.LOW)
                sleep(delay)
def linksUm():
    '''
    Turn the hamster 90 degrees to the left by driving the two motors
    in opposite directions.
    '''
    setup()
    # Motor 1 clockwise, motor 2 counter-clockwise -> rotate in place.
    GPIO.output(DIR_1, CW)
    GPIO.output(DIR_2, CCW)
    print("Ausrichtung nach links...")
    # 298 alternating step pulses; empirically calibrated for a 90 deg turn.
    for i in range(298):
        GPIO.output(STEP_1, GPIO.HIGH)
        GPIO.output(STEP_2, GPIO.LOW)
        sleep(delay)
        GPIO.output(STEP_1, GPIO.LOW)
        GPIO.output(STEP_2, GPIO.HIGH)
        sleep(delay)
def rechtsUm():
    '''
    Turn the hamster 90 degrees to the right, realised as three
    consecutive 90 degree left turns (written as a test that the robot
    can actually reorient to the right).
    '''
    setup()
    print("Ausrichtung nach rechts...")
    # Three left quarter-turns amount to one right quarter-turn.
    for _ in range(3):
        linksUm()
    GPIO.cleanup()
def vornFrei():
    '''
    Measure the free distance in front of the hamster with the HC-SR04
    ultrasonic sensor.

    Returns the distance in centimetres -- NOT a boolean, despite the
    original description; callers compare the value to a threshold.
    '''
    setup()
    # 10 microsecond pulse on the trigger pin starts a measurement.
    GPIO.output(SENS_TRIG,1)
    time.sleep(0.00001)
    GPIO.output(SENS_TRIG,0)
    # Busy-wait for the echo pin to go high (start of the echo pulse).
    while GPIO.input(SENS_ECHO) == 0:
        pass
    start = time.time()
    timer = 0
    # Wait for the echo to end, giving up after 12 polls (~1.2 ms plus
    # overhead) as a timeout.
    # NOTE(review): the local name `stop` below shadows the module-level stop().
    while (GPIO.input(SENS_ECHO) == 1 and timer <= 12):
        timer +=1
        time.sleep(0.0001)
    stop = time.time()
    # Echo time * speed of sound (34300 cm/s), halved for the round trip.
    return (stop-start) * 34300 / 2
def stop():
    '''
    Halt both stepper motors by driving all direction and step pins low
    (used when an obstacle is detected in front of the hamster).
    '''
    setup()
    print("Stop des Hamsters...")
    GPIO.output(DIR_1, GPIO.LOW)
    GPIO.output(DIR_2, GPIO.LOW)
    GPIO.output(STEP_1, GPIO.LOW)
    GPIO.output(STEP_2, GPIO.LOW)
'''
def kornDa():
liefert true, wenn sich auf dem Feld, auf der der
Hamster steht, sich mindestens ein Korn befindet.
setup()
print("Check ob Korn auf Feld vorhanden...")
korn_indicator = GPIO.input(SENS_Korn)
if korn_indicator == 0:
print("Es befindet sich ein Korn auf dem Feld")
return True
else:
return False
'''
def nimm():
    '''
    Pick up one grain from the tile the hamster is currently standing on.
    (Not implemented yet.)
    '''
    pass
def gib():
    '''
    Drop one grain from the hamster's mouth onto the tile it is
    currently standing on. (Not implemented yet.)
    '''
    pass
def maulLeer():
    '''
    Return True when the hamster has no grains in its mouth.
    (Not implemented yet.)
    '''
pass | 22.678363 | 70 | 0.581227 |
eea0783e1d150b2a10a9b357db454fabc3181131 | 2,154 | py | Python | common_multicore.py | bgrayburn/itemSetCount | b1d8a9262a0d90a9038ecb7b38c94d3a33f235f1 | [
"MIT"
] | null | null | null | common_multicore.py | bgrayburn/itemSetCount | b1d8a9262a0d90a9038ecb7b38c94d3a33f235f1 | [
"MIT"
] | null | null | null | common_multicore.py | bgrayburn/itemSetCount | b1d8a9262a0d90a9038ecb7b38c94d3a33f235f1 | [
"MIT"
] | null | null | null | from multiprocessing import Process
import pymongo
from itertools import combinations
import csv
import time
import sys
mongo_ip = "192.168.1.127"
db_name = "analysis"
collection_name = "common_items"
max_item_threshold = 20
if __name__ == '__main__':
    mongo_con = pymongo.MongoClient(mongo_ip)
    # Look the database/collection up by name instead of building a
    # Python expression for eval(): equivalent for these dotted names,
    # and safe should db_name or collection_name ever contain
    # untrusted text.
    mongo_col = mongo_con[db_name][collection_name]
    # Start from an empty result collection.
    mongo_col.remove()
    # Launch the worker processes, then poll until they all finish.
    processess = main(sys.argv[1])
    while still_running(processess):
        time.sleep(2)
| 29.916667 | 113 | 0.689415 |
eea2c9cc9d5f2240a45df9dae18361db691a6948 | 3,753 | py | Python | bluebottle/segments/migrations/0024_auto_20220210_1336.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | bluebottle/segments/migrations/0024_auto_20220210_1336.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | bluebottle/segments/migrations/0024_auto_20220210_1336.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.24 on 2022-02-10 12:36
import bluebottle.utils.fields
import bluebottle.utils.validators
import colorfield.fields
from django.db import migrations, models
import django_better_admin_arrayfield.models.fields
| 55.191176 | 425 | 0.679456 |
eea2f57d28acf6796635f1259b4f5d6adad79071 | 7,980 | py | Python | codeball/tests/test_models.py | metrica-sports/codeball | 60bfe54b7898bed87cbbbae9dfc0f3bc49d31025 | [
"MIT"
] | 54 | 2020-09-16T13:09:03.000Z | 2022-03-28T12:32:19.000Z | codeball/tests/test_models.py | metrica-sports/codeball | 60bfe54b7898bed87cbbbae9dfc0f3bc49d31025 | [
"MIT"
] | null | null | null | codeball/tests/test_models.py | metrica-sports/codeball | 60bfe54b7898bed87cbbbae9dfc0f3bc49d31025 | [
"MIT"
] | 9 | 2021-03-28T13:02:57.000Z | 2022-03-24T11:19:06.000Z | import os
import pandas as pd
from kloppy import (
load_epts_tracking_data,
to_pandas,
load_metrica_json_event_data,
load_xml_code_data,
)
from codeball import (
GameDataset,
DataType,
TrackingFrame,
EventsFrame,
CodesFrame,
PossessionsFrame,
BaseFrame,
Zones,
Area,
PatternEvent,
Pattern,
PatternsSet,
)
import codeball.visualizations as vizs
| 30.930233 | 78 | 0.595614 |
eea39007c18df9bb3a13dd73ee8b29fd1990d82d | 2,025 | py | Python | 100dayspython/day004/main.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | 100dayspython/day004/main.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | 100dayspython/day004/main.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | import random
print("Rock, Paper, Scissors!")
player = int(input("What do you choose? Type 0 for Rock, 1 for paper, 2 for Scissors: "))
computer = random.randint(0, 2)
game = ["""
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
""", """
_______
---' ____)____
______)
_______)
_______)
---.__________)
""", """
_______
---' ____)____
______)
__________)
(____)
---.__(___)
"""]
# Show the ASCII art for the player's chosen hand
# (nothing is printed for an out-of-range number).
if player == 0:
    print(game[0])
elif player == 1:
    print(game[1])
elif player == 2:
    print(game[2])
# Show the computer's randomly chosen hand. `computer` always comes from
# randint(0, 2), so it can be used directly as an index; this replaces
# three copies of the same print pair.
print("Computer chose:")
print(game[computer])
# Decide the outcome in a single if/elif chain so that exactly one
# message is printed. (Previously the win/tie chain and the lose/else
# chain were two separate statements, so every valid win or tie round
# ALSO fell into the final else and printed the "404" error art.)
if player == 0 and computer == 2 or player == 1 and computer == 0 or player == 2 and computer == 1:
    print("YOU WIN!")
elif player == 0 and computer == 0 or player == 1 and computer == 1 or player == 2 and computer == 2:
    print("IT'S A TIE!")
elif player == 0 and computer == 1 or player == 1 and computer == 2 or player == 2 and computer == 0:
    print("YOU LOSE!")
else:
    # Reached only for an invalid player number; the error art below
    # is part of this branch.
    print("404! ERROR!")
print("""
\ / _
___,,,
\_[o o]
Invalid Number! C\ _\/
/ _____),_/__
________ / \/ /
_| .| / o /
| | .| / /
\| .| / /
|________| /_ \/
__|___|__ _//\ \
_____|_________|____ \ \ \ \
_| /// \ \
| \ /
| / /
| / /
________________ | /__ /_
bger ...|_|.............. /______\.......""")
| 28.125 | 101 | 0.382222 |
eea423068f1d28596bc373c0840d0c29a2ee48d7 | 3,249 | py | Python | python/evaluation/track_detection/rpn/rpn_eval.py | billy000400/Mu2e_MLTracking | 675e62d844ff8a5ccba9019e316c374c40658101 | [
"MIT"
] | null | null | null | python/evaluation/track_detection/rpn/rpn_eval.py | billy000400/Mu2e_MLTracking | 675e62d844ff8a5ccba9019e316c374c40658101 | [
"MIT"
] | 1 | 2021-01-03T08:57:34.000Z | 2021-01-03T23:41:22.000Z | python/evaluation/track_detection/rpn/rpn_eval.py | billy000400/Mu2e_MLTracking | 675e62d844ff8a5ccba9019e316c374c40658101 | [
"MIT"
] | null | null | null | # Detector (Faster RCNN)
# forward propogate from input to output
# Goal: test if the validation output act as expected
import sys
from pathlib import Path
import pickle
import timeit
from datetime import datetime
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, Dropout, Flatten, TimeDistributed, Reshape, Softmax
from tensorflow.keras.optimizers import Adam
from tensorflow.distribute import MirroredStrategy
from tensorflow.keras.metrics import CategoricalAccuracy
script_dir = Path.cwd().parent.parent.parent.joinpath('frcnn_mc_train')
sys.path.insert(1, str(script_dir))
util_dir = Path.cwd().parent.parent.parent.joinpath('Utility')
sys.path.insert(1, str(util_dir))
from Information import *
from Configuration import frcnn_config
from DataGenerator import DataGeneratorV2
from Layers import *
from Loss import *
from Metric import *
### Using a specific pair of CPU and GPU
# Pin TensorFlow to the second GPU and let its memory grow on demand.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.set_visible_devices(physical_devices[1], 'GPU')
tf.config.experimental.set_memory_growth(physical_devices[1], True)
print(tf.config.experimental.get_visible_devices())
# load configuration object
# The pickled frcnn_config object C provides at least the input shape
# and anchor settings used below.
cwd = Path.cwd()
pickle_path = cwd.joinpath('frcnn.test.config.pickle')
C = pickle.load(open(pickle_path,'rb'))
# re-build model
# RPN head on top of the (frozen, trainable=False) base network: one
# classification output and one box-regression output.
input_layer = Input(C.input_shape)
base_net = C.base_net.get_base_net(input_layer, trainable=False)
rpn_layer = rpn(C.anchor_scales, C.anchor_ratios)
classifier = rpn_layer.classifier(base_net)
regressor = rpn_layer.regression(base_net)
model = Model(inputs=input_layer, outputs = [classifier,regressor])
model.summary()
# load model weights
# by_name=True merges weights from both checkpoints into matching layers.
model.load_weights(str(Path.cwd().joinpath('rpn_mc_00.h5')), by_name=True)
model.load_weights(str(Path.cwd().joinpath('detector_mc_RCNN_dr=0.0.h5')), by_name=True)
# set data generator
val_generator = DataGeneratorV2(C.validation_img_inputs_npy, C.validation_labels_npy, C.validation_deltas_npy, batch_size=8)
# evaluate model
rpn_class_loss = define_rpn_class_loss(1)
rpn_regr_loss = define_rpn_regr_loss(100)
adam = Adam()
model.compile(optimizer=adam, loss={'rpn_out_class' : rpn_class_loss,\
'rpn_out_regress': rpn_regr_loss},\
metrics={'rpn_out_class': [unmasked_binary_accuracy, positive_number],\
'rpn_out_regress': unmasked_IoU})
result = model.evaluate(x=val_generator, return_dict=True, callbacks=[StdCallback()])
result = {key:[value] for key, value in result.items()}
df = pd.DataFrame.from_dict(result)
df.to_csv(Path.cwd().joinpath('result.csv'), index=None)
| 36.505618 | 124 | 0.748538 |
eea44ef30a81ba67ad14a68694b3cdcb38fe067e | 1,686 | py | Python | cv_workshops/6-day/2-clazz.py | afterloe/opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 5 | 2020-03-13T07:34:30.000Z | 2021-10-01T03:03:05.000Z | cv_workshops/6-day/2-clazz.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | null | null | null | cv_workshops/6-day/2-clazz.py | afterloe/Opencv-practice | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | [
"MIT"
] | 1 | 2020-03-01T13:21:43.000Z | 2020-03-01T13:21:43.000Z | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
cv.moments(contours, binaryImage)
- contours:
- binaryImage: bool, default False
"""
if "__main__" == __name__:
    # Script entry point; main() is defined in a part of the file that
    # is not visible in this excerpt.
    main()
| 31.811321 | 87 | 0.541518 |
eea747f6a5f58fa9f7cb6e82312ed9dadca75ac3 | 1,967 | py | Python | war.py | Eduardojvr/Space_Atack_Game | f37e1891bf00af71f3c1758a0288a6b0b830bb9e | [
"MIT"
] | null | null | null | war.py | Eduardojvr/Space_Atack_Game | f37e1891bf00af71f3c1758a0288a6b0b830bb9e | [
"MIT"
] | null | null | null | war.py | Eduardojvr/Space_Atack_Game | f37e1891bf00af71f3c1758a0288a6b0b830bb9e | [
"MIT"
] | null | null | null | from settings import Settings
from ship import Ship
import pygame
import sys
from trap import Trap
from time import clock
from random import randint
################################ Main ################################
# Script entry point; run_game() is defined in a part of the file that
# is not visible in this excerpt.
run_game()
| 32.245902 | 101 | 0.516523 |
eea8fc748971275806d47350049795a3a98b474a | 1,463 | py | Python | Project-2/doc/contingency_mat_parser.py | TooSchoolForCool/EE219-Larger-Scale-Data-Mining | 9a42c88169ace88f9b652d0e174c7f641fcc522e | [
"Apache-2.0"
] | null | null | null | Project-2/doc/contingency_mat_parser.py | TooSchoolForCool/EE219-Larger-Scale-Data-Mining | 9a42c88169ace88f9b652d0e174c7f641fcc522e | [
"Apache-2.0"
] | 12 | 2020-01-28T22:09:15.000Z | 2022-03-11T23:16:26.000Z | Project-2/doc/contingency_mat_parser.py | TooSchoolForCool/EE219-Larger-Scale-Data-Mining | 9a42c88169ace88f9b652d0e174c7f641fcc522e | [
"Apache-2.0"
] | null | null | null | import sys
import argparse
if __name__ == '__main__':
main() | 21.835821 | 82 | 0.514012 |
eea9326c5e16b9ddd8185aff0917cab86602e465 | 5,426 | py | Python | voldemort_client/helper.py | mirko-lelansky/voldemort-client | a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9 | [
"Apache-2.0"
] | null | null | null | voldemort_client/helper.py | mirko-lelansky/voldemort-client | a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9 | [
"Apache-2.0"
] | null | null | null | voldemort_client/helper.py | mirko-lelansky/voldemort-client | a2839a0cc50ca4fdc5bdb36b2df3a3cf7f7d9db9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Mirko Lelansky <mlelansky@mail.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains some helper methods for building parts of http requests.
"""
from datetime import datetime
import simplejson as json
from voldemort_client.exception import VoldemortError
def create_vector_clock(node_id, timeout):
    """This method builds the initial vector clock for a new key.

    Parameters
    ----------
    node_id : int
        the id of one node in the cluster
    timeout : int
        the expire timeout of the key

    Returns
    -------
    dict
        the vector clock as dictionary

    Raises
    ------
    ValueError
        if node_id or timeout is None
    """
    # Guard clause instead of if/else; also fixes the grammar of the
    # former error message ("must gave" -> "must give").
    if node_id is None or timeout is None:
        raise ValueError("You must give the node id and the timeout.")
    return {
        "versions": [{"nodeId": node_id, "version": 1}],
        "timestamp": timeout
    }
def merge_vector_clock(vector_clock, node_id, timeout=None):
    """Record one more write by ``node_id`` in an existing vector clock.

    The version counter of ``node_id`` is incremented (or created with
    version 1) and, when given, the expiry timestamp is refreshed.

    Parameters
    ----------
    vector_clock : dict
        clock to update, with "versions" and "timestamp" entries
    node_id : int
        cluster node that performed the write
    timeout : int, optional
        new expire timeout of the key

    Returns
    -------
    dict
        the updated vector clock (mutated in place)
    """
    if vector_clock is None or node_id is None:
        raise ValueError("You need the vector clock, timeout and the node id.")
    versions = vector_clock["versions"]
    matches = [entry for entry in versions if entry["nodeId"] == node_id]
    if not matches:
        # First write from this node: start its counter at 1.
        versions.append({"nodeId": node_id, "version": 1})
    elif len(matches) == 1:
        entry = matches[0]
        entry["version"] += 1
        # Preserve the original ordering behaviour: the touched entry is
        # moved to the end of the versions list.
        versions.remove(entry)
        versions.append(entry)
    else:
        raise VoldemortError("Only one version map per node is allowed.")
    vector_clock["versions"] = versions
    if timeout is not None:
        vector_clock["timestamp"] = timeout
    return vector_clock
def build_get_headers(request_timeout):
    """Build the HTTP headers used by read (GET) requests.

    Parameters
    ----------
    request_timeout : int
        time budget for the request in milliseconds

    Returns
    -------
    dict
        the headers as dictionary
    """
    # Current wall-clock time marks when the request originated.
    now = datetime.now().timestamp()
    headers = {}
    headers["X-VOLD-Request-Timeout-ms"] = str(int(request_timeout))
    headers["X-VOLD-Request-Origin-Time-ms"] = str(int(now))
    return headers
def build_delete_headers(request_timeout, vector_clock):
    """Build the HTTP headers used by delete requests.

    Parameters
    ----------
    request_timeout : int
        time budget for the request in milliseconds
    vector_clock : dict
        version of the key that should be deleted

    Returns
    -------
    dict
        the headers as dictionary
    """
    # Delete requests are GET headers plus the serialized vector clock.
    headers = build_get_headers(request_timeout)
    headers.update({"X-VOLD-Vector-Clock": json.dumps(vector_clock)})
    return headers
def build_set_headers(request_timeout, vector_clock, content_type="text/plain"):
    """Build the HTTP headers used by create/update (set) requests.

    Parameters
    ----------
    request_timeout : int
        time budget for the request in milliseconds
    vector_clock : dict
        version of the key that should be created or updated
    content_type : str
        MIME type of the value (defaults to plain text)

    Returns
    -------
    dict
        the headers as dictionary
    """
    # Set requests reuse the delete headers and add the payload type.
    headers = build_delete_headers(request_timeout, vector_clock)
    headers.update({"Content-Type": content_type})
    return headers
def build_version_headers(request_timeout):
    """Build the HTTP headers used by version requests.

    Parameters
    ----------
    request_timeout : int
        time budget for the request in milliseconds

    Returns
    -------
    dict
        the headers as dictionary
    """
    # A version request is a GET request with an (empty) marker header.
    headers = build_get_headers(request_timeout)
    headers.update({"X-VOLD-Get-Version": ""})
    return headers
def build_url(url, store_name, key):
    """Join base url, store name and key part into one REST-API url.

    Parameters
    ----------
    url : str
        the base url
    store_name : str
        the name of the voldemort store
    key : str
        the url part which represents the key or keys

    Returns
    -------
    str
        the combined url of the REST-API
    """
    # Equivalent to the former "%s/%s/%s" formatting: each part is
    # stringified and the three parts are slash-separated.
    return "/".join([str(url), str(store_name), str(key)])
| 29.32973 | 82 | 0.651493 |
eea9c161475ffd63195c5ca94c42455b4deb9625 | 1,581 | py | Python | src/reddack/exceptions.py | diatomicDisaster/Reddit-Slackbot | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | null | null | null | src/reddack/exceptions.py | diatomicDisaster/Reddit-Slackbot | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | 10 | 2022-02-21T01:11:20.000Z | 2022-02-22T18:13:00.000Z | src/reddack/exceptions.py | diatomicDisaster/redack | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | null | null | null | from __future__ import (
annotations,
)
| 26.79661 | 86 | 0.573688 |
eeaa2be76b33b3286d73455fcb963e240ddf8af4 | 7,276 | py | Python | cid/cli/cli_generator.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | 1 | 2017-09-15T06:14:54.000Z | 2017-09-15T06:14:54.000Z | cid/cli/cli_generator.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | null | null | null | cid/cli/cli_generator.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | null | null | null | from collections import defaultdict
from os import makedirs
from os.path import realpath, join, dirname, isdir, exists
from shutil import copy
from jinja2 import Environment, FileSystemLoader
from cid.cli.cli_model_specs import CliModelSpecs
from cid.cli import cli_post_processing
from cid.parser.cid_parser import parse
from cid.common.cid_model_processor import CidModelProcessor
from cid.common.utils import *
_cli_templates_path = join(dirname(realpath(__file__)), 'templates')
_cli_framework_path = join(dirname(realpath(__file__)), 'framework')
# ------------------------------- JINJA FILTERS -------------------------------
# ------------------------------- GENERATOR FUNCTIONS -------------------------------
| 42.8 | 177 | 0.67276 |
eeaa72a12bf7e9c9d8b1d3537dc9a129425ee115 | 2,037 | py | Python | container/sample-inf1/inf1_mx.py | yunma10/neo-ai-dlr | 1f5c65d9bf7155c016e5d2f78d273755760a4f2a | [
"Apache-2.0"
] | 446 | 2019-01-24T02:04:17.000Z | 2022-03-16T13:45:32.000Z | container/sample-inf1/inf1_mx.py | yunma10/neo-ai-dlr | 1f5c65d9bf7155c016e5d2f78d273755760a4f2a | [
"Apache-2.0"
] | 179 | 2019-01-24T10:03:34.000Z | 2022-03-19T02:06:56.000Z | container/sample-inf1/inf1_mx.py | yunma10/neo-ai-dlr | 1f5c65d9bf7155c016e5d2f78d273755760a4f2a | [
"Apache-2.0"
] | 111 | 2019-01-24T20:51:45.000Z | 2022-02-18T06:22:40.000Z | import mxnet as mx
#import neomxnet
import os
import json
import numpy as np
from collections import namedtuple
import os
dtype='float32'
Batch = namedtuple('Batch', ['data'])
ctx = mx.neuron()
is_gpu = False
def transform_fn(mod, img, input_content_type, output_content_type):
    '''
    Inference handler: decode an encoded image payload `img`, preprocess
    it to the 224x224 normalised format and run it through the loaded
    MXNet module `mod`, returning the module's first output as a JSON
    string together with the requested output content type.

    NOTE(review): input_content_type is accepted but never used.

    Debug commands previously kept (commented out) in this string:
        stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-model')
        output = stream.read()
        print(output)
        stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-ncg')
        output = stream.read()
        print(output)
    '''
    image = mx.image.imdecode(img)
    # Resize so the short side is 224, then center-crop to 224x224.
    resized = mx.image.resize_short(image, 224) # minimum 224x224 images
    cropped, crop_info = mx.image.center_crop(resized, (224, 224))
    # Per-channel mean/std normalisation on [0, 1] pixel values.
    normalized = mx.image.color_normalize(cropped.astype(np.float32) / 255,
                                          mean=mx.nd.array([0.485, 0.456, 0.406]),
                                          std=mx.nd.array([0.229, 0.224, 0.225]))
    # the network expect batches of the form (N,3,224,224)
    transposed = normalized.transpose((2, 0, 1)) # Transposing from (224, 224, 3) to (3, 224, 224)
    batchified = transposed.expand_dims(axis=0) # change the shape from (3, 224, 224) to (1, 3, 224, 224)
    image = batchified.astype(dtype='float32')
    mod.forward(Batch([image]))
    # First output of the module, converted to a plain Python list.
    prob = mod.get_outputs()[0].asnumpy().tolist()
    prob_json = json.dumps(prob)
    return prob_json, output_content_type
| 37.722222 | 121 | 0.675994 |
eeab90972c87f9c41713b77c4809b4a9c645a33d | 4,040 | py | Python | data/process_data.py | KCKhoo/disaster_response_dashboard | ee337125121664503675bfb5bf01af85c7c1a8ca | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | data/process_data.py | KCKhoo/disaster_response_dashboard | ee337125121664503675bfb5bf01af85c7c1a8ca | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | data/process_data.py | KCKhoo/disaster_response_dashboard | ee337125121664503675bfb5bf01af85c7c1a8ca | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    '''Read the messages and categories CSV files and join them on 'id'.

    Args:
        messages_filepath (str): path of the CSV file with the raw messages
        categories_filepath (str): path of the CSV file with each message's categories

    Returns:
        DataFrame: one row per message with its categories attached
    '''
    # Read both datasets and merge them on their shared 'id' column.
    messages_df = pd.read_csv(messages_filepath)
    categories_df = pd.read_csv(categories_filepath)
    return messages_df.merge(categories_df, on='id')
def clean_data(df):
    '''Split the packed 'categories' string into one numeric column per category.

    Steps:
    1) expand the semicolon-separated 'categories' column into separate
       columns, named after each category
    2) keep only the digit after the '-' in each value and cast to number
    3) swap the original 'categories' column for the new columns
    4) drop duplicated rows
    5) drop rows whose 'related' value is 2

    Args:
        df (DataFrame): merged messages/categories dataframe

    Returns:
        DataFrame: cleaned copy of the input, re-indexed from 0
    '''
    cleaned = df.copy()
    # One column per category, split on ';'.
    split_cats = cleaned['categories'].str.strip().str.split(';', expand=True)
    # Column names come from the first row, minus the trailing '-<digit>'.
    split_cats.columns = split_cats.iloc[0, :].str[:-2]
    # Keep only the value after the '-' and make it numeric.
    for name in split_cats:
        split_cats[name] = pd.to_numeric(split_cats[name].str.split('-').str[-1])
    # Replace the packed column with the expanded category columns.
    cleaned = pd.concat([cleaned.drop(columns=['categories']), split_cats], axis=1)
    cleaned = cleaned.drop_duplicates()
    # 'related' == 2 marks ambiguous rows; remove them and re-index.
    return cleaned[cleaned['related'] != 2].reset_index(drop=True)
def save_data(df, database_filename):
    ''' Save clean dataset to a SQLite database

    Args:
        df (DataFrame): Clean dataframe
        database_filename (string): Path at which database will be stored

    Returns:
        None
    '''
    engine = create_engine('sqlite:///' + database_filename)
    # if_exists='replace' makes the ETL pipeline re-runnable: the
    # default ('fail') raises ValueError once the table already exists.
    df.to_sql('DisasterMessages', engine, index=False, if_exists='replace')
if __name__ == '__main__':
main() | 33.94958 | 98 | 0.658168 |
eeaca61d7f8a12d9407b89ba0d429021d517e4c0 | 179 | py | Python | problem0650.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0650.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | problem0650.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
#
# #650 Divisors of Binomial Product - Project Euler
# https://projecteuler.net/problem=650
#
# Code by Kevin Marciniak
#
###########################
| 19.888889 | 51 | 0.502793 |
eeacff18635731300c340b2e253ce1bf7ee2b4e0 | 3,432 | py | Python | pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null | pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null | pycle/bicycle-scrapes/bike-data-scrape/scraperMulti.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import os
import csv
# Accumulates one dict per successfully parsed bicycle page.
bicycles = []
basepath = 'HTMLFiles/'
# The scraped dicts are appended to scraped.py as a Python list literal.
# NOTE(review): opened in append mode and never closed (handle leak).
outputFile = open('scraped.py','a')
outputFile.write("list=[")
# Folder/file totals are only used by the progress display below.
len1 = len(os.listdir(basepath))
counter1 = 0
for entry in os.listdir(basepath):
    counter2 = 0
    len2 = len(os.listdir(basepath+'/'+entry))
    for folder in os.listdir(basepath+'/'+entry):
        # NOTE(review): listFile is never closed -> file handle leak.
        listFile = open(basepath+entry+'/'+folder,"r")
        try:
            parsed = BeautifulSoup(listFile, "html.parser")
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.
        except:
            print('bs4 error in '+basepath+entry+'/'+folder)
            break
bicycle = {
'Brand': '-',
'Model': '-',
'Weight': '-',
'Released on the market': '-',
'For women': '-',
'For kids': '-',
'Frame material': '-',
'Frame type': '-',
'Collapsible frame': '-',
'Color': '-',
'Fork type': '-',
'Shock absorber type': '-',
'Shock absorber pressure': '-',
'Fork name': '-',
'Wheel drive': '-',
'Drive type': '-',
'Transmission type': '-',
'Number of speeds': '-',
'System name': '-',
'Cassette name': '-',
'Front derailleur gears name': '-',
'Rear derailleur gears name': '-',
'Shifters type': '-',
'Shifters name': '-',
'Front brakes': '-',
'Front brakes name': '-',
'Rear brakes': '-',
'Number of wheels': '-',
'Wheels diameter': '-',
'Double rim': '-',
'Rim material': '-',
'Rims name': '-',
'Tyres pattern': '-',
'Tyres name': '-',
'Handlebar type': '-',
'Handlebar name': '-',
'Seat type': '-',
'Seat suspension': '-',
'Seat name': '-',
'Pedals type': '-',
'Pedals name': '-',
'Front panel': '-',
'Rear panel panel': '-',
'Trunk': '-',
'Rearview mirror': '-',
'Horn': '-',
'Basket': '-'
}
        # Each <tr> of the spec table holds a key/value pair of <td> cells.
        tableRows = parsed.findAll('tr')
        for row in tableRows:
            tableData = row.findAll('td')
            try:
                key = tableData[0].text.strip()
                value = tableData[1].text.strip()
            # A row without two cells aborts parsing of this file.
            # NOTE(review): bare except hides the real error type.
            except:
                print('error in '+basepath+entry+'/'+folder)
                break
            else:
                bicycle[key] = value
        # Keep only pages where at least the brand could be extracted.
        if(bicycle['Brand']!='-'):
            bicycles.append(bicycle)
            outputFile.write(str(bicycle)+',\n')
        counter2+=1
        print("parsing "+str(counter2)+" of "+str(len2)+" ", end='\r')
    counter1+=1
    print("\nFOLDER parsing "+str(counter1)+" of "+str(len1)+" \n", end='\r')
# keys = bicycles[0].keys()
# with open('bicycles.csv', 'w', newline='') as output_file:
# dict_writer = csv.DictWriter(output_file, keys)
# dict_writer.writeheader()
# dict_writer.writerows(bicycles)
outputFile.write(']')
toWrite = """
import csv
keys = list[0].keys()
with open('bicycles.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(list)
"""
outputFile.write(toWrite) | 28.840336 | 106 | 0.456002 |
eeaeecc00f80638bdeeeac780d5b87b92462f522 | 464 | py | Python | dummyGPIO.py | yasokada/python-151127-7segLed_IPadrDisplay | eb97f17685ac2477e6a3a7321159d6463f736dd2 | [
"MIT"
] | 1 | 2017-01-13T23:57:21.000Z | 2017-01-13T23:57:21.000Z | toLearn/dummyGPIO.py | yasokada/python-151113-lineMonitor | 224342d5855d8ee6792fad6ad36399d95fce1b09 | [
"MIT"
] | 2 | 2015-12-08T23:40:12.000Z | 2015-12-24T22:09:07.000Z | dummyGPIO.py | yasokada/python-151127-7segLed_IPadrDisplay | eb97f17685ac2477e6a3a7321159d6463f736dd2 | [
"MIT"
] | null | null | null | '''
v0.1 2015/11/26
- add output()
- add setmode()
- add setup()
'''
# Usage
'''
from dummyGPIO import CDummyGPIO
GPIO = CDummyGPIO()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(10, GPIO.OUT)
'''
| 12.888889 | 33 | 0.642241 |
eeb4e80e6cc8868c343b5e9768135af13ccbaa18 | 380 | py | Python | setup.py | CyrusBiotechnology/django-headmaster | 0100b4086c09da43da5f2f68e3cb549dca8af96a | [
"MIT"
] | null | null | null | setup.py | CyrusBiotechnology/django-headmaster | 0100b4086c09da43da5f2f68e3cb549dca8af96a | [
"MIT"
] | null | null | null | setup.py | CyrusBiotechnology/django-headmaster | 0100b4086c09da43da5f2f68e3cb549dca8af96a | [
"MIT"
] | null | null | null | from setuptools import setup
# Minimal setuptools packaging metadata for the django-headmaster app.
setup(name='django-headmaster',
      version='0.0.1',
      description='Add extra headers to your site via your settings file',
      url='http://github.com/CyrusBiotechnology/django-headmaster',
      author='Peter Novotnak',
      author_email='peter@cyrusbio.com',
      license='MIT',
      packages=['django_headmaster'],
      zip_safe=True)
| 31.666667 | 74 | 0.681579 |
eeb69df1582f775092e1af736d2173a50d2365bb | 484 | py | Python | tests/test_lines_count.py | MacHu-GWU/single_file_module-project | 01f7a6b250853bebfd73de275895bf274325cfc1 | [
"MIT"
] | 3 | 2017-02-27T05:07:46.000Z | 2022-01-17T06:46:20.000Z | tests/test_lines_count.py | MacHu-GWU/single_file_module-project | 01f7a6b250853bebfd73de275895bf274325cfc1 | [
"MIT"
] | null | null | null | tests/test_lines_count.py | MacHu-GWU/single_file_module-project | 01f7a6b250853bebfd73de275895bf274325cfc1 | [
"MIT"
] | 1 | 2017-09-05T14:05:55.000Z | 2017-09-05T14:05:55.000Z | # -*- coding: utf-8 -*-
import os
import pytest
from sfm import lines_count
if __name__ == "__main__":
    # Allow running this test module directly through pytest.
    # (The former redundant function-local `import os` was dropped;
    # `os` is already imported at module level.)
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| 20.166667 | 68 | 0.688017 |
eeb6fa5b0b347f06d9b353c9e9aeb47e31e57218 | 1,884 | py | Python | rrwebapp/accesscontrol.py | louking/rrwebapp | 5c73f84e1a21bc3b5fa51d83ba576c3152e6cf27 | [
"Apache-2.0"
] | null | null | null | rrwebapp/accesscontrol.py | louking/rrwebapp | 5c73f84e1a21bc3b5fa51d83ba576c3152e6cf27 | [
"Apache-2.0"
] | 417 | 2015-05-07T16:50:22.000Z | 2022-03-14T16:16:13.000Z | rrwebapp/accesscontrol.py | louking/rrwebapp | 5c73f84e1a21bc3b5fa51d83ba576c3152e6cf27 | [
"Apache-2.0"
] | null | null | null | ###########################################################################################
# accesscontrol - access control permission and need definitions
#
# Date Author Reason
# ---- ------ ------
# 01/18/14 Lou King Create
#
# Copyright 2014 Lou King
#
###########################################################################################
'''
accesscontrol - access control permission and need definitions
===================================================================
'''
# standard
from collections import namedtuple
from functools import partial
# pypi
import flask
from flask_login import current_user
from flask_principal import Principal, Permission, RoleNeed, UserNeed
# home grown
from . import app
from .model import db # this is ok because this module only runs under flask
########################################################################
# permissions definition
########################################################################
# load principal extension, and define permissions
# see http://pythonhosted.org/Flask-Principal/ section on Granular Resource Protection
principals = Principal(app)
owner_permission = Permission(RoleNeed('owner'))
admin_permission = Permission(RoleNeed('admin'))
viewer_permission = Permission(RoleNeed('viewer'))
ClubDataNeed = namedtuple('club_data', ['method', 'value'])
UpdateClubDataNeed = partial(ClubDataNeed,'update')
ViewClubDataNeed = partial(ClubDataNeed,'view')
| 34.254545 | 91 | 0.571656 |
eeb941243abfa405873eabb4951a2447d2772339 | 173 | py | Python | bareon_fuel_extension/tests.py | gitfred/bareon-fuel-extension | 0074f187a6244e786b37e551009fa2eadcae1d3a | [
"Apache-2.0"
] | null | null | null | bareon_fuel_extension/tests.py | gitfred/bareon-fuel-extension | 0074f187a6244e786b37e551009fa2eadcae1d3a | [
"Apache-2.0"
] | null | null | null | bareon_fuel_extension/tests.py | gitfred/bareon-fuel-extension | 0074f187a6244e786b37e551009fa2eadcae1d3a | [
"Apache-2.0"
] | null | null | null | from nailgun.extensions import BaseExtension
| 14.416667 | 44 | 0.65896 |
eebab051d6bd2499eba549e8e5c3faefb5989879 | 1,404 | py | Python | email_utils/models/EmailMessage.py | E7ernal/quizwhiz | a271d40922eaad682a76d7700beafc7a5df51fac | [
"MIT"
] | null | null | null | email_utils/models/EmailMessage.py | E7ernal/quizwhiz | a271d40922eaad682a76d7700beafc7a5df51fac | [
"MIT"
] | 7 | 2020-02-12T00:31:35.000Z | 2022-03-11T23:19:21.000Z | email_utils/models/EmailMessage.py | E7ernal/quizwhiz | a271d40922eaad682a76d7700beafc7a5df51fac | [
"MIT"
] | null | null | null | # vim: ts=4:sw=4:expandtabs
__author__ = 'zach.mott@gmail.com'
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from email_utils.tasks import send_mail
RESEND_EMAIL_PERMISSION = 'can_resend_email'
| 29.87234 | 86 | 0.688034 |
eebbbd0016582d70d21cbd69a90c5e0e380ce3d8 | 1,262 | py | Python | core/string_utils.py | phage-nz/observer | 2a2d9b5047c5b2aba0d102c0c21e97de472bbd39 | [
"Apache-2.0"
] | 2 | 2020-04-25T05:11:49.000Z | 2021-02-09T21:27:38.000Z | core/string_utils.py | phage-nz/observer | 2a2d9b5047c5b2aba0d102c0c21e97de472bbd39 | [
"Apache-2.0"
] | null | null | null | core/string_utils.py | phage-nz/observer | 2a2d9b5047c5b2aba0d102c0c21e97de472bbd39 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from .log_utils import get_module_logger
from defang import defang
import random
import string
import urllib.parse
logger = get_module_logger(__name__)
| 20.354839 | 64 | 0.639461 |
eebda7906979f96edc59138dae061c5c2c92e491 | 61 | py | Python | udp_decoders/__init__.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | 5 | 2017-07-31T02:07:05.000Z | 2021-02-14T16:39:49.000Z | udp_decoders/__init__.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | null | null | null | udp_decoders/__init__.py | ccgcyber/xpcap | a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8 | [
"MIT"
] | 4 | 2016-07-24T08:56:54.000Z | 2020-07-12T11:50:02.000Z | # empty file telling python that this directory is a package
| 30.5 | 60 | 0.803279 |
eebdcac25970fd8db9e1b4ca1a89af16a4e7a240 | 803 | py | Python | slushtools/string/__init__.py | ZackPaceCoder/slushtools | 32bfee028d30fd8fd88e332bdd744a71e51d6dcc | [
"MIT"
] | null | null | null | slushtools/string/__init__.py | ZackPaceCoder/slushtools | 32bfee028d30fd8fd88e332bdd744a71e51d6dcc | [
"MIT"
] | null | null | null | slushtools/string/__init__.py | ZackPaceCoder/slushtools | 32bfee028d30fd8fd88e332bdd744a71e51d6dcc | [
"MIT"
] | null | null | null | # Slush Tools STRING Module
| 20.075 | 48 | 0.523039 |