id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (stringclasses, 1 value)
---|---|---|
1606462 | """
Module Arm
This module represents the two arms that link the blocks together.
Attributes
----------
low_anchor : Coordinates
    Coordinates of the lower-left anchor of the block. The coordinates of the lower-right
    anchor are derived during computation and drawing.
high_anchor : Coordinates
    Coordinates of the top-left anchor of the block. The coordinates of the top-right
    anchor are derived during computation and drawing.
length : float
    Length of the arm.
offset : float
    Offset used to compute and draw the position of the right anchors.
Methods
-------
__init__(self, _low_anchor, _high_anchor, _length, _offset)
Initialize an Arm
draw(self, frame, offset, invert_y)
Draw the Arm
"""
import cv2
from utils import Utils
class Arm:
def __init__(self, _low_anchor, _high_anchor, _length, _offset):
"""
        Initialize an arm with basic attributes.
Parameters
----------
        _low_anchor : Coordinates
            Coordinates of the lower-left anchor of the block. The coordinates of the lower-right
            anchor are derived during computation and drawing.
        _high_anchor : Coordinates
            Coordinates of the top-left anchor of the block. The coordinates of the top-right
            anchor are derived during computation and drawing.
        _length : float
            Length of the arm.
        _offset : float
            Offset used to compute and draw the position of the right anchors.
"""
self.low_anchor = _low_anchor
self.high_anchor = _high_anchor
self.length = _length
self.offset = _offset
def draw(self, frame, offset, invert_y):
"""
        Draw the two arm segments on the given frame.
Parameters
----------
frame : numpy Array
Image of the current frame to draw on.
        offset : Coordinates
            Offset of the arms relative to the robot coordinates.
        invert_y : bool
            If True, the structure is inverted along the y axis.
Returns
-------
frame : numpy Array
Updated image with the arms on it.
"""
inv = -1 if invert_y else 1
arm_thickness = 5
frame = cv2.line(
frame,
(
Utils.ConvertX(self.low_anchor.x + offset.x),
Utils.ConvertY(inv * (self.low_anchor.y) + offset.y)
),
(
Utils.ConvertX(self.high_anchor.x + offset.x),
Utils.ConvertY(inv * (self.high_anchor.y) + offset.y)
),
Utils.red,
thickness=arm_thickness
)
return cv2.line(
frame,
(
Utils.ConvertX(self.low_anchor.x + self.offset + offset.x),
Utils.ConvertY(inv * (self.low_anchor.y) + offset.y)
),
(
Utils.ConvertX(self.high_anchor.x + self.offset + offset.x),
Utils.ConvertY(inv * (self.high_anchor.y) + offset.y)
),
Utils.red,
thickness=arm_thickness
)
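# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Assumes a simple Coordinates object with .x and .y attributes; the real project
# may define its own Coordinates class with a different interface.
if __name__ == "__main__":
    from collections import namedtuple
    import numpy as np

    Coordinates = namedtuple("Coordinates", ["x", "y"])
    arm = Arm(Coordinates(0.0, 0.0), Coordinates(0.0, 1.0), _length=1.0, _offset=0.5)
    blank = np.zeros((480, 640, 3), dtype=np.uint8)
    blank = arm.draw(blank, offset=Coordinates(1.0, 1.0), invert_y=False)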
| StarcoderdataPython |
11356841 | <reponame>taxio/Saffron<filename>calyx/src/courses/models.py
import unicodedata
from importlib import import_module
from typing import TYPE_CHECKING
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import Group
from django.db import models, transaction, IntegrityError
from django.utils import timezone
from .errors import AlreadyJoinedError, NotJoinedError, NotAdminError
if TYPE_CHECKING:
from typing import Optional
from users.models import User as AppUser
User = get_user_model() # type: AppUser
def create_group_name(year: int, course_name: str) -> str:
"""管理グループの名称を返す"""
return f'{year}_{course_name}'
class Year(models.Model):
"""
    Model representing an academic year.
"""
year = models.IntegerField('年度', unique=True)
class Meta:
ordering = ['year']
verbose_name = '年度'
verbose_name_plural = '年度'
def __str__(self):
return str(self.year)
class CourseManager(models.Manager):
"""
    Manager for operating on Course objects.
"""
def create_course(self, name: str, pin_code: str, year: 'Optional[int]' = None,
config: 'Optional[dict]' = None) -> 'Course':
"""
        Create a new course.
        :param name: name of the course
        :param pin_code: PIN code to set for the course
        :param year: defaults to the current year if no value is given
        :param config: dictionary of settings
:return: Course
"""
with transaction.atomic():
model = self.model # type: Course
name = model.normalize_name(name)
if year is None:
year = timezone.now().year
group_name = create_group_name(year, name)
admin_user_group, _ = Group.objects.get_or_create(name=group_name)
year_obj, _ = Year.objects.get_or_create(year=year)
course = model(name=name, year=year_obj, admin_user_group=admin_user_group)
course.set_password(pin_code)
course.save()
            # If a config was POSTed along with the course, override the default config
if config is not None:
for key, val in config.items():
setattr(course.config, key, val)
course.config.save()
return course
class Course(models.Model):
"""
    Model representing a course or department.
"""
name = models.CharField('名称', max_length=255)
pin_code = models.CharField('PINコード', max_length=255)
year = models.ForeignKey(Year, on_delete=models.CASCADE, related_name='courses')
users = models.ManyToManyField(User, verbose_name="所属学生", blank=True, related_name='courses')
admin_user_group = models.OneToOneField(Group, on_delete=models.CASCADE, blank=True, null=True,
related_name='course', unique=True)
_pin_code = None
objects = CourseManager()
class Meta:
        # Unique constraint on the course name and year
unique_together = ('name', 'year')
ordering = ['name', '-year__year']
verbose_name = '課程'
verbose_name_plural = '課程'
def __str__(self):
return '{n}({y})'.format(y=str(self.year.year), n=self.name)
def set_password(self, raw_password: str):
"""
        Hash the given password and set it as the PIN code.
        :param raw_password: raw password string
:return: None
"""
self._pin_code = raw_password
self.pin_code = make_password(raw_password)
def check_password(self, raw_pin_code: str) -> bool:
"""
        Return True if the given PIN code is correct.
:return:
"""
def setter(raw_password):
"""Passwordのハッシュアルゴリズムが変わった場合にパスワードを更新する"""
self.set_password(raw_password)
self._password = None
self.save(update_fields=["password"])
return check_password(raw_pin_code, self.pin_code, setter)
@classmethod
def normalize_name(cls, name: str) -> str:
return unicodedata.normalize('NFKC', name)
def join(self, user: User, password: str) -> bool:
"""
        Join this course. Raises AlreadyJoinedError if the user has already joined.
        :param user: the user who joins the course
        :param password: PIN code of the course
:return: bool
"""
if user.courses.filter(pk=self.pk).exists():
raise AlreadyJoinedError(self)
if self.check_password(password):
self.users.add(user)
self.save()
return True
return False
def leave(self, user: User) -> None:
"""
        Leave this course. Raises NotJoinedError if the user has not joined it.
:return:
"""
if not self.users.filter(pk=user.pk).exists():
raise NotJoinedError(self)
self.users.remove(user)
self.save()
return None
def register_as_admin(self, user: 'AppUser') -> None:
"""
        Register the given user as an administrator of this course. Raises NotJoinedError
        if the user has not joined this course.
        :param user: the user to register as an administrator
:return:
"""
if not self.users.filter(id=user.pk).exists():
raise NotJoinedError(self)
if not user.groups.filter(name=self.admin_group_name).exists():
user.groups.add(self.admin_user_group)
user.save()
def unregister_from_admin(self, user: 'AppUser') -> None:
"""
        Remove the given user from the administrators. Raises NotAdminError if the user
        is not an administrator.
        :param user: the user to remove from the administrators
:return:
"""
if not user.groups.filter(name=self.admin_group_name).exists():
raise NotAdminError(self, user)
user.groups.remove(self.admin_user_group)
user.save()
@property
def admin_group_name(self) -> str:
"""管理ユーザグループの名称"""
return create_group_name(self.year.year, self.name)
class Config(models.Model):
"""希望調査の表示設定"""
    show_gpa = models.BooleanField('GPAを表示する', default=False)
show_username = models.BooleanField('ユーザ名を表示する', default=False)
rank_limit = models.IntegerField('表示する志望順位の数', default=3)
course = models.OneToOneField(Course, verbose_name="課程", related_name="config", on_delete=models.CASCADE)
class Meta:
verbose_name = "表示設定"
verbose_name_plural = "表示設定"
def __str__(self):
return f'{self.course.name}の表示設定'
class Lab(models.Model):
"""研究室のモデル"""
name = models.CharField("研究室名", max_length=255)
capacity = models.IntegerField("許容人数", default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='labs')
class Meta:
verbose_name = "研究室"
verbose_name_plural = "研究室"
        # Labs with the same name are not allowed within the same course
unique_together = ["name", "course"]
def __str__(self):
return f'{self.course} - {self.name}'
class RankManager(models.Manager):
"""研究室志望順位を操作するマネージャー"""
def create(self, lab, course, user, order):
if self.filter(course=course, user=user, lab=lab).exists():
            # The user has already registered a preference rank for this lab
raise IntegrityError
return super(RankManager, self).create(lab=lab, course=course, user=user, order=order)
class Rank(models.Model):
"""研究室志望順位のモデル"""
    # on_delete is set to DO_NOTHING because the preference ranks are reordered automatically on deletion.
    # Using SET_NULL or similar would assign None after the reordering.
lab = models.ForeignKey(Lab, verbose_name='研究室', on_delete=models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(User, verbose_name='ユーザ', on_delete=models.CASCADE)
course = models.ForeignKey(Course, verbose_name='課程', on_delete=models.CASCADE)
order = models.IntegerField("志望順位")
objects = RankManager()
class Meta:
verbose_name = "志望順位"
verbose_name_plural = "志望順位"
unique_together = ["user", "order", "course"]
def __str__(self):
return f'{self.user}-{self.order}-{self.lab}'
# The signal definitions are kept in a separate module, so only a dynamic import is done here.
# Without this import the signals are never registered and their handlers would not run.
# Note: `from . import signals` would be removed by "Optimize Imports", hence import_module.
import_module('.signals', 'courses')
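# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Illustrates the intended flow of the manager and model methods above; the course
# name, PIN value and user object are made up for the example.
#
#   course = Course.objects.create_course(name="example-course", pin_code="1234", year=2020)
#   course.join(user, password="1234")   # True on success, False on a wrong PIN
#   course.register_as_admin(user)       # adds the user to the course's admin group
#   course.leave(user)                   # raises NotJoinedError if the user is not a member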
| StarcoderdataPython |
399344 | # make sure the rest of the ABXpy package is accessible
import ABXpy.sideop.side_operations_manager as side_operations_manager
import ABXpy.dbfun.dbfun_compute as dbfun_compute
import ABXpy.dbfun.dbfun_lookuptable as dbfun_lookuptable
import ABXpy.dbfun.dbfun_column as dbfun_column
import numpy as np
class FilterManager(side_operations_manager.SideOperationsManager):
"""Manage the filters on attributes (on, across, by) or elements (A, B, X)
for further processing"""
def __init__(self, db_hierarchy, on, across, by, filters):
side_operations_manager.SideOperationsManager.__init__(
self, db_hierarchy, on, across, by)
# this case is specific to filters, it applies a generic filter to the
# database before considering A, B and X stuff.
self.generic = []
# associate each of the provided filters to the appropriate point in
# the computation flow
        # filt can be: the name of a column of the database (possibly
        # extended), the name of a lookup file, the name of a script, or a script
        # given directly as a string (one that doesn't end in .dbfun)
for filt in filters:
# instantiate appropriate dbfun
if filt in self.extended_cols: # column already in db
db_fun = dbfun_column.DBfun_Column(filt, indexed=False)
# evaluate context is wasteful in this case... not even
# necessary to have a dbfun at all
elif len(filt) >= 6 and filt[-6:] == '.dbfun': # lookup table
# ask for re-interpreted indexed outputs
db_fun = dbfun_lookuptable.DBfun_LookupTable(
filt, indexed=False)
else: # on the fly computation
db_fun = dbfun_compute.DBfun_Compute(filt, self.extended_cols)
self.add(db_fun)
def classify_generic(self, elements, db_fun, db_variables):
# check if there are only non-extended names and, only if this is the
# case, instantiate 'generic' field of db_variables
if {s for r, s in elements} == set(['']):
db_variables['generic'] = set(elements)
self.generic.append(db_fun)
self.generic_context['generic'].update(db_variables['generic'])
elements = {}
return elements, db_variables
def by_filter(self, by_values):
return singleton_filter(self.evaluate_by(by_values))
def generic_filter(self, by_values, db):
return db.iloc[vectorial_filter(lambda context: self.evaluate_generic(by_values, db, context), np.arange(len(db)))]
def on_across_by_filter(self, on_across_by_values):
return singleton_filter(self.evaluate_on_across_by(on_across_by_values))
def A_filter(self, on_across_by_values, db, indices):
# Caution: indices contains db-related indices
# but the returned result contains indices with respect to indices
indices_ind = np.arange(len(indices))
return vectorial_filter(lambda context: self.evaluate_A(on_across_by_values, db, indices, context), indices_ind)
def B_filter(self, on_across_by_values, db, indices):
# Caution: indices contains db-related indices
# but the returned result contains indices with respect to indices
indices_ind = np.arange(len(indices))
return vectorial_filter(lambda context: self.evaluate_B(on_across_by_values, db, indices, context), indices_ind)
def X_filter(self, on_across_by_values, db, indices):
# Caution: indices contains db-related indices
# but the returned result contains indices with respect to indices
indices_ind = np.arange(len(indices))
return vectorial_filter(lambda context: self.evaluate_X(on_across_by_values, db, indices, context), indices_ind)
def ABX_filter(self, on_across_by_values, db, triplets):
# triplets contains db-related indices
# the returned result contains indices with respect to triplets
indices = np.arange(len(triplets))
return vectorial_filter(lambda context: self.evaluate_ABX(on_across_by_values, db, triplets, context), indices)
def singleton_filter(generator):
keep = True
for result in generator:
if not(result):
keep = False
break
return keep
def vectorial_filter(generator, indices):
"""
.. note:: To allow a lazy evaluation of the filter, the context is filtered
explicitly which acts on the generator by a side-effect (dict being
mutable in python)
"""
kept = np.array(indices)
context = {}
for result in generator(context):
still_up = np.where(result)[0]
kept = kept[still_up]
for var in context:
            # keep testing only the cases that are still possibly True
context[var] = [context[var][e] for e in still_up]
# FIXME wouldn't using only numpy arrays be more performant ?
if not(kept.size):
break
return kept
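# --- Hedged toy example (added by the editor, not part of the original module) ---
# Shows the lazy-evaluation pattern documented in vectorial_filter: each yielded
# boolean mask applies to the candidates that survived the previous mask, because
# the shared `context` dict is filtered in place between iterations.
if __name__ == "__main__":
    def toy_generator(context):
        values = list(range(10))
        context['values'] = values
        yield np.array([v % 2 == 0 for v in context['values']])  # keep even numbers
        yield np.array([v % 4 == 0 for v in context['values']])  # evaluated on the 5 survivors
    print(vectorial_filter(toy_generator, np.arange(10)))  # expected: [0 4 8]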
| StarcoderdataPython |
9697077 | from matplotlib import _api
import mpl_toolkits.axes_grid1.axes_grid as axes_grid_orig
from .axislines import Axes
@_api.deprecated("3.5")
class CbarAxes(axes_grid_orig.CbarAxesBase, Axes):
pass
class Grid(axes_grid_orig.Grid):
_defaultAxesClass = Axes
class ImageGrid(axes_grid_orig.ImageGrid):
_defaultAxesClass = Axes
AxesGrid = ImageGrid
| StarcoderdataPython |
3535724 | import logging
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Tuple, Optional
from antarest.core.interfaces.cache import ICache, CacheConstants
from antarest.matrixstore.service import MatrixService, ISimpleMatrixService
from antarest.study.common.uri_resolver_service import (
UriResolverService,
)
from antarest.study.storage.rawstudy.model.filesystem.config.files import (
ConfigPathBuilder,
)
from antarest.study.storage.rawstudy.model.filesystem.config.model import (
FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.context import (
ContextServer,
)
from antarest.study.storage.rawstudy.model.filesystem.root.filestudytree import (
FileStudyTree,
)
logger = logging.getLogger(__name__)
@dataclass
class FileStudy:
config: FileStudyTreeConfig
tree: FileStudyTree
class StudyFactory:
"""
    Study factory. Mainly used in tests to inject study mocks via dependency injection.
"""
def __init__(
self,
matrix: ISimpleMatrixService,
resolver: UriResolverService,
cache: ICache,
) -> None:
self.context = ContextServer(matrix=matrix, resolver=resolver)
self.cache = cache
def create_from_fs(
self,
path: Path,
study_id: str,
output_path: Optional[Path] = None,
use_cache: bool = True,
) -> Tuple[FileStudyTreeConfig, FileStudyTree]:
cache_id = f"{study_id}/{CacheConstants.STUDY_FACTORY}"
if study_id and use_cache:
from_cache = self.cache.get(cache_id)
if from_cache is not None:
logger.info(f"Study {study_id} read from cache")
config = FileStudyTreeConfig.parse_obj(from_cache)
return config, FileStudyTree(self.context, config)
start_time = time.time()
config = ConfigPathBuilder.build(path, study_id, output_path)
duration = "{:.3f}".format(time.time() - start_time)
logger.info(f"Study {study_id} config built in {duration}s")
result = config, FileStudyTree(self.context, config)
if study_id and use_cache:
self.cache.put(cache_id, config.dict())
logger.info(f"Cache new entry from StudyFactory (studyID: {study_id})")
return result
def create_from_config(self, config: FileStudyTreeConfig) -> FileStudyTree:
return FileStudyTree(self.context, config)
| StarcoderdataPython |
6417811 | import time
from decimal import Decimal
try:
import thread
except ImportError:
import _thread as thread
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
# import Bitfinex library
from hokonui.exchanges.bitfinex import Bitfinex as bfx
# import ITBIT library
from hokonui.exchanges.itbit import Itbit as itb
else:
# import ITBIT library
# import Bitfinex library
from ..hokonui.exchanges.bitfinex import Bitfinex as bfx
from ..hokonui.exchanges.itbit import Itbit as itb
SLEEP = 30 # check every 30 seconds
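# --- Worked example (added by the editor, not part of the original script) ---
# The prices below are made up. If itBit asks 30,000 USD while Bitfinex bids
# 30,050 USD, buying on itBit and selling on Bitfinex locks in
# 30,050 - 30,000 = 50 USD per BTC before fees; the checks in main() detect
# exactly this situation in either direction.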
def main():
"""main function, called at the start of the program"""
def ticker(sleeptime, lock):
while True:
lock.acquire()
            # keep prices as Decimals for the comparisons; format only when printing
            ask_itbit = Decimal(itb.get_current_ask('USD'))
            bid_itbit = Decimal(itb.get_current_bid('USD'))
            ask_bitfinex = Decimal(bfx.get_current_ask('USD'))
            bid_bitfinex = Decimal(bfx.get_current_bid('USD'))
            print("It : Bid %s Ask %s" % (format(bid_itbit, '.2f'), format(ask_itbit, '.2f')))
            print("Bfx: Bid %s Ask %s" % (format(bid_bitfinex, '.2f'), format(ask_bitfinex, '.2f')))
            print('-' * 20)
            # check for arb in one direction (buy @ itBit, sell @ Bitfinex)
            if ask_itbit < bid_bitfinex:
                arb = bid_bitfinex - ask_itbit
                print("Arb #1 exists : ITBIT sell price < Bitfinex buy price ")
                print("Amount : %s " % format(arb, '.2f'))
            # check for arb in the other direction (buy @ Bitfinex, sell @ itBit)
            if bid_itbit > ask_bitfinex:
                arb = bid_itbit - ask_bitfinex
                print("Arb #2 exists : Bitfinex sell price < itBit buy price ")
                print("Amount : %s " % format(arb, '.2f'))
lock.release()
time.sleep(sleeptime)
lock = thread.allocate_lock()
thread.start_new_thread(ticker, (SLEEP, lock))
while True:
pass
if __name__ == "__main__":
main()
| StarcoderdataPython |
6522106 | from wikimedia_cli import __version__
project = "wikimedia-cli"
author = 'Sky "g3ner1c" H.'
copyright = '2022 Sky "g3ner1c" H.'
release = __version__
extensions = ["myst_parser"] #! this is so that you can write docs in markdown instead of rst
master_doc = "index"
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "furo"
html_static_path = ["_static"]
pygments_style = "sphinx"
pygments_dark_style = "one-dark"
| StarcoderdataPython |
1818314 | <filename>setup.py
from setuptools import setup
with open('requirements.txt', 'r') as f:
requires=[line for line in f.read().split("\n")
if line and not line.startswith('#')]
setup(
name='WeatherAPI',
version='0.0.1',
packages=[
'weatherapi',
],
install_requires=requires,
package_data={
'weatherapi': [
'weatherapi/helpers/*.py',
'weatherapi/static/*',
'weatherapi/templates/*'
]
},
#entry_points='''
# [console_scripts]
# starter=flask_starter.cli:main
# # Also interesting:
# # [flask.command]
#''',
)
| StarcoderdataPython |
4987177 | <reponame>SemanticBeeng/termolator-j<filename>find_terms.py<gh_stars>0
from inline_terms import *
from DataDef import File, TXT3, TERM, POS
#
# @semanticbeeng @done static typing
# @semanticbeeng @pported
#
def find_inline_terms_for_file_list(file_list: File, dict_prefix: str = None) -> None:
start = True
with file_list.openText() as instream:
# if dict_prefix:
# unigram_dictionary.clear()
## see derive_plurals in term_utilities
## and other instances of "unigram_dict" below
for line in instream.readlines():
file_prefix = line.strip()
# @semanticbeeng @todo @dataFlow
lines: List[str] = get_lines_from_file(
File[TXT3](file_prefix + '.txt3')) ## add feature to remove xml
run_abbreviate_on_lines(lines,
File[ABBR](file_prefix + '.abbr'), reset_dictionary=start) # @semanticbeeng @todo @arch global state mutation
## creates abbreviation files and acquires abbreviation --> term
## and term --> abbreviation dictionaries
## Possibly add alternative which loads existing abbreviation files into
## dictionaries for future steps (depends on timing)
# if dict_prefix:
# increment_unigram_dict_from_lines(lines)
find_inline_terms(lines,
File[FACT](file_prefix + '.fact'),
File[POS](file_prefix + '.pos'),
File[TERM](file_prefix + '.terms'))
if start:
start = False
if dict_prefix:
save_abbrev_dicts(File[ABBR](str(dict_prefix) + ".dict_abbr_to_full"),
File[ABBR](str(dict_prefix) + ".dict_full_to_abbr"))
## save_unigram_dict(dict_prefix+".dict_unigram")
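# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# The file list is a plain text file with one document prefix per line; for each
# prefix the pipeline reads <prefix>.txt3 and produces .abbr, .fact and .terms
# files alongside it. The File constructor call below is an assumption; the real
# DataDef.File API may differ.
#
#   find_inline_terms_for_file_list(File("corpus_file_list.txt"), dict_prefix="corpus")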
| StarcoderdataPython |
4970940 | import mock
from imhotep.repositories import Repository, AuthenticatedRepository
repo_name = 'justinabrahms/imhotep'
def test_unauthed_download_location():
uar = Repository(repo_name, None, [None], None)
loc = uar.download_location
assert loc == "git://github.com/justinabrahms/imhotep.git"
def test_authed_download_location():
ar = AuthenticatedRepository(repo_name, None, [None], None)
assert ar.download_location == "<EMAIL>:justinabrahms/imhotep.git"
def test_unicode():
r = Repository(repo_name, None, [None], None)
assert r.__unicode__() == repo_name
def test_diff_commit():
executor = mock.Mock()
uar = Repository(repo_name, '/loc/', [None], executor)
uar.diff_commit('commit-to-diff')
executor.assert_called_with("cd /loc/ && git diff commit-to-diff")
def test_diff_commit__compare_point_applied():
executor = mock.Mock()
uar = Repository(repo_name, '/loc/', [None], executor)
uar.diff_commit('commit-to-diff', compare_point='base')
executor.assert_any_call("cd /loc/ && git checkout base")
def test_apply_commit():
executor = mock.Mock()
uar = Repository(repo_name, '/loc/', [None], executor)
uar.apply_commit('base')
executor.assert_called_with("cd /loc/ && git checkout base")
| StarcoderdataPython |
4855710 | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class CustomerCreationForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=True)
last_name = forms.CharField(max_length=30, required=True)
email = forms.EmailField(max_length=254, required=True)
username = forms.CharField(max_length=30, required=True)
password1=forms.CharField(widget=forms.PasswordInput(), required=True)
password2=forms.CharField(widget=forms.PasswordInput(), required=True, )
FAVORITE_COLORS_CHOICES = [
('blue', 'Blue'),
('green', 'Green'),
('black', 'Black'),
('red', 'Red'),
('white', 'White'),
('yellow', 'Yellow'),
]
FAVOURITE_PRODUCT_CHOICES = [
('jeans', 'Jeans'),
('shorts', 'Shorts'),
('tee', 'T-shirts'),
('shoes', 'Shoes'),
]
GENDER = [
('male', 'Male'),
('female', 'Female'),
]
favourite_colour = forms.ChoiceField(
choices=FAVORITE_COLORS_CHOICES,
)
fav_product = forms.ChoiceField(
choices=FAVOURITE_PRODUCT_CHOICES,
)
gender = forms.ChoiceField(
choices=GENDER,
)
class Meta(UserCreationForm.Meta):
fields = UserCreationForm.Meta.fields + ('first_name', 'last_name', 'username', 'email', 'gender', 'favourite_colour', 'fav_product', 'password1', 'password2')
model = get_user_model()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['first_name'].label = 'First Name'
self.fields['last_name'].label = 'Last Name'
self.fields['username'].label = 'User Name'
self.fields['email'].label = 'Email ID'
self.fields['favourite_colour'].label = 'Favourite Colour'
self.fields['fav_product'].label = 'Favourite Product'
self.fields['password1'].label = 'Password'
self.fields['password2'].label = 'Confirm Password'
self.fields['gender'].label = 'Gender'
        self.fields['first_name'].widget.attrs.update({'class' : 'form-field'})
self.fields['last_name'].widget.attrs.update({'class' : 'form-field'})
self.fields['username'].widget.attrs.update({'class' : 'form-field'})
self.fields['email'].widget.attrs.update({'class' : 'form-field'})
self.fields['favourite_colour'].widget.attrs.update({'class' : 'form-field'})
self.fields['fav_product'].widget.attrs.update({'class' : 'form-field'})
self.fields['password1'].widget.attrs.update({'class' : 'form-field'})
self.fields['password2'].widget.attrs.update({'class' : 'form-field'})
self.fields['gender'].widget.attrs.update({'class' : 'form-field'})
def clean(self):
cleaned_data = super(UserCreationForm, self).clean()
password1 = cleaned_data.get("password1")
password2 = cleaned_data.get("password2")
if password1 != password2:
            raise forms.ValidationError(
                "Password and confirm password do not match"
            )
        return cleaned_data
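# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Typical validation flow in a view; the field values below are made up.
#
#   form = CustomerCreationForm(data={
#       'first_name': 'Ada', 'last_name': 'Lovelace', 'username': 'ada',
#       'email': 'ada@example.com', 'gender': 'female', 'favourite_colour': 'blue',
#       'fav_product': 'tee', 'password1': 's3cret-pass', 'password2': 's3cret-pass',
#   })
#   if form.is_valid():
#       user = form.save()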
| StarcoderdataPython |
9671887 | <reponame>pgajdos/pymacaroons
from __future__ import unicode_literals
from hypothesis import *
from hypothesis.specifiers import *
from pymacaroons import Macaroon, MACAROON_V1, MACAROON_V2
from pymacaroons.utils import convert_to_bytes
ascii_text_strategy = strategy(
[sampled_from(map(chr, range(0, 128)))]
).map(lambda c: ''.join(c))
ascii_bin_strategy = strategy(ascii_text_strategy).map(
lambda s: convert_to_bytes(s)
)
class TestMacaroon(object):
def setup(self):
pass
@given(
key_id=one_of((ascii_text_strategy, ascii_bin_strategy)),
loc=one_of((ascii_text_strategy, ascii_bin_strategy)),
key=one_of((ascii_text_strategy, ascii_bin_strategy))
)
def test_serializing_deserializing_macaroon(self, key_id, loc, key):
assume(key_id and loc and key)
macaroon = Macaroon(
location=loc,
identifier=key_id,
key=key,
version=MACAROON_V1
)
deserialized = Macaroon.deserialize(macaroon.serialize())
assert macaroon.identifier == deserialized.identifier
assert macaroon.location == deserialized.location
assert macaroon.signature == deserialized.signature
macaroon = Macaroon(
location=loc,
identifier=key_id,
key=key,
version=MACAROON_V2
)
deserialized = Macaroon.deserialize(macaroon.serialize())
assert macaroon.identifier_bytes == deserialized.identifier_bytes
assert macaroon.location == deserialized.location
assert macaroon.signature == deserialized.signature
| StarcoderdataPython |
5153541 | """Test state getters for retrieving motion planning views of state."""
import pytest
from dataclasses import dataclass, field
from mock import MagicMock
from typing import Optional
from opentrons.types import Point, MountType
from opentrons.hardware_control.types import CriticalPoint
from opentrons.protocols.geometry.planning import MoveType, get_waypoints
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.state import (
PipetteData,
LocationData,
PipetteLocationData,
)
from opentrons.protocol_engine.state.labware import LabwareStore
from opentrons.protocol_engine.state.pipettes import PipetteStore
from opentrons.protocol_engine.state.geometry import GeometryStore
from opentrons.protocol_engine.state.motion import MotionStore
@pytest.fixture
def mock_labware_store() -> MagicMock:
"""Get a mock in the shape of a LabwareStore."""
return MagicMock(spec=LabwareStore)
@pytest.fixture
def mock_pipette_store() -> MagicMock:
"""Get a mock in the shape of a PipetteStore."""
return MagicMock(spec=PipetteStore)
@pytest.fixture
def mock_geometry_store() -> MagicMock:
"""Get a mock in the shape of a GeometryStore."""
return MagicMock(spec=GeometryStore)
@pytest.fixture
def motion_store(
mock_labware_store: MagicMock,
mock_pipette_store: MagicMock,
mock_geometry_store: MagicMock,
) -> MotionStore:
"""Get a MotionStore with its dependecies mocked out."""
return MotionStore(
labware_store=mock_labware_store,
pipette_store=mock_pipette_store,
geometry_store=mock_geometry_store,
)
def mock_location_data(
motion_store: MotionStore,
data: Optional[LocationData]
) -> None:
"""Insert mock location data into the store."""
motion_store.state._current_location = data
assert motion_store.state.get_current_location_data() == data
def test_get_pipette_location_with_no_current_location(
mock_pipette_store: MagicMock,
motion_store: MotionStore,
) -> None:
"""It should return mount and critical_point=None if no location."""
mock_pipette_store.state.get_pipette_data_by_id.return_value = PipetteData(
mount=MountType.LEFT,
pipette_name="p300_single"
)
result = motion_store.state.get_pipette_location("pipette-id")
mock_pipette_store.state.get_pipette_data_by_id.assert_called_with(
"pipette-id"
)
assert result == PipetteLocationData(
mount=MountType.LEFT,
critical_point=None
)
def test_get_pipette_location_with_current_location_with_quirks(
mock_pipette_store: MagicMock,
mock_labware_store: MagicMock,
motion_store: MotionStore
) -> None:
"""It should return cp=XY_CENTER if location labware has center quirk."""
mock_location_data(
motion_store,
LocationData(
pipette_id="pipette-id",
labware_id="reservoir-id",
well_name="A1"
),
)
mock_labware_store.state.get_labware_has_quirk.return_value = True
mock_pipette_store.state.get_pipette_data_by_id.return_value = PipetteData(
mount=MountType.RIGHT,
pipette_name="p300_single"
)
result = motion_store.state.get_pipette_location("pipette-id")
mock_labware_store.state.get_labware_has_quirk.assert_called_with(
"reservoir-id",
"centerMultichannelOnWells",
)
assert result == PipetteLocationData(
mount=MountType.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
)
def test_get_pipette_location_with_current_location_different_pipette(
mock_pipette_store: MagicMock,
mock_labware_store: MagicMock,
motion_store: MotionStore
) -> None:
"""It should return mount and cp=None if location used other pipette."""
mock_location_data(
motion_store,
LocationData(
pipette_id="other-pipette-id",
labware_id="reservoir-id",
well_name="A1"
),
)
mock_labware_store.state.get_labware_has_quirk.return_value = False
mock_pipette_store.state.get_pipette_data_by_id.return_value = PipetteData(
mount=MountType.LEFT,
pipette_name="p300_single"
)
result = motion_store.state.get_pipette_location("pipette-id")
assert result == PipetteLocationData(
mount=MountType.LEFT,
critical_point=None,
)
@dataclass(frozen=True)
class WaypointSpec:
"""Spec data for testing the get_movement_waypoints selector."""
name: str
expected_move_type: MoveType
pipette_id: str = "pipette-id"
labware_id: str = "labware-id"
well_name: str = "A1"
origin: Point = field(default_factory=lambda: Point(1, 2, 3))
dest: Point = field(default_factory=lambda: Point(4, 5, 6))
origin_cp: Optional[CriticalPoint] = None
location: Optional[LocationData] = None
expected_dest_cp: Optional[CriticalPoint] = None
has_center_multichannel_quirk: bool = False
labware_z: Optional[float] = None
all_labware_z: Optional[float] = None
max_travel_z: float = 50
@pytest.mark.parametrize("spec", [
WaypointSpec(
name="General arc if moving from unknown location",
all_labware_z=20,
expected_move_type=MoveType.GENERAL_ARC,
),
WaypointSpec(
name="General arc if moving from other labware",
location=LocationData("pipette-id", "other-labware-id", "A1"),
all_labware_z=20,
expected_move_type=MoveType.GENERAL_ARC,
),
WaypointSpec(
name="In-labware arc if moving to same labware",
location=LocationData("pipette-id", "labware-id", "B2"),
labware_z=10,
expected_move_type=MoveType.IN_LABWARE_ARC,
),
WaypointSpec(
name="General arc if moving to same labware with different pipette",
location=LocationData("other-pipette-id", "labware-id", "A1"),
all_labware_z=20,
expected_move_type=MoveType.GENERAL_ARC,
),
WaypointSpec(
name="Direct movement from well to same well",
location=LocationData("pipette-id", "labware-id", "A1"),
labware_z=10,
expected_move_type=MoveType.DIRECT,
),
WaypointSpec(
name="General arc with XY_CENTER destination CP",
has_center_multichannel_quirk=True,
all_labware_z=20,
expected_move_type=MoveType.GENERAL_ARC,
expected_dest_cp=CriticalPoint.XY_CENTER,
)
])
def test_get_movement_waypoints(
mock_labware_store: MagicMock,
mock_geometry_store: MagicMock,
motion_store: MotionStore,
spec: WaypointSpec
) -> None:
"""It should calculate the correct set of waypoints for a move."""
mock_labware_store.state.get_labware_has_quirk.return_value = \
spec.has_center_multichannel_quirk
if spec.labware_z is not None:
min_travel_z = spec.labware_z
mock_geometry_store.state.get_labware_highest_z.return_value = \
spec.labware_z
elif spec.all_labware_z is not None:
min_travel_z = spec.all_labware_z
mock_geometry_store.state.get_all_labware_highest_z.return_value = \
spec.all_labware_z
else:
assert False, "One of spec.labware_z or all_labware_z must be defined."
mock_geometry_store.state.get_well_position.return_value = spec.dest
mock_location_data(motion_store, spec.location)
result = motion_store.state.get_movement_waypoints(
pipette_id=spec.pipette_id,
labware_id=spec.labware_id,
well_name=spec.well_name,
origin=spec.origin,
origin_cp=spec.origin_cp,
max_travel_z=spec.max_travel_z,
)
expected = get_waypoints(
move_type=spec.expected_move_type,
origin=spec.origin,
origin_cp=spec.origin_cp,
max_travel_z=spec.max_travel_z,
min_travel_z=min_travel_z,
dest=spec.dest,
dest_cp=spec.expected_dest_cp,
xy_waypoints=[],
)
mock_labware_store.state.get_labware_has_quirk.assert_called_with(
spec.labware_id,
"centerMultichannelOnWells"
)
mock_geometry_store.state.get_well_position.assert_called_with(
spec.labware_id,
spec.well_name,
)
if spec.labware_z is not None:
mock_geometry_store.state.get_labware_highest_z.assert_called_with(
spec.labware_id
)
assert result == expected
def test_get_movement_waypoints_raises(
mock_geometry_store: MagicMock,
motion_store: MotionStore,
) -> None:
"""It should raise FailedToPlanMoveError if get_waypoints raises."""
mock_geometry_store.state.get_well_position.return_value = Point(4, 5, 6)
mock_location_data(motion_store, None)
with pytest.raises(errors.FailedToPlanMoveError, match="out of bounds"):
motion_store.state.get_movement_waypoints(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="A1",
origin=Point(1, 2, 3),
origin_cp=None,
# this max_travel_z is too low and will induce failure
max_travel_z=1,
)
| StarcoderdataPython |
5191874 | <gh_stars>1-10
from graphviz import Digraph
import pickle
def plot(genotype, filename):
g = Digraph(
format='pdf',
edge_attr=dict(fontsize='20', fontname="times"),
node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
engine='dot'
)
g.body.extend(['rankdir=LR'])
g.node("c_{k-2}", fillcolor='darkseagreen2')
g.node("c_{k-1}", fillcolor='darkseagreen2')
assert len(genotype) % 2 == 0
steps = len(genotype) // 2
for i in range(steps):
g.node(str(i), fillcolor='lightblue')
for i in range(steps):
for k in [2*i, 2*i + 1]:
op, j = genotype[k]
if j == 0:
u = "c_{k-2}"
elif j == 1:
u = "c_{k-1}"
else:
u = str(j-2)
v = str(i)
g.edge(u, v, label=op, fillcolor="gray")
g.node("c_{k}", fillcolor='palegoldenrod')
for i in range(steps):
g.edge(str(i), "c_{k}", fillcolor="gray")
g.render(filename, view=True)
def load_model(file_path):
with open(file_path, 'rb') as f:
gtyp = pickle.load(f)
return gtyp
if __name__ == '__main__':
model_name = 'npubo' # options: ['npubo_type2', 'npubo_type3', 'npenas_type3']
# NPUBO-Type3
# model_path = '/home/albert_wei/Desktop/nas_models/titanxp_2/da6e5c2b7f6c8f09435559d707add7a6715b9f578743610c363bb69f9e8c8dd5.pkl'
# NPUBO - Type2
# model_path = '/home/albert_wei/Desktop/nas_models/liang_titan_V/97de0acd0ab4b320fb2b774118527357dac65142c1fee8a47595c7153db5b705.pkl'
# NPENAS-Type3
# model_path = '/home/albert_wei/Desktop/nas_models/2080ti/models/a945af02dc233989a6192d7e3462ea8e9c2a49764a55854e816b95a830ec9875.pkl'
model_path = '/home/albert_wei/Desktop/NPENAS_materials/论文资料/results_models/npubo_150_seed_4/4f4ab32092cd83f30d3ac249e88c0828a870df8db86282e651eaef4d0b1f397a.pkl'
genotype = load_model(model_path)
plot(genotype.normal, f"normal_{model_name}")
plot(genotype.reduce, f"reduction_{model_name}")
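# --- Hedged note (added by the editor, not part of the original script) ---
# plot() expects a DARTS-style cell description: a flat list of (op_name, input_index)
# pairs, two pairs per intermediate node, where index 0 is c_{k-2}, index 1 is c_{k-1}
# and index j >= 2 refers to intermediate node j - 2. A made-up two-node example:
#
#   toy_cell = [('sep_conv_3x3', 0), ('skip_connect', 1),
#               ('dil_conv_3x3', 2), ('max_pool_3x3', 0)]
#   plot(toy_cell, 'toy_cell')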
| StarcoderdataPython |
1952089 | <reponame>jgordo04/housinginsights_temp
##########################################################################
## Summary
##########################################################################
'''
Loads our flat file data into the Postgres database
'''
import logging
import json
import pandas
import sys, os
if __name__ == '__main__':
sys.path.append(os.path.abspath('../../'))
from housinginsights.ingestion import ManifestReader
#configuration
#See /logs/example-logging.py for usage examples
logging_filename = "../../logs/ingestion.log"
logging_path = os.path.abspath("../../logs")
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler()) #Pushes everything from the logger to the command line output as well.
# Function to clean column names for the sql_name JSON field.
def sql_name_clean(name):
for item in ["-"," ","."]:
if item in name:
name = name.replace(item, "_")
return name.lower()
def pandas_to_sql_data_type(pandas_type_string):
mapping = {
'object':'text',
'int64':'integer',
'float64':'decimal',
'datetime64':'timestamp'
}
try:
sql_type = mapping[pandas_type_string]
except KeyError:
sql_type = 'text'
return sql_type
def make_draft_json(filename, tablename, encoding): #use the name from constants as default
# Reads the initial CSV and sets up the basic output structure.
dataframe_file = pandas.read_csv(filename, encoding=encoding)
dataframe_iterator = dataframe_file.columns
output = {
tablename: {
"cleaner": tablename + "{}".format("_cleaner"),
"replace_table": True,
"fields": []
}
}
# The meat of the JSON data.
for field in dataframe_iterator:
pandas_type = str(dataframe_file[field].dtypes)
sql_type = pandas_to_sql_data_type(pandas_type)
data = {
"type": sql_type,
"source_name": field,
"sql_name": sql_name_clean(field),
"display_name": sql_name_clean(field),
"display_text":""
}
output[tablename]["fields"].append(data)
output_path = os.path.join(logging_path,(tablename+".json"))
with open(output_path, "w") as results:
json.dump(output, results, sort_keys=True, indent=4)
print(tablename + " JSON table file created.")
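# --- Hedged example (added by the editor, not part of the original script) ---
# Shape of the JSON draft that make_draft_json() writes for a table named "crime",
# assuming a CSV with a text column "Ward Name" and an integer column "count"
# (the actual file is written with sorted keys):
#
#   {
#       "crime": {
#           "cleaner": "crime_cleaner",
#           "replace_table": true,
#           "fields": [
#               {"type": "text", "source_name": "Ward Name", "sql_name": "ward_name",
#                "display_name": "ward_name", "display_text": ""},
#               {"type": "integer", "source_name": "count", "sql_name": "count",
#                "display_name": "count", "display_text": ""}
#           ]
#       }
#   }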
def make_all_json(manifest_path):
completed_tables = {}
manifest = ManifestReader(path=manifest_path)
if manifest.has_unique_ids():
for manifest_row in manifest:
tablename = manifest_row['destination_table']
encoding = manifest_row.get('encoding')
if tablename not in completed_tables:
if manifest_row['include_flag'] == 'use':
filepath = os.path.abspath(
os.path.join(manifest_row['local_folder'],
manifest_row['filepath']
))
make_draft_json(filepath, tablename, encoding)
completed_tables[tablename] = filepath
print("completed {}".format(tablename))
else:
print("skipping {}, already loaded".format(manifest_row['unique_data_id']))
print("Finished! Used these files to generate json:")
print(completed_tables)
if __name__ == '__main__':
if 'single' in sys.argv:
# Edit these values before running!
csv_filename = os.path.abspath("/Users/williammcmonagle/GitHub/housing-insights/data/raw/acs/B25058_median_rent_by_tract/2009_5year/ACS_09_5YR_B25058_with_ann.csv")
table_name = "foobar"
encoding = "latin1" #only used for opening w/ Pandas. Try utf-8 if latin1 doesn't work. Put the successful value into manifest.csv
make_draft_json(csv_filename, table_name, encoding)
if 'multi' in sys.argv:
manifest_path = os.path.abspath('../../scripts/manifest.csv')
make_all_json(manifest_path)
| StarcoderdataPython |
4874665 | <reponame>linxiaohui/yaproxy
# -*- coding: utf-8 -*-
def create_dns_proxy(port=53, *, ip='0.0.0.0'):
    raise NotImplementedError("Not implemented")
    return DNSServer(ip, port)
def create_http_proxy(port=65432, *, ip='0.0.0.0'):
    raise NotImplementedError("Not implemented")
    return HTTPProxyServer(ip, port)
def create_https_proxy(port=65433, *, ip='0.0.0.0'):
    raise NotImplementedError("Not implemented")
    return HTTPSProxyServer(ip, port)
def create_reverse_http(port=65434, *, ip='0.0.0.0', target_ip, target_port):
    raise NotImplementedError("Not implemented")
def create_reverse_tcp(port=65435, *, ip='0.0.0.0', target_ip, target_port):
    raise NotImplementedError("Not implemented")
| StarcoderdataPython |
9770409 | <reponame>cclauss/hubspot-api-python
from hubspot import HubSpot
from hubspot.communication_preferences import DefinitionApi, StatusApi
def test_is_discoverable():
apis = HubSpot().communication_preferences
assert isinstance(apis.definition_api, DefinitionApi)
assert isinstance(apis.status_api, StatusApi)
| StarcoderdataPython |
6582515 | # Copyright (c) 2019 Alibaba Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from easy_rl.models.model_base import Model
import copy
from easy_rl.utils import *
class MarwilModel(Model):
"""implemention of Exponentially Weighted Imitation Learning for
Batched Historical Data[<NAME> et al., 2018](https://papers.nips.cc/paper/
7866-exponentially-weighted-imitation-learning-for-batched-historical-data.pdf)
"""
def __init__(self, obs_space, action_space, scope="marwil_model",
**kwargs):
_default_config = {
# network config
# specify the parameters of network for each input state.
# a default config will be used if empty
# * it is recommended to overrider the `encode_states` function and then implement a custom network structure.
"network_spec": {},
# model config
# coefficient for emphasis on policy improvement
# set to 0 and the algorithm degenerates to ordinary imitation learning
"beta": 1.0,
# normalization factor for advantage
"norm_c": 1.0,
# coefficient for moving average of normalization factor
"gamma": 1e-8,
# training config
# coefficient for loss of value function
"vf_coef": 0.5,
# parameter for grad clipping
"global_norm_clip": 40,
# initialization of learning rate
"lr_init": 0.001,
}
self.config = copy.deepcopy(_default_config)
self.config.update(kwargs.get('model_config', {}))
super(MarwilModel, self).__init__(
obs_space=obs_space,
action_space=action_space,
scope=scope,
**kwargs)
@property
def extra_act_fetches(self):
"""return the sample_action with action logits and state_value.
"""
return {"value_preds": self.value_preds}
@property
def learn_feed(self):
"""return the dict of placeholder for training.
"""
return {
"obs": self.obs_ph,
"actions": self.actions_ph,
"value_target": self.value_target_ph
}
def _build_loss(self, value_preds, actions, value_target):
"""implement specific loss function according to the optimization target
Arguments:
value_preds: estimator of state value from the behavior policy
actions: action taken by the behavior policy.
value_target: target for value function.
"""
# value loss
loss_v = .5 * tf.reduce_mean(tf.nn.l2_loss(value_target - value_preds))
log_target_prob = self.action_dist.log_p(action=actions)
beta = self.config.get('beta', 1.0)
advantage = tf.stop_gradient(
(value_target - value_preds) / self.norm_c)
        # update norm_c with a moving average (use tf.assign_add so the variable is actually updated)
        gamma = self.config.get('gamma', 1e-8)
        update_c = tf.assign_add(self.norm_c, gamma * tf.reduce_mean(advantage))
        # maximize E[exp(beta * A(s,a)) * log(pi(s,a))]
with tf.control_dependencies([update_c]):
loss_p = -tf.reduce_mean(
tf.exp(beta * advantage) * log_target_prob)
vf_coef = self.config.get('vf_coef', 0.5)
# total loss
loss_op = loss_p + vf_coef * loss_v
return loss_op
def _build_train(self, loss, optimizer, vars=None, global_step=None):
grads_and_vars = optimizer.compute_gradients(loss=loss, var_list=vars)
grads_and_vars = [(grad, var) for grad, var in grads_and_vars
if grad is not None]
# apply grad clipping
grads, vars = zip(*grads_and_vars)
clipped_grads, _ = tf.clip_by_global_norm(
grads, clip_norm=self.config.get('global_norm_clip', 40))
grads_and_vars = list(zip(clipped_grads, vars))
train_op = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
return train_op
def _build_graph(self, scope, **kwargs):
with tf.variable_scope(name_or_scope=scope, reuse=tf.AUTO_REUSE):
# encode the input obs
self.obs_embedding, self.value_preds = self._encode_obs(
input_obs=self.obs_ph, scope="encode_obs")
self.model_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name + "/encode_obs")
self.norm_c = tf.get_variable(
name="normalization_coefficient",
dtype=tf.float32,
initializer=self.config.get('norm_c', 1.0))
# build output action_op
self.action_op = self._actions_distribution(self.obs_embedding)
# build loss_op
self.loss_op = self._build_loss(
value_preds=self.value_preds,
actions=self.actions_ph,
value_target=self.value_target_ph)
# get or create global_step
self.global_step = tf.train.get_or_create_global_step()
# build train_op
            init_lr = self.config.get('lr_init', 1e-3)
lr_strategy_spec = self.config.get('lr_strategy_spec', {})
# apply different decay strategy of learning rate
lr = learning_rate_utils.LearningRateStrategy(
init_lr=init_lr, strategy_spec=lr_strategy_spec)(self.global_step)
self.summary_ops["train"].append(
tf.summary.scalar("learning_rate", lr))
opt = tf.train.AdamOptimizer(learning_rate=lr)
self.train_op = self._build_train(
loss=self.loss_op, optimizer=opt, global_step=self.global_step)
def _encode_obs(self, input_obs, scope="encode_obs"):
"""build network to encode input feature.the desired output
consists of two parts: action_logits and value_estimator
Arguments:
input_obs: the (list, dict)[of] input tensor of state.
scope: the name of variable scope
"""
with tf.variable_scope(name_or_scope=scope):
# override the function `build_graph` to implement the specific network manually
if len(self.config.get('network_spec')) == 0:
# must set action_dim to use default network
action_dim = self.config.get('action_dim')
if isinstance(input_obs, dict):
is_multi_input = len(input_obs) > 1
is_image_input = any(
[(len(ob.shape) > 2) for _, ob in input_obs.items()])
elif isinstance(input_obs, list):
is_multi_input = len(input_obs) > 1
is_image_input = any(
[(len(ob.shape) > 2) for ob in input_obs])
else:
is_multi_input = False
is_image_input = len(input_obs.shape) > 2
if is_image_input and is_multi_input:
raise ValueError(
"the default convolution network accepts only one input but {} given"
.format(len(input_obs)))
if is_image_input and not is_multi_input:
obs_embedding = layer_utils.DefaultConvNetwork(
action_dim=action_dim)(input_obs)
else:
obs_embedding = layer_utils.DefaultFCNetwork(
action_dim=action_dim)(input_obs)
else:
# build computation graph from configuration
assert not isinstance(input_obs, list), "list type is forbidden, key for each channel of input_" \
"obs should be supplied to build graph from configuration" \
"with multi-channel data"
obs_embedding = layer_utils.build_model(
inputs=input_obs,
network_spec=self.config['network_spec'],
is_training_ph=self.is_training_ph)
return obs_embedding
def _build_ph_op(self, obs_space, action_space):
super(MarwilModel, self)._build_ph_op(
obs_space=obs_space, action_space=action_space)
# add extra placeholder of return for training
self.value_target_ph = tf.placeholder(dtype=tf.float32, shape=[None])
| StarcoderdataPython |
138066 | import sys
from flask import Flask, render_template
from flask_flatpages import FlatPages
from flask_frozen import Freezer
app = Flask(__name__)
app.config.from_pyfile('mysettings.cfg')
pages = FlatPages(app)
freezer = Freezer(app)
@app.route("/")
def index():
return render_template('index.html', navigation=True)
@app.route("/blog")
def articles():
return render_template('index.html', navigation=False)
@app.route("/coaching")
def private():
return render_template('index.html', navigation=False)
if '__main__' == __name__:
"""Allow building of frozen app through the command line."""
if 1 < len(sys.argv) and "build" == sys.argv[1]:
freezer.freeze()
else:
app.run(port=5000)
| StarcoderdataPython |
5102675 | import unittest
from chapter_01.src.answer_09 import string_rotation
class TestStringRotation(unittest.TestCase):
def test_empty_string(self):
self.assertIs(string_rotation("", ""), True)
def test_equal_string(self):
self.assertIs(string_rotation("abcdef", "abcdef"), True)
def test_spaces_symbols(self):
self.assertIs(string_rotation("d0=\*", "0=\*d"), True)
def test_order(self):
self.assertIs(string_rotation("waterbottle", "erbottlewat"), True)
self.assertIs(string_rotation("waterbottle", "bottlewater"), True)
self.assertIs(string_rotation("waterbottle", "elttobretaw"), False)
self.assertIs(string_rotation("waterbottle", "watrebotlte"), False)
def test_diff_size(self):
self.assertIs(string_rotation("asdfg", "sdfg"), False)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
34567 | import numpy as np
import cv2
import math
import datetime
from datetime import timedelta as Delta
h=300
w=300
cap = cv2.VideoCapture(0)
SUN_LOC=(200,70)
SUN_RSIZE=20
ORBITAL_R=10
def Orbiral(frame,Centerloc,orbit_r,size_r,phi,color):
x_orbit=Centerloc[0]+int(orbit_r*np.cos(np.deg2rad(phi)))
y_orbit=Centerloc[1]+int(orbit_r*np.sin(np.deg2rad(phi)))
#print(f"x:{x_orbit} y:{y_orbit} phi:{int(orbitphi)}")
frame= cv2.circle(frame,(x_orbit,y_orbit),size_r, color, -1)
return frame
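# --- Worked example (added by the editor, not part of the original script) ---
# With Centerloc = (200, 70), orbit_r = 30 and phi = 0 the satellite is drawn at
# (200 + 30*cos(0), 70 + 30*sin(0)) = (230, 70); with phi = 90 it lands at (200, 100).
# Because image y coordinates grow downward, increasing phi moves the marker clockwise
# on screen even though the math uses the usual counter-clockwise convention.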
ORBITAL_RSIZE=3
ORBITAL_PHI=0
ORBITAL_DPHI=1 #0.5deg delta
dr=(SUN_RSIZE+ORBITAL_R) #*(orbitdphi) #*np.pi/180)
orbitloc=(SUN_LOC[0],SUN_LOC[1]+SUN_RSIZE+ORBITAL_R)
satsn=0
#2021/05/06 Window priority
print(cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,0)
Start_Time=datetime.datetime.today()
Delta_T=60
#Sat_Time_Space=Delta(minutes=1)
Sat_Time_Space=Delta(seconds=Delta_T)
Sat_dic={}
Poff=180
Roff=0
#mins=time.minute
while True:
_, frame = cap.read()
frame_time=datetime.datetime.today()
if frame_time >= Sat_Time_Space+Start_Time:
Start_Time=frame_time
dr=(SUN_RSIZE+ORBITAL_R)
Sat_dic[satsn]={"Time":Start_Time,"Phi_Offset":Poff,"Sat_Radius":dr}
print("New Sat added")
print(Sat_dic[satsn])
Poff-=30
satsn+=1
if Poff <=-180:
Poff=180
ORBITAL_R+=5
print(frame_time)
#frame = cv2.resize(frame,(h,w))
if(frame is None):
continue
frame = cv2.circle(frame,SUN_LOC,SUN_RSIZE, (0,0,250), -1)
#Satn to frame
# frame=cv2.putText(frame,str(satsn),(SUN_LOC[0]-15,SUN_LOC[1]+15),
# cv2.FONT_HERSHEY_PLAIN,3,(255,255,255))
if satsn:
for n,sat in Sat_dic.items():
frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
#for offphi in range(-180,180,satsn):
#if n==satsn:
# for R_OFF, fadeSeconds in zip(np.linspace(ORBITAL_RSIZE,1,ORBITAL_RSIZE),np.linspace(0,Delta//2,int(ORBITAL_RSIZE))):
# if frame_time >= Sat_Time_Space+fadeSeconds:
# print("Fade:",R_OFF)
# frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE-int(R_OFF),ORBITAL_PHI-sat["Phi_Offset"],(255,0,255))
# else:
#frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
ORBITAL_PHI+=ORBITAL_DPHI
if ORBITAL_PHI>=360:
ORBITAL_PHI=0
#Line
#img = cv2.line(frame,logoloc,orbitloc,(255,0,0),5)
cv2.imshow('Frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
9700563 | <gh_stars>0
import tkinter as tk
import xicd
def main():
root = tk.Tk()
app = xicd.App(root)
app.loop()
if __name__ == "__main__":
main()
| StarcoderdataPython |
9752623 | from distutils.core import setup
setup(
name='Markov',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['markov'],
url='http://pypi.python.org/pypi/Markov/',
license='LICENSE.txt',
description='Markov',
long_description=open('README.rst').read(),
)
| StarcoderdataPython |
3221406 | <filename>examples/sample_model/test_model.py
import os
import sys
import numpy as np
from keras.models import model_from_json
from constants import CHARS, MAX_TOKENS
WORKING_DIR = os.getcwd()
def clean_data_encoded(sentence, max_tokens=MAX_TOKENS, sup_chars=CHARS):
sentence = str(sentence).lower()
x = np.zeros((max_tokens, ), dtype=np.float32)
for i, c in enumerate(sentence):
if i >= max_tokens:
break
try:
x[i] = sup_chars.index(c)
except ValueError:
pass
return x
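# --- Hedged example (added by the editor, not part of the original script) ---
# clean_data_encoded maps each character of a lowercased review to its index in the
# CHARS vocabulary and zero-pads/truncates to MAX_TOKENS. For instance, if CHARS
# started with "abcd" then "bad" would encode as [1.0, 0.0, 3.0, 0.0, ...];
# characters not found in CHARS are silently left as 0.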
def depreserve_model(path=WORKING_DIR):
with open(os.path.join(path, 'preserved_model/model.json'), 'r') as json_file:
model = model_from_json(json_file.read())
model.load_weights(os.path.join(path, 'preserved_model/model.h5'))
return model
if __name__ == '__main__':
print("\nLoading the Keras model ...")
model = depreserve_model()
while True:
print("\nEnter an Amazon review [:q for Quit]")
        sentence = raw_input() if sys.version_info[0] < 3 else input()
if sentence == ':q':
break
model_input = clean_data_encoded(sentence)
model_prediction = model.predict(model_input.reshape(1, 50))
print("The model predicted ...")
print(model_prediction)
| StarcoderdataPython |
11275479 | <gh_stars>1-10
from dal import autocomplete
from django import forms
from dj_waff.choice_with_other import ChoiceWithOtherField
from .models import DocumentTemplate
SET_OF_CHOICES = [
('choice1', 'choice1111'),
('choice2', 'choice2222'),
# ('choice3', lambda b: DocumentTemplate.objects.get(pk=1)),
]
class MyCustomForm(forms.Form):
other_form_field = forms.CharField(required=False)
document_template = ChoiceWithOtherField(
choices=SET_OF_CHOICES,
other_form_field=forms.ModelChoiceField(
queryset=DocumentTemplate.objects.all(),
required=True,
# widget=autocomplete.ModelSelect2(url='document-template-autocomplete', ),
)
)
document_template2 = ChoiceWithOtherField(
choices=SET_OF_CHOICES,
first_is_preselected=True,
other_form_field=forms.ModelChoiceField(
queryset=DocumentTemplate.objects.all(),
required=True,
widget=autocomplete.ModelSelect2(url='document-template-autocomplete', ),
)
)
document_template3 = ChoiceWithOtherField(
choices=SET_OF_CHOICES,
initial=SET_OF_CHOICES[1][0],
other_form_field=forms.CharField(
widget=forms.Textarea()
)
)
document_template4 = ChoiceWithOtherField(
choices=SET_OF_CHOICES,
initial=SET_OF_CHOICES[1][0],
other_form_field=forms.CharField(
# widget=forms.IntegerField()
)
)
document_template5 = ChoiceWithOtherField(
choices=SET_OF_CHOICES,
initial=SET_OF_CHOICES[1][0],
other_form_field=forms.CharField(
# widget=forms.TimeField()
)
)
maria = forms.ModelChoiceField(
queryset=DocumentTemplate.objects.all(),
required=True,
widget=autocomplete.ModelSelect2(url='document-template-autocomplete', ),
)
teste = forms.ChoiceField(choices=SET_OF_CHOICES, initial=SET_OF_CHOICES[0])
| StarcoderdataPython |
1843757 | <reponame>joergsimon/gesture-analysis
from analysis.preparation import labelMatrixToArray
from analysis.preparation import normalizeZeroClassArray
from visualise.trace_features import trace_feature_origin
from visualise.confusion_matrix import plot_confusion_matrix
import numpy as np
import sklearn
import sklearn.linear_model
import sklearn.preprocessing as pp
import sklearn.svm as svm
import sklearn.feature_selection as fs
from analysis.classification import fit_classifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Interesting References:
# RFECV:
# <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Gene selection for
# cancer classification using support vector machines. Mach. Learn., 46(1-3), 389-422.
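# --- Hedged overview (added by the editor, not part of the original module) ---
# feature_selection() below compares four selectors on the same training split:
# a VarianceThreshold filter, RFE with a Lasso estimator, SelectKBest with chi2,
# and RFECV with a linear SVC; after each one it refits a classifier on the
# selected columns and prints a classification report plus a confusion matrix.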
def feature_selection(train_data, train_labels, const):
train_labels_arr, exclude = labelMatrixToArray(train_labels, const.label_threshold)
train_data_clean = train_data.drop(exclude)
train_labels_arr, train_data_clean, _ = normalizeZeroClassArray(train_labels_arr, train_data_clean)
print "num features before selection: {}".format(train_data_clean.columns.size)
feature_index = variance_threshold(train_data_clean)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:,feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after variance threshold".format(clf_name))
print(classification_report(train_labels_arr,prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index,const)
feature_index = rfe(train_data_clean,train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFE".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = k_best_chi2(train_data_clean, train_labels_arr, 700)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after Chi2".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
                          title="Confusion Matrix for {} after Chi2".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = rfe_cv_f1(train_data_clean, train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFECV".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
                          title="Confusion Matrix for {} after RFECV".format(clf_name))
trace_feature_origin(feature_index, const)
plt.show()
def get_values(data, feature_index, needs_scaling):
if needs_scaling:
values = data.values[:, feature_index]
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)
return values
else:
return data.values[:, feature_index]
def variance_threshold(train_data):
# feature selection using VarianceThreshold filter
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
fit = sel.fit(train_data.values)
col_index = fit.get_support(indices=True)
print "num features selected by VarianceThreshold: {}".format(len(col_index))
return col_index
def rfe(train_data, train_labels):
    # important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
    print "Recursively eliminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values) # pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels))
print "run rfecv.."
rfecv = fs.RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(values, np.array(train_labels))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFE(CV)/Lasso: {}".format(len(col_index))
return col_index
def rfe_cv_f1(train_data, train_labels):
    # important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
    print "Recursively eliminate features: "
svc = svm.SVC(kernel="linear") #sklearn.linear_model.Lasso(alpha = 0.1)
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)#pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels).astype(int))
print "run rfecv.."
rfecv = fs.RFECV(estimator=svc, step=0.05, verbose=2)
rfecv.fit(values, np.array(train_labels).astype(int))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFECV/SVR: {}".format(len(col_index))
return col_index
def k_best_chi2(train_data, train_labels, k):
values = train_data.values
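    # chi2 scoring only accepts non-negative feature values, hence the shift below by the global minimum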
if values.min() < 0:
values = values + abs(values.min())
kb = fs.SelectKBest(fs.chi2, k=k)
kb.fit(values, np.array(train_labels))
col_index = kb.get_support(indices=True)
print "num features selected by K-Best using chi2: {}".format(len(col_index))
return col_index | StarcoderdataPython |
3324913 | """Tests for the programming application."""
| StarcoderdataPython |
388104 | #!/usr/bin/env python
# coding=utf-8
'''
Author: <NAME> / Yulv
Email: <EMAIL>
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: <NAME>
LastEditTime: 2022-03-23 01:00:36
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/ExponentialBarycenter.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: <NAME> (3 Oct 2018)
# Copyright (c) 2006-2017, <NAME>, <NAME>, <NAME>
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of codes to compare the computing of the different types of means
on Lie groups. These codes can be used to reproduce the experiments illustrated in the
video developed for the MICCAI Educational challenge 2014, available at:
url of the video.
:Authors:
`<NAME> <website>`
`<NAME> <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
References
----------
(1) Defining a mean on Lie group.
<NAME>. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.util import *
EPS = 1e-5
def sigma2(m,tabr,tabw):
    """
    Weighted variance of a set of rotations around a candidate mean.
    m: candidate mean rotation (rotation vector).
    tabr: Nx3 array of rotation vectors.
    tabw: array of N weights.
    Returns the weighted sum of squared norms of the left group log of each
    rotation taken at m (variance in the group framework).
    """
siz = tabr.shape[0]
if siz < 2:
print('Error: Calculating sigma requires at least 2 points')
s=0;
for i in range(0,siz):
s = s + tabw[i]*numpy.linalg.norm(logRotL(m,tabr[i,:]))**2;
return s
def expRotL(r,a):
    """
    Group exponential at the rotation r of the tangent vector a (SO(3), left formulation);
    group geodesics are left- and right-invariant.
    Computes R*Exp(DL(R^-1)*a), with rotations parameterised as rotation vectors.
    """
return rotVect(numpy.dot(rotMat(r),rotMat(numpy.dot(numpy.linalg.inv(jRotL(r)),a))))
#R*Exp(DL(R^-1)*a)
def logRotL(r,rr):
    """
    Group logarithm at the rotation r of the rotation rr (SO(3), left formulation);
    inverse of expRotL.
    """
return numpy.dot(jRotL(r),rotVect(numpy.dot(rotMat(-r),rotMat(rr))));
def rotMean(tabr,tabw):
    """
    Preliminaries: computation of the mean for rotations.
    Weighted group mean of a set of rotations, obtained by the usual fixed-point
    iteration on the group exp/log maps.
    tabr: Nx3 array of rotation vectors, tabw: array of N weights.
    """
siz = tabr.shape[0]
if siz < 2:
print('Error: Calculating mean requires at least 2 points')
m = tabr[0,:]
    # mbis = m(t) and m = m(t+1)
    mbis = m
    aux = numpy.zeros(3)
for i in range(0,siz):
aux=aux+tabw[i]*logRotL(mbis,tabr[i,:])
m = expRotL(mbis,aux);
# BRACKETS!! in while
while (numpy.dot(numpy.linalg.norm(logRotL(mbis,m)),numpy.linalg.norm(logRotL(mbis,m))) > EPS*sigma2(mbis,tabr,tabw)):
mbis = m
aux = numpy.zeros(3)
for i in range(0,siz):
aux=aux+tabw[i]*logRotL(mbis,tabr[i,:])
m = expRotL(mbis,aux)
return m
def matDeExp(v):
    """
    Matrix associated with the differential of the exponential for the rotation
    part r = v[0:3] of a regularised SE(3) element; used below for the
    closed-form translation part of the barycenter.
    Group exponential barycenter on SE(3):
    - the previous mean for the rotation part,
    - closed form from article of <NAME> "Exponential Barycenters of...",(2012)
    """
v = regRot(v);
r = v[0:3];
theta = numpy.linalg.norm(r);
Sr = skew(r);
if (theta==0):
M = numpy.eye(3)
elif (theta<EPS):
M = numpy.eye(3) + (1.0/6.0-theta**3/120.0)*numpy.dot(Sr,Sr)+(1.0/2.0-theta**2/24.0)*Sr;
else:
M = numpy.eye(3) + theta**(-2)*(1.0-numpy.sin(theta)/theta)*numpy.dot(Sr,Sr)+theta**(-2)*(1.0-numpy.cos(theta))*Sr;
return M
def expBar(tabf,tabw):
    """
    Exponential barycenter of weighted SE(3) elements.
    tabf: SE3 data points, Nx6 array (rotation vector in [:3], translation in [3:]).
    tabw: data point weights, Nx1 array.
    Returns a 6-vector: the rotation part is the group mean of the rotations,
    the translation part solves the closed-form linear system built from matDeExp.
    """
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating mean requires at least 2 points')
tabr=tabf[:,0:3]
tabt=tabf[:,3:6]
rmean=rotMean(tabr,tabw);
# Partie translation, p34 de expbar
M = numpy.zeros([3,3])
t = numpy.zeros(3)
for i in range(0,siz):
Maux = numpy.linalg.inv(matDeExp(rotVect(numpy.dot(rotMat(rmean),rotMat(-tabr[i,:])))));
M=M+tabw[i]*Maux;
t=t+tabw[i]*numpy.dot(numpy.dot(Maux,rotMat(-tabr[i,:])),tabt[i,:]);
m = numpy.zeros(6)
m[0:3]=rmean;
m[3:6]=numpy.linalg.lstsq(M,t)[0]
return m
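# --- Illustrative usage sketch (added; not part of the original module) ---
# Averages a few made-up SE(3) elements (rotation vector + translation as a 6-vector)
# with uniform weights. The pose values are arbitrary examples; expBar needs at
# least 2 points, and the srmg.common.util helpers are assumed to behave as above.
if __name__ == '__main__':
    example_poses = numpy.array([[0.10, 0.00, 0.00, 1.0, 0.0, 0.0],
                                 [0.00, 0.15, 0.00, 0.0, 1.0, 0.0],
                                 [0.00, 0.00, 0.12, 0.0, 0.0, 1.0]])
    example_weights = numpy.ones(3) / 3.0
    print(expBar(example_poses, example_weights))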
| StarcoderdataPython |
6580484 | #!/usr/bin/env python
import tifffile
from scipy import ndimage as ndi
from imctools import library as lib
import argparse
import warnings
import os
import numpy as np
def crop_objects(fn_stack, fn_label, outfolder, basename, extend, order=None):
"""
:param fn_stack:
:param fn_label:
:param outfolder:
:param basename:
:param scale:
:param extend:
:return:
"""
warnings.warn('''crop_objects is deprecated and will not be supported in future versions.\n
Please use the `SaveObjectCrops` module from Bodenmillergroup/ImcPluginsCP
in CellProfiler!''',
DeprecationWarning)
if order is None:
order = 'cxy'
with tifffile.TiffFile(fn_label) as tif:
labelmask = tif.asarray()
with tifffile.TiffFile(fn_stack) as tif:
stack = tif.asarray()
if order == 'xyc':
stack = np.rollaxis(stack, 2, 0)
if np.any(labelmask >0):
if len(stack.shape) == 2:
stack = stack.reshape([1]+list(stack.shape))
slices = ndi.find_objects(labelmask)
slices, labels = zip(*[(s, label) for label, s in enumerate(slices) if s is not None])
print(stack.shape)
print(labelmask.shape)
ext_slices = [lib.extend_slice_touple(sl, extend, [stack.shape[1], stack.shape[2]]) for sl in slices]
lib.save_object_stack(outfolder, basename, stack, ext_slices, labels)
else:
print('No object in image:' + fn_stack)
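# Hypothetical invocation (file names and values below are example placeholders):
#   python crop_objects.py stack.tiff cell_mask.tiff --out_folder crops --extend 5 --postfix _crop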
if __name__ == "__main__":
# Setup the command line arguments
parser = argparse.ArgumentParser(
description='Crops objects out of images.', prog='crop_objects')
parser.add_argument('image_filename', type=str,
help='The path to the image filename. If the image is a stack it needs to be CXY ordered')
parser.add_argument('object_filename', type=str, default=None,
help='Filename to the tiff that contains the object masks.')
parser.add_argument('--out_folder', type=str, default=None,
help='Folder to save the images in. Default a subfolder with the basename object_filename in the image_filename folder.')
parser.add_argument('--extend', type=int, default=0,
help='How many pixels to extend around the object.')
parser.add_argument('--basename', type=str, default=None,
help='Basename for the output image. Default: image_filename')
parser.add_argument('--postfix', type=str, default=None,
help='Postfix to append to the basename.'
)
args = parser.parse_args()
if args.basename is None:
        args.basename = os.path.splitext(os.path.split(args.image_filename)[1])[0]  # splitext removes the suffix; str.strip() would eat trailing 't'/'i'/'f' characters
if args.postfix is not None:
args.basename = args.basename + args.postfix
if args.out_folder is None:
args.out_folder = os.path.split(args.image_filename)[0]
        tmpname = os.path.splitext(os.path.split(args.object_filename)[1])[0]
args.out_folder = os.path.join(args.out_folder, tmpname)
if not(os.path.exists(args.out_folder)):
os.mkdir(args.out_folder)
crop_objects(args.image_filename, args.object_filename, args.out_folder,
args.basename, args.extend)
| StarcoderdataPython |
5178784 | <gh_stars>0
from django.urls import path
from .views import (
# HomeList,
# HomeDetail,
NewPostView,
PostDelView,
PostUpdateView,
UserPostView,
# AddCommentView
)
from . import views
#from .views import video
urlpatterns = [
# path('', HomeList.as_view(), name='home'),
path('', views.home, name="home"),
path('detail/<int:pk>/', views.post_detail, name='post-detail'),
# path('detail/<int:pk>/', HomeDetail.as_view(), name='detail'),
path('new-post/', NewPostView.as_view(), name='new-post'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDelView.as_view(), name='post-delete'),
path('user/<str:username>', UserPostView.as_view(), name='user-posts'),
# path('detail/<int:pk>/comment', AddCommentView.as_view(), name='add-comment'),
path('like/', views.like_post, name="like_post"),
path('favourites/', views.post_favourite_list, name="post_favourite_list"),
path('post/<int:pk>/favourite_post/', views.favourite_post, name="favourite_post"),
#path('how-it-works/', video)
path('how-it-works/', views.video, name="how-it-works"),
path('instructions/', views.file, name="instructions"),
]
| StarcoderdataPython |
6430725 | <filename>luogu-paint/utils.py
import json
import logging as log
from time import sleep
from random import shuffle
from requests import get, post
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
PAINT_DATA_URL = 'https://www.luogu.org/paintBoard/board'
PAINT_POST_URL = 'https://www.luogu.org/paintBoard/paint'
OFFSET_X = 0
OFFSET_Y = 0
TIMEOUT = 3
REST_TIME = 10
RETRY_TIME = 10
MAX_RETRY = 3
def parse(s):
data = filter(lambda x : x.strip() != '', s.split('\n'))
for i in xrange(len(data)):
data[i] = map(
lambda c : int(c, base = 36), filter(lambda c : c != ' ', data[i]))
return data
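# Note (added): the board dump is served as rows of base-36 digits, one character
# per pixel colour index; parse() therefore returns the board as a list of rows of ints.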
def fetch():
r = get(PAINT_DATA_URL, headers={'user-agent': USER_AGENT}, timeout=TIMEOUT)
if r.status_code != 200:
return None
return parse(r.content)
def draw(args, no_block=False):
pixel, user, userid = args
x, y, c = pixel
ok = False
for i in xrange(MAX_RETRY):
r = post(PAINT_POST_URL, headers={
'user-agent': USER_AGENT,
'cookie': user
}, data={'x': x, 'y': y, 'color': c}, timeout=TIMEOUT)
try:
ret = json.loads(r.content)
except:
ret = {'status': 404}
if r.status_code == 200 and ret['status'] == 200:
ok = True
break
log.warn(u'[%s] %s' % (userid, ret['data']))
sleep(RETRY_TIME)
if ok:
log.info('[%s] OK. (%s, %s, %s)' % (userid, x, y, c))
else:
log.error('[%s] Failed.' % userid)
if not no_block:
sleep(REST_TIME)
return userid
def slice(data, w, h):
x, y = OFFSET_X, OFFSET_Y
ret = data[x:x + w]
for i in xrange(len(ret)):
ret[i] = ret[i][y:y + h]
return ret
def diff(pre, nxt, user_provider):
xl = range(len(pre))
yl = range(len(pre[0]))
shuffle(xl)
shuffle(yl)
for x in xl:
for y in yl:
if pre[x][y] != -1 and nxt[x][y] != -1 and pre[x][y] != nxt[x][y]:
user, userid = user_provider()
yield (OFFSET_X + x, OFFSET_Y + y, nxt[x][y]), user, userid
| StarcoderdataPython |
4897028 | '''
Title : Day 1: Data Types
Domain : Tutorials
Author : <NAME>
Created : 03 April 2019
'''
i = 4
d = 4.0
s = 'HackerRank '
# Declare second integer, double, and String variables.
# Read and save an integer, double, and String to your variables.
i2 = int(input())
d2 = float(input())
s2 = input()
# Print the sum of both integer variables on a new line.
print(i+i2)
# Print the sum of the double variables on a new line.
print(d+d2)
# Concatenate and print the String variables on a new line
# The 's' variable above should be printed first.
print(s+s2)
| StarcoderdataPython |
8003071 | <reponame>tslazarova/WMCore<filename>src/python/WMQuality/Emulators/PyCondorAPI/MockPyCondorAPI.py
from __future__ import (division, print_function)
class MockPyCondorAPI(object):
"""
Version of Services/PyCondor intended to be used with mock or unittest.mock
"""
def __init__(self, *args, **kwargs):
print("Using MockPyCondorAPI")
def getCondorJobs(self, constraint, attr_list):
"""
Given a job/schedd constraint, return a list of jobs attributes
or None if the query to condor fails.
"""
return None
def isScheddOverloaded(self):
"""check whether job limit is reached in local schedd"""
return False
| StarcoderdataPython |
8128486 | <reponame>JadilsonJR/Python
import os
carros=[]
class Carro:
nome=""
potencia=0
velMax=0
ligado=False
def __init__(self,nome,potencia): #Metodo Construtor
self.nome=nome
self.potencia=potencia
self.velMax=int(potencia)*2
self.ligado=False
def ligar(self): #Metodo
self.ligado=True
def desligar(self):
self.ligado=False
def info(self):
print("Nome.......: ",self.nome)
print("Potencia...: ",str(self.potencia))
print("Vel.Maxima.: ",str(self.velMax))
print("Ligado.....: ", ("sim" if self.ligado else "Não"))
def Menu(): #Funçãonormal
os.system("cls") or None
print("1 - Novo Carro")
print("2 - Informções Carro")
print("3 - Excluir Carro")
print("4 - Ligar Carro")
print("5 - Desligar Carro")
print("6 - Listar Carro")
print("7 - Sair Carro")
print("Quantidades de Carros",len(carros))
opcao=input("Digite uma Opção: ")
return int(opcao)
def NovoCarro():
os.system("cls") or None
nome1=input("Nome do Carro: ")
pot1=input("Potencia do Carro: ")
car=Carro(nome1, pot1)
carros.append(car)
print("Novo Carro criado!")
os.system("pause")
def informacao():
os.system("cls")
n=input("Informe o numero do carro que deseja informações: ") #Retorna uma string do numero
try: #Caso o Carro Exista
carros[int(n)].info() #Pesquise o Carro na Lista e mostre suas informaçoes
except: #Caso não exista o numero do Carro
print("Carro não existe na Lista")
os.system("pause")
def excluirCarro():
os.system("cls")
    n=input("Informe o numero do carro que deseja Excluir: ") #Retorna uma string do numero
try: #Caso o Carro Exista
del carros[int(n)] #Delete o carro na posição indicada
except: #Caso não exista o numero do Carro
print("Carro não existe na Lista")
os.system("pause")
def ligarCarro():
os.system("cls")
n=input("Informe o numero do carro que deseja ligar: ") #Retorna uma string do numero
try: #Caso o Carro Exista
carros[int(n)].ligar()
except: #Caso não exista o numero do Carro
print("Carro não existe na Lista")
os.system("pause")
def DesligarCarro():
os.system("cls")
n=input("Informe o numero do carro que deseja Desligar: ") #Retorna uma string do numero
try: #Caso o Carro Exista
        carros[int(n)].desligar() #Desliga o Carro informado
except: #Caso não exista o numero do Carro
print("Carro não existe na Lista")
os.system("pause")
def listarCarros():
os.system("cls")
posi=0
for recebe in carros:
print(posi, " - ", recebe.nome)
posi+=1
os.system("pause")
retorno=Menu()
while (int(retorno)<7):
if retorno == 1:
NovoCarro()
elif retorno == 2:
informacao()
elif retorno == 3:
excluirCarro()
elif retorno == 4:
ligarCarro()
elif retorno == 5:
DesligarCarro()
elif retorno == 6:
listarCarros()
retorno=Menu()
os.system("cls")
print("Programa Finalizado")
| StarcoderdataPython |
11359196 | <filename>python/nvtfcnn_benchmarks/postprocess.py<gh_stars>0
"""Extract from a log file speeds and compute average speed and batch time."""
from __future__ import print_function
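# Hypothetical invocation (arguments are positional: log file, model name, effective batch size):
#   python postprocess.py training.log resnet50 256
# The log is expected to contain a header line matching "Step  Epoch  Img/sec  Loss  LR"
# followed by 6-column rows; the third column (Img/sec) is what gets averaged below.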
import re
import sys
MODEL_TITLES = {
"alexnet_owt": "AlexNetOWT", "googlenet": "GoogleNet",
"inception_resnet_v2": "InceptionResNetV2",
"inception3": "InceptionV3", "inception4": "InceptionV4", "overfeat": "Overfeat",
"resnet18": "ResNet18", "resnet34": "ResNet34", "resnet50": "ResNet50",
"resnet101": "ResNet101", "resnet152": "ResNet152",
"vgg11": "VGG11", "vgg13": "VGG13", "vgg16": "VGG16", "vgg19": "VGG19",
"xception": "Xception"
}
def main():
"""Main function."""
# Get input parameters
fname = sys.argv[1]
model = sys.argv[2]
effective_batch = int(sys.argv[3])
# Load log file content
with open(fname) as logfile:
lines = [line.strip() for line in logfile]
# Define possible parser states
state = type('Enum', (), {'SEARCH': 1, 'PROCESS': 2})
header = 'Step[\t ]+Epoch[\t ]+Img/sec[\t ]+Loss[\t ]+LR'
# Parse content
speeds = []
parser_state = state.SEARCH
for line in lines:
if parser_state == state.SEARCH:
parser_state = state.PROCESS if re.match(header, line) else state.SEARCH
continue
#
if 'tensorflow' in line:
# This can be warning messages related to recoverable OOM errors:
# 2018-10-12 02:14:08.982901: W tensorflow/stream_executor/cuda/cuda_dnn.cc:3797]
# 2018-10-12 02:34:28.983848: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator ...
continue
if 'results.end_time' in line:
# Safe way to stop processing
break
#
line = line.split()
if len(line) != 6:
break
try:
line = [float(col) for col in line]
speeds.append(line[2])
except ValueError:
break
# Find average speed/processing time
if len(speeds) < 20:
# It's better to remove first two points even if number of iterations was small.
if len(speeds) >= 5:
speeds = speeds[2:]
print(
"[WARNING] Number of performance points is too low (%d). "\
"I will use almost all points to compute average. Better algorithm "\
"exists." % len(speeds)
)
else:
speeds = speeds[10:]
# Do some logging
if model in MODEL_TITLES:
print("__exp.model_title__=\"%s\"" % MODEL_TITLES[model])
if len(speeds) > 0:
speed = sum(speeds) / len(speeds)
batch_time = 1000.0 / (speed / effective_batch)
# Print results
print("__results.throughput__=%f" % speed)
print("__results.time__=%f" % batch_time)
print("__exp.status__=\"success\"")
else:
print("__exp.status__=\"failure\"")
print("__exp.status_msg__=\"No results have been found in this log file\"")
if __name__ == '__main__':
main()
| StarcoderdataPython |
6550575 | import os
import re
import json
import requests
import fnmatch
from urllib import urlencode
from BeautifulSoup import BeautifulSoup
_ROOT = os.path.abspath(os.path.dirname(__file__))
invalid_url = {'error': 'Invalid URL'}
unreachable = {'error': 'Failed to reach the URL'}
empty_meta = {'error': 'Found no meta info for that url'}
headers = {'user-agent': 'embeddit/0.0.1'}
class Embeddit(dict):
url = None
fetched = False
def __init__(self, url=None, *args, **kwargs):
if url:
self.url = url
self.fetch()
def fetch(self, force=False):
if self.fetched and not force:
return self
response = self.fetch_oembed_meta()
if 'error' in response:
# No oembed info found.
# Fall back to open graph
response = self.fetch_og_meta()
self.clear()
self.update(response)
self.fetched = True
return response
def to_json(self):
if not self.fetched:
self.fetch()
return json.dumps(self)
def fetch_oembed_meta(self):
try:
f = open(get_data('providers.json'), 'r')
providers = json.loads(f.read())
oembed_url = None
for provider in providers:
for endpoint in provider.get('endpoints', []):
for schema in endpoint.get('schemes', []):
                        # add the wildcard only when the scheme does not already carry one
                        if not schema.startswith('http://*') and not schema.startswith('https://*'):
schema = schema.replace('http://', 'http://*')
schema = schema.replace('https://', 'https://*')
if fnmatch.fnmatch(self.url, schema):
oembed_url = endpoint.get('url')
break
if not oembed_url:
provider_urls = [
provider.get('provider_url'),
provider.get('provider_url').replace('http://', 'https://')
]
for provider_url in provider_urls:
if fnmatch.fnmatch(self.url, provider_url + "*"):
oembed_url = provider.get('endpoints')[0].get('url')
break
if not oembed_url:
return invalid_url
params = urlencode({'url': self.url})
try:
results = requests.get(
'%s?%s' % (oembed_url.replace('{format}', 'json'), params),
headers=headers
)
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
except ValueError:
params = urlencode({'url': self.url, 'format': 'json'})
results = requests.get('%s?%s' % (oembed_url, params), headers=headers)
content = json.loads(results.content)
content[u'source_type'] = 'oembed'
return content
except IndexError:
return empty_meta
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def fetch_og_meta(self):
try:
results = requests.get(self.url, headers=headers)
soup = BeautifulSoup(results.content)
meta = soup.findAll('meta')
content = {}
for tag in meta:
if tag.has_key('property'):
if re.search('og:', tag['property']) is not None:
key = re.sub('og:', '', tag['property'])
content[key] = tag['content']
if content == {}:
return empty_meta
else:
content[u'source_type'] = 'open_graph'
return content
except requests.exceptions.InvalidSchema:
return invalid_url
except requests.exceptions.HTTPError:
return unreachable
def get_data(path):
return os.path.join(_ROOT, 'data', path)
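# --- Illustrative usage sketch (added; the URL is an arbitrary example) ---
# Resolves oEmbed (or Open Graph fallback) metadata for a URL and dumps it as JSON.
if __name__ == '__main__':
    embed = Embeddit('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
    print embed.to_json()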
| StarcoderdataPython |
3418589 | #!/usr/bin/env python
"""async10gather.py: Use gather
Usage:
async10gather.py
"""
import asyncio
async def factorial(name, number):
f = 1
for i in range(2, number + 1):
print(f"Task {name}: Compute factorial({i})...")
await asyncio.sleep(1)
f *= i
print(f"Task {name}: factorial({i}) = {f}")
def main():
    loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
factorial("A", 2),
factorial("B", 3),
factorial("C", 4)))
loop.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
11209763 | """Top-level package for zfit."""
# Copyright (c) 2021 zfit
import warnings
from pkg_resources import get_distribution
__version__ = get_distribution(__name__).version
__license__ = "BSD 3-Clause"
__copyright__ = "Copyright 2018, zfit"
__status__ = "Beta"
__author__ = ("<NAME> <<EMAIL>>,"
"<NAME> <<EMAIL>>, "
"<NAME> <<EMAIL>>, "
"<NAME> <<EMAIL>>")
__maintainer__ = "zfit"
__email__ = '<EMAIL>'
__credits__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__all__ = ["z", "constraint", "pdf", "minimize", "loss", "data", "func", "dimension", "exception",
"sample", "binned",
"Parameter", "ComposedParameter", "ComplexParameter", "convert_to_parameter",
"Space", "convert_to_space", "supports",
"run", "settings"]
# Copyright (c) 2019 zfit
warnings.warn("This is an ALPHA version of zfit for feature testing, do NOT use it in production! "
"It is NOT stable and contains bugs and untested features. "
"For production use, please use the last beta stable version of zfit.")
def _maybe_disable_warnings():
import os
disable_warnings = os.environ.get("ZFIT_DISABLE_TF_WARNINGS")
if disable_warnings is None:
warnings.warn("TensorFlow warnings are by default suppressed by zfit."
" In order to show them,"
" set the environment variable ZFIT_DISABLE_TF_WARNINGS=0."
" In order to suppress the TensorFlow warnings AND this warning,"
" set ZFIT_DISABLE_TF_WARNINGS=1.")
elif disable_warnings == '0':
return
os.environ["KMP_AFFINITY"] = "noverbose"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
_maybe_disable_warnings()
import tensorflow as tf
if int(tf.__version__[0]) < 2:
raise RuntimeError(f"You are using TensorFlow version {tf.__version__}. This zfit version ({__version__}) works"
f" only with TF >= 2")
from . import z # initialize first
from . import (constraint, data, dimension, exception, func, loss, binned,
minimize, param, pdf, sample)
from .core.data import Data
from .core.parameter import (ComplexParameter, ComposedParameter, Parameter,
convert_to_parameter)
from .core.space import Space, convert_to_space, supports
from .settings import run, ztypes
from .util.graph import jit as _jit
def _maybe_disable_jit():
import os
arg1 = os.environ.get("ZFIT_DO_JIT")
arg2 = os.environ.get("ZFIT_EXPERIMENTAL_DO_JIT")
arg3 = os.environ.get("ZFIT_MODE_GRAPH")
if arg3 is not None:
        warnings.warn("Deprecated to use `ZFIT_MODE_GRAPH`, use `ZFIT_GRAPH_MODE` instead.",
DeprecationWarning)
if arg1 is not None and arg2 is None:
        warnings.warn("Deprecated to use `ZFIT_EXPERIMENTAL_DO_JIT`, use `ZFIT_GRAPH_MODE` instead.",
DeprecationWarning)
arg = arg2 if arg1 is None else arg1
if arg is not None and not int(arg):
run.set_graph_mode(False)
graph = os.environ.get("ZFIT_GRAPH_MODE")
if graph is not None and not int(graph):
run.set_graph_mode(False)
# experimental flags
_maybe_disable_jit()
# EOF
| StarcoderdataPython |
1644763 | <reponame>chensjtu/AdelaiDepth
import torch
import torch.nn.functional
from . import network_auxi as network
from lib.configs.config import cfg
from lib.utils.net_tools import *
from lib.models.PWN_planes import PWNPlanesLoss
from lib.models.PWN_edges import EdgeguidedNormalRegressionLoss
from lib.models.ranking_loss import EdgeguidedRankingLoss
from lib.models.ILNR_loss import MEADSTD_TANH_NORM_Loss
from lib.models.MSGIL_loss import MSGIL_NORM_Loss
class RelDepthModel(nn.Module):
def __init__(self):
super(RelDepthModel, self).__init__()
self.depth_model = DepthModel()
self.losses = ModelLoss()
def forward(self, data, is_train=True):
# Input data is a_real, predicted data is b_fake, groundtruth is b_real
self.inputs = data['rgb'].cuda()
self.logit, self.auxi = self.depth_model(self.inputs)
if is_train:
self.losses_dict = self.losses.criterion(self.logit, self.auxi, data)
else:
self.losses_dict = {'total_loss': torch.tensor(0.0, dtype=torch.float).cuda()}
return {'decoder': self.logit, 'auxi': self.auxi, 'losses': self.losses_dict}
def inference(self, data):
with torch.no_grad():
out = self.forward(data, is_train=False)
pred_depth = out['decoder']
pred_disp = out['auxi']
pred_depth_normalize = (pred_depth - pred_depth.min() + 1) / (pred_depth.max() - pred_depth.min()) #pred_depth - pred_depth.min() #- pred_depth.max()
pred_depth_out = pred_depth
pred_disp_normalize = (pred_disp - pred_disp.min() + 1) / (pred_disp.max() - pred_disp.min())
return {'pred_depth': pred_depth_out, 'pred_depth_normalize': pred_depth_normalize,
'pred_disp': pred_disp, 'pred_disp_normalize': pred_disp_normalize,
}
class ModelLoss(nn.Module):
def __init__(self):
super(ModelLoss, self).__init__()
################Loss for the main branch, i.e. on the depth map#################
# Geometry Loss
self.pn_plane = PWNPlanesLoss(focal_x=cfg.DATASET.FOCAL_X, focal_y=cfg.DATASET.FOCAL_Y,
input_size=cfg.DATASET.CROP_SIZE, sample_groups=5000, xyz_mode='xyz')
self.pn_edge = EdgeguidedNormalRegressionLoss(mask_value=-1e-8, max_threshold=10.1)
# self.surface_normal_loss = SurfaceNormalLoss()
# the scale can be adjusted
self.msg_normal_loss = MSGIL_NORM_Loss(scale=4, valid_threshold=-1e-8)
# Scale shift invariant. SSIMAEL_Loss is MIDAS loss. MEADSTD_TANH_NORM_Loss is our normalization loss.
self.meanstd_tanh_loss = MEADSTD_TANH_NORM_Loss(valid_threshold=-1e-8)
self.ranking_edge_loss = EdgeguidedRankingLoss(mask_value=-1e-8)
################Loss for the auxi branch, i.e. on the disp map#################
# the scale can be adjusted
self.msg_normal_auxiloss = MSGIL_NORM_Loss(scale=4, valid_threshold=-1e-8)
# Scale shift invariant. SSIMAEL_Loss is MIDAS loss. MEADSTD_TANH_NORM_Loss is our normalization loss.
self.meanstd_tanh_auxiloss = MEADSTD_TANH_NORM_Loss(valid_threshold=-1e-8)
self.ranking_edge_auxiloss = EdgeguidedRankingLoss(mask_value=-1e-8)
def criterion(self, pred_logit, auxi, data):
loss1 = self.decoder_loss(pred_logit, data)
loss2 = self.auxi_loss(auxi, data)
loss = {}
loss.update(loss1)
loss.update(loss2)
loss['total_loss'] = loss1['total_loss'] + loss2['total_loss']
return loss
def auxi_loss(self, auxi, data):
loss = {}
if 'disp' not in data:
return {'total_loss': torch.tensor(0.0).cuda()}
gt = data['disp'].to(device=auxi.device)
if '_ranking-edge-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['ranking-edge_auxiloss'] = self.ranking_edge_auxiloss(auxi, gt, data['rgb'])
if '_msgil-normal-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['msg_normal_auxiloss'] = (self.msg_normal_auxiloss(auxi, gt) * 0.5).float()
if '_meanstd-tanh-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['meanstd-tanh_auxiloss'] = self.meanstd_tanh_auxiloss(auxi, gt)
total_loss = sum(loss.values())
loss['total_loss'] = total_loss * cfg.TRAIN.LOSS_AUXI_WEIGHT
return loss
def decoder_loss(self, pred_logit, data):
pred_depth = pred_logit
gt_depth = data['depth'].to(device=pred_depth.device)
# High-quality data, except webstereo data
mask_high_quality = data['quality_flg'] ==3
mask_mid_quality = data['quality_flg'] >= 2
# gt_depth_high = gt_depth[mask_high_quality]
# pred_depth_high = pred_depth[mask_high_quality]
gt_depth_mid = gt_depth[mask_mid_quality]
pred_depth_mid = pred_depth[mask_mid_quality]
#gt_depth_filter = data['mask_highquality']]
#pred_depth_filter = pred_depth[data['mask_highquality']]
#focal_length_filter = data['focal_length'][data['mask_highquality']]
# if gt_depth_high.ndim == 3:
# gt_depth_high = gt_depth_high[None, :, :, :]
# pred_depth_high = pred_depth_high[None, :, :, :]
if gt_depth_mid.ndim == 3:
gt_depth_mid = gt_depth_mid[None, :, :, :]
pred_depth_mid = pred_depth_mid[None, :, :, :]
loss = {}
if '_pairwise-normal-regress-edge_' in cfg.TRAIN.LOSS_MODE.lower() or \
'_pairwise-normal-regress-plane_' in cfg.TRAIN.LOSS_MODE.lower():
pred_ssinv = recover_scale_shift_depth(pred_depth, gt_depth, min_threshold=-1e-8, max_threshold=10.1)
else:
pred_ssinv = None
# Geometry Loss
if '_pairwise-normal-regress-plane_' in cfg.TRAIN.LOSS_MODE.lower():
focal_length = data['focal_length'] if 'focal_length' in data else None
loss['pairwise-normal-regress-plane_loss'] = self.pn_plane(gt_depth,
pred_ssinv,
data['planes'],
focal_length)
if '_pairwise-normal-regress-edge_' in cfg.TRAIN.LOSS_MODE.lower():
if mask_high_quality.sum():
loss['pairwise-normal-regress-edge_loss'] = self.pn_edge(pred_ssinv[mask_high_quality],
gt_depth[mask_high_quality],
data['rgb'][mask_high_quality],
focal_length=data['focal_length'][mask_high_quality])
else:
loss['pairwise-normal-regress-edge_loss'] = pred_ssinv.sum() * 0.
# Scale-shift Invariant Loss
if '_meanstd-tanh_' in cfg.TRAIN.LOSS_MODE.lower():
loss_ssi = self.meanstd_tanh_loss(pred_depth_mid, gt_depth_mid)
loss['meanstd-tanh_loss'] = loss_ssi
if '_ranking-edge_' in cfg.TRAIN.LOSS_MODE.lower():
loss['ranking-edge_loss'] = self.ranking_edge_loss(pred_depth, gt_depth, data['rgb'])
# Multi-scale Gradient Loss
if '_msgil-normal_' in cfg.TRAIN.LOSS_MODE.lower():
loss['msg_normal_loss'] = (self.msg_normal_loss(pred_depth, gt_depth) * 0.1).float()
total_loss = sum(loss.values())
loss['total_loss'] = total_loss
return loss
class ModelOptimizer(object):
def __init__(self, model):
super(ModelOptimizer, self).__init__()
encoder_params = []
encoder_params_names = []
decoder_params = []
decoder_params_names = []
nograd_param_names = []
for key, value in model.named_parameters():
if value.requires_grad:
if 'res' in key:
encoder_params.append(value)
encoder_params_names.append(key)
else:
decoder_params.append(value)
decoder_params_names.append(key)
else:
nograd_param_names.append(key)
lr_encoder = cfg.TRAIN.BASE_LR
lr_decoder = cfg.TRAIN.BASE_LR * cfg.TRAIN.SCALE_DECODER_LR
weight_decay = 0.0005
net_params = [
{'params': encoder_params,
'lr': lr_encoder,
'weight_decay': weight_decay},
{'params': decoder_params,
'lr': lr_decoder,
'weight_decay': weight_decay},
]
self.optimizer = torch.optim.SGD(net_params, momentum=0.9)
self.model = model
def optim(self, loss):
self.optimizer.zero_grad()
loss_all = loss['total_loss']
loss_all.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 10)
self.optimizer.step()
class DepthModel(nn.Module):
def __init__(self):
super(DepthModel, self).__init__()
backbone = network.__name__.split('.')[-1] + '.' + cfg.MODEL.ENCODER
self.encoder_modules = get_func(backbone)()
self.decoder_modules = network.Decoder()
self.auxi_modules = network.AuxiNetV2()
def forward(self, x):
lateral_out = self.encoder_modules(x)
out_logit, auxi_input = self.decoder_modules(lateral_out)
out_auxi = self.auxi_modules(auxi_input)
return out_logit, out_auxi
def recover_scale_shift_depth(pred, gt, min_threshold=1e-8, max_threshold=1e8):
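    # Added note: per image, this solves the least-squares problem
    #   min_{s,t} sum over valid pixels of (s * pred + t - gt)^2
    # via the 2x2 normal equations A [s, t]^T = B, with A = P P^T and B = P gt,
    # where P stacks the valid predictions and a row of ones; EPS regularises the inverse.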
b, c, h, w = pred.shape
mask = (gt > min_threshold) & (gt < max_threshold) # [b, c, h, w]
EPS = 1e-6 * torch.eye(2, dtype=pred.dtype, device=pred.device)
scale_shift_batch = []
ones_img = torch.ones((1, h, w), dtype=pred.dtype, device=pred.device)
for i in range(b):
mask_i = mask[i, ...]
pred_valid_i = pred[i, ...][mask_i]
ones_i = ones_img[mask_i]
pred_valid_ones_i = torch.stack((pred_valid_i, ones_i), dim=0) # [c+1, n]
A_i = torch.matmul(pred_valid_ones_i, pred_valid_ones_i.permute(1, 0)) # [2, 2]
A_inverse = torch.inverse(A_i + EPS)
gt_i = gt[i, ...][mask_i]
B_i = torch.matmul(pred_valid_ones_i, gt_i)[:, None] # [2, 1]
scale_shift_i = torch.matmul(A_inverse, B_i) # [2, 1]
scale_shift_batch.append(scale_shift_i)
scale_shift_batch = torch.stack(scale_shift_batch, dim=0) # [b, 2, 1]
ones = torch.ones_like(pred)
pred_ones = torch.cat((pred, ones), dim=1) # [b, 2, h, w]
pred_scale_shift = torch.matmul(pred_ones.permute(0, 2, 3, 1).reshape(b, h * w, 2), scale_shift_batch) # [b, h*w, 1]
pred_scale_shift = pred_scale_shift.permute(0, 2, 1).reshape((b, c, h, w))
return pred_scale_shift
| StarcoderdataPython |
3579798 | import re
from unidecode import unidecode
import nick_names
import utils
class MalformedAuthorName(Exception):
pass
class Mention():
def __init__(self):
pass
def load_author_alias(self, name_str):
self.original_name = name_str
self.merged_name = name_str #this gets overwritten
self.first_name, self.middle_names, self.last_name, self.suffix = self.split_name()
if len(self.middle_names) > 4:
msg = "Too many middle names in '%s'" % self
raise MalformedAuthorName(msg)
def load_clean_name(self, fn, mns, ln, suffix):
self.first_name, self.middle_names, self.last_name, self.suffix = \
(fn, mns, ln, suffix)
name_str = " ".join([fn] + mns + [ln])
self.original_name = name_str
self.merged_name = name_str #this gets overwritten
@classmethod
def intersected_name(cls, e1, e2):
fn = utils.shorter(e1.fn(), e2.fn())
mns = []
mns1, mns2 = e1.mns(), e2.mns()
if len(mns1) == len(mns2):
mns = [utils.shorter(mns1[mi], mns2[mi]) for mi in xrange(len(mns1))]
ln = e1.ln()
a = cls()
a.load_clean_name(fn, mns, ln, "")
return a
def __str__(self):
return self.original_name
def clean_name(self):
name_str = self.original_name.decode('utf-8')
name_str = unidecode(name_str)
name_str = re.sub(r'\s+', ' ', name_str)
name_str = re.sub(r'[^a-zA-Z .,-]', '', name_str)
name_str = re.sub(r' -|- ', ' ', name_str)
name_str = re.sub(r'--+', '-', name_str)
name_str = re.sub(r'(?i)^(Dr|Mr|Mrs|Ms)\. ', '', name_str)
name_str = re.sub('^([A-Z])([A-Z]) ', r'\1. \2. ', name_str)
name_str = re.sub('^([A-Z][a-z]+)\. ', r'\1 ', name_str)
name_str = re.sub('\. *', ' ', name_str)
name_str = re.sub('(:? |^)((van|de|del|da|do|el|la|di|von|der) )+', ' ', name_str)
name_str = re.sub(r'^ +| +$', '', name_str)
name_str = re.sub(r'\s+', ' ', name_str)
return name_str.lower()
def split_name(self):
cname = self.clean_name()
suffix = ""
m_suffix = re.search(r'(?i)^(.*) (jr|iii|iv)$', cname)
if m_suffix:
cname, suffix = m_suffix.group(1), m_suffix.group(2)
not_last_name, last_name = "", ""
# smith, john c
m = re.match('(?P<last>.+), (?P<first>.+)', cname)
if not m:
# smith j c
m = re.match('^(?P<last>\S{2,}) (?P<first>(?:[a-z] )*[a-z])$', cname)
if not m:
# j c smith
m = re.match('^(?P<first>.+?) (?P<last>\S+)$', cname)
if not m:
msg = "Cannot split '%s' into first and last names" % self
raise MalformedAuthorName(msg)
not_last_name, last_name = m.group("first"), m.group("last")
name_parts = re.split(r'[ -]+', not_last_name)
name_parts = [n for n in name_parts if n]
first_name = nick_names.nick_names.get(name_parts[0], name_parts[0])
middle_names = name_parts[1:]
return first_name, middle_names, last_name, suffix
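    # Illustrative behaviour (added comments; the exact first name may still be
    # rewritten by the nick_names lookup above):
    #   "Smith, John C" -> first "john", middles ["c"], last "smith"
    #   "J C Smith"     -> first "j",    middles ["c"], last "smith"
    # A trailing "jr"/"iii"/"iv" is split off into the suffix.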
def full_name(self):
return " ".join([self.first_name] + self.middle_names + [self.last_name])
def last_first(self):
return " ".join([self.last_name + ","] + [self.first_name] + self.middle_names)
def fn(self):
return self.first_name
def mns(self):
return self.middle_names
def ln(self):
return self.last_name
def name_variants(self):
ret = set([self.full_name()])
m_string = " ".join(self.mns())
ret.add("%s %s" % (self.fn(), self.ln()))
ret.add("%s %s" % (self.fn()[0], self.ln()))
if self.mns():
ret.add("%s %s %s" % (self.fn(), m_string, self.ln()))
ret.add("%s %s %s" % (self.fn()[0], m_string, self.ln()))
return ret
def token(self):
return "%s_%s" % (self.last_name, self.first_name[0])
def repr_tsv(self):
mn = " ".join(self.mns())
name_tsv = "\t".join([self.fn(), mn, self.ln()])
return "\t".join([self.article_id, self.author_id, name_tsv,])
def name_length(self):
return len(self.full_name())
def change_last_name(self, new_last):
self.last_name = new_last
def backup_name(self):
self.former_fn = self.fn()
self.former_mns = self.mns()
self.former_ln = self.ln()
def restore_name(self):
self.first_name = self.former_fn
self.middle_names = self.former_mns
self.last_name = self.former_ln
def drop_first_name(self):
self.backup_name()
self.first_name = self.mns()[0]
self.middle_names = self.mns()[1:]
def drop_hyphenated_ln(self):
self.backup_name()
import re
self.last_name = re.sub(r'-\w+$', '', self.ln())
def fix_spelling(self, pc):
self.backup_name()
fn, mns, ln = pc.fn(), pc.mns(), pc.ln()
if not utils.compatible_name_part(fn, self.fn()):
self.first_name = fn
if mns != self.mns():
self.middle_names = mns
if ln != self.ln():
self.change_last_name(ln)
| StarcoderdataPython |
150546 | # -*- coding: utf-8 -*-
#############################################################
# IMPORTS #
#############################################################
import os
import sys
import re
from time import sleep
from PIL import Image, ImageOps, ImageFile
#############################################################
# SIZE #
#############################################################
WIDTH = 76 # mm
HEIGHT = 102 # mm
DPI = 300 # DPI
#############################################################
# PATH #
#############################################################
PATH = os.path.dirname(os.path.abspath(__file__))
os.chdir(PATH)
#############################################################
# CONTENT #
#############################################################
ImageFile.LOAD_TRUNCATED_IMAGES = True
EXTENSION = (".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG")
FOLDER = [file for file in sorted(os.listdir()) if file.endswith(EXTENSION) and not file == "watermark.png"]
TOTAL = len(FOLDER)
DUO = ["recto", "verso", "duo"]
DOUBLE = False
IMAGE_NAME = ""
#############################################################
# CONVERT MM 300DPI TO PIXELS #
#############################################################
WIDTH_DPI = round((float(WIDTH) / 25.4) * DPI)
HEIGHT_DPI = round((float(HEIGHT) / 25.4) * DPI)
#############################################################
# MAIN #
#############################################################
if not os.path.exists(f"{PATH}\\2 en 1") :
os.makedirs(f"{PATH}\\2 en 1")
index = 1
while len(FOLDER) > 0:
os.system('cls' if os.name == 'nt' else 'clear')
print("2 images sur 10x15")
print("#" * 30)
print(f"image {index} sur {TOTAL}")
print("-" * 13)
image1 = FOLDER.pop()
if any(key_name in image1.lower() for key_name in DUO) == True:
IMAGE_NAME = image1
image2 = image1
DOUBLE = True
else :
if len(FOLDER) < 1:
image2 = image1
else:
image2 = FOLDER.pop()
images = map(Image.open, [image1, image2])
x_offset = 0
try:
new_image = Image.new('RGB', (WIDTH_DPI * 2, HEIGHT_DPI))
except Exception:
pass
else:
for image in images:
# widths, heights = zip(*(i.size for i in images))
if image.width > image.height:
image = image.rotate(90, expand=True)
cropped_image = ImageOps.fit(image, (WIDTH_DPI, HEIGHT_DPI))
new_image.paste(cropped_image, (x_offset, 0))
x_offset += WIDTH_DPI
if DOUBLE :
new_image.save(f"{PATH}\\2 en 1\\10x15_{IMAGE_NAME}", dpi=(DPI, DPI), format='JPEG', subsampling=0, quality=100)
DOUBLE = False
else :
new_image.save(f"{PATH}\\2 en 1\\10x15_{index:03}.jpg", dpi=(DPI, DPI), format='JPEG', subsampling=0, quality=100)
index += 1
print("Terminé !")
| StarcoderdataPython |
1619255 | <reponame>coclar/pointlike<filename>python/pointlike_defaults.py
# default parameters for the various parameter files
#
# $Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/pointlike_defaults.py,v 1.16 2009/02/24 20:50:52 burnett Exp $
#
# Include this to set defaults, then override
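# Illustrative override pattern (added; file names and values are placeholders):
#   from pointlike_defaults import *
#   Data.files = ['ft1_events.fits']
#   Data.history = 'ft2_pointing.fits'
#   SourceFinder.TSmin = 10.0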
import sys
print ('running %s' % sys.argv[0])
# data selection parameters
class Data:
    pixelfile=''   # set to a photon data FITS file, or use a list of FT1 files. If set, ignore alignment, times, output
    files = []     # set to a list of FT1-like FITS files (if no pixelfile)
event_class = -1 # 0, select front only; -1 no selection
source_id =-1 # -1: all sources -- select according to Monte Carlo source id, if present
output_pixelfile = '' # set to create an output pixel file (if reading FT1 or ROOT files)
start_time=0. # select interval if non zero
stop_time=0. # "
history = '' # optional history or FT2 file, needed to correct for misalignment if reading FT1
LATalignment=[] # alignment correction angles about x,y,z axes, in arcseconds
energy_bins=[] #
class Diffuse:
file = '' # diffuse input file image file: if not specified, try to find it below
exposure=3e10 # this seems appropriate for 1 year.
import os
if 'GLAST_EXT' in os.environ and file=='':
file = os.path.join(os.environ['GLAST_EXT'],'extFiles','v0r7','galdiffuse', 'gll_iem_v01.fit')
class Isotropic: # isotropic flux description
flux = 1.5e-5
index = 2.2
class Exposure:
livetimefile=r'D:\common\pointfind\data\aug2008-jan2009_livetime.fits'
IRF = 'P6_v1_diff'
class PointSourceLikelihood: #parameters for the likelihood calculation
# HEALpix level range for energy band fits
emin = 200; # only select bands including or above this energy
minalpha=0.01 # minimum value for signal fraction to use in TS total
minROI = 0 # minimum size for ROI (deg)
    maxROI = 20    # prevent from being too large
# parameters governing iteration cycle of bands/position
TSmin= 5 # minimum TS value to allow during iteration
    skip1=1   # initial number of layers to skip in localization fit
skip2=9 # don't skip beyond this
itermax=1 # maximum number of iterations
verbose = 0 # set non-zero to get lots of output
maxstep = 0.2 # max step allowed during localization: abort if larger
merge=1 # set to zero to not attempt to merge bands with identical parameters
class SourceFinder: # parameters for the SourceFinder.
pass1_nside=256 # HEALpix binning for initial points.
TSmin = 5.0 # overall minimum TS
pixel_fraction = 1.0 # fraction of pixels to sample, sorted according to TS
prune_radius =0.25 # pruning radius in degrees (also needs to be tuned)
group_radius = 4.0 # maximum radius for nearby sources.
examine_radius=180 # default is to examine full sky
| StarcoderdataPython |
3389335 | <reponame>yaqwsx/YBlade
#Author-<NAME>
#Description-
import adsk.core, adsk.fusion, adsk.cam, traceback
from adsk.core import Point3D, Point2D, Vector3D, Vector2D, Matrix3D
import math
import sys
from copy import deepcopy
handlers = []
sys.stderr = sys.stdout
sys.stdout.flush()
def readProfile(profileFile):
points = []
for l in profileFile.readlines()[1:]:
p = l.split()
points.append((float(p[0]), float(p[1])))
return points
class Struct(object): pass
def readBlade(bladeFile):
sections = []
for l in bladeFile.readlines()[3:]:
x = l.split()
s = Struct()
s.pos = float(x[0]) * 100 # Convert to cm
s.len = float(x[1]) * 100 # Convert to cm
s.twist = float(x[2])
s.offset = float(x[3]) * 100 # Convert to cm
s.thread = float(x[4])
sections.append(s)
return sections
def findClosest(target, l):
"""
Find value in l closest target. Return index of such a value
"""
minVal = l[0]
minIdx = 0
for i, e in enumerate(l):
if abs(target - minVal) > abs(target - e):
minVal = e
minIdx = i
return minIdx
def deduceOffset(blade, profile):
positives = list([(x, y) for x, y in profile if y > 0])
posIdx = findClosest(blade[0].thread, [x for x, y in positives])
negatives = list([(x, y) for x, y in profile if y < 0])
negIdx = findClosest(blade[0].thread, [x for x, y in negatives])
mid = (positives[posIdx][1] + negatives[negIdx][1]) / 2
for b in blade:
b.offset = -mid * b.len
def offsetLen(blade, offsetMm):
newBlade = []
for x in blade:
y = deepcopy(x)
y.len -= 2 * offsetMm / 10
newBlade.append(y)
return newBlade
def profilePoints(profileData, chordLength, twist, threadAxisOffset, zoffset):
pointSet = adsk.core.ObjectCollection.create()
for profilePoint in profileData:
p = Point3D.create(profilePoint[0] * chordLength, profilePoint[1] * chordLength, 0)
p.translateBy(Vector3D.create(-chordLength * threadAxisOffset, zoffset))
m = Matrix3D.create()
m.setToRotation(math.radians(twist), Vector3D.create(0, 0, 1), Point3D.create(0, 0, 0))
p.transformBy(m)
pointSet.add(p)
return pointSet
def drawProfile(sketch, profileData, chordLength, twist, threadAxisOffset, zoffset):
pointSet = profilePoints(profileData, chordLength, twist, threadAxisOffset, zoffset)
spline = sketch.sketchCurves.sketchFittedSplines.add(pointSet)
first, last = pointSet.item(0), pointSet.item(pointSet.count - 1)
line = sketch.sketchCurves.sketchLines.addByTwoPoints(first, last)
profile = adsk.core.ObjectCollection.create()
profile.add(spline)
profile.add(line)
return profile
def drawProfileLines(sketch, profileData, chordLength, twist, threadAxisOffset, zoffset):
pointSet = profilePoints(profileData, chordLength, twist, threadAxisOffset, zoffset)
lineProfile = adsk.core.ObjectCollection.create()
for i in range(pointSet.count):
first, last = pointSet.item(i), pointSet.item((i + 1) % pointSet.count)
line = sketch.sketchCurves.sketchLines.addByTwoPoints(first, last)
line.isConstruction = True
lineProfile.add(line)
return lineProfile
def drawGuideLine(sketch, blade, seed):
pointSet = adsk.core.ObjectCollection.create()
for s in blade:
p = Point3D.create(seed[0] * s.len, seed[1] * s.len, s.pos)
p.translateBy(Vector3D.create(-s.len * s.thread, s.offset))
m = Matrix3D.create()
m.setToRotation(math.radians(s.twist), Vector3D.create(0, 0, 1), Point3D.create(0, 0, 0))
p.transformBy(m)
pointSet.add(p)
spline = sketch.sketchCurves.sketchFittedSplines.add(pointSet)
return spline
def drawSpline(sketch, points):
spline = sketch.sketchCurves.sketchFittedSplines.add(points)
return adsk.fusion.Path.create(spline, adsk.fusion.ChainedCurveOptions.noChainedCurves)
def drawLinestring(sketch, points):
line = adsk.core.ObjectCollection.create()
for i in range(points.count - 1):
s, e = points.item(i), points.item((i + 1) % points.count)
l = sketch.sketchCurves.sketchLines.addByTwoPoints(s, e)
line.add(l)
return line
def extrudeBlade(component, profiles, sweepLine, guideLine):
sweepProfile = adsk.core.ObjectCollection.create()
sweepProfile.add(profiles[0].profiles.item(0))
sweepProfile.add(profiles[0].profiles.item(1))
path = component.features.createPath(sweepLine)
guide = component.features.createPath(guideLine)
sweeps = component.features.sweepFeatures
sweepInput = sweeps.createInput(sweepProfile, path, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
sweepInput.guideRail = guide
sweepInput.profileScaling = adsk.fusion.SweepProfileScalingOptions.SweepProfileScaleOption
sweeps.add(sweepInput)
def hollowBlade(component, profiles, guides):
loftFeats = component.features.loftFeatures
loftInput = loftFeats.createInput(adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
loftSectionsObj = loftInput.loftSections
for p in profiles:
loftSectionsObj.add(p.profiles.item(0))
c = loftInput.centerLineOrRails
for g in guides:
c.addRail(g)
loftFeats.add(loftInput)
def hollowBladeAlt(component, profiles, guides):
loftFeats = component.features.loftFeatures
for i in range(len(profiles) - 1):
op = adsk.fusion.FeatureOperations.JoinFeatureOperation
if i == 0:
op = adsk.fusion.FeatureOperations.NewBodyFeatureOperation
loftInput = loftFeats.createInput(op)
loftSectionsObj = loftInput.loftSections
loftSectionsObj.add(profiles[i].profiles.item(0))
loftSectionsObj.add(profiles[i + 1].profiles.item(0))
c = loftInput.centerLineOrRails
for g in guides:
path = adsk.fusion.Path.create(g.item(i), adsk.fusion.ChainedCurveOptions.noChainedCurves)
c.addRail(path)
loftFeats.add(loftInput)
def collectLinePoints(linestring):
res = set()
for i in range(linestring.count):
l = linestring.item(i)
s, e = l.geometry.startPoint, l.geometry.endPoint
res.add((s.x, s.y))
res.add((e.x, e.y))
return list(res)
def getLeftmostPoint(points):
res = points[0]
for p in points:
if p[0] < res[0]:
res = p
return res
def getRightmostPoint(points):
res = points[0]
for p in points:
if p[0] > res[0]:
res = p
return res
def dist(a, b):
x = a[0] - b[0]
y = a[1] - b[1]
return math.sqrt(x*x + y*y)
def _vec2d_dist(p1, p2):
return (p1[0] - p2[0])**2 + (p1[1] - p2[1])**2
def _vec2d_sub(p1, p2):
return (p1[0]-p2[0], p1[1]-p2[1])
def _vec2d_mult(p1, p2):
return p1[0]*p2[0] + p1[1]*p2[1]
# Taken from https://stackoverflow.com/questions/2573997/reduce-number-of-points-in-line
def ramerdouglas(line, dist):
if len(line) < 3:
return line
(begin, end) = (line[0], line[-1]) if line[0] != line[-1] else (line[0], line[-2])
distSq = []
for curr in line[1:-1]:
tmp = (
_vec2d_dist(begin, curr) - _vec2d_mult(_vec2d_sub(end, begin), _vec2d_sub(curr, begin)) ** 2 / _vec2d_dist(begin, end))
distSq.append(tmp)
maxdist = max(distSq)
if maxdist < dist ** 2:
return [begin, end]
pos = distSq.index(maxdist)
return (ramerdouglas(line[:pos + 2], dist) +
ramerdouglas(line[pos + 1:], dist)[1:])
def reduceProfile(profile, minDistance):
return ramerdouglas(profile, minDistance)
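# Added illustration: with a tolerance larger than the middle point's deviation, the
# Ramer-Douglas-Peucker pass above collapses nearly collinear points, e.g. (hypothetical values)
#   reduceProfile([(0, 0), (1, 0.001), (2, 0)], 0.01) -> [(0, 0), (2, 0)]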
def inputFile(ui, title):
fileDlg = ui.createFileDialog()
fileDlg.isMultiSelectEnabled = False
fileDlg.title = title
fileDlg.filter = "*.*"
# Show file open dialog
dlgResult = fileDlg.showOpen()
if dlgResult == adsk.core.DialogResults.DialogOK:
return fileDlg.filenames[0]
else:
raise RuntimeError("No file specified")
def run(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
commandDefinitions = ui.commandDefinitions
design = app.activeProduct
rootComp = design.activeComponent
qbladeFile = inputFile(ui, "Select QBlade specification file")
profileFile = inputFile(ui, "Select profile file")
class YBladeExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
command = args.firingEvent.sender
inputs = command.commandInputs
params = {input.id: input.value for input in inputs}
sketches = rootComp.sketches
planes = rootComp.constructionPlanes
xyPlane = rootComp.xYConstructionPlane
with open(params["profileFile"]) as f:
profileData = readProfile(f)
reducedProfileData = reduceProfile(profileData, params["simplificationFactor"])
with open(params["bladeFile"]) as f:
blade = readBlade(f)
deduceOffset(blade, profileData)
profiles = []
prevLen = -1000
prevTwist = -1000
leftInfillRail = adsk.core.ObjectCollection.create()
rightInfillRail = adsk.core.ObjectCollection.create()
for i, b in enumerate(blade):
if abs(b.len - prevLen) < 1 and abs(b.twist - prevTwist) < 1 and i != len(blade) - 1:
continue
prevLen = b.len
prevTwist = b.twist
planeInput = planes.createInput()
offsetValue = adsk.core.ValueInput.createByReal(b.pos)
planeInput.setByOffset(xyPlane, offsetValue)
plane = planes.add(planeInput)
plane.name = f"profile_{i}"
profileSketch = sketches.add(plane)
profileSketch.isLightBulbOn = False
spline = drawProfile(profileSketch, profileData, b.len, b.twist, b.thread, b.offset)
lines = drawProfileLines(profileSketch, reducedProfileData, b.len, b.twist, b.thread, b.offset)
dirPoint = adsk.core.Point3D.create(0, 0, 0)
offsetCurves = profileSketch.offset(lines, dirPoint, params["thickness"])
profiles.append(profileSketch)
points = collectLinePoints(offsetCurves)
lp = getLeftmostPoint(points)
leftInfillRail.add(Point3D.create(lp[0], lp[1], b.pos))
rp = getRightmostPoint(points)
rightInfillRail.add(Point3D.create(rp[0], rp[1], b.pos))
guideSketch = sketches.add(xyPlane)
guideLine1 = drawGuideLine(guideSketch, blade, (0, 0))
guideLine2 = drawGuideLine(guideSketch, blade, (1, 0))
innerGuide1 = drawLinestring(guideSketch, leftInfillRail)
innerGuide2 = drawLinestring(guideSketch, rightInfillRail)
sweepLine = guideSketch.sketchCurves.sketchLines.addByTwoPoints(
Point3D.create(0, 0, blade[0].pos),
Point3D.create(0, 0, blade[-1].pos))
hollowBladeAlt(rootComp, profiles, [innerGuide1, innerGuide2])
extrudeBlade(rootComp, profiles, sweepLine, guideLine1)
adsk.terminate()
except:
if ui:
ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
class YBladeDestroyHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
sys.stdout.close()
class YBladeCreateHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
try:
cmd = args.command
onExecute = YBladeExecuteHandler()
cmd.execute.add(onExecute)
onDestroy = YBladeDestroyHandler()
cmd.destroy.add(onDestroy)
# keep the handler referenced beyond this function
handlers.append(onExecute)
handlers.append(onDestroy)
inputs = cmd.commandInputs
inputs.addStringValueInput("profileFile", "Profile file path", profileFile)
inputs.addStringValueInput("bladeFile", "Blade file path", qbladeFile)
inputs.addDistanceValueCommandInput("thickness", "Shell thickness", adsk.core.ValueInput.createByString("1mm"))
inputs.addValueInput("simplificationFactor", "Infill simplification factor", "", adsk.core.ValueInput.createByReal(0.005))
except:
if ui:
ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
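        # look up the command definition if it already exists, otherwise register it,
        # then attach the created-handler and launch the command dialog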
cmdDef = commandDefinitions.itemById("YBlade")
if not cmdDef:
cmdDef = commandDefinitions.addButtonDefinition("YBlade",
"Import QBlade",
"Create a blade.",
"./resources")
onCommandCreated = YBladeCreateHandler()
cmdDef.commandCreated.add(onCommandCreated)
# keep the handler referenced beyond this function
handlers.append(onCommandCreated)
inputs = adsk.core.NamedValues.create()
cmdDef.execute(inputs)
adsk.autoTerminate(False)
except:
print("Failed:\n{}".format(traceback.format_exc()))
if ui:
ui.messageBox("Failed:\n{}".format(traceback.format_exc()))
| StarcoderdataPython |
3324004 | import os
import django
import json
import random
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'QuestionsBidding.settings')
# settings.configure()
django.setup()
from bidding.models import Question
fields=['question', 'a', 'b', 'c', 'd', 'answer']
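# for each trivia record: shuffle the correct answer in with the distractors,
# then store the question, the four choices, and the letter of the correct choice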
for row in json.load(open('questions.json'))['results']:
row['incorrect_answers'].append(row['correct_answer'])
random.shuffle(row['incorrect_answers'])
    data = [row['question']]
    data.extend(row['incorrect_answers'])
    data.append(['a', 'b', 'c', 'd'][row['incorrect_answers'].index(row['correct_answer'])])
Question.objects.create(**dict(zip(fields, data))) | StarcoderdataPython |
6484765 | <filename>vector_parse.py
## Kraken version 1 kraken parser to remove plasmids
import sys
import re
PLASMID_REGEXP = re.compile(r'plasmid')
def fasta_parse(infile):
with open(infile, 'r') as fastaFile:
# Skip whitespace
while True:
line = fastaFile.readline()
            if line == "":
                return  # Empty file or premature end of file?
            if line[0] == ">":
break
while True:
            if line[0] != ">":
raise ValueError("Records in FASTA should begin with '>'")
header = line[1:].rstrip()
allLines = []
line = fastaFile.readline()
while True:
if not line:
break
                if line[0] == ">":
break
allLines.append(line.rstrip())
line = fastaFile.readline()
yield header, "".join(allLines).replace(" ", "").replace("\r", "")
if not line:
return # Stop Iteration
assert False, "Should not reach this line"
def rename_plasmid_seqs(inseqs, outseqs):
data = {k: v for k, v in fasta_parse(inseqs)}
    with open(outseqs, 'w') as out:
vectors = 0
for header, seq in data.items():
vectors += 1
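            # prefix every header with Kraken's taxid syntax so these sequences are
            # all binned under taxid 32630 (the vector/contaminant label used here)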
out.write('>kraken:taxid|32630|{}\n{}\n'.format(header, seq))
print(vectors, ' total vector sequences')
if __name__ == '__main__':
input_sequences = sys.argv[1]
output_sequences = sys.argv[2]
rename_plasmid_seqs(input_sequences, output_sequences)
# some empty header lines, use the following awk code to only keep useful headers
#awk 'BEGIN {RS = ">" ; FS = "\n" ; ORS = ""} {if ($2) print ">"$0}' edited_vector.fasta > vector_contaminant.fasta | StarcoderdataPython |
3589896 | <gh_stars>0
#Class used for reading AutoLabel files
import msgpack
import numpy as np
import StandardBody
class AutoLabelReader():
def __init__(self, in_filename):
self.in_filename = in_filename
self.read_data()
def read_data(self):
with open(self.in_filename, 'rb') as labelFile:
dictToUnpack = msgpack.unpackb(labelFile.read(), raw=False)
self.timestamps = dictToUnpack["timestamps"]
self.labels = dictToUnpack["labels"]
#TODO: Raise a suitable error of some kind if the lengths don't match!
def getMinTimestamp(self):
return self.timestamps[0]
def getNumLabels(self):
return len(self.timestamps)
def getMaxTimestamp(self):
return self.timestamps[-1]
def getTimestamp(self, i):
return self.timestamps[i]
#Returns a given entry as an Nx3 float32 numpy array
def getLabel(self, i):
binLabel = self.labels[i]
N = StandardBody.pointArray.shape[0]
flat_array = np.frombuffer(binLabel, dtype=np.float32)
return np.reshape(flat_array, (N, 3))
| StarcoderdataPython |
3501194 | <reponame>ekzemplaro/data_base_language
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# firebird_python_read.py
#
# Jun/18/2012
#
#
import sys
import json
import kinterbasdb
import string
#
sys.path.append ("/var/www/data_base/common/python_common")
#
from sql_manipulate import sql_update_proc
from cgi_manipulate import parse_parameter
#
#
conn = kinterbasdb.connect \
(dsn='localhost:/var/tmp/firebird/cities.fdb', \
user="sysdba", password="<PASSWORD>")
#
cursor = conn.cursor ()
#
# --------------------------------------------------------
#
print "Content-type: text/html\n\n"
#
# ---------------------------------------------------------------
array_bb = parse_parameter ()
#
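# apply each (id, population) pair posted by the form to the database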
for it in range (len(array_bb)):
print "it = %d<br />" % it
id_in = array_bb[it]['id']
print "id_in = %s<br />" % id_in
population_in = string.atoi (array_bb[it]['population'])
print "check aaaa<br />"
print "population_in = %d<br />" % population_in
print "check bbbbbbb<br />"
sql_update_proc (cursor,id_in,population_in)
print "check cccccc<br />"
#
conn.commit ()
#
cursor.close ()
conn.close ()
#
#
print "OK<br />"
#
| StarcoderdataPython |
5022697 | <reponame>bibliotechie/h
from zope.interface import Interface
class IAnnotationFormatter(Interface): # pylint:disable=inherit-non-class
"""
Interface for annotation formatters.
Annotation formatters are ways to add data to the annotation JSON payload
without putting everything in the annotation presenter, thus allowing better
decoupling of code.
The main method is ``format(annotation_resource)`` which is expected to
return a dictionary representation based on the passed-in annotation. If
the formatter depends on other data it should be able to load it on-demand
for the given annotation.
Since we are rendering lists of potentially hundreds of annotations in one
request, formatters need to be able to optimize the fetching of additional
data (e.g. from the database). Which is why this interface defines the
``preload(ids)`` method.
Each formatter implementation is expected to handle a cache internally which
is being preloaded with said method.
"""
def preload(ids): # noqa: N805 pylint:disable=no-self-argument
"""
Batch load data based on annotation ids.
:param ids: List of annotation ids based on which data should be preloaded.
:type ids: list of unicode
"""
def format(annotation_context): # noqa: N805 pylint:disable=no-self-argument
"""
Presents additional annotation data that will be served to API clients.
:param annotation_context: The annotation that needs presenting.
:type annotation_context: :py:class`h.traversal.AnnotationContext`
:returns: A formatted dictionary.
:rtype: dict
"""
| StarcoderdataPython |
3331631 | from django.db import models
from hello_django.storage_backends import PublicMediaStorage, PrivateMediaStorage
class Upload(models.Model):
uploaded_at = models.DateTimeField(auto_now_add=True)
file = models.FileField(storage=PublicMediaStorage())
class UploadPrivate(models.Model):
uploaded_at = models.DateTimeField(auto_now_add=True)
file = models.FileField(storage=PrivateMediaStorage())
| StarcoderdataPython |
9791298 | import os
import dash_bootstrap_components as dbc
from dash import dash_table, dcc, html
from dash.dependencies import ALL, Input, Output, State
from rubicon_ml import publish
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common.colors import light_blue, plot_background_blue
class ExperimentsTable(VizBase):
"""Visualize the experiments `experiments` and their metadata, metrics,
and parameters in a tabular format.
Parameters
----------
experiments : list of rubicon_ml.client.experiment.Experiment, optional
The experiments to visualize. Defaults to None. Can be set as
attribute after instantiation.
is_selectable : bool, optional
True to enable selection of the rows in the table, False otherwise.
Defaults to True.
"""
def __init__(self, experiments=None, is_selectable=True):
super().__init__(dash_title="experiment table")
self.experiments = experiments
self.is_selectable = is_selectable
@property
def layout(self):
"""Defines the experiments table's layout."""
bulk_select_buttons = [
html.Div(
dbc.Button(
"select all experiments",
color="primary",
disabled=not self.is_selectable,
id="select-all-button",
outline=True,
),
className="bulk-select-button-container",
),
html.Div(
dbc.Button(
"clear all experiments",
color="primary",
disabled=not self.is_selectable,
id="clear-all-button",
outline=True,
),
className="bulk-select-button-container",
),
html.Div(
dbc.Button(
"publish selected",
color="primary",
disabled=not self.is_selectable,
id="publish-selected-button",
outline=True,
),
className="bulk-select-button-container",
),
]
experiment_table = dash_table.DataTable(
columns=[
{"name": column, "id": column, "selectable": self.is_selectable}
for column in self.all_columns
],
data=self.experiment_records,
filter_action="native",
fixed_columns={"headers": True, "data": 1},
hidden_columns=self.hidden_columns,
id="experiment-table",
page_size=10,
row_selectable="multi" if self.is_selectable else False,
selected_rows=[],
sort_action="native",
sort_mode="multi",
style_cell={"overflow": "hidden", "textOverflow": "ellipsis"},
style_data_conditional=[
{"if": {"row_index": "odd"}, "backgroundColor": plot_background_blue}
],
style_header={"fontWeight": 700},
style_table={"minWidth": "100%"},
)
header_text = (
f"showing {len(self.experiments)} experiments "
f"{'at commit ' if self.commit_hash is not None else ''}"
)
header = html.H5(
[
html.P(
header_text,
className="experiment-table-header-text",
style={"float": "left"} if self.commit_hash is not None else {},
),
html.A(
html.P(
[
self.commit_hash,
html.I(className="bi bi-box-arrow-up-right external-link-icon"),
]
),
href=self.github_url,
style={"display": "none"} if self.commit_hash is None else {},
target="_blank",
),
],
className="header-text",
)
toggle_columns_dropdown = dbc.DropdownMenu(
[
dbc.DropdownMenuItem(
"show all",
id="show-all-dropdown-button",
),
dbc.DropdownMenuItem(
"hide all",
id="hide-all-dropdown-button",
),
dbc.DropdownMenuItem(divider=True),
*[
dbc.DropdownMenuItem(
dcc.Checklist(
inputClassName="column-dropdown-checkbox",
labelClassName="column-dropdown-label",
options=[{"label": column, "value": column}],
value=[column] if column not in self.hidden_columns else [],
id={
"type": "column-dropdown-checkbox",
"index": column,
},
),
id={"type": "column-dropdown-button", "index": column},
)
for column in self.all_columns
],
],
color="secondary",
id="column-selection-dropdown",
label="toggle columns",
)
publish_modal = dbc.Modal(
[
dbc.ModalHeader(
dbc.ModalTitle("publish selected experiments"),
close_button=True,
),
dbc.ModalBody(
[
dbc.Label("enter catalog YAML output path:"),
dbc.Input(
id="publish-path-input",
type="text",
value=os.path.join(os.getcwd(), "rubicon-ml-catalog.yml"),
),
],
),
dbc.ModalFooter(dbc.Button("publish", id="publish-button")),
],
id="publish-modal",
centered=True,
is_open=False,
size="lg",
)
if self.is_selectable:
header_row = [
html.Div(
header,
className="header-row",
),
dbc.Row(
[
*[dbc.Col(button, width="auto") for button in bulk_select_buttons],
dbc.Col(toggle_columns_dropdown),
],
className="button-group",
),
]
else:
header_row = [
dbc.Row(
[
dbc.Col(
html.Div(
header,
className="header-row",
),
width=8,
),
dbc.Col(toggle_columns_dropdown),
],
className="button-group",
),
]
return html.Div(
[
*header_row,
publish_modal,
dcc.Loading(experiment_table, color=light_blue),
],
)
def load_experiment_data(self):
"""Load the experiment data required for the experiments table.
Extracts all experiment metadata as well as parameters and metrics
from each experiment in `self.experiments`. Sets GitHub information
if applicable.
"""
self.experiment_records = []
self.metric_names = set()
self.parameter_names = set()
self.all_columns = ["id", "name", "created_at", "model_name", "commit_hash", "tags"]
self.hidden_columns = []
self.commit_hash = None
self.github_url = None
commit_hashes = set()
show_columns = {"id", "created_at"}
for experiment in self.experiments:
experiment_record = {
"id": experiment.id,
"name": experiment.name,
"created_at": experiment.created_at,
"model_name": experiment.model_name,
"commit_hash": None,
"tags": experiment.tags,
}
if experiment.commit_hash is not None:
experiment_record["commit_hash"] = experiment.commit_hash[:7]
commit_hashes.add(experiment.commit_hash)
show_columns.add("commit_hash")
if experiment.model_name is not None:
show_columns.add("model_name")
if experiment.name is not None:
show_columns.add("name")
if len(experiment.tags) > 0:
show_columns.add("tags")
for parameter in experiment.parameters():
experiment_record[parameter.name] = parameter.value
self.parameter_names.add(parameter.name)
for metric in experiment.metrics():
experiment_record[metric.name] = metric.value
self.metric_names.add(metric.name)
self.experiment_records.append(experiment_record)
self.metric_names = list(self.metric_names)
self.parameter_names = list(self.parameter_names)
self.all_columns.extend(self.parameter_names + self.metric_names)
self.hidden_columns = [
column
for column in self.all_columns
if column not in list(show_columns) + self.metric_names + self.parameter_names
]
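        # only surface a commit hash and GitHub link when every experiment shares the same commit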
if len(commit_hashes) == 1:
github_url_root = self.experiments[0].project.github_url[:-4]
commit_hash = list(commit_hashes)[0]
self.commit_hash = commit_hash[:7]
self.github_url = f"{github_url_root}/tree/{commit_hash}"
def register_callbacks(self, link_experiment_table=False):
@self.app.callback(
Output({"type": "column-dropdown-checkbox", "index": ALL}, "value"),
[
Input("show-all-dropdown-button", "n_clicks_timestamp"),
Input("hide-all-dropdown-button", "n_clicks_timestamp"),
],
prevent_initial_call=True,
)
def update_selected_column_checkboxes(last_show_click, last_hide_click):
"""Bulk updates for the selections in the "toggle columns" dropdown.
Returns all if triggered by the "show all" button and returns
only "id" if triggered by the "hide all" button. The initial
call is prevented as the default state is neither all nor none.
"""
last_show_click = last_show_click if last_show_click else 0
last_hide_click = last_hide_click if last_hide_click else 0
if last_hide_click > last_show_click:
hidden_values = [[]] * (len(self.all_columns) - 1)
return [["id"], *hidden_values]
return [[column] for column in self.all_columns]
@self.app.callback(
Output("experiment-table", "hidden_columns"),
Input({"type": "column-dropdown-checkbox", "index": ALL}, "value"),
)
def update_hidden_experiment_table_cols(selected_columns):
"""Hide and show the columns in the experiment table.
Returns the columns that should be hidden based on whether or
not the column's corresponding value is checked in the "toggle
columns" dropdown.
"""
selected_columns = [sc[0] for sc in selected_columns if len(sc) > 0]
return [column for column in self.all_columns if column not in selected_columns]
@self.app.callback(
Output("experiment-table", "selected_rows"),
[
Input("select-all-button", "n_clicks_timestamp"),
Input("clear-all-button", "n_clicks_timestamp"),
],
State("experiment-table", "derived_virtual_indices"),
)
def update_selected_experiment_table_rows(
last_select_click, last_clear_click, experiment_table_indices
):
"""Bulk selection for the rows of the experiment table.
Returns all if triggered by the "select all" button and returns
none if triggered by the "clear all" button.
"""
if last_select_click is None and last_clear_click is None:
return list(range(len(self.experiments)))
last_select_click = last_select_click if last_select_click else 0
last_clear_click = last_clear_click if last_clear_click else 0
if last_select_click > last_clear_click:
return experiment_table_indices
return []
@self.app.callback(
Output("publish-modal", "is_open"),
[
Input("publish-selected-button", "n_clicks_timestamp"),
Input("publish-button", "n_clicks_timestamp"),
],
[
State("publish-modal", "is_open"),
State("publish-path-input", "value"),
State("experiment-table", "derived_virtual_selected_rows"),
State("experiment-table", "derived_virtual_data"),
],
)
def toggle_publish_modal(
last_publish_selected_click,
last_publish_click,
is_modal_open,
publish_path,
selected_rows,
data,
):
last_publish_selected_click = (
last_publish_selected_click if last_publish_selected_click else 0
)
last_publish_click = last_publish_click if last_publish_click else 0
if last_publish_selected_click > last_publish_click:
return True
elif last_publish_click > last_publish_selected_click:
selected_experiment_ids = [data[row].get("id") for row in selected_rows]
selected_experiments = [
e for e in self.experiments if e.id in selected_experiment_ids
]
publish(selected_experiments, publish_path)
return False
return is_modal_open
| StarcoderdataPython |
3218129 | """Softmax multi-class logistic regression classifier in Python."""
# References
# MultiClass Logistic Classifier in Python
# https://www.codeproject.com/Articles/821347/MultiClass-Logistic-Classifier-in-Python
import numpy as np
class Softmax(object):
"""Softmax classfier layer"""
def __init__(self):
self.W = None
self.b = None
def make_onehot(self, Y, n_dim):
"""Compute class indices to one-hot vectors
Y is a vector of numeric class labels
n_dim is a scalar
"""
y_onehot = np.zeros((Y.shape[0], n_dim))
y_onehot[np.arange(Y.shape[0]), Y] = 1
return y_onehot
    def fit(self, X, Y, iters=100, learn_rate=0.01, reg=0.1):
"""Train the model.
X is the feature matrix (n_samples, n_features)
Y is a vector of numeric zero-index class labels dimension (n_samples)
W is the weight matrix of dimension (n_features, n_classes)
b is the a bias vector of dimenison (n_classes)
"""
# initialize weight matrix and bias vector
self.W = np.random.normal(0, 0.1, (X.shape[1], np.max(Y)+1))
self.b = np.zeros(np.max(Y)+1)
# train model
# TODO
def softmax(self, Z):
"""Compute softmax activations
Z is a matrix with dimension (n_batch, n_classes)
"""
ex = np.exp(Z-np.max(Z, axis=1, keepdims=True))
return ex / ex.sum(axis=1, keepdims=True)
def predict(self, X):
"""return prediction of most likely class for each instance"""
return np.argmax(self.predict_log_proba(X), axis=1)
def predict_log_proba(self, X):
""" return probability vector for each instance
X is a feature matrix of dimenison (n_batch, n_features)
returns matrix of dimension (n_batch, n_classes)
"""
return self.softmax(np.dot(X, self.W) + self.b)
def loss(self, X, Y, reg):
"""compute cross-entropy loss
X is a feature matrix of dimension (n_batch, n_features)
Y is a vector of the true class labels of dimension (n_batch)
"""
# compute softmax activations
        Yh = self.predict_log_proba(X)
        # loss is the negative log probability of the correct class
        loss = np.sum(-np.log(Yh[np.arange(X.shape[0]), Y]))
        # add regularization penalty
        loss += reg * np.sum(np.square(self.W))
        return loss / X.shape[0]
def grad(self, X, Y, reg):
"""compute gradient of cost function with respect to inputs"""
        Yh = self.predict_log_proba(X)
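        # sketch (standard result, stated as an assumption): with Y1h = self.make_onehot(Y, self.W.shape[1]),
        # the gradients of the loss above would be
        # dW = (X.T.dot(Yh - Y1h) + 2 * reg * self.W) / X.shape[0]
        # db = (Yh - Y1h).sum(axis=0) / X.shape[0]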
| StarcoderdataPython |
# Write a program that reads the NAME, AGE and SEX of 4 people. At the end of the program show:
# - the average age of the group
# - the name of the oldest man
# - how many women are under 20 years old
somaidade = 0
mediaidade = 0
maioridadadehomem = 0
nomevelho = ''
totmulher20 = 0
for p in range (1,5):
    print(f'----- PERSON #{p} ---------')
    nome = input('Name: ').strip()
    idade = int(input('Age: '))
    sexo = str(input('Sex [M/F]: ')).strip()
somaidade += idade
    if p == 1 and sexo in 'Mm':
        maioridadadehomem = idade
        nomevelho = nome
    if sexo in 'Mm' and idade > maioridadadehomem:
        maioridadadehomem = idade
        nomevelho = nome
    if sexo in 'Ff' and idade < 20:  # ('Mm'/'Ff') accepts upper or lower case input
totmulher20 += 1
mediaidade = somaidade / 4
print(f'The average age of the group is {mediaidade} years')
print(f'The oldest man is {maioridadadehomem} years old and his name is {nomevelho}')
print(f'In total there are {totmulher20} women under 20 years old')
| StarcoderdataPython |
12856079 | from torchvision import datasets, transforms
from base import BaseDataLoader
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import torch
from skimage import io#, transform
import numpy as np
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class LCZDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
# trsfm = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])
self.data_dir = data_dir
self.dataset = LCZdataset(self.data_dir, transform=transforms.Compose([RandomCrop(64),ToTensor()]))
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class LCZdataset(Dataset):
"""LCZdataset."""
def __init__(self, csv_file, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.landmarks_frame = pd.read_csv(csv_file)
self.transform = transform
def __len__(self):
return len(self.landmarks_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = self.landmarks_frame.iloc[idx, 0]
image = io.imread(img_name)
classLabel = self.landmarks_frame.iloc[idx, 1]
classLabel = np.array([classLabel])
#landmarks = landmarks.astype('float').reshape(-1, 2)
#print(image.shape)
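        # keep a 10-band subset of the image channels and rescale the raw integer values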
image = image[:,:,[1,2,3,4,5,6,7,10,11,12]]/10000.0
sample = {'image': image, 'label': classLabel}
if self.transform:
sample = self.transform(sample)
return sample['image'], sample['label']-1#sample
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
#landmarks = landmarks - [left, top]
return {'image': image, 'label': label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, label = sample['image'], sample['label']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
        return {'image': torch.from_numpy(image.astype("float")).float(),
                'label': torch.squeeze(torch.from_numpy(label))}
| StarcoderdataPython |
1948012 | <reponame>Maistho/twitter-parser
from flask import Flask, render_template, request
from TweetParser import TweetParser
app = Flask(__name__)
app.debug = True
myparser = TweetParser('gamergate')
def precision(c, tweets):
"""Computes precision for class `c` on the specified test data."""
tp = 0
fp = 0
for tweet in tweets:
if c in tweet['predictions']:
if c in tweet['tags']:
tp+=1
else:
fp+=1
if(tp+fp == 0):
return float('nan')
else:
return tp/(tp+fp)
def recall(c, tweets):
"""Computes recall for class `c` on the specified test data."""
tp = 0
fn = 0
for tweet in tweets:
if c in tweet['tags']:
if c in tweet['predictions']:
tp+=1
else:
fn+=1
if tp+fn == 0:
return float('nan')
return tp/(tp+fn)
def count(c, tweets):
count = 0
for tweet in tweets:
if c in tweet['tags']:
count += 1
return count
@app.route("/", methods=['GET', 'POST'])
def hello():
if request.method == 'POST':
try:
tagged_categories = request.form.getlist('category')
tweet_id = int(request.form['tweetId'])
except KeyError:
tweet = myparser.getNextTweet()
return render_template("tag.html", tweet=tweet, categories=myparser.target_names)
cat = []
for tag in tagged_categories:
i = myparser.target_names.index(tag)
if i != None:
cat.append(i)
myparser.add_tags(tweet_id, cat)
tweet = myparser.getNextTweet()
return render_template("tag.html", tweet=tweet, categories=myparser.target_names)
@app.route("/train")
def train():
success = myparser.train()
return render_template('trained.html', success=success)
@app.route("/predict")
def predict():
tweet = myparser.getNextTweet()
predicted = myparser.predict([tweet['text']])
return render_template('predict.html', tweet = tweet, prediction = predicted, categories=myparser.target_names)
@app.route("/test")
def test():
predicted, tweets = myparser.test()
new_tweets = []
for tweet, prediction in zip(tweets, predicted):
tweet['predictions'] = prediction
new_tweets.append(tweet)
cats = []
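    # per-class precision, recall and support summary for the results template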
for c in range(0, len(myparser.target_names)):
cats.append({
'name': myparser.target_names[c],
'recall': recall(c, new_tweets),
'precision': precision(c, new_tweets),
'count': count(c, new_tweets)
})
tagging = []
for tweet, prediction in zip(tweets, predicted):
tagging.append({
'text': tweet['text'],
'gold': ', '.join(myparser.target_names[x] for x in tweet['tags']),
'pred': ', '.join(myparser.target_names[x] for x in prediction),
'color': 'green' if len([e for e in prediction if e in tweet['tags']]) >0 else 'red'
})
return render_template('test.html', cats=cats, tagging=tagging)
if __name__ == "__main__":
app.run(host='0.0.0.0')
| StarcoderdataPython |
3495346 | <reponame>tkishimoto/gridalert<filename>gridalert/template/sts_template.py
from logging import getLogger
logger = getLogger(__name__)
from ..util import date as util_date
from ..util import text as util_text
from ..util import hash as util_hash
class StsTemplate:
def __init__(self, cl_conf):
self.cl_conf = cl_conf
def initialize(self):
logger.info('load sts template')
def execute(self, text):
buffers = []
for line in open(text, errors='replace'):
# genre filename year score sentence1 sentence2
meta = line.split('\t')
# host, date, service, metadata, data, label
host = 'dummy'
date = '2019-08-16 17:23:48'
service = 'dummy'
label = '1'
metadata = 'dummy'
data0 = meta[5]
data1 = meta[6]
if 'dev' in text:
service = 'dev'
elif 'test' in text:
service = 'test'
elif 'train' in text:
service = 'train'
buffers.append([host, date, service,
metadata, data0, label])
buffers.append([host, date, service,
metadata, data1, label])
return buffers
| StarcoderdataPython |
5161560 | import pathlib
import pytest
from x2webrtc.signaling import CopyAndPasteSignaling, get_signaling_method
BASE_DIR = pathlib.Path(__file__).resolve().parent
@pytest.mark.asyncio
async def test_get_signaling_method() -> None:
default = get_signaling_method()
assert default is not None
assert isinstance(default, CopyAndPasteSignaling)
success = get_signaling_method(BASE_DIR / "examples/good_plugin.py")
assert success is not None
assert await success(None) is True
with pytest.raises(RuntimeError) as e:
get_signaling_method(BASE_DIR / "examples/unknown_plugin.py")
assert str(e.value) == "invalid plugin path"
with pytest.raises(RuntimeError) as e:
get_signaling_method(BASE_DIR / "examples/bad_plugin.py")
assert str(e.value) == "cannot find `plugin` method in the plugin"
with pytest.raises(RuntimeError) as e:
get_signaling_method(BASE_DIR / "examples/bad_plugin2.py")
assert str(e.value) == "`plugin` method must return a subclass of `SignalingPlugin`"
| StarcoderdataPython |
6471074 | from .anvilGoogleMap import *
| StarcoderdataPython |
6411703 | <reponame>FCC-hh-framework/FCCFitter
import json
import optparse
import sys
from matplotlib import pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
import os
import ROOT as r
from ROOT import *
from array import array
plotname=''
#__________________________________________________________
def getMassDisco(signiDict,mmin,mmax):
indict=None
with open(signiDict,'r') as f:
indict = json.load(f)
mass =[]
disco = []
for m in indict:
if int(m)<mmin :
print 'm, mmin ',m,' ',mmin,' ',m>mmin
continue
if int(m)>mmax:
print 'm, mmax ',m,' ',mmax,' ',m>mmax, type(m), type(mmax)
continue
signi=[]
lumi=[]
print m
for i in indict[m]:
signi.append(i['signi'])
lumi.append(i['lumi'])
print signi
print lumi
signi,lumi = zip(*sorted(zip(signi, lumi)))
plt.figure()
xnew = np.linspace(min(lumi), max(lumi), 100)
f = interp1d(lumi, signi)
plt.plot(lumi, signi, 'o-', xnew, f(xnew), '-')
plt.plot([min(lumi), max(lumi)], [5, 5], 'k-', lw=2)
plt.plot([min(lumi), max(lumi)], [3, 3], 'k-', lw=2)
        plt.xlabel('luminosity fb$^{-1}$')
plt.ylabel('significance sigma')
plt.grid(True)
plt.savefig('Plots/Signi_%s_%s.eps'%(m,plotname))
plt.close()
plt.figure()
xnew = np.linspace(min(signi), max(signi), 1000)
f = interp1d(signi, lumi)
plt.plot(signi, lumi, 'o-', xnew, f(xnew), '-')
plt.plot([5,5],[min(lumi), max(lumi)], 'k-', lw=2)
plt.plot([3,3],[min(lumi), max(lumi)], 'k-', lw=2)
        plt.ylabel('luminosity fb$^{-1}$')
plt.xlabel('significance sigma')
plt.grid(True)
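        # discovery luminosity: the luminosity where the interpolated curve reaches 5 sigma,
        # or the value at the largest sampled significance if 5 sigma is never reached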
discolumi = f(5) if xnew[-1]>5.0 else f(xnew[-1])
plt.plot([0,5],[discolumi, discolumi], 'k-', lw=2)
plt.savefig('Plots/SigniInverse_%s_%s.eps'%(m,plotname))
print 'lumi = %f'%discolumi
plt.close()
mass.append(int(m))
#CLEMENT
#disco.append(discolumi/1000)
disco.append(discolumi)
Mass,Disco = zip(*sorted(zip(mass, disco)))
print Mass
print Disco
return Disco,Mass
#__________________________________________________________
if __name__=="__main__":
parser = optparse.OptionParser(description="analysis parser")
parser.add_option('-f', '--files', dest='files', type=str, default='')
parser.add_option('-n', '--names', dest='names', type=str, default='')
parser.add_option('-p', '--plot', dest='plot', type=str, default='')
parser.add_option('--helhc', action='store_true')
parser.add_option('--mmax', dest='mmax', type=int, default=1000)
parser.add_option('--mmin', dest='mmin', type=int, default=-1000)
ops, args = parser.parse_args()
signiDict=ops.files
names=ops.names
mmax=ops.mmax
mmin=ops.mmin
if not os.path.isdir("Plots/"):
os.system('mkdir Plots')
signiList = signiDict.split(' ')
namesList = names.split(' ')
print signiList
print namesList
if len(signiList)!=len(namesList):
print 'different length, name, signi, exit',len(signiList),' ',len(namesList)
sys.exit(3)
plt.figure()
graph_array = []
for s in xrange(len(signiList)):
plotname=namesList[s]
print signiList[s]
Disco,Mass=getMassDisco(signiList[s],mmin,mmax)
Disco=[i/1000. for i in Disco]
from scipy import interpolate
f = interpolate.interp1d(Mass, Disco)
xnew = np.linspace(Mass[0],Mass[-1],50)
Disco_smooth = f(xnew)
plt.plot(Mass, Disco, label=namesList[s])
#plt.plot( xnew, Disco_smooth, '-')
Mass_array = array( 'd' )
Disco_array = array( 'd' )
for m in range( len(Mass) ) :
Mass_array.append( Mass[m] )
Disco_array.append( Disco[m] )
graph_array.append( [namesList[s],Mass_array,Disco_array] )
plt.ylabel('Int. Luminosity fb$^{-1}$')
plt.xlabel('mass [TeV]')
#plt.title('luminosity versus mass for a 5 $\sigma$ discovery')
if 'tt' in namesList:
print '----------------------'
print '-- running tt --'
print '----------------------'
plt.ylim(ymax = 1000000, ymin = 100)
        plt.text(10.57, 500000, r"$Z' \rightarrow t\bar{t}$")
plt.text(10.57, 300000, r'Integrated luminosity versus mass for a 5 $\sigma$ discovery')
plt.text(10.57, 2500, r'$2.5ab^{-1}$')
plt.plot([10,30], [2500, 2500], color='black')
plt.text(10.57, 30000, r'$30ab^{-1}$')
plt.plot([10,30], [30000, 30000], color='black')
if 'll' in namesList:
print '----------------------'
print '-- running ll --'
print '----------------------'
plt.ylim(ymax = 1000000, ymin = 10)
        plt.text(16, 500000, r"$Z' \rightarrow l^{+}l^{-}$")
plt.text(16, 300000, r'Integrated luminosity versus mass for a 5 $\sigma$ discovery')
plt.text(16, 2500, r'$2.5ab^{-1}$')
plt.plot([15,50], [2500, 2500], color='black')
plt.text(16, 30000, r'$30ab^{-1}$')
plt.plot([15,50], [30000, 30000], color='black')
plt.legend(loc=4)
if 'jj' in namesList:
print '----------------------'
print '-- running jj --'
print '----------------------'
plt.ylim(ymax = 1000000, ymin = 100)
        plt.text(35, 50, r'$Q* \rightarrow jet\ jet$')
#plt.text(10.57, 300000, r'Integrated luminosity versus mass for a 5 $\sigma$ discovery')
#plt.text(10.57, 2500, r'$2.5ab^{-1}$')
#plt.plot([10,30], [2500, 2500], color='black')
#plt.text(10.57, 30000, r'$30ab^{-1}$')
#plt.plot([10,30], [30000, 30000], color='black')
plt.grid(True)
#plt.legend(loc=4)
plt.yscale('log')
#plt.savefig('Plots/DiscoveryPotential_%s.eps'%(plotname))
#plt.savefig('Plots/DiscoveryPotential_%s.png'%(plotname))
plt.close()
########################
# ROOT style
canvas = r.TCanvas("Discovery","Discovery",0,0,600,600)
#CLEMENT
canvas.SetLogy(1)
canvas.SetTicks(1,1)
canvas.SetLeftMargin(0.14)
canvas.SetRightMargin(0.08)
canvas.SetGridx()
#CLEMENT
canvas.SetGridy()
lg = r.TLegend(0.6,0.18,0.78,0.33)
lg.SetFillStyle(0)
lg.SetLineColor(0)
lg.SetBorderSize(0)
lg.SetShadowColor(10)
#CLEMENT
#lg.SetTextSize(0.040)
lg.SetTextSize(0.030)
lg.SetTextFont(42)
# search wich curve has the max Mass
s_xmax=0
xmax=-10.
for s in xrange(len(signiList)):
Mass = sorted(graph_array[s][1])
if Mass[len(Mass)-1]>xmax:
xmax=Mass[len(Mass)-1]
s_xmax=s
# compute min and max on the the curve defined above
max_mass_idx=-1
xmin=10000.
xmax=-10.
for s in xrange(len(signiList)):
Disco = graph_array[s][2]
Mass = graph_array[s][1]
# min
if Mass[0]<xmin: xmin=Mass[0]
# max
if s==s_xmax:
for d in Disco :
if d>1E+6 and max_mass_idx==-1: max_mass_idx=Disco.index(d)
xmax=Mass[max_mass_idx]
# protection
if xmax<=xmin : xmax=Mass[len(Disco)-1]
# draw
dicgraph={}
color = [kBlue-4,kRed,kGreen-3,kViolet,kBlack]
print graph_array
for s in xrange(len(signiList)):
ana = graph_array[s][0]
Mass = graph_array[s][1]
nmass = len(Mass)
Disco = graph_array[s][2]
dicgraph[str(s)]= r.TGraph(nmass, Mass, Disco)
if s<5 : dicgraph[str(s)].SetLineColor(color[s])
else : dicgraph[str(s)].SetLineColor(s+20)
dicgraph[str(s)].SetLineWidth(3)
if s == 0:
dicgraph[str(s)].SetName(ana)
dicgraph[str(s)].SetTitle( "" )
dicgraph[str(s)].GetXaxis().SetTitle( "Mass [TeV]" )
#CLEMENT
dicgraph[str(s)].GetYaxis().SetTitle( "Int. Luminosity [fb^{-1}]" )
dicgraph[str(s)].GetXaxis().SetLimits(xmin, xmax)
if Disco[0]>1E+2:
dicgraph[str(s)].SetMinimum(1E+2)
elif Disco[0]>1E+1:
dicgraph[str(s)].SetMinimum(1E+1)
elif Disco[0]>1E+0:
dicgraph[str(s)].SetMinimum(1E+0)
else:
dicgraph[str(s)].SetMinimum(1E-1)
#BEGIN CLEMENT HACK
#dicgraph[str(s)].SetMinimum(1E+0)
#END CLEMENT HACK
if Disco[0]<1E+6: dicgraph[str(s)].SetMaximum(1E+6)
#BEGIN CLEMENT HACK
#dicgraph[str(s)].SetMaximum(7E+1)
#END CLEMENT HACK
if Disco[0]>Disco[len(Disco)-1]: dicgraph[str(s)].SetMaximum(Disco[0]*100.)
dicgraph[str(s)].GetXaxis().SetTitleOffset(1.3)
dicgraph[str(s)].GetYaxis().SetTitleOffset(1.6)
dicgraph[str(s)].Draw("AC")
#dicgraph[str(s)].SetMarkerStyle(21)
#dicgraph[str(s)].SetMarkerSize(1.5)
#dicgraph[str(s)].SetMarkerColor(kRed)
#dicgraph[str(s)].Draw("*")
else :
dicgraph[str(s)].Draw("C")
lg_lbl=ana
lg_lbl=lg_lbl.replace('ee','e^{+}e^{-}')
lg_lbl=lg_lbl.replace('mumu','#mu^{+}#mu^{-}')
lg_lbl=lg_lbl.replace('ll','#it{l^{+}l^{-}} (#it{l}=e, #mu)')
        lg_lbl=lg_lbl.replace('bb','#it{b#bar{b}}')
        lg_lbl=lg_lbl.replace('qq','#it{q#bar{q}}')
lg_lbl=lg_lbl.replace('5p','5%')
lg_lbl=lg_lbl.replace('10p','10%')
lg_lbl=lg_lbl.replace('15p','15%')
lg_lbl=lg_lbl.replace('20p','20%')
lg_lbl=lg_lbl.replace('nominal','#sigma_{E}/E = 3%')
lg_lbl=lg_lbl.replace('6%','#sigma_{E}/E = 6%')
lg_lbl=lg_lbl.replace('9%','#sigma_{E}/E = 9%')
lg_lbl=lg_lbl.replace('12%','#sigma_{E}/E = 12%')
lg_lbl=lg_lbl.replace('15%','#sigma_{E}/E = 15%')
lg.AddEntry(dicgraph[str(s)],lg_lbl,"L")
if len(signiList)>1 : lg.Draw()
line1=None
line2=None
if ops.helhc:
line1 = TLine(xmin,1E+3,xmax,1E+3);
line2 = TLine(xmin,1.5E+4,xmax,1.5E+4);
else:
line1 = TLine(xmin,2.5E+3,xmax,2.5E+3);
line2 = TLine(xmin,3E+4,xmax,3E+4);
line1.SetLineWidth(3)
line1.SetLineStyle(2)
line1.SetLineColor(kGreen+3);
line1.Draw("same");
line2.SetLineWidth(3)
line2.SetLineStyle(2)
line2.SetLineColor(kGreen+3);
line2.Draw("same");
# make proper channel definition -> user need to adapts with his own definitions
the_ana=''
if 'ee' in namesList and 'mumu' in namesList and 'll' in namesList and 'tt' in namesList: the_ana='llSSM'
if 'ee' in namesList and 'mumu' in namesList and 'll' in namesList: the_ana='llSSM'
# different models Z' case
elif 'tt' in namesList and 'SSM' not in namesList and "TC2" not in namesList : the_ana='ttSSM'
elif 'bb' in namesList : the_ana='bbSSM'
elif 'qq' in namesList : the_ana='qqSSM'
#
elif 'SSM' in namesList and "TC2" in namesList : the_ana='ttTC2SSM'
elif 'TC2' in namesList : the_ana='ttTC2'
elif 'nominal' in namesList : the_ana='ttTC2'
elif 'tt' in namesList : the_ana='ttTC2'
elif 'SSM' in namesList : the_ana='ttSSM'
elif 'ee' in namesList : the_ana='ee'
elif 'mumu' in namesList : the_ana='mumu'
elif 'll' in namesList : the_ana='ll'
elif 'ww' in namesList : the_ana='ww'
elif 'jj' in namesList : the_ana='jj'
elif 'tautau' in namesList : the_ana='tautau'
elif 'mumu_flav_ano' in namesList : the_ana='mumu_flav_ano'
elif 'mumu_LQ'in namesList : the_ana='mumu_LQ'
#CLEMENT
elif 'nominal'in namesList : the_ana='zpjj'
elif 'FCC'in namesList : the_ana='mumu'
else : print "No associated channel, give it yourself by making your case for the_ana"
# define the associated channel
plotname = ""
if the_ana=='llSSM' : plotname+="Z\'_{SSM}"
if the_ana=='ttSSM' : plotname+="Z\'_{SSM} #rightarrow t#bar{t}"
if the_ana=='bbSSM' : plotname+="Z\'_{SSM} #rightarrow b#bar{b}"
if the_ana=='qqSSM' : plotname+="Z\'_{SSM} #rightarrow q#bar{q}"
if the_ana=='ttTC2SSM' : plotname+="Z\' #rightarrow t#bar{t}"
if the_ana=='ttTC2' : plotname+="Z\'_{TC2} #rightarrow t#bar{t}"
if the_ana=='ll' : plotname+="Z\'_{SSM} #rightarrow l^{+}l^{-} (l=e, #mu)"
if the_ana=='ee' : plotname+="Z\'_{SSM} #rightarrow e^{+}e^{-}"
if the_ana=='mumu' : plotname+="Z\'_{SSM} #rightarrow #mu^{+}#mu^{-}"
if the_ana=='ww' : plotname+="G_{RS} #rightarrow W^{+}W^{-}"
if the_ana=='jj' : plotname+="Q* #rightarrow jj"
if the_ana=='tautau' : plotname+="Z\'_{SSM} #rightarrow #tau^{+}#tau^{-}"
if the_ana=='mumu_flav_ano' : plotname+="Z\' #rightarrow #mu^{+}#mu^{-} (1710.06363)"
if the_ana=='mumu_LQ' : plotname+="t-channel LQ #rightarrow #mu^{+}#mu^{-} (1710.06363)"
if the_ana=='zpjj' : plotname+="Z\'_{SSM} #rightarrow jj"
if the_ana=='FCC' : plotname+="Z\'_{SSM} #rightarrow #mu^{+}#mu^{-}"
# automatic position of caption
left_pos=0.18
center_pos=0.44
right_pos=0.63
s_pos=[]
# make list of position of curves at 1e6
for s in xrange(len(signiList)):
Mass = graph_array[s][1]
Disco = graph_array[s][2]
mass_idx=-1
for d in Disco :
if d>1E+6 and mass_idx==-1: mass_idx=Disco.index(d)
ref_mass=0.
if mass_idx==-1: ref_mass=Mass[len(Mass)-1]
else : ref_mass=float(Mass[mass_idx]+Mass[mass_idx-1])/2.
if ref_mass<float(xmax-xmin)*3./8. : s_pos.append("left")
elif ref_mass>float(xmax-xmin)*6./8. : s_pos.append("right")
else : s_pos.append("center")
# match
n_pos=0
#print s_pos
if 'left' in s_pos and 'center' in s_pos and 'right' in s_pos : n_pos=0
elif 'left' in s_pos and 'center' in s_pos : n_pos=2
elif 'left' in s_pos and 'right' in s_pos : n_pos=1
elif 'center' in s_pos and 'right' in s_pos : n_pos=0
elif 'left' in s_pos : n_pos=2
else : n_pos=0
#
if n_pos==0 : the_pos=left_pos
if n_pos==1 : the_pos=center_pos
if n_pos==2 : the_pos=right_pos
# long title
if the_ana=='mumu_flav_ano' : the_pos=left_pos
if the_ana=='mumu_LQ' : the_pos=left_pos
label = r.TLatex()
label.SetNDC()
label.SetTextColor(1)
label.SetTextSize(0.042)
label.SetTextAlign(12)
if ops.helhc:
label.SetNDC()
label.SetTextAlign(31);
label.SetTextSize(0.04)
label.DrawLatex(0.90,0.92, "#it{HE-LHC Simulation (Delphes)}")
label.SetTextAlign(12);
label.SetNDC(r.kTRUE)
label.SetTextSize(0.04)
label.DrawLatex(0.18,0.83, "#bf{#it{#sqrt{s} = 27 TeV}}")
else:
label.SetNDC()
label.SetTextAlign(31);
label.SetTextSize(0.04)
label.DrawLatex(0.90,0.92, "#it{FCC-hh Simulation (Delphes)}")
label.SetTextAlign(12);
label.SetNDC(r.kTRUE)
label.SetTextSize(0.04)
label.DrawLatex(0.18,0.83, "#bf{#it{#sqrt{s} = 100 TeV}}")
label.SetTextSize(0.03)
# label.DrawLatex(0.2,0.14, "Integrated luminosity versus mass for a 5 #sigma discovery")
label.DrawLatex(0.4,0.14, "5 #sigma discovery")
label.SetTextSize(0.036)
if the_pos==left_pos : label.DrawLatex(the_pos,0.78, plotname)
else : label.DrawLatex(the_pos+0.3,0.83, plotname)
label.SetTextSize(0.03)
label.SetNDC(False)
mass_for_latex=int(xmin)*1.68
if ops.helhc:
label.DrawLatex(mass_for_latex,0.7*15E+3, "15 ab^{-1}")
label.DrawLatex(mass_for_latex,0.7*1E+3, "1 ab^{-1}")
else:
label.DrawLatex(mass_for_latex,0.7*30E+3, "30 ab^{-1}")
label.DrawLatex(mass_for_latex,0.7*2.5E+3, "2.5 ab^{-1}")
canvas.RedrawAxis()
#canvas.Update()
canvas.GetFrame().SetBorderSize( 12 )
canvas.Modified()
canvas.Update()
canvas.SaveAs('Plots/DiscoveryPotential_%s_rootStyle.eps'%(ops.plot))
canvas.SaveAs('Plots/DiscoveryPotential_%s_rootStyle.png'%(ops.plot))
| StarcoderdataPython |
3435604 | <gh_stars>1-10
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import ocl
from numba.ocl.testing import unittest
from numba.ocl.testing import OCLTestCase
class TestCudaArrayArg(OCLTestCase):
def test_array_ary(self):
@ocl.jit('double(double[:],int64)', device=True)
def device_function(a, c):
return a[c]
@ocl.jit('void(double[:],double[:])')
def kernel(x, y):
i = ocl.get_global_id(0)
y[i] = device_function(x, i)
x = np.arange(10, dtype=np.double)
y = np.zeros_like(x)
kernel[10, 1](x, y)
self.assertTrue(np.all(x == y))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11213748 | <filename>Day5/day5.py
def run_program(inputs):
with open("input.txt", "r") as data:
# split string on commas, convert it to integers, store them in a list
intcode = list(map(lambda x: int(x), data.read().split(",")))
startIndex = 0
while intcode[startIndex] != 99:
# convert to string and pad 0s
opcode_buffer = str(intcode[startIndex])
while len(opcode_buffer) < 5:
opcode_buffer = "0" + opcode_buffer
print(f"OpCode Buffer: {opcode_buffer}")
opcode = int(opcode_buffer[-2:])
print(f"OpCode: {opcode}")
if opcode == 1:
# do addition
if opcode_buffer[-3] == "0":
#print("v1 position mode")
v1 = intcode[intcode[startIndex + 1]]
else:
#print("v1 immediate mode")
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
destination = intcode[startIndex + 3]
#print(f"Setting index {destination} to {v1 + v2}")
intcode[destination] = v1 + v2
# increment starting index
startIndex += 4
elif opcode == 2:
# do multiplication
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
destination = intcode[startIndex + 3]
#print(f"Setting index {destination} to {v1 * v2}")
intcode[destination] = v1 * v2
# increment starting index
startIndex += 4
elif opcode == 3:
# take input
v1 = inputs.pop(0)
#print(f"Setting index {intcode[startIndex + 1]} to {v1}")
intcode[intcode[startIndex + 1]] = v1
startIndex += 2
elif opcode == 4:
# print output
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
print(f"Output: {v1}")
startIndex += 2
elif opcode == 5:
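            # jump-if-true: if the first parameter is non-zero, jump to the second parameter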
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
if v1 != 0:
startIndex = v2
else:
startIndex += 3
elif opcode == 6:
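            # jump-if-false: if the first parameter is zero, jump to the second parameter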
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
if v1 == 0:
startIndex = v2
else:
startIndex += 3
elif opcode == 7:
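            # less-than: store 1 at the destination if param1 < param2, else 0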
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
destination = intcode[startIndex + 3]
if v1 < v2:
intcode[destination] = 1
else:
intcode[destination] = 0
startIndex += 4
elif opcode == 8:
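            # equals: store 1 at the destination if param1 == param2, else 0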
if opcode_buffer[-3] == "0":
v1 = intcode[intcode[startIndex + 1]]
else:
v1 = intcode[startIndex + 1]
if opcode_buffer[-4] == "0":
v2 = intcode[intcode[startIndex + 2]]
else:
v2 = intcode[startIndex + 2]
destination = intcode[startIndex + 3]
if v1 == v2:
intcode[destination] = 1
else:
intcode[destination] = 0
startIndex += 4
else:
print(f"Unknown Opcode: {opcode}")
break
return intcode[0]
print(run_program([5])) | StarcoderdataPython |
6428712 | <gh_stars>1-10
name = "logmein_host"
| StarcoderdataPython |
1883632 | <filename>trello_utility.py
import random
import trello
def get_list(name: str, board: trello.Board) -> trello.List:
"""
Find an open Trello list of the specified name
:param name: The name of the list
:param board: The board
:return: The list
"""
for trello_list in board.get_lists('open'):
if trello_list.name == name:
return trello_list
return None
def get_label(name, board):
"""
    Find a label object by name
    :param name: The name of the label to find
    :param board: The board to search
    :return: The label, or None if no label has that name
"""
for label in board.get_labels():
if label.name == name:
return label
return None
def random_goal_colour():
# removed green, black and red as used for goals, dates and events respectively
colours = ['yellow', 'purple', 'blue', 'orange', 'sky', 'pink', 'lime']
return random.choice(colours)
def create_label(name, colour, board):
"""
Create a new label of specified name and colour
:param name:
:param colour:
:param board:
:return:
"""
return board.add_label(name, colour)
def get_cards_with_label(label: trello.Label, board: trello.Board):
return filter(lambda card: card.labels and label in card.labels, board.get_cards())
def archive_cards(cards):
for card in cards:
card.set_closed(True)
| StarcoderdataPython |
6503050 | from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import itertools
import numpy as np
import psi4
from psi4.driver.qcdb import periodictable
from psi4.driver.qcdb import physconst
from psi4.driver.qcdb import cov_radii
from . import BFS_bonding
from . import helper_methods as hm
from . import analysis_methods as am
class Molecule(object):
def __init__(self, z_vals, xyz, unit='Angstrom'):
"""
Molecule objects are used for passing information between Psi4 and OpenMM.
Psi4 and OpenMM have different unit conventions. Psi4 does not label units,
but typically takes units as Angstrom or Bohr. Internally, units are typically
in Bohr. OpenMM typically uses nanometers, but units in OpenMM are treated as
objects where every number has its value as well as its unit in a Unit object.
In order to preserve clarity between programs, it is important to keep track of units.
The Molecule class here keeps all units in Angstroms. Because the Cartesian coordinates
stored in Molecule.xyz are kept in a Numpy array, conversion between units is facile.
If units other than Angstrom are passed in, the keyword argument unit should be set
to the unit in place. Supported units (not case sensitive): 'Angstrom', 'Bohr',
'Nanometer'.
z_vals : container of integers
Container of z values (atomic numbers) of length N.
Ex: [1, 1, 6, 6]
xyz : container of container of doubles
Container of Cartesian coordinates of N atoms. Dimensions (N, 3).
"""
self.z_vals = np.array(z_vals)
self.xyz = np.array(xyz)
unit = unit.lower()
# if the units are not in Angstrom, convert Nanometers OR Bohr to Angstrom
if unit != 'angstrom':
            self.xyz = self.xyz * 10.0 if unit == 'nanometer' else self.xyz / physconst.psi_bohr2angstroms
if self.xyz.shape[1] != 3:
raise ValueError("XYZ should only have three coordinates")
atom_indices = range(self.natoms())
self.bonds = None
self.charges = None
self.atom_types = None
@staticmethod
def from_xyz_string(xyz_string):
"""
Create a molecule from a string.
Zxyz_string : string
Example and format:
Atomic number X Y Z
_____________________________
Zxyz_str = 'C 0.0 0.0 0.0
O 1.2 0.0 0.0'
"""
z_vals = []
xyz = []
for line in xyz_string.splitlines():
Zxyz_list = line.strip().split()
if Zxyz_list == []:
continue
if len(Zxyz_list) != 4:
raise KeyError("Line should have exactly 4 elements, Z, x, y, z.")
z_vals.append(periodictable.el2z[Zxyz_list[0].upper()])
xyz.append([float(x) for x in Zxyz_list[1:]])
return Molecule(z_vals, xyz)
@staticmethod
def from_xyz_file(filename):
"""
Create a molecule from an xyz file.
First line should be number of atoms, second line a comment,
and subsequent lines (of length the number of atoms) atomic symbol
and Cartesian coordinates.
filename : string
String of the name of the file containing the molecular input.
"""
f = open(filename)
data = f.read().splitlines()
f.close()
data_string = ""
num_atoms = int( data[0].strip() )
comment = data[1]
for line in data[2:]:
if len(line.split()) != 4: continue
data_string += line + '\n'
num_atom_lines = data_string.count('\n')
if num_atom_lines != num_atoms:
raise Exception("The XYZ file should contain %d atoms. It only contains %d lines (one atom per line)." % (num_atoms, num_atom_lines))
return Molecule.from_xyz_string(data_string)
def generate_bonds(self):
"""
Generate bonding information for this molecule.
Yields
-------
List of lists of lists
self.bonds[0] will yield a list of length number of bonds. This list is a list of
lists of length 3; [atom1, atom2, bond_order] where atom1 and atom2 are atom indices
for the Molecule's xyz array and bond_order is an integer 1, 2, or 3 for single, double,
or triple bond.
self.bonds[1] will yield a list of length number of atoms. This list is a list of lists
where each index in the list corresponds to the index in the Molecule's xyz array. The list
at this location contains the indices of every atom that is bound to this atom.
Example:
O=C=O carbon dioxide
self.bonds[0] = [ [0,1,2], [1,2,2] ]
self.bonds[1] = [ [1], [0,2], [1] ]
"""
self.bonds = BFS_bonding.bond_profile(self)
def to_xyz_string(self):
"""
Create an XYZ string with this molecule. Useful for passing to
Psi4 via psi4.geometry or for writing XYZ files.
Returns
-------
string
Example: String looks like this (with newlines inserted instead of shown as backslash n. Note the one space.
O 0.0 0.0 0.0
C 1.2 0.0 0.0
O 2.4 0.0 0.0
"""
output = ''
for atom in range(self.natoms()):
output += '\n' if atom != 0 else ''
output += "%3s " % self.symbol(atom)
output += ' '.join("% 10.6f" % x for x in self.xyz[atom])
return output
def to_xyz_file(self, filename, comment=None, append_mode='w'):
"""
Create an XYZ file with this molecule.
filename : string
Name of the new XYZ file.
comment : string
String to be placed on second line of XYZ file.
append_mode : string
Mode for open() to use. Default is 'w' for write - overwrite anything
in the file with name filename. For writing XYZ files, this should
typically be in append_mode='w'. Alternative mode 'a' for append is
useful for writing trajectories.
Example:
An XYZ file looks like the following
3 # Number of atoms
Carbon Dioxide # Comment
O 0.0 0.0 0.0 # Atoms; atomic symbol followed by coordinates
C 1.2 0.0 0.0
O 2.4 0.0 0.0
"""
# if append_mode is 'a', we need to check if there is text in the file
# if there is text, we add a newline such that the new XYZ data is readable for trajectories
file_exists = os.path.exists(filename)
output = '\n' if (file_exists and append_mode=='a') else ''
output += str(self.natoms())
output += ('\n '+comment if comment is not None else '\n #Generated by GT')
output += '\n' + self.to_xyz_string()
f = open(filename, append_mode)
f.write(output)
f.close()
"""TODO add mol2 file writer. Probably exists in qcdb of Psi4."""
"""TODO add method to write psi4 input files. Probably exists in qcdb of Psi4."""
def generate_charges(self, qc_method=None, basis=None, charge_method=None):
"""
Generate charges for the molecule using Psi4. Probably need to support semi-empirical
charges in the future i.e. MOPAC support for use in generating am1-bcc charges via
Antechamber. For now, Mulliken and Lowdin charges can be generated by Psi4. Default is
Mulliken with SCF and sto-3g.
qc_method : string
Quantum chemistry method to generate charges. Any method supported by Psi4 can be
passed i.e. SCF, HF, B3LYP, etc.
basis : string
Basis set for generation of charges. Any basis set supported by Psi4 can be passed.
charge_method : string
Method to generate atomic charges from one-electron density. Psi4 supported methods
are Mulliken and Lowdin; case insensitive.
Yields
-------
Numpy array of length number of atoms of floats; accessible at self.charges
"""
qc_method = 'scf' if qc_method is None else qc_method
basis = 'sto-3g' if basis is None else basis
charge_method = 'MULLIKEN' if charge_method is None else charge_method.upper()
charge_method += '_CHARGES'
# make geometry into a string for passing to psi4.geometry
xyz_string = self.to_xyz_string()
# First, we want to assign atom types and charges to the solute
mol = psi4.geometry(xyz_string)
# Tell Psi4 that we have updated the geometry
mol.update_geometry()
psi4.core.set_global_option("BASIS", basis)
E, wfn = psi4.property(qc_method, properties=[charge_method], return_wfn=True)
self.charges = np.asarray(wfn.atomic_point_charges())
def generate_atom_types(self, temp_filename="TEMP_FILENAME_PSI-OMM", babel_path=None, antechamber_path=None):
"""
Generate General Amber Force Field atom types for the molecule using Antechamber.
Hopefully in the future there will be a Python module that will be able to do this...
In order to generate the atom types, first an XYZ file is written. Then, this XYZ file
is translated into a pseudo PDB file by babel. This PDB file is then passed to Antechamber
which produces an output file with the atom types. This file is then parsed in order to return
a list of the atom types.
If babel_path and antechamber_path are not provided, they are searched for in the system path.
temp_filename : string
Filename for XYZ file, PDB file, and Antechamber output file to be written to. All of these
will be cleaned up (deleted) upon success of this method.
babel_path : string
Full path including executable name for Babel i.e. '/usr/bin/babel'
antechamber_path : string
Full path including executable name for Antechamber i.e. '/usr/bin/antechamber'
Yields
------
Numpy array of length number of atoms of strings; accessible at self.atom_types
"""
# find paths to the programs we will need to call later
babel_path = hm.which('babel') if babel_path is None else babel_path
antechamber_path = hm.which('antechamber') if antechamber_path is None else antechamber_path
# make an XYZ file
self.to_xyz_file(temp_filename+'.xyz')
# use Babel to convert the XYZ file to a PDB file for use with Antechamber
subprocess.call([babel_path, temp_filename+'.xyz', temp_filename+'.pdb'])
# use Antechamber to assign atom types
subprocess.call([antechamber_path, '-i', temp_filename+'.pdb', '-fi',
'pdb', '-o', temp_filename+'.TEMP_ATS', '-fo', 'prepc',
'-pf', 'y'])
# parse the atom types from the Antechamber output file; not simple, thus a helper method is called
self.atom_types = hm.align_antechamber(temp_filename+'.xyz', temp_filename+'.TEMP_ATS')
# remove files generated during the process
remove_list = [temp_filename+'.xyz', temp_filename+'.pdb', temp_filename+'.TEMP_ATS']
for rm in remove_list:
subprocess.call(['rm', rm])
def natoms(self):
"""
Return the number of atoms from the length of the xyz array.
Returns
-------
int - the number of atoms
"""
return self.xyz.shape[0]
def distance(self, atom1, atom2):
"""
Return the distance between two atoms in Angstroms.
atom1, atom2 : int
The index of atom1 and atom2 in the xyz array.
Returns
-------
float - the distance in Angstroms
"""
return np.linalg.norm(self.xyz[atom1]- self.xyz[atom2])
def neighbors(self, atom1):
"""
Return the list of atoms that are neighbors to atom1. Requires bonding
information. If none exists, it will be generated.
atom1 : int
Index of the atom in Molecule that neighbors are desired for.
Returns : list
List of atomic indices in Molecule of atoms that are bonded to atom1.
"""
# Check that bonding information exists; if it does not, generate it
if self.bonds is None:
self.generate_bonds()
return self.bonds[1][atom1]
def angle(self, atom1, atom2, atom3):
"""
Compute the angle between the points at indices atom1, atom2, and atom3
in the xyz array.
atom1, atom2, atom3 : int
The indicies of the atoms in the XYZ array.
Returns
-------
float - the angle in radians.
"""
        point1 = np.asarray(self.xyz[atom1])
        point2 = np.asarray(self.xyz[atom2])
        point3 = np.asarray(self.xyz[atom3])
v1 = point2 - point1
v2 = point3 - point1
return hm.vector_angle(v1, v2)
def plane_perp_vec(self, atom1, atom2, atom3):
"""
Compute the unit length vector perpendicular to the plane defined by points at indices atom1, atom2, and atom3
in the xyz array.
atom1, atom2, atom3 : int
The indicies of the atoms in the XYZ array.
Returns
-------
Length 3 Numpy array of floats
"""
        point1 = np.asarray(self.xyz[atom1])
        point2 = np.asarray(self.xyz[atom2])
        point3 = np.asarray(self.xyz[atom3])
v1 = point2 - point1
v2 = point3 - point1
perp_vec = np.cross(v1, v2)
return perp_vec / np.linalg.norm(perp_vec)
def dihedral(self, atom1, atom2, atom3, atom4):
"""
Calculate the dihedral angle between four points.
atom1, atom2, atom3, atom4 : int
Indices of the four atoms in the XYZ array to calculate a dihedral angle from
Returns
-------
float - the dihedral angle in radians
"""
n1 = self.plane_perp_vec(atom1, atom2, atom3)
n2 = self.plane_perp_vec(atom2, atom3, atom4)
        # n1 and n2 are already unit length, so their dot product is the cosine directly
        dihedral = - n1.dot(n2)
        return np.arccos(dihedral)
def symbol(self, atom1):
"""
Return the chemical symbol of an atom. For example, 6 would yield 'C'.
atom1 : int
The index of atom1 in the z_vals array.
Returns
-------
string - the chemical symbol
"""
return periodictable._temp_symbol[int(self.z_vals[atom1])]
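    # A minimal usage sketch of the geometry queries above (indices are hypothetical
    # and assume a Molecule instance `mol` with xyz/z_vals already populated):
    #   d = mol.distance(0, 1)          # Angstroms between atoms 0 and 1
    #   theta = mol.angle(1, 0, 2)      # radians, vertex at atom 1
    #   phi = mol.dihedral(0, 1, 2, 3)  # radians
    #   sym = mol.symbol(0)             # e.g. 'C' for an atomic number of 6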
#TODO: Add methods to return all bonds, angles, dihedrals that exist between BONDED ATOMS only
def is_overlapped(self, atom1, atom2):
"""
Using covalent radii, check if atom1 and atom2 are overlapping.
atom1, atom2 : Indices of atom1 and atom2 in self.xyz
Returns
-------
Boolean - True if the distance between atom1 and atom2 is less
than the sum of their covalent radii
"""
# Find covalent radii
        r1 = cov_radii.psi_cov_radii[self.symbol(atom1)]
        r2 = cov_radii.psi_cov_radii[self.symbol(atom2)]
# Find distance
d = self.distance(atom1, atom2)
return d < (r1 + r2)*1.2
def substitute(self, atom1, group, group_ix=0):
"""
Substitute the atom at index atom1 with the group defined by the Psi-OMM Molecule
group by attaching the group by the atom at index group_ix. Place the group (attached
by the atom at index group_ix) at the position of atom1. This group is aligned such that
the mean atomic position of group_ix is aligned with the vector that bisects all angles containing
atom1 and atoms it is attached to. Essentially, it places group in a VSEPR-like configuration.
atom1 : int
The index of atom1, the atom to be substituted, in this Molecule.
group : Psi-OMM Molecule
A Psi-OMM Molecule that represents the group to be added at the position of atom1.
Only needs to contain the atomic positions.
group_ix : int
The index of the atom in group which will take the place of atom1. The default is 0
i.e. the first atom in group should be the atom that will take the place of atom1. For
example, hydroxylating a hydrogen site would require the oxygen to be at index group_ix.
Updates
-------
Updates the self.xyz array with a new array with the substituted group.
"""
# Make a copy of the old geometry
new_geo = self.xyz.copy()
new_z_vals = self.z_vals.copy()
# Find the bisecting vector between atom1 and its neighbors
# First, identify the list of atoms that are neighbors to atom1
neighbors = self.neighbors(atom1)
# Find the unit vector pointing from atom1 to where group's mean atomic position will lie
# This is the unit vector of the sum of all vectors atom1-n where n is a neighboring atom
uvs = []
for n in neighbors:
v = self.xyz[atom1] - self.xyz[n]
v /= np.linalg.norm(v)
uvs.append(v)
uv = np.sum(np.asarray(uvs), axis=0)
uv /= np.linalg.norm(uv)
uv.shape = (1,3)
#uv = np.append(uv, np.array([[0,0,0]]), axis=0)
# Translate the group's atom at group_ix to (0,0,0) and the rest of the group
# to prepare for rotation (needs to be at origin for rotation)
group.xyz -= group.xyz[group_ix]
# Find the centroid of the orientation and group
centroid = np.mean(uv, axis=0)
g_centroid = np.mean(group.xyz, axis=0)
# Find the group unit vector pointing from atom at group_ix to g_centroid
g_uv = g_centroid - group.xyz[group_ix]
g_uv /= np.linalg.norm(g_uv)
g_uv.shape = (1,3)
g_uv_orig = g_uv.copy()
g_uv_orig.shape = (3,)
#g_uv = np.append(g_uv, np.array([[0,0,0]]), axis=0)
# Align group by rotating g_uv to uv in order to find the rotation matrix
R = am.find_rotation(g_uv, uv)
group.xyz = np.dot(R, group.xyz.T)
group.xyz = group.xyz.T
# Translate the group geometry such that the atom at group_ix lies at the position of atom1
group.xyz += self.xyz[atom1]
# Translate the group such that the bond distance is more accurate if there is only one neighbor
# excluding the group itself. Else, too complicated and it should stay at its current position.
if len(neighbors) == 1:
bond_dist = self.distance(atom1, neighbors[0])
debond_v = uv * bond_dist
group.xyz -= debond_v
            bond_v_dist = cov_radii.psi_cov_radii[self.symbol(atom1)] + cov_radii.psi_cov_radii[self.symbol(neighbors[0])]
bond_v = uv * bond_v_dist
group.xyz += bond_v
# Add the group to the molecule
# First, remove atom1
new_geo = np.delete(new_geo, atom1, axis=0)
new_z_vals = np.delete(new_z_vals, atom1)
# Add group
new_geo = np.append(new_geo, group.xyz, axis=0)
new_z_vals = np.append(new_z_vals, group.z_vals)
# Update geometry of molecule with new_geo
self.xyz = new_geo
self.z_vals = new_z_vals
# Check that the rotated, translated group does not overlap with any atoms in the original
# molecule. If so, rotate it in 30 degree increments to try to resolve any overlaps.
n = group.xyz.shape[0]
N = self.xyz.shape[0]
# 12 possible rotations by 30 degrees
no_overlaps = False
for x in range(12):
if no_overlaps is True:
print("Took %d rotations to arrive at new geometry." % (x))
break
found_overlap = False
# Don't want atom at group_ix because it IS bonded to the original molecule and WILL overlap
group_indices = list(range(N-n, N))
group_indices.remove(group_ix + N - n)
for at, g_at in itertools.product(range(N-n), group_indices):
# If atoms are overlapped, rotate
if self.is_overlapped(at, g_at):
found_overlap = True
# Find rotation matrix
rot_axis = uv
R = am.rotation_matrix(uv, np.pi/6)
# Rotate
group.xyz = self.xyz[N-n:]
old_pos = group.xyz[group_ix].copy()
group.xyz -= old_pos
group.xyz = np.dot(R, group.xyz.T)
group.xyz = group.xyz.T
group.xyz += old_pos
# Replace old group with rotated group
self.xyz[N-n:] = group.xyz
break
no_overlaps = True if found_overlap is False else False
# Finally, set self.bonds to None again because the old self.bonds is now wrong (doesn't account for new atoms)
self.bonds = None
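    # A hypothetical usage sketch for substitute() (constructor arguments and indices
    # are assumptions, not part of this module): build a small "group" Molecule whose
    # attachment atom sits at index 0, then replace e.g. a hydrogen at index 7:
    #   hydroxyl = Molecule(np.array([8, 1]),
    #                       np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.96]]))
    #   mol.substitute(7, hydroxyl, group_ix=0)
    #   mol.generate_bonds()  # self.bonds was reset to None above, so regenerate it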
| StarcoderdataPython |
9663638 | """Main file for clifold."""
import argparse
from clifold.commands.clifold_git import git_init
from clifold.commands.clifold_init import py_init
from clifold.commands.clifold_pkg import pip
from clifold.commands.clifold_project import create
def __version__():
"""return package version"""
return "0.2.10"
def cli():
"""CLI parsing"""
parser = argparse.ArgumentParser(
prog='clif',
usage='%(prog)s <arg> [options]',
description='🚀 A CLI tool for scaffolding any Python Projects 🚀',
conflict_handler='resolve')
group = parser.add_argument_group('Argument')
group.add_argument('project_name', help='Project name to create with venv')
group = parser.add_argument_group('Options')
group.add_argument('-g', '--git', action='store_true', default=True,
dest='git', help='Make git initialization (Default: True)')
group.add_argument('-ng', '--no-git', action='store_false',
dest='git', help='Skip git initialization')
group.add_argument('-p', '--pkg', action='store_true', default=True,
dest='pkg', help='Ask packages to install (Default: True)')
group.add_argument('-np', '--no-pkg', action='store_false', dest='pkg',
help='Skip packages installation')
group.add_argument('-i', '--init', action='store_true', default=True,
dest='init', help='Create setup.py file (Default: True)')
group.add_argument('-ni', '--no-init', action='store_false',
dest='init', help='Skip setup.py file')
group.add_argument('-V', '--version', action='version',
version=__version__(), help='Output version number')
group.add_argument('-h', '--help', action='help',
help='Output usage information')
args = parser.parse_args()
pname = args.project_name
git = args.git
pkg = args.pkg
init = args.init
ppath, root = create(pname)
git_init(git, ppath)
pip(pkg, root, pname)
py_init(init, ppath, pname)
if __name__ == "__main__":
cli()
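# Example invocation, assuming the package installs a `clif` console script (the
# project name below is made up):
#   clif myproject --no-git --no-pkg
# scaffolds ./myproject with a virtual environment but skips git init and package installation.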
| StarcoderdataPython |
1833990 | from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import StaleElementReferenceException
import time
def fresh_find(root, EC, timeout=2, retries=10):
    """Wait for an expected condition, retrying when the located element goes stale."""
    for i in range(retries):
        try:
            elem = WebDriverWait(root, timeout).until(EC)
            # Touch the element so a StaleElementReferenceException surfaces here, not later
            elem.is_enabled()
            return elem
except StaleElementReferenceException:
print('attempting to recover from StaleElementReferenceException...')
time.sleep(0.25)
raise StaleElementReferenceException("All {} tries yielded stale elements".format(retries))
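# A minimal usage sketch (assumes a live WebDriver session; the locator is hypothetical):
#   from selenium import webdriver
#   from selenium.webdriver.common.by import By
#   from selenium.webdriver.support import expected_conditions as EC
#   driver = webdriver.Firefox()
#   button = fresh_find(driver, EC.element_to_be_clickable((By.ID, "submit")))
#   button.click()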
| StarcoderdataPython |
9175 | <filename>src/dataAccess/Connection.py
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created Aug 4, 2009
connection pool abstraction over previous Connection.py which is now SingleConnection.py
sets up module scope connection pool, currently with no size limit
pool for both connections with dictionary cursors and regular cursors
reconnects to db every x hours depending on config file
@author: Andrew
'''
from utaka.src.dataAccess.SingleConnection import Connection as SingleConnection
import utaka.src.Config as Config
import MySQLdb
import datetime
dcp = [SingleConnection(True)]
rcp = [SingleConnection(False)]
dbTimer = datetime.datetime.today()
dbTimeout = datetime.timedelta(hours = int(Config.get('database', 'connection_timeout_in_hours')))
class Connection:
def __init__(self, useDictCursor = False):
        pool = dcp if useDictCursor else rcp
        if len(pool) > 0:
            self.innerConn = pool.pop()
            now = datetime.datetime.today()
            # Recycle connections that have outlived the configured timeout
            if (now - dbTimeout) > self.innerConn.connectTime:
                self.innerConn.close()
                self.innerConn = SingleConnection(useDictCursor)
        else:
            self.innerConn = SingleConnection(useDictCursor)
def usingDictCursor(self):
return self.innerConn.usingDictCursor()
def executeStatement(self, statement, placeholder):
return self.innerConn.executeStatement(statement, placeholder)
def getRowCount(self):
return self.innerConn.rowcount()
def commit(self):
self.innerConn.commit()
def rollback(self):
self.innerConn.rollback()
def close(self):
self.commit()
self.__close_()
def cancelAndClose(self):
self.rollback()
self.__close_()
def __close_(self):
utakaLog = open('/var/www/html/utaka/utakaLog', 'a')
try:
if self.usingDictCursor():
utakaLog.write('Dictionary Database Connection Returned to Pool\r\n')
else:
utakaLog.write('Regular Database Connection Returned to Pool\r\n')
finally:
utakaLog.close()
if self.usingDictCursor():
dcp.append(self.innerConn)
else:
rcp.append(self.innerConn)
self.innerConn = None
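# A minimal usage sketch (assumes the utaka configuration and MySQL schema are in
# place; the table and column names below are hypothetical):
#   conn = Connection(useDictCursor=True)
#   rows = conn.executeStatement('SELECT * FROM bucket WHERE owner = %s', (42,))
#   conn.close()           # commits and returns the inner connection to the pool
#   # conn.cancelAndClose() would instead roll back before returning it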
| StarcoderdataPython |
5102418 | <filename>form-romanos/main.py
from flask import Flask, render_template, request
app = Flask(__name__)
def converte_romanos(numero):
    # Convert an integer (or numeric string) to its Roman numeral representation
    valores = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'), (90, 'XC'),
               (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
    numero, romanos = int(numero), ''
    for valor, simbolo in valores:
        while numero >= valor:
            romanos, numero = romanos + simbolo, numero - valor
    return romanos
@app.route("/",methods=["GET","POST"])
def romano():
if request.method == "POST":
response = request.form
print("response")
print(response)
numero=response["valor"]
romano=converte_romanos(numero)
return romano
elif request.method == "GET":
return render_template("recebe_numero.html")
if __name__== "__main__":
app.run(host="0.0.0.0",debug= True)
| StarcoderdataPython |
9758727 |
def add(x1, x2=2):
    return x1 + x2
def sub(x1, x2=2):
    return x1 - x2
def mul(x1, x2=2):
    return x1 * x2
def div(x1, x2=2):
    return x1 / x2
class InsufficientFund(Exception):
def __init__(self, message) -> None:
self.message = message
class BankAccount():
def __init__(self, starting_balance=0) -> None:
self.balance = starting_balance
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
if amount > self.balance:
            raise InsufficientFund("ERROR: Insufficient Funds")
self.balance -= amount
def collect_interest(self):
self.balance *= 1.1
| StarcoderdataPython |
28691 | <gh_stars>0
from infra.controllers.contracts import HttpResponse
class NotFoundError(HttpResponse):
def __init__(self, message) -> None:
status_code = 404
self.message = message
body = {
'message': self.message
}
super().__init__(body, status_code)
| StarcoderdataPython |
355278 | from flask import Flask, render_template, url_for, flash, redirect
"""
Set variable "app" to an instance of the Flask class.
__name__ is a special variable in Python that just means
the name of the module. It can be equal to "__main__".
Basically, this is so Python knows where to look for your
templates, static files, etc.
"""
app = Flask(__name__)
@app.route("/")
@app.route("/home/")
def home():
return render_template("home.html", page="home")
@app.route("/about")
def about():
return render_template("about.html", page="about")
@app.route("/inquire/")
def inquire():
return render_template("inquire.html", page="inquire")
@app.route("/deposit/")
def deposit():
return render_template("deposit.html", page="deposit")
@app.route("/gallery/")
def gallery():
return render_template("gallery.html", page="gallery")
"""
We can use the below statement to run the debugger and dynamically update our
website without shutting it down and restarting every time changes are made.
"""
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
396625 | <reponame>NicholasWon47/redis-py<gh_stars>0
import os
import mock
import pytest
import re
import redis
import time
from threading import Thread
from redis.connection import ssl_available, to_bool
from .conftest import skip_if_server_version_lt, _get_client
from .test_pubsub import wait_for_message
class DummyConnection(object):
description_format = "DummyConnection<>"
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def can_read(self):
return False
class TestConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=None,
connection_class=redis.Connection):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=DummyConnection)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_max_connections(self):
pool = self.get_pool(max_connections=2)
pool.get_connection('_')
pool.get_connection('_')
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=1>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
connection_kwargs = {'path': '/abc', 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=/abc,db=1>>'
assert repr(pool) == expected
def test_pool_equality(self):
connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1}
pool1 = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
pool2 = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
assert pool1 == pool2
def test_pools_unequal_if_different_types(self):
connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
assert pool != 0
def test_pools_unequal_if_different_hosts(self):
connection_kwargs1 = {'host': 'localhost', 'port': 6379, 'db': 1}
connection_kwargs2 = {'host': '127.0.0.1', 'port': 6379, 'db': 1}
pool1 = self.get_pool(connection_kwargs=connection_kwargs1,
connection_class=redis.Connection)
pool2 = self.get_pool(connection_kwargs=connection_kwargs2,
connection_class=redis.Connection)
assert pool1 != pool2
def test_pools_unequal_if_different_ports(self):
connection_kwargs1 = {'host': 'localhost', 'port': 6379, 'db': 1}
connection_kwargs2 = {'host': 'localhost', 'port': 6380, 'db': 1}
pool1 = self.get_pool(connection_kwargs=connection_kwargs1,
connection_class=redis.Connection)
pool2 = self.get_pool(connection_kwargs=connection_kwargs2,
connection_class=redis.Connection)
assert pool1 != pool2
def test_pools_unequal_if_different_dbs(self):
connection_kwargs1 = {'host': 'localhost', 'port': 6379, 'db': 1}
connection_kwargs2 = {'host': 'localhost', 'port': 6379, 'db': 2}
pool1 = self.get_pool(connection_kwargs=connection_kwargs1,
connection_class=redis.Connection)
pool2 = self.get_pool(connection_kwargs=connection_kwargs2,
connection_class=redis.Connection)
assert pool1 != pool2
class TestBlockingConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self):
"When out of connections, block for timeout seconds, then raise"
pool = self.get_pool(max_connections=1, timeout=0.1)
pool.get_connection('_')
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
def connection_pool_blocks_until_another_connection_released(self):
"""
When out of connections, block until another connection is released
to the pool
"""
pool = self.get_pool(max_connections=1, timeout=2)
c1 = pool.get_connection('_')
def target():
time.sleep(0.1)
pool.release(c1)
Thread(target=target).start()
start = time.time()
pool.get_connection('_')
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=0>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
db=0,
)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=abc,db=0>>'
assert repr(pool) == expected
class TestConnectionPoolURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('redis://localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
}
def test_hostname(self):
pool = redis.ConnectionPool.from_url('redis://myhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my / host +=+',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
}
def test_port(self):
pool = redis.ConnectionPool.from_url('redis://localhost:6380')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6380,
'db': 0,
'password': <PASSWORD>,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': '<PASSWORD>',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'redis://:%2Fmypass%2F%2B word%3D%24+@localhost',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>/+ word=$+',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('redis://localhost', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 1,
'password': <PASSWORD>,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'password': <PASSWORD>,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 3,
'password': <PASSWORD>,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
'&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'socket_timeout': 20.0,
'socket_connect_timeout': 10.0,
'retry_on_timeout': True,
'password': <PASSWORD>,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ''),
(False, 0), (False, '0'),
(False, 'f'), (False, 'F'), (False, 'False'),
(False, 'n'), (False, 'N'), (False, 'No'),
(True, 1), (True, '1'),
(True, 'y'), (True, 'Y'), (True, 'Yes'),
):
assert expected is to_bool(value)
def test_invalid_extra_typed_querystring_options(self):
import warnings
with warnings.catch_warnings(record=True) as warning_log:
redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=_&'
'socket_connect_timeout=abc'
)
# Compare the message values
assert [
str(m.message) for m in
sorted(warning_log, key=lambda l: str(l.message))
] == [
'Invalid value for `socket_connect_timeout` in connection URL.',
'Invalid value for `socket_timeout` in connection URL.',
]
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
'a': '1',
'b': '2'
}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url('redis://localhost')
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url('redis://myhost')
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url('localhost')
class TestConnectionPoolUnixSocketURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('unix:///socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': <PASSWORD>,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': '<PASSWORD>',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'unix://:%2Fmypass%2F%2B word%3D%24+@/socket',
decode_components=True)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': '/<PASSWORD>/+ word=$+',
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket',
decode_components=True)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/my/path/to/../+_+=$ocket',
'db': 0,
'password': '<PASSWORD>',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 1,
'password': <PASSWORD>,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 2,
'password': <PASSWORD>,
}
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': <PASSWORD>,
'a': '1',
'b': '2'
}
class TestSSLConnectionURLParsing(object):
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_defaults(self):
pool = redis.ConnectionPool.from_url('rediss://localhost')
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': <PASSWORD>,
}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_cert_reqs_options(self):
import ssl
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self, *args, **kwargs):
return self.make_connection()
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=none')
assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=optional')
assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=required')
assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED
class TestConnection(object):
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
assert not r.connection._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command('DEBUG', 'ERROR',
'LOADING fake message')
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url('redis://localhost')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0',
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url('unix:///path/to/socket')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0',
)
def test_connect_no_auth_supplied_when_required(self, r):
"""
AuthenticationError should be raised when the server requires a
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR',
'ERR Client sent AUTH, but no password is set')
def test_connect_invalid_password_supplied(self, r):
"AuthenticationError should be raised when sending the wrong password"
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
class TestMultiConnectionClient(object):
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis,
request,
single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
assert r.set('a', '123')
assert r.get('a') == b'123'
class TestHealthCheck(object):
interval = 60
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request,
health_check_interval=self.interval)
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - time.time()
assert self.interval > diff > (self.interval - 1)
def test_health_check_runs(self, r):
r.connection.next_health_check = time.time() - 1
r.connection.check_health()
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
r.get('foo')
r.connection.next_health_check = time.time()
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_advances_next_health_check(self, r):
r.get('foo')
next_health_check = r.connection.next_health_check
r.get('foo')
assert next_health_check < r.connection.next_health_check
def test_health_check_not_invoked_within_interval(self, r):
r.get('foo')
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
ping_call_spec = (('PING',), {'check_health': False})
assert ping_call_spec not in m.call_args_list
def test_health_check_in_pipeline(self, r):
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_transaction(self, r):
with r.pipeline(transaction=True) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_watched_pipeline(self, r):
r.set('foo', 'bar')
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
pipe.watch('foo')
# the health check should be called when watching
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(pipe.connection)
assert pipe.get('foo') == b'bar'
# reset the mock to clear the call list and schedule another
# health check
m.reset_mock()
pipe.connection.next_health_check = 0
pipe.multi()
responses = pipe.set('foo', 'not-bar').get('foo').execute()
assert responses == [True, b'not-bar']
m.assert_any_call('PING', check_health=False)
def test_health_check_in_pubsub_before_subscribe(self, r):
"A health check happens before the first [p]subscribe"
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
assert not p.subscribed
p.subscribe('foo')
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
m.assert_any_call('PING', check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
def test_health_check_in_pubsub_after_subscribed(self, r):
"""
Pubsub can handle a new subscribe when it's time to check the
connection health
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
m.assert_any_call('PING', check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
p.subscribe('bar')
# the second subscribe issues exactly only command (the subscribe)
# and the health check is not invoked
m.assert_called_once_with('SUBSCRIBE', 'bar', check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
assert wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
m.assert_any_call('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
def test_health_check_in_pubsub_poll(self, r):
"""
Polling a pubsub connection that's subscribed will regularly
check the connection's health.
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
# doesn't result in another health check
m.reset_mock()
next_health_check = p.connection.next_health_check
assert wait_for_message(p) is None
assert p.connection.next_health_check == next_health_check
m.assert_not_called()
# reset the health check and poll again
# we should not receive a pong message, but the next_health_check
# should be advanced
p.connection.next_health_check = 0
assert wait_for_message(p) is None
m.assert_called_with('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
| StarcoderdataPython |
58828 | #!/usr/bin/env python
from chr import models
from chr import coverage
# from chr import black_boxes
#from chr import black_boxes_r
from chr import methods
from chr import others
# from chr import others_r
| StarcoderdataPython |
5025746 | <reponame>ibab/tensorprob
from itertools import product
import tensorflow as tf
from .. import config
from ..distribution import Distribution
from ..model import Model, Region
@Distribution
def Mix2(f, A, B, name=None):
# TODO(chrisburr) Check if f is bounded between 0 and 1?
X = tf.placeholder(config.dtype, name=name)
a_logp, a_integral, a_bounds = Model._current_model._description[A]
b_logp, b_integral, b_bounds = Model._current_model._description[B]
def _integral(mix_bounds, sub_bounds, sub_integral):
# Calculate the normalised logp
integrals = []
for (mix_lower, mix_upper), (sub_lower, sub_upper) in product(mix_bounds, sub_bounds):
# Ignore this region if it's outside the current limits
if sub_upper <= mix_lower or mix_upper <= sub_lower:
continue
# Else keep the region, tightening the edges as reqired
integrals.append(sub_integral(max(sub_lower, mix_lower), min(sub_upper, mix_upper)))
return tf.add_n(integrals) if integrals else tf.constant(1, config.dtype)
bounds = Distribution.bounds(1)[0]
a_normalisation_1 = _integral(bounds, a_bounds, a_integral)
b_normalisation_1 = _integral(bounds, b_bounds, b_integral)
Distribution.logp = tf.log(
f*tf.exp(a_logp)/a_normalisation_1 +
(1-f)*tf.exp(b_logp)/b_normalisation_1
)
def integral(lower, upper):
a_normalisation_2 = _integral([Region(lower, upper)], a_bounds, a_integral)
b_normalisation_2 = _integral([Region(lower, upper)], b_bounds, b_integral)
return f/a_normalisation_1*a_normalisation_2 + (1-f)/b_normalisation_1*b_normalisation_2
Distribution.integral = integral
# Modify the current model to recognize that X and Y have been removed
for dist in A, B:
# TODO(chrisburr) Explain
# TODO(chrisburr) Add test for combinators of combinators
if dist in Model._current_model._silently_replace.values():
# We need to copy the items to a list as we're deleting items from
# the dictionary
for key, value in list(Model._current_model._silently_replace.items()):
if value == dist:
Model._current_model._silently_replace[value] = X
Model._current_model._silently_replace[key] = X
if dist in Model._current_model._description:
del Model._current_model._description[dist]
else:
Model._current_model._silently_replace[dist] = X
del Model._current_model._description[dist]
return X
| StarcoderdataPython |
3469980 | <filename>TooManyStalkers/bot.py
import sys
import sc2
from sc2.ids.ability_id import AbilityId
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sc2.ids.buff_id import BuffId
from sc2.unit import Unit
from sc2.units import Units
from sc2.position import Point2, Point3
from loguru import logger
logger.remove()
logger.add(sys.stderr, level="INFO")
# All townhalls to check if the enemy's main base is destroyed
TOWNHALLS = {
UnitTypeId.COMMANDCENTER, UnitTypeId.COMMANDCENTERFLYING,
UnitTypeId.ORBITALCOMMAND, UnitTypeId.ORBITALCOMMANDFLYING,
UnitTypeId.PLANETARYFORTRESS, UnitTypeId.HATCHERY,
UnitTypeId.LAIR, UnitTypeId.HIVE, UnitTypeId.NEXUS
}
# The bot class
class TooManyStalkersBot(sc2.BotAI):
def __init__(self):
"""Inititialize variables
"""
super().__init__()
# The maximum amount of workers
self.MAX_WORKERS = 80
# The maximum amount of Nexuses
self.MAX_NEXUSES = 3
# The upgrades that will be researched
self.UPGRADES = ["PROTOSSGROUNDWEAPONSLEVEL",
"PROTOSSGROUNDWEAPONSLEVEL",
"PROTOSSSHIELDSLEVEL"]
        # Whether the enemy has been greeted, and a reference to our main base
self.greeted = False
self.main: Unit = None
# The Proxy and the Proxy position
self.proxy: Unit = None
self.proxy_position: Point3 = Point3()
# How many attempts must be made to rebuild the Proxy Pylon
self.MAX_PROXY_ATTEMPTS = 3
# How many attempts have been made thusfar
self.proxy_attempts = 0
# The defending Stalkers, the wall-off unit, and the position to defend
self.bases_defenders: dict = {}
self.wall_unit: Unit = None
# The attacking Stalkers, how many attacks have happend
self.attackers: Units = Units([], self)
# The amount of timing attacks
self.timing_attack: int = 0
# The amount of attacks
self.attack_amount = 0
# If the enemy's main base is destroyed
self.enemy_main_destroyed_triggerd = False
# How many attacking Stalker there must be for every defending Stalker
self.attack_defend_ratio = 6/1
# If we should debug and if we should debug once (testing only)
self.DEBUG = False
self.debug_once = True
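    # A hedged sketch of how this bot is typically launched with the python-sc2 package
    # (map name, opponent race, and difficulty below are assumptions):
    #   sc2.run_game(sc2.maps.get("AcropolisLE"),
    #                [sc2.player.Bot(sc2.Race.Protoss, TooManyStalkersBot()),
    #                 sc2.player.Computer(sc2.Race.Terran, sc2.Difficulty.Hard)],
    #                realtime=False)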
async def on_before_start(self):
"""Before the game starts
"""
# Calculate the Proxy location
self.proxy_position = self.get_proxy_location()
logger.info(f"Proxy position: {self.proxy_position}")
# Get the main base
self.main = self.townhalls.first
async def on_step(self, iteration: int):
"""What to do each step
Args:
iteration (int): what number step it currently is
"""
# Greet the opponent
if iteration > 5 and not self.greeted:
logger.info("Greeted the enemy")
self.greeted = True
await self.chat_send(f"Hello {self.opponent_id}, GL HF")
# Debug and draw on screen
await self.debug()
await self.debug_draw()
# (Built-in) Distribute workers
await self.distribute_workers()
# Manage the main base
await self.manage_bases()
        # Build Pylons
await self.build_pylons()
# Build a Proxy Pylon
await self.build_proxy()
# Collect Vespene Gas
await self.collect_gas()
# Build research buildings and research
await self.build_research_structures()
await self.research()
# Build Gateways/Warpgates, train/warp units, attack
await self.build_unit_structures()
await self.train_units()
await self.expand()
await self.attack()
async def debug(self):
"""Spawn 5 Stalkers if self.DEBUG is true
"""
if self.DEBUG and self.debug_once:
logger.info("Created 5 Stalkers")
await self._client.debug_create_unit(
[[UnitTypeId.STALKER, 5, self.main.position, 1]])
self.debug_once = False
async def debug_draw(self):
"""Draw text and spheres for debugging purposes
"""
# Draw spheres at the Proxy and the defense position
self._client.debug_sphere_out(
self.proxy_position, 2, color=(255, 0, 0))
# If there are attackers, put text on their position
if self.attackers:
for stalker in self.units.tags_in(self.attackers):
self._client.debug_text_world("Attacker",
stalker,
color=(255, 255, 0))
for base_defenders in self.bases_defenders.values():
# If there are defenders, put text on their position
if base_defenders:
for stalker in self.units.tags_in(base_defenders):
self._client.debug_text_world("Defender",
stalker,
color=(255, 0, 255))
# If there is a wall-unit, put text on their position
if self.wall_unit:
wall_unit = self.units.tags_in([self.wall_unit.tag])
if len(wall_unit) > 0:
self.wall_unit = wall_unit[0]
self._client.debug_text_world("Wall-off",
self.wall_unit,
color=(255, 255, 255))
if self.structures(UnitTypeId.NEXUS).exists:
nexuses = self.structures(UnitTypeId.NEXUS)
for nexus in nexuses:
self._client.debug_sphere_out(
nexus.position3d, 4, color=(40, 240, 250))
async def manage_bases(self):
"""Handle the Chronoboost for each Nexus and produce workers
"""
# Loop over all the Nexuses
for nexus in self.townhalls:
# Handle Chronoboost
await self.chronoboost(nexus)
# Train Probes
if (
nexus.is_idle
and self.workers.amount < self.MAX_WORKERS
and self.can_afford(UnitTypeId.PROBE)
):
nexus.train(UnitTypeId.PROBE)
async def build_pylons(self):
"""Build Pylons if the supply is too low
"""
# If the Pylon for the wall-off wasn't built, build it
wall_pylon = self.main_base_ramp.protoss_wall_pylon
if (
await self.can_place_single(UnitTypeId.PYLON, wall_pylon)
and self.can_afford(UnitTypeId.PYLON)
):
await self.build(UnitTypeId.PYLON, wall_pylon)
# If there is 8 supply or less left, build a Pylon
if (
self.supply_left <= 8
and self.supply_cap < 200
and self.townhalls.ready.exists
and not self.already_pending(UnitTypeId.PYLON)
and self.can_afford(UnitTypeId.PYLON)
):
position = self.townhalls.ready.random.position.towards(
self.game_info.map_center, 15
)
await self.build(UnitTypeId.PYLON, near=position)
async def build_proxy(self):
"""Builds a Proxy Pylon if the Warpgate research is a quarter done
"""
# Build a Proxy Pylon once the Warpgate Research is for 25% done
if (
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) > 0.25
and self.proxy is None
and self.proxy_attempts < self.MAX_PROXY_ATTEMPTS
and self.can_afford(UnitTypeId.PYLON)
and not self.already_pending(UnitTypeId.PYLON)
):
# If this is first attempt at Proxy, use calculated position
if self.proxy_attempts == 0:
pos = self.proxy_position
# If this isn't the first attempt, calculate new location
elif self.proxy_attempts > 0:
pos = self.get_proxy_location()
logger.info("Proxy position changed to: "
f"{self.proxy_position}")
# Build a Proxy Pylon
logger.info(f"Building a proxy at {self.proxy_position}")
await self.build(UnitTypeId.PYLON, pos)
# Increment the Proxy attempts
self.proxy_attempts += 1
async def collect_gas(self):
"""Collect Vespene Gas after a Gateway was build
"""
# Only collect Gas when a Gateway was built
if self.structures(UnitTypeId.GATEWAY).exists:
# Loop over all the Nexuses
for nexus in self.townhalls.ready:
# Get all the Vespene Geysers
vespenenes: Units = self.vespene_geyser.closer_than(10, nexus)
# Loop over all the Vespene Geysers
for vespene in vespenenes:
# Build an Assimilator on top of the Vespene Geysers
if (
await self.can_place_single(
UnitTypeId.ASSIMILATOR, vespene.position)
and not self.already_pending(UnitTypeId.ASSIMILATOR)
and self.can_afford(UnitTypeId.ASSIMILATOR)
):
await self.build(UnitTypeId.ASSIMILATOR, vespene)
async def build_research_structures(self):
"""Build structures to research from
"""
# None of the structures can be build if we don't have Pylons
if self.structures(UnitTypeId.PYLON).ready.exists:
# Build Research buildings by Pylon that aren't the Proxy or Cannon
pylon: Unit = self.structures(UnitTypeId.PYLON).ready.random
while pylon == self.proxy:
pylon = self.structures(UnitTypeId.PYLON).ready.random
# If we have a Gateway, build a Cybernetics Core
if (
self.structures(UnitTypeId.GATEWAY).ready.exists
and self.structures(UnitTypeId.CYBERNETICSCORE).amount == 0
and self.can_afford(UnitTypeId.CYBERNETICSCORE)
):
await self.build(UnitTypeId.CYBERNETICSCORE, near=pylon)
# If we have a Cybernetics Core, build a Forge
if (
self.structures(UnitTypeId.CYBERNETICSCORE).exists
and self.already_pending(UnitTypeId.FORGE) == 0
and not self.structures(UnitTypeId.FORGE).exists
and self.can_afford(UnitTypeId.FORGE)
):
await self.build(UnitTypeId.FORGE, near=pylon)
# If the Forge is at it's last upgrade, build a Twilight Council
if (
self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) > 0
and self.structures(UnitTypeId.TWILIGHTCOUNCIL) == 0
and self.can_afford(UnitTypeId.TWILIGHTCOUNCIL)
):
await self.build(UnitTypeId.TWILIGHTCOUNCIL, near=pylon)
async def research(self):
"""Research Warpgates, Weapons, Armor and Shields
"""
# If we have a Cybernetics Core, research Warpgates
if self.structures(UnitTypeId.CYBERNETICSCORE).ready.exists:
# Select a Cybernetics Core and research Warpgate
ccore = self.structures(UnitTypeId.CYBERNETICSCORE).ready.first
ccore.research(UpgradeId.WARPGATERESEARCH)
# If we have a Forge and Warpgates are researching, research upgrades
if (
self.structures(UnitTypeId.FORGE).ready.exists
and self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) > 0
):
# Select a Forge and upgrade
forge = self.structures(UnitTypeId.FORGE).ready.first
# Research Ground Weapons
if (
self.can_afford(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1)
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 0
):
forge.research(UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1)
# Research Ground Armor
elif (
self.can_afford(
UpgradeId.PROTOSSGROUNDARMORSLEVEL1)
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 0
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1
):
forge.research(UpgradeId.PROTOSSGROUNDARMORSLEVEL1)
# Research Shields
elif (
self.can_afford(
UpgradeId.PROTOSSSHIELDSLEVEL1)
and self.already_pending_upgrade(
UpgradeId.PROTOSSSHIELDSLEVEL1) == 0
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 1
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1
):
forge.research(UpgradeId.PROTOSSSHIELDSLEVEL1)
async def build_unit_structures(self):
"""Build Gateways
"""
# Build Gateways only when there is a Pylon
if self.structures(UnitTypeId.PYLON).ready.exists:
# Get the placement positions for a wall
wall_buildings = self.main_base_ramp.protoss_wall_buildings
# See if it can place a building on the position and build it
for wall_building in wall_buildings:
if (
await self.can_place_single(UnitTypeId.GATEWAY,
wall_building)
and self.can_afford(UnitTypeId.GATEWAY)
):
await self.build(UnitTypeId.GATEWAY, wall_building)
# Build Gateways by Pylons that aren't the Proxy or the Cannon
pylon: Unit = self.structures(UnitTypeId.PYLON).ready.random
while pylon == self.proxy:
pylon = self.structures(UnitTypeId.PYLON).ready.random
# Build Gateways once the Warpgate Research is done
if (
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1
and self.townhalls.amount > 1
and self.structures(
UnitTypeId.WARPGATE).amount < self.max_gateways
and self.can_afford(UnitTypeId.GATEWAY)
):
await self.build(UnitTypeId.GATEWAY, near=pylon)
async def train_units(self):
"""Train Stalkers from Gateways and warp them in with Warpgates
"""
# Build a Wall-off unit if there is not one
if (
self.wall_unit is None
and self.can_afford(UnitTypeId.ZEALOT)
and not self.already_pending(UnitTypeId.ZEALOT)
):
# If we have Warpgates
if (
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1
and self.structures(UnitTypeId.WARPGATE).ready.exists
):
# Select a random Warpgate
warpgate = self.structures(UnitTypeId.WARPGATE).ready.random
# Get the available abilities of the Warpgate
abilities = await self.get_available_abilities(warpgate)
# Warp a Zealot if we can warp
if AbilityId.WARPGATETRAIN_ZEALOT in abilities:
# Select a random Pylon that isn't the Proxy or cannon
pylon = self.structures(UnitTypeId.PYLON).ready.random
while pylon == self.proxy:
pylon = self.structures(UnitTypeId.PYLON).ready.random
# Get the position of the Pylon
pos = pylon.position
# Find a placement for the Zealot
placement = await self.find_placement(
AbilityId.WARPGATETRAIN_STALKER, pos,
placement_step=3)
# Warp in the Zealot
if placement:
warpgate.warp_in(UnitTypeId.STALKER, placement)
# If we don't have Warpgates, just train a Zealot
elif self.structures(UnitTypeId.GATEWAY).ready.exists:
gateway = self.structures(UnitTypeId.GATEWAY).ready.first
gateway.train(UnitTypeId.ZEALOT)
# Wait to train Stalkers until we have a second base
if (
self.townhalls.amount > 1
and not
(
self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 0
)
):
# If we have Warpgates, warp in Stalkers
if (
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1
and self.structures(UnitTypeId.WARPGATE).ready.exists
):
# Loop over all the Warpgates and warp in Stalkers
for warpgate in self.structures(UnitTypeId.WARPGATE):
# If we can't afford a Stalker, return
if not self.can_afford(UnitTypeId.STALKER):
return
# Get the available abilities of the Warpgate
abilities = await self.get_available_abilities(warpgate)
# Warp a Stalker if we can
if AbilityId.WARPGATETRAIN_STALKER in abilities:
# Select a random Pylon
pylon = self.structures(UnitTypeId.PYLON).ready.random
# If next Stalker is an attacker, warp it to the Proxy
if self.next_stalker_is_attacker():
# Warp the Stalker to the proxy if the Proxy exists
if self.proxy is not None:
pylon = self.proxy
# If next Stalker is a defender, warp it to the base
elif self.next_stalker_is_defender():
# Make sure the random Pylon is not the Proxy
while pylon == self.proxy:
pylon = self.structures(
UnitTypeId.PYLON).ready.random
# Get the position of the Pylon
pos = pylon.position
# Find a placement for the Stalker
placement = await self.find_placement(
AbilityId.WARPGATETRAIN_STALKER, pos,
placement_step=3)
# Warp in the Stalker
if placement:
warpgate.warp_in(UnitTypeId.STALKER, placement)
# If we don't have Warpgates, just train Stalkers
elif self.structures(UnitTypeId.GATEWAY).ready.exists:
# Get all the idle Gateways
gateways = self.structures(UnitTypeId.GATEWAY)
gateways = gateways.filter(lambda gw: gw.is_idle)
# Train Stalkers
for gateway in gateways:
# If we can't afford a Stalker, return
if not self.can_afford(UnitTypeId.STALKER):
return
# Train a Stalker
gateway.train(UnitTypeId.STALKER)
async def chronoboost(self, nexus):
"""Handle the Chronoboost of a specific nexus
Args:
nexus (Unit): the nexus which will use the Chronoboost
"""
# If we have enough energy for Chronoboost
if nexus.energy >= 50:
# If we have a Cybernetics Core
if self.structures(UnitTypeId.CYBERNETICSCORE).ready.exists:
# Select the Cybernetics Core
ccore = self.structures(UnitTypeId.CYBERNETICSCORE).ready.first
# If Cybenetics Core isn't idle and doesn't have Chronoboost
if (
not ccore.is_idle
and not ccore.has_buff(BuffId.CHRONOBOOSTENERGYCOST)
):
# Chronoboost the Cybernetics Core
nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, ccore)
return
# If Warpgate is done or is no Cybernetics Core, Chronoboost others
if (
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1
or not self.structures(UnitTypeId.CYBERNETICSCORE)
):
# If we have a Forge and it's researching, Chronoboost it
if self.structures(UnitTypeId.FORGE).ready.exists:
# Select the Forge
forge = self.structures(UnitTypeId.FORGE).ready.first
# If Forge isn't idle and doesn't have Chronoboost
if (
not forge.is_idle
and not forge.has_buff(BuffId.CHRONOBOOSTENERGYCOST)
):
# Chronoboost the Forge
nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, forge)
return
# If we have a Gateway/Warpgate and isn't idle, Chronoboost it
for gw in (self.structures(UnitTypeId.GATEWAY).ready |
self.structures(UnitTypeId.WARPGATE).ready):
# If Gateway/Warpgate isn't idle and has not Chronoboost
if (
not gw.is_idle
and not gw.has_buff(BuffId.CHRONOBOOSTENERGYCOST)
):
# Chronoboost Gateway/Warpgate
nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, gw)
return
# If all else fails, Chronoboost yourself
if (
not nexus.has_buff(BuffId.CHRONOBOOSTENERGYCOST)
and not self.already_pending(UnitTypeId.CYBERNETICSCORE)
):
# Chronoboost self
nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, nexus)
async def expand(self):
if (
self.can_afford(UnitTypeId.NEXUS)
and self.already_pending(UnitTypeId.NEXUS) == 0
and self.townhalls.amount < self.MAX_NEXUSES
):
logger.info("Expanding")
await self.expand_now()
def attack_now(self, enemy_main_destroyed):
if enemy_main_destroyed:
target: Point2 = self.find_target().position
else:
# Get the enemy's main base
target: Point2 = self.enemy_start_locations[0]
# Log and chat the attack
logger.info(f"Attack {self.timing_attack}, attacking {target}")
# Attack the enemy's main base
for stalker in self.units.tags_in(self.attackers):
stalker.attack(target)
async def attack(self):
"""Attack and defend
"""
# If there is a Wall-off unit, place it in the right place
if self.wall_unit is not None:
# Get the correct position
pos = self.main_base_ramp.protoss_wall_warpin
# If it is not on the position, go to it
self.wall_unit.move(pos)
for nexus_id in self.bases_defenders.keys():
# Get the Nexus
nexus = self.structures.tags_in([nexus_id])
if len(nexus) > 0:
nexus = nexus[0]
if nexus:
# Get the attacking enemies
enemies: Units = (
self.enemy_units.closer_than(30, nexus) |
self.enemy_structures.closer_than(30, nexus)
)
# If there are attacking enemies, attack them
if enemies.exists:
# Get the center position
target = enemies.center
base_defenders = self.bases_defenders[nexus_id]
logger.info(f"Enemies detected, attacking {target}")
                    # Send the defenders to attack the enemies
for stalker in self.units.tags_in(base_defenders):
stalker.attack(target)
else:
base_defenders = self.bases_defenders[nexus_id]
for stalker in self.units.tags_in(base_defenders):
pos = self.find_defend_position(stalker)
stalker.attack(pos)
# If there are attackers, attack
if self.attackers:
# If the enemy's main base is destroyed, attack everything
if await self.enemy_main_destroyed():
self.attack_now(True)
# If we can attack (every 5 minutes)
elif (
self.timing_attack == 0
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1
and self.units.tags_in(self.attackers).ready.amount >= 10
):
self.attack_now(False)
self.timing_attack += 1
await self.chat_send("Started timing attack with 10+ Stalkers")
elif (
self.timing_attack == 1
and self.already_pending_upgrade(
UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 1
and self.units.tags_in(self.attackers).ready.amount >= 20
):
self.attack_now(False)
self.timing_attack += 1
await self.chat_send("Started timing attack with 20+ Stalkers")
elif (
self.timing_attack > 1
and
(
(self.time // 300 == self.attack_amount
and self.units.tags_in(self.attackers).ready.amount > 20)
or self.supply_used >= 190
)
):
self.attack_amount = self.time // 300
self.attack_now(False)
await self.chat_send("Started attack with 20+ Stalkers")
# If enemy main not destroyed and can't attack, go to gather point
else:
# Get the gather position
pos = self.proxy_position
pos = pos.towards(self.enemy_start_locations[0], 10)
# Get all the attacking Stalkers
stalkers = self.units.tags_in(self.attackers)
stalkers = stalkers.filter(lambda s: s.is_idle)
# Go to the gather point
for stalker in stalkers:
stalker.attack(pos)
async def on_unit_created(self, unit: Unit):
"""Gets called when a unit is created
Args:
unit (Unit): the unit that is created
"""
logger.info(f"Unit {unit.name} trained at {unit.position}")
        # If the created Unit is a Zealot, it's the wall-off unit
if (
unit.type_id == UnitTypeId.ZEALOT
and self.wall_unit is None
):
logger.info("Zealot wall-off unit trained")
self.wall_unit = unit
# If the created Unit is a Stalker, figure out what it should be
if unit.type_id == UnitTypeId.STALKER:
# Get the tag of the unit
tag = unit.tag
# If the next Stalker should be an attacker, add it to attackers
if self.next_stalker_is_attacker():
self.attackers.append(tag)
# If the next Stalker should be a defender, add it to defenders
elif self.next_stalker_is_defender():
# Set some variables
min_stalkers_amount = 1000
base_min_stalkers = 0
# Loop over all the base defenders
for base_defenders in self.bases_defenders.values():
stalkers = self.units.tags_in(base_defenders)
# Get the least amount of Stalkers and the base
if stalkers.amount < min_stalkers_amount:
min_stalkers_amount = stalkers.amount
keys = list(self.bases_defenders.keys())
values = list(self.bases_defenders.values())
base_min_stalkers = keys[values.index(base_defenders)]
# Add the Stalkers to the base with the least Stalkers
stalkers_at_base = self.bases_defenders[base_min_stalkers]
stalkers_at_base.append(tag)
# If the unit is in attackers, set pos to the Proxy location
if tag in self.attackers:
pos = self.proxy_position
pos = pos.towards(self.enemy_start_locations[0], 10)
logger.info(f"Stalker added as attacker, attack/defend ratio: "
f"{self.attackers.amount}/{self.get_defenders()}")
# If the unit is a defender, set pos to defend location
else:
                # Fall back to the main base if this Stalker is in no defender group
                pos = self.main.position
                # Loop over all the defenders
                for base_defenders in self.bases_defenders.values():
                    # If the tag is in one of the defenders
                    if tag in base_defenders:
                        pos = self.find_defend_position(unit)
                        # Log
                        logger.info(f"Stalker added as defender, "
                                    f"attack/defend ratio: "
                                    f"{self.attackers.amount}/"
                                    f"{self.get_defenders()}")
                        break
# Attack the pos
unit.attack(pos)
async def on_building_construction_started(self, unit: Unit):
"""Gets called when a building is started building
Args:
unit (Unit): the building that is started building
"""
logger.info(f"Structure {unit.name} "
f"started building at {unit.position}")
# If the unit is a Pylon, it's either the Proxy or Cannon
if (
unit.type_id == UnitTypeId.PYLON
and unit.distance_to(self.enemy_start_locations[0]) < 80
and self.proxy is None
):
self.proxy = unit
self.proxy_position = unit.position3d
# If the structure is a Gateway, set the rally point
elif unit.type_id == UnitTypeId.GATEWAY:
# Calculate the offset
x = (self._game_info.map_center.x - unit.position.x) // 10
y = -4 if unit.position.y < self._game_info.map_center.y else 4
offset = (x, y)
# Offset the position by the offset
pos = self.main_base_ramp.barracks_in_middle.offset(offset)
# Rally the Gateway
unit(AbilityId.RALLY_BUILDING, pos)
async def on_building_construction_complete(self, unit: Unit):
# Assign an empty Units object to each Nexus
if unit.type_id == UnitTypeId.NEXUS:
self.bases_defenders[unit.tag] = Units([], self)
async def on_unit_destroyed(self, unit_tag: int):
"""When the building gets destroyed, check if it is the Proxy
Args:
unit (Unit): the building that was destroyed
"""
# If the destroyed unit is Proxy, log to console
if self.proxy is None:
logger.info(f"Proxy {unit_tag} was destroyed")
# If destroyed unit is attacker, log to console
if unit_tag in self.attackers:
logger.info(f"Attack Stalker {unit_tag} was killed")
self.attackers.remove(unit_tag)
        # If the destroyed unit is a defender, log to console
        for base_defenders in self.bases_defenders.values():
            if unit_tag in base_defenders:
logger.info(f"Defense Stalker {unit_tag} was killed")
base_defenders.remove(unit_tag)
if self.wall_unit is not None:
# If destroyed unit is wall-off unit, log to console
if unit_tag == self.wall_unit.tag:
logger.info(f"Wall-off Zealot {unit_tag} was killed")
def get_proxy_location(self) -> Point3:
"""Returns the new Proxy location
Returns:
Point3: the Proxy location
"""
# Calculate the Proxy Pylon's position
proxy_position = self.enemy_start_locations[0]
proxy_position = proxy_position.towards(self.game_info.map_center, 60)
        # Calculate the height
(x, y) = proxy_position
z = self.get_terrain_z_height(proxy_position)
# Return the 3d coordinates
return Point3((x, y, z))
def find_target(self):
"""Find a target to attack
Returns:
Unit: the unit to attack
"""
# Returns enemy structures
if self.enemy_structures.amount > 0:
return self.enemy_structures.random
# Else return enemy units
elif self.enemy_units.amount > 0:
return self.enemy_units.random
# Else return the start location
else:
return self.enemy_start_locations[0]
def next_stalker_is_attacker(self) -> bool:
"""Returns True if the next Stalker should be an attacker
Returns:
bool: is the next Stalker an attacker
"""
defenders = self.get_defenders() != 0
# If there are defenders and attackers, calculate the ratio
if defenders and self.attackers:
# For each defender there are {attack_defend_ratio} attackers
defenders_amount = self.get_defenders() * self.attack_defend_ratio
attackers_amount = self.attackers.amount
# If the defender amount is greater, next should be attacker
if defenders_amount > attackers_amount:
return True
# If the attacker amount is greater, next should be defender
elif defenders_amount < attackers_amount:
return False
# If they are even, next should be an attacker
else:
return True
        # If there are defenders but no attackers, next should be attacker
elif defenders:
return True
# If there is one attacker but no defender, next should be defender
elif self.attackers:
return False
        # If there are no attackers and no defenders, next should be attacker
else:
return True
def next_stalker_is_defender(self) -> bool:
"""Returns True if the next Stalker should be a defender
Returns:
bool: is the next Stalker a defender
"""
        # The next Stalker is a defender exactly when it is not an attacker
return not self.next_stalker_is_attacker()
def get_defenders(self):
defenders = 0
for base_defenders in self.bases_defenders.values():
defenders += base_defenders.amount
return defenders
def find_defend_position(self, stalker: Unit) -> Point2:
# Loop over all the defenders
for base_defenders in self.bases_defenders.values():
for base_defender in base_defenders:
# If the tag is in one of the defenders
if stalker.tag == base_defender:
# Get the base
keys = list(self.bases_defenders.keys())
values = list(self.bases_defenders.values())
base = keys[values.index(base_defenders)]
base = self.townhalls.tags_in([base])
if len(base) > 0:
base = base[0].position
pos = base.towards(self.enemy_start_locations[0], -10)
return pos
raise TypeError(f"Stalker {stalker} is not a defender")
async def enemy_main_destroyed(self) -> bool:
"""Returns True if the enemy's main base is destroyed
Returns:
bool: is the enemy's main base destroyed
"""
# If enemy main was already destroyed, assume it will stay destroyed
if self.enemy_main_destroyed_triggerd:
return True
# If units are close enough to see if the main is destroyed, check it
if (
self.units.closer_than(10, self.enemy_start_locations[0]).exists
):
# Count the amount of townhalls that are not there
townhalls = 0
for townhall in TOWNHALLS:
if (
not self.enemy_structures(townhall).closer_than(
5, self.enemy_start_locations[0])
):
townhalls += 1
# If amount of townhalls not there == amount possible townhalls
if townhalls == len(TOWNHALLS):
# Do some logging
await self.chat_send("Your main base is destroyed, "
"what are you going to do now?")
logger.info(f"Townhall: {townhall} is destroyed")
# Set the enemy main to destroyed
self.enemy_main_destroyed_triggerd = True
# Return True
return True
# If amount of townhalls not there != amount possible townhalls
return False
# If there aren't any units close enough to the enemy base
return False
    @property
def max_gateways(self) -> int:
"""The max Gateways allowed
Returns:
int: the amount of Gateways that are allowed
"""
# Calculate the maximum amount of Gateways (+1 every 30 seconds)
return self.time // 30 + 1
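# Launch sketch (not part of the original bot): how a python-sc2 bot like the one
# above is typically started. `ProxyStalkerBot` is a stand-in for the bot class
# defined earlier in this file and the map name is an arbitrary example; both are
# assumptions, so substitute the real names before running.
if __name__ == "__main__":
    from sc2 import maps
    from sc2.main import run_game
    from sc2.data import Race, Difficulty
    from sc2.player import Bot, Computer
    run_game(
        maps.get("AcropolisLE"),  # any locally installed ladder map works
        [Bot(Race.Protoss, ProxyStalkerBot()), Computer(Race.Terran, Difficulty.Hard)],
        realtime=False,
    )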
| StarcoderdataPython |
6576389 | import numpy as np
def fire(time, temperature_initial):
    time /= 60.0  # convert time from seconds to minutes (the temperature curve below expects minutes)
temperature_initial -= 273.15 # convert ambient temperature from kelvin to celsius
temperature = (
660 * (1 - 0.687 * np.exp(-0.32 * time) - 0.313 * np.exp(-3.8 * time))
+ temperature_initial
)
return temperature + 273.15 # convert temperature from celsius to kelvin
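# Usage sketch (assumed, not part of the original module): evaluate the curve
# 30 minutes (1800 s) into a fire with an ambient temperature of 293.15 K (20 °C).
if __name__ == "__main__":
    t_gas = fire(1800.0, 293.15)
    print(f"Gas temperature after 30 min: {t_gas:.1f} K ({t_gas - 273.15:.1f} °C)")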
| StarcoderdataPython |
9744807 | <gh_stars>0
import sys
import os
import string
from selenium import webdriver
def fetchContest(contestNum):
    """Scrape every problem (A, B, C, ...) of a Codeforces contest.
    Saves a screenshot of each problem statement plus its sample
    input/output files under ./<contestNum>/<problem letter>/.
    """
baseURL = "https://codeforces.com/problemset/problem/" + contestNum + "/"
if(not os.path.exists("./" + contestNum)):
os.mkdir("./" + contestNum)
driver = webdriver.Firefox(executable_path="../drivers/geckodriver.exe")
i = 0
while(i < 26):
currURL = baseURL + string.ascii_uppercase[i]
driver.get(currURL)
if(not driver.current_url == currURL):
break
currPath = "./" + contestNum + "/" + string.ascii_uppercase[i]
if(not os.path.exists(currPath)):
os.mkdir(currPath)
problem = driver.find_element_by_class_name("problem-statement")
problem.screenshot(currPath + "/problem.png")
inputs = driver.find_elements_by_css_selector("div.input pre")
outputs = driver.find_elements_by_css_selector("div.output pre")
for j in range(len(inputs)):
f = open(currPath + "/input" + str(j + 1) + ".txt", "w")
f.write(inputs[j].text)
f.close()
for j in range(len(outputs)):
f = open(currPath + "/output" + str(j + 1) + ".txt", "w")
f.write(outputs[j].text)
f.close()
i += 1
driver.close()
if(not len(sys.argv) == 2):
print("Usage: python fetch_round.py <contest_number>")
sys.exit(1)
contestNum = sys.argv[1]
fetchContest(contestNum) | StarcoderdataPython |
28297 | <reponame>kboone/dotfiles<gh_stars>0
from skimage import color
import numpy as np
colors = [
("base03", (10., +00., -05.)),
("base02", (15., +00., -05.)),
("base01", (45., -01., -05.)),
("base00", (50., -01., -05.)),
("base0", (60., -01., -02.)),
("base1", (65., -01., -02.)),
("base2", (92., -00., +02.)),
("base3", (97., +00., +02.)),
("yellow", (60., +10., +65.)),
("orange", (45., +30., +35.)),
("red", (50., +35., +25.)),
("magenta", (45., +35., -05.)),
("violet", (45., +15., -35.)),
("blue", (55., -10., -25.)), # class/function name
("cyan", (55., -20., +15.)), # comments
("green", (60., -10., +75.)), # def, class, etc.
]
lab_colors = [i[1] for i in colors]
color_labels = [i[0] for i in colors]
rgb_colors = color.lab2rgb([lab_colors])[0] * 255
hex_colors = []
for rgb_color in rgb_colors:
hex_color = ""
for val in rgb_color:
if hex_color:
hex_color += "/"
hex_color += '%02x' % int(np.round(val))
hex_colors.append(hex_color)
color_text = []
for label, hex_color in zip(color_labels, hex_colors):
color_text.append("%s=\"%s\"" % (label, hex_color))
template = open("set_colorscheme_template.sh")
out_str = ""
for line in template:
if line == "COLOR_DEFINITIONS_HERE\n":
next_line = "\n".join(color_text) + "\n"
else:
next_line = line
out_str += (next_line)
out_file = open("set_colorscheme.sh", "w")
print(out_str, file=out_file)
| StarcoderdataPython |
1621516 | <reponame>DX-MON/OpenPICle
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, unsigned
from .types import Opcodes, ArithOpcode, LogicOpcode, BitOpcode
from .busses import *
__all__ = ["PIC16"]
class PIC16(Elaboratable):
def __init__(self):
self.iBus = InstructionBus()
self.pBus = PeripheralBus()
self.pcLatchHigh = Signal(8)
self.wreg = Signal(8)
self.pc = Signal(12)
self.flags = Signal(8)
def elaborate(self, platform):
from .decoder import Decoder
from .alu import ArithUnit, LogicUnit
from .bitmanip import Bitmanip
from .callStack import CallStack
m = Module()
decoder = Decoder()
m.submodules.decoder = decoder
arithUnit = ArithUnit()
m.submodules.arith = arithUnit
logicUnit = LogicUnit()
m.submodules.logic = logicUnit
bitmanip = Bitmanip()
m.submodules.bitmanip = bitmanip
callStack = CallStack()
m.submodules.callStack = callStack
q = Signal(unsigned(2))
actualQ = Signal(unsigned(2))
m.d.sync += q.eq(q + 1)
m.d.sync += actualQ.eq(q)
instruction = Signal(14)
lhs = Signal(8)
rhs = Signal(8)
result = Signal(8)
targetBit = Signal(3)
opEnable = Signal()
skip = Signal()
pause = Signal()
pcNext = Signal.like(self.pc)
carry = self.flags[0]
zero = self.flags[1]
opcode = Signal(Opcodes)
arithOpcode = self.mapArithOpcode(m, opcode)
logicOpcode = self.mapLogicOpcode(m, opcode)
bitOpcode = self.mapBitmanipOpcode(m, opcode)
loadsWReg = self.loadsWReg(m, opcode)
loadsFReg = self.loadsFReg(m, opcode)
loadsLiteral = self.loadsLiteral(m, opcode)
storesWReg = Signal()
storesFReg = Signal()
changesFlow = self.changesFlow(m, opcode)
loadPCLatchHigh = self.loadPCLatchHigh(m, opcode)
isReturn = self.isReturn(m, opcode)
storesZeroFlag = Signal()
resultFromArith = Signal()
resultFromLogic = Signal()
resultFromBit = Signal()
resultFromLit = Signal()
resultFromWReg = Signal()
resultZero = Signal()
carryInvert = Signal()
m.d.comb += opEnable.eq(0)
with m.Switch(q):
with m.Case(0):
with m.If(storesWReg):
m.d.sync += self.wreg.eq(result)
with m.Elif(storesFReg):
m.d.sync += [
self.pBus.address.eq(instruction[0:7]),
self.pBus.writeData.eq(result),
self.pBus.write.eq(1)
]
with m.If(storesZeroFlag):
m.d.sync += zero.eq(skip)
with m.If(resultFromArith):
m.d.sync += carry.eq(arithUnit.carry)
with m.Elif(resultFromBit):
m.d.sync += carry.eq(bitmanip.carryOut)
with m.If((opcode == Opcodes.INCFSZ) | (opcode == Opcodes.DECFSZ)):
m.d.sync += pause.eq(skip)
m.d.comb += [
self.iBus.read.eq(1),
opEnable.eq(1)
]
with m.Case(1):
with m.If(pause):
m.d.sync += instruction.eq(0), # Load a NOP if we're entering a pause cycle.
with m.Else():
m.d.sync += [
instruction.eq(self.iBus.data),
self.pBus.address.eq(self.iBus.data[0:7])
]
m.d.sync += carry.eq(carry ^ (resultFromArith & carryInvert))
m.d.sync += self.pBus.write.eq(0)
with m.Case(2):
with m.If(pause):
m.d.sync += pause.eq(0)
with m.If(loadsFReg):
m.d.comb += self.pBus.read.eq(1)
with m.If(opcode == Opcodes.CALL):
m.d.sync += [
callStack.valueIn.eq(self.pc + 1),
callStack.push.eq(1)
]
with m.Elif(isReturn):
m.d.sync += [
self.pc.eq(callStack.valueOut),
callStack.pop.eq(1)
]
with m.If(loadPCLatchHigh):
m.d.sync += [
self.pc[0:11].eq(instruction[0:11]),
self.pc[11:].eq(self.pcLatchHigh[3:5])
]
with m.Elif(~changesFlow):
m.d.sync += self.pc.eq(pcNext)
with m.Case(3):
with m.If(opcode == Opcodes.CALL):
m.d.sync += callStack.push.eq(0)
with m.Elif(isReturn):
m.d.sync += callStack.pop.eq(0)
with m.If(loadsWReg):
m.d.sync += lhs.eq(self.wreg)
with m.Else():
m.d.sync += lhs.eq(0)
with m.If(loadsFReg):
m.d.sync += rhs.eq(self.pBus.readData)
with m.Elif(loadsLiteral):
m.d.sync += rhs.eq(instruction[0:8])
with m.Else():
m.d.sync += rhs.eq(0)
with m.If((opcode == Opcodes.BCF) | (opcode == Opcodes.BSF)):
m.d.sync += targetBit.eq(instruction[7:10])
with m.If(resultFromArith):
m.d.comb += result.eq(arithUnit.result)
with m.Elif(resultFromLogic):
m.d.comb += result.eq(logicUnit.result)
with m.Elif(resultFromBit):
m.d.comb += result.eq(bitmanip.result)
with m.Elif(resultZero):
m.d.comb += result.eq(0)
with m.Elif(resultFromLit):
m.d.comb += result.eq(rhs)
with m.Elif(resultFromWReg):
m.d.comb += result.eq(self.wreg)
with m.If(~changesFlow):
m.d.comb += pcNext.eq(self.pc + 1)
m.d.sync += [
arithUnit.operation.eq(arithOpcode),
logicUnit.operation.eq(logicOpcode),
bitmanip.operation.eq(bitOpcode),
bitmanip.value.eq(rhs),
bitmanip.carryIn.eq(carry),
resultFromArith.eq(self.resultFromArith(m, opcode)),
resultFromLogic.eq(logicOpcode != LogicOpcode.NONE),
resultFromBit.eq(bitOpcode != BitOpcode.NONE),
resultFromLit.eq(opcode == Opcodes.MOVLW),
resultFromWReg.eq(opcode == Opcodes.MOVWF),
resultZero.eq((opcode == Opcodes.CLRF) | (opcode == Opcodes.CLRW)),
storesWReg.eq(self.storesWReg(m, opcode, instruction[7])),
storesFReg.eq(self.storesFReg(m, opcode, instruction[7])),
storesZeroFlag.eq(self.storesZeroFlag(m, opcode)),
carryInvert.eq((arithOpcode == ArithOpcode.SUB) | (arithOpcode == ArithOpcode.DEC)),
]
m.d.comb += [
decoder.instruction.eq(instruction),
opcode.eq(decoder.opcode),
arithUnit.enable.eq(opEnable),
arithUnit.lhs.eq(lhs),
arithUnit.rhs.eq(rhs),
skip.eq(arithUnit.result == 0),
logicUnit.enable.eq(opEnable),
logicUnit.lhs.eq(lhs),
logicUnit.rhs.eq(rhs),
bitmanip.targetBit.eq(targetBit),
bitmanip.enable.eq(opEnable),
self.iBus.address.eq(self.pc),
]
return m
def mapArithOpcode(self, m, opcode):
result = Signal(ArithOpcode, name = "aluOpcode")
with m.Switch(opcode):
with m.Case(Opcodes.ADDLW, Opcodes.ADDWF):
m.d.comb += result.eq(ArithOpcode.ADD)
with m.Case(Opcodes.SUBLW, Opcodes.SUBWF):
m.d.comb += result.eq(ArithOpcode.SUB)
with m.Case(Opcodes.INCF, Opcodes.INCFSZ):
m.d.comb += result.eq(ArithOpcode.INC)
with m.Case(Opcodes.DECF, Opcodes.DECFSZ):
m.d.comb += result.eq(ArithOpcode.DEC)
with m.Default():
m.d.comb += result.eq(ArithOpcode.ADD)
return result
def resultFromArith(self, m, opcode):
result = Signal(name = "resultFromArith")
with m.Switch(opcode):
with m.Case(
Opcodes.ADDLW,
Opcodes.SUBLW,
Opcodes.INCF,
Opcodes.DECF,
Opcodes.ADDWF,
Opcodes.SUBWF,
Opcodes.INCFSZ,
Opcodes.DECFSZ
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def mapLogicOpcode(self, m, opcode):
result = Signal(LogicOpcode, name = "logicOpcode")
with m.Switch(opcode):
with m.Case(Opcodes.ANDLW, Opcodes.ANDWF):
m.d.comb += result.eq(LogicOpcode.AND)
with m.Case(Opcodes.IORLW, Opcodes.IORWF):
m.d.comb += result.eq(LogicOpcode.OR)
with m.Case(Opcodes.XORLW, Opcodes.XORWF):
m.d.comb += result.eq(LogicOpcode.XOR)
with m.Default():
m.d.comb += result.eq(LogicOpcode.NONE)
return result
def mapBitmanipOpcode(self, m, opcode):
result = Signal(BitOpcode, name = "bitOpcode")
with m.Switch(opcode):
with m.Case(Opcodes.RRF):
m.d.comb += result.eq(BitOpcode.ROTR)
with m.Case(Opcodes.RLF):
m.d.comb += result.eq(BitOpcode.ROTL)
with m.Case(Opcodes.SWAPF):
m.d.comb += result.eq(BitOpcode.SWAP)
with m.Case(Opcodes.BCF):
m.d.comb += result.eq(BitOpcode.BITCLR)
with m.Case(Opcodes.BSF):
m.d.comb += result.eq(BitOpcode.BITSET)
with m.Default():
m.d.comb += result.eq(BitOpcode.NONE)
return result
def loadsWReg(self, m, opcode):
result = Signal(name = "loadsWReg")
with m.Switch(opcode):
with m.Case(
Opcodes.MOVWF,
Opcodes.ADDWF,
Opcodes.SUBWF,
Opcodes.ANDWF,
Opcodes.IORWF,
Opcodes.XORWF,
Opcodes.ADDLW,
Opcodes.SUBLW,
Opcodes.ANDLW,
Opcodes.IORLW,
Opcodes.XORLW
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def loadsFReg(self, m, opcode):
result = Signal(name = "loadsFReg")
with m.Switch(opcode):
with m.Case(
Opcodes.ADDWF,
Opcodes.SUBWF,
Opcodes.ANDWF,
Opcodes.IORWF,
Opcodes.XORWF,
Opcodes.INCF,
Opcodes.INCFSZ,
Opcodes.DECF,
Opcodes.DECFSZ,
Opcodes.COMF,
Opcodes.MOVF,
Opcodes.RLF,
Opcodes.RRF,
Opcodes.SWAPF,
Opcodes.BCF,
Opcodes.BSF,
Opcodes.BTFSC,
Opcodes.BTFSS
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def loadsLiteral(self, m, opcode):
result = Signal(name = "loadsLiteral")
with m.Switch(opcode):
with m.Case(
Opcodes.MOVLW,
Opcodes.RETLW,
Opcodes.ADDLW,
Opcodes.SUBLW,
Opcodes.ANDLW,
Opcodes.IORLW,
Opcodes.XORLW
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def storesWReg(self, m, opcode, dir):
result = Signal(name = "storesWReg")
with m.Switch(opcode):
with m.Case(
Opcodes.CLRW,
Opcodes.MOVLW,
Opcodes.RETLW,
Opcodes.ADDLW,
Opcodes.SUBLW,
Opcodes.ANDLW,
Opcodes.IORLW,
Opcodes.XORLW
):
m.d.comb += result.eq(1)
with m.Case(
Opcodes.CLRF,
Opcodes.DECF,
Opcodes.DECFSZ,
Opcodes.MOVF,
Opcodes.COMF,
Opcodes.INCF,
Opcodes.INCFSZ,
Opcodes.RRF,
Opcodes.RLF,
Opcodes.SWAPF,
Opcodes.BCF,
Opcodes.BSF
):
m.d.comb += result.eq(~dir)
with m.Default():
m.d.comb += result.eq(0)
return result
def storesFReg(self, m, opcode, dir):
result = Signal(name = "storesFReg")
with m.Switch(opcode):
with m.Case(
Opcodes.MOVWF,
Opcodes.CLRF,
Opcodes.SUBWF,
Opcodes.DECF,
Opcodes.DECFSZ,
Opcodes.IORWF,
Opcodes.ANDWF,
Opcodes.XORWF,
Opcodes.ADDWF,
Opcodes.MOVF,
Opcodes.COMF,
Opcodes.INCF,
Opcodes.INCFSZ,
Opcodes.RRF,
Opcodes.RLF,
Opcodes.SWAPF
):
m.d.comb += result.eq(dir)
with m.Case(
Opcodes.BCF,
Opcodes.BSF
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def changesFlow(self, m, opcode):
result = Signal(name = "changesFlow")
with m.Switch(opcode):
# Need to handle bit test f skip if <condition>..?
with m.Case(
Opcodes.CALL,
Opcodes.GOTO,
Opcodes.RETFIE,
Opcodes.RETLW,
Opcodes.RETURN
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def isReturn(self, m, opcode):
result = Signal(name = "isReturn")
with m.Switch(opcode):
with m.Case(
Opcodes.RETFIE,
Opcodes.RETLW,
Opcodes.RETURN
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def loadPCLatchHigh(self, m, opcode):
result = Signal(name = "loadPCLatch")
with m.Switch(opcode):
with m.Case(
Opcodes.CALL,
Opcodes.GOTO
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
def storesZeroFlag(self, m, opcode):
result = Signal(name = "storesZeroFlag")
with m.Switch(opcode):
with m.Case(
Opcodes.CLRW,
Opcodes.CLRF,
Opcodes.SUBWF,
Opcodes.DECF,
Opcodes.IORWF,
Opcodes.ANDWF,
Opcodes.XORWF,
Opcodes.ADDWF,
Opcodes.MOVF,
Opcodes.COMF,
Opcodes.INCF,
Opcodes.ADDLW,
Opcodes.SUBLW,
Opcodes.ANDLW,
Opcodes.IORLW,
Opcodes.XORLW
):
m.d.comb += result.eq(1)
with m.Default():
m.d.comb += result.eq(0)
return result
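# Elaboration sketch (an assumption, not part of the original design): emit Verilog
# for the core with Amaranth's backend. Because this module uses relative imports,
# run it as `python -m <package>.pic16` from the package root (the exact module path
# is not shown here) rather than as a plain script.
if __name__ == "__main__":
    from amaranth.back import verilog
    core = PIC16()
    print(verilog.convert(core, name="pic16", ports=[core.pc, core.wreg, core.flags]))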
| StarcoderdataPython |
3452053 | from mongoengine import Document, fields
from mongodbforms import DocumentForm
import unittest
class MyDocument(Document):
mystring = fields.StringField()
myverbosestring = fields.StringField(verbose_name="Foobar")
myrequiredstring = fields.StringField(required=True)
class MyForm(DocumentForm):
class Meta:
document = MyDocument
class SimpleDocumentTest(unittest.TestCase):
def test_form(self):
form = MyForm()
self.assertEquals(len(form.fields), 3)
self.assertFalse(form.fields['mystring'].required)
self.assertEquals(form.fields['myverbosestring'].label, "Foobar")
self.assertTrue(form.fields['myrequiredstring'].required)
| StarcoderdataPython |
4831281 | import random, math
import time
from itertools import product
R = 32
N = 1000
#d = 13393249480990767973698914121061987209673507827659760595482620214891467806973397091277092174
d = 1233932494 #31 bits
#d = 23393249481
#d = 133932494809907679736989141210619872096735078276597605954826202148555332 #237 bits
q = 2**252 + 27742317777372353535851937790883648493
#q = 2**32
window_size = 10
RANDOMIZED_BITS = 0
r = []
v = []
alpha = []
n = 0
def getalpha(v,j):
return v[j]-v[0]
def int_to_bin(number):
return [int(x) for x in bin(number)[2:]]
def bin_to_int(bit_list):
output = 0
for bit in bit_list:
output = output * 2 + bit
return output
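#Bits are read most-significant-bit first, e.g. bin_to_int([1, 0, 1, 1]) == 11 and
#bin_to_int(int_to_bin(42)) == 42 (illustrative round-trip, not part of the attack itself).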
def generate_v():
value = d + (r[0]*q)
#print (r[0]*q), " , ",int_to_bin((r[0]*q))
v.append(value)
for i in xrange(1, N):
value = d + (r[i]*q)
v.append(value)
def generate_alpha_js():
for i in xrange(1, N+1):
al = r[i] - r[0]
alpha.append(int(math.fabs(al)))
def generate_r_js():
for i in xrange(0, N+1):
a = random.getrandbits(R)
r.append(int(math.fabs(a)))
def xor_operation(v, d):
result = v ^ d
return result
def sum_all_ds(d_candidates, w, w_prime, mod_value):
pairs = {}
for d in d_candidates:
#print "d_candidate: ", d
sum_hw_d = 0
for j in xrange(1,N):
d_prime = (bin_to_int(d)+(getalpha(v,j))) % mod_value
v_j = v[j] % mod_value
#change here to get now "W - W_prime"
temp_xor = int_to_bin(xor_operation(v_j, d_prime))
temp = temp_xor[:(w-w_prime)]
#print "Xor: ", temp_xor
#print "Temp: ", temp
pre_sum = temp.count(1)
#print "HW: ", pre_sum
#time.sleep(0.5)
sum_hw_d = sum_hw_d + pre_sum
try:
if pairs[sum_hw_d] <> None:
val = pairs[sum_hw_d]
pairs[sum_hw_d].append(d)
except Exception as e:
pairs[sum_hw_d] = [d]
return min(pairs.keys()) , pairs[min(pairs.keys())]
def check_result(d_candidates):
for d_prime in d_candidates:
if bin_to_int(d_prime) == d:
return True, d_prime
return False, None
def wide_window_attack():
generate_r_js()
generate_alpha_js()
generate_v()
print "d = ", int_to_bin(d), " len: ", len(int_to_bin(d)), " d ", d
print "Starting...."
w_prime = 0
w = window_size
#d_prime = [0]
d_candidate = []
difference = w - w_prime
variations = []
for i in product([0,1], repeat=difference):
variations.append(list(i))
n = len(int_to_bin(v[0]))
while (w < (n)):
print "w: ", w
print "w_prime: ", w_prime
mod_value = 2**w
to_iterate = []
if len(d_candidate) > 0:
for d_prime in d_candidate:
for variation in variations:
to_iterate.append(variation+d_prime)
else:
for variation in variations:
to_iterate.append(variation)
sum_d , d_candidate = sum_all_ds(to_iterate, w, w_prime, mod_value)
print "sum: ", sum_d, " d_candidate = ", d_candidate
print "sum: ", sum_d, " d_candidate = ", d_candidate[0], " d_ca = ", bin_to_int(d_candidate[0])
#time.sleep(5)
w_prime = w
w = w + window_size
check, result_d = check_result(d_candidate)
if check:
w = w+n
print "FOUND KEY."
print result_d
#if (d == bin_to_int(d_prime)):
# print "FOUND KEY."
#else:
# print "SORRY"
print "Finished."
wide_window_attack()
#lista = [1,2,3,4,5,6,7,8]
#print lista[3:5]
#print lista[-2]
| StarcoderdataPython |
4932578 | <filename>tests/test_decorators/test_has.py
# external
import pytest
# project
import deal
# app
from .helpers import run_sync
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], True),
(['socket'], True),
(['network', 'stdout'], True),
(['import', 'network', 'stdout'], True),
(['stdout'], False),
(['stderr'], False),
(['write'], False),
(['read'], False),
(['import'], False),
])
def test_has_network(markers: list, expected: bool):
assert deal.has(*markers).has_network is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], True),
(['socket'], True),
(['network', 'stdout'], True),
(['import', 'network', 'stdout'], True),
(['stdout'], True),
(['stderr'], True),
(['write'], True),
(['read'], True),
(['import'], False),
(['global'], False),
])
def test_has_io(markers: list, expected: bool):
assert deal.has(*markers).has_io is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], False),
(['socket'], False),
(['network', 'stdout'], True),
(['import', 'network', 'stdout'], True),
(['stdout'], True),
(['print'], True),
(['stderr'], False),
(['write'], False),
(['read'], False),
(['import'], False),
])
def test_has_stdout(markers: list, expected: bool):
assert deal.has(*markers).has_stdout is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], False),
(['socket'], False),
(['network', 'stderr'], True),
(['network', 'stdout'], False),
(['import', 'network', 'stderr'], True),
(['stderr'], True),
(['stdout'], False),
(['print'], False),
(['write'], False),
(['read'], False),
(['import'], False),
])
def test_has_stderr(markers: list, expected: bool):
assert deal.has(*markers).has_stderr is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], False),
(['network'], False),
(['socket'], False),
(['network', 'stderr'], False),
(['network', 'stdout'], False),
(['import', 'network', 'stderr'], True),
(['stderr'], False),
(['stdout'], False),
(['print'], False),
(['write'], False),
(['read'], False),
(['import'], True),
])
def test_has_import(markers: list, expected: bool):
assert deal.has(*markers).has_import is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], False),
(['network'], False),
(['socket'], False),
(['network', 'stderr'], False),
(['network', 'stdout'], False),
(['import', 'global', 'stderr'], True),
(['stderr'], False),
(['stdout'], False),
(['print'], False),
(['write'], False),
(['read'], False),
(['import'], False),
(['global'], True),
(['nonlocal'], True),
])
def test_has_global(markers: list, expected: bool):
assert deal.has(*markers).has_global is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], False),
(['socket'], False),
(['network', 'stderr'], False),
(['network', 'stdout'], False),
(['import', 'read', 'stderr'], True),
(['stderr'], False),
(['stdout'], False),
(['print'], False),
(['write'], False),
(['read'], True),
(['import'], False),
(['global'], False),
(['nonlocal'], False),
])
def test_has_read(markers: list, expected: bool):
assert deal.has(*markers).has_read is expected
@pytest.mark.parametrize('markers, expected', [
(['io'], True),
(['network'], False),
(['socket'], False),
(['network', 'stderr'], False),
(['network', 'stdout'], False),
(['import', 'write', 'stderr'], True),
(['stderr'], False),
(['stdout'], False),
(['print'], False),
(['read'], False),
(['write'], True),
(['import'], False),
(['global'], False),
(['nonlocal'], False),
])
def test_has_write(markers: list, expected: bool):
assert deal.has(*markers).has_write is expected
def test_decorating_regular_function():
@deal.has()
def func(msg):
if msg:
print(msg)
return msg
assert func('') == ''
with pytest.raises(deal.SilentContractError):
func('a')
def test_custom_exception():
@deal.has(exception=KeyError)
def func(msg):
if msg:
print(msg)
return msg
assert func('') == ''
with pytest.raises(KeyError):
func('a')
def test_custom_message():
@deal.has(message='oh hi mark')
def func(msg):
if msg:
print(msg)
return msg
assert func('') == ''
with pytest.raises(deal.SilentContractError, match='oh hi mark'):
func('a')
def test_custom_exc_and_message():
@deal.has(exception=KeyError, message='oh hi mark')
def func(msg):
if msg:
print(msg)
return msg
assert func('') == ''
with pytest.raises(KeyError, match='oh hi mark'):
func('a')
def test_custom_exc_instance_and_message():
@deal.has(exception=KeyError('oh no'), message='oh hi mark')
def func(msg):
if msg:
print(msg)
return msg
assert func('') == ''
with pytest.raises(KeyError, match='oh no'):
func('a')
def test_decorating_async_function():
@deal.has()
async def func(msg):
if msg:
print(msg)
return msg
assert run_sync(func('')) == ''
with pytest.raises(deal.SilentContractError):
run_sync(func('a'))
| StarcoderdataPython |
3420431 | <reponame>taesko/fst
__version__ = "0.0.3a1"
| StarcoderdataPython |
5181066 | <reponame>snoopy369/FiveThirtyEight<filename>State Words.py
# coding: utf-8
# Import Word List
import urllib.request
#Read in the list of words. Fortunately, this list of words is just straight up text, so easy to parse!
url="https://norvig.com/ngrams/word.list"
file = urllib.request.urlopen(url)
#Initialize the wordlist
wordlist = []
#Read in each line of the file
for line in file:
word = line.decode("utf-8").rstrip()
wordlist.append(word)
# Import State List
from lxml import html
import requests
#Read in a list of states. This page is a little more complex to parse, so need some html parsing.
url_state = "https://www.plaintextlist.com/geography/list_of_us_states/"
#Read in the page
page = requests.get(url_state)
#Translate the page html to tree nodes so we can parse it
tree = html.fromstring(page.content)
#This reads in only the text from the p elements with the content_item class
states = tree.xpath('//p[@class="content_item"]/span/text()')
#Lowcase the state names so they match the word list
states = [str.lower() for str in states]
#Cribbed this from https://www.geeksforgeeks.org/check-two-strings-common-substring/ with some changes- thanks, ChitraNayal!
#This compares two strings and returns True if they share any common letters, otherwise False
def twoStrings(s1, s2) :
# vector for storing character
# occurrences
v = [0] * (26)
# increment vector index for every
# character of str1
for i in range(len(s1)):
v[ord(s1[i]) - ord('a')] = True
# checking common substring
# of str2 in str1
for i in range(len(s2)) :
        if (ord(s2[i]) - ord('a') < 26) and (ord(s2[i]) - ord('a') >= 0):
if (v[ord(s2[i]) - ord('a')]) :
return True
return False
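#Added illustration (assumed inputs): twoStrings("flax", "ohio") is False because the
#two share no letters, while twoStrings("flax", "texas") is True ('a' and 'x' are shared).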
#Using twoStrings above, compare a word to a list of words.
#A word "matches" a state here when the two share no letters.
#If exactly one state matches, return that state
#If none match, return NONE
#If two or more match, return MULTIPLE
def checkWord(word,wordlist):
count=0
match=''
for worditem in wordlist:
if twoStrings(word,worditem) == False:
count = count + 1
match = worditem
##print(worditem,':',word)
if count > 1:
return 'MULTIPLE'
if count == 1:
return match
return 'NONE'
#Run checkWord on each element in wordlist, then grab the lengths and zip to a final result list
resultList = [checkWord(worditem,states) for worditem in wordlist]
lenList = [len(word) for word in wordlist]
final_list=list(zip(wordlist,resultList,lenList))
#Determine the max length by iteration
maxLen=0
for (word,result,len) in final_list:
if (result != 'MULTIPLE' and result != 'NONE'):
if len > maxLen:
maxLen = len
#Print out those records with the max length
for (word,result,len) in final_list:
if (result != 'MULTIPLE' and result != 'NONE' and len == maxLen):
print(word,':',result)
#Extra Credit! Finding the count per state
import pandas as pd
import numpy as np
#Using pandas, creating a dataframe from the list of only the unique words
uniqueWordList = pd.DataFrame(list(filter(lambda w: w[1] != 'NONE' and w[1] !='MULTIPLE',final_list))
, columns=['word','state','length'])
#using numpy, count the unique values of state, and return them
vals,counts = np.unique(uniqueWordList.state, return_counts=True)
#Zip the results into a dict and print to screen
table = dict(zip(vals,counts))
table
| StarcoderdataPython |
8073365 | import math
import os
import time
import numpy as np
from paddle import fluid
from paddle.fluid import layers
from pytracking.features import augmentation
from pytracking.libs import dcf, operation, fourier
from pytracking.libs.optimization import ConjugateGradient, GaussNewtonCG, GradientDescentL2
from pytracking.libs.paddle_utils import mod, n2p, \
leaky_relu, dropout2d
from pytracking.libs.tensorlist import TensorList
from pytracking.tracker.atom.optim import FactorizedConvProblem, ConvProblem
from pytracking.tracker.base.basetracker import BaseTracker
class ATOM(BaseTracker):
def initialize_features(self):
if not getattr(self, 'features_initialized', False):
self.params.features.initialize()
self.features_initialized = True
def initialize(self, image, state, *args, **kwargs):
# Initialize some stuff
self.frame_num = 1
# TODO: for now, we don't support explictly setting up device
# if not hasattr(self.params, 'device'):
# self.params.device = 'cuda' if self.params.use_gpu else 'cpu'
# Initialize features
self.initialize_features()
# Check if image is color
self.params.features.set_is_color(image.shape[2] == 3)
# Get feature specific params
self.fparams = self.params.features.get_fparams('feature_params')
self.time = 0
tic = time.time()
# Get position and size
self.pos = np.array(
[state[1] + (state[3] - 1) / 2, state[0] + (state[2] - 1) / 2],
'float32')
self.target_sz = np.array([state[3], state[2]], 'float32')
# Set search area
self.target_scale = 1.0
search_area = np.prod(self.target_sz * self.params.search_area_scale)
if search_area > self.params.max_image_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.max_image_sample_size)
elif search_area < self.params.min_image_sample_size:
self.target_scale = math.sqrt(search_area /
self.params.min_image_sample_size)
# Check if IoUNet is used
self.use_iou_net = getattr(self.params, 'use_iou_net', True)
# Target size in base scale
self.base_target_sz = self.target_sz / self.target_scale
# Use odd square search area and set sizes
feat_max_stride = max(self.params.features.stride())
if getattr(self.params, 'search_area_shape', 'square') == 'square':
self.img_sample_sz = np.ones((2, ), 'float32') * np.round(
np.sqrt(
np.prod(self.base_target_sz *
self.params.search_area_scale)))
elif self.params.search_area_shape == 'initrect':
self.img_sample_sz = np.round(self.base_target_sz *
self.params.search_area_scale)
else:
raise ValueError('Unknown search area shape')
if self.params.feature_size_odd:
self.img_sample_sz += feat_max_stride - mod(self.img_sample_sz,
(2 * feat_max_stride))
else:
self.img_sample_sz += feat_max_stride - mod(
(self.img_sample_sz + feat_max_stride), (2 * feat_max_stride))
# Set sizes
self.img_support_sz = self.img_sample_sz
self.feature_sz = self.params.features.size(self.img_sample_sz)
self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output
self.kernel_size = self.fparams.attribute('kernel_size')
self.iou_img_sample_sz = self.img_sample_sz
# Optimization options
self.params.precond_learning_rate = self.fparams.attribute(
'learning_rate')
if self.params.CG_forgetting_rate is None or max(
self.params.precond_learning_rate) >= 1:
self.params.direction_forget_factor = 0
else:
self.params.direction_forget_factor = (
1 - max(self.params.precond_learning_rate)
)**self.params.CG_forgetting_rate
self.output_window = None
if getattr(self.params, 'window_output', False):
if getattr(self.params, 'use_clipped_window', False):
self.output_window = dcf.hann2d_clipped(
self.output_sz.astype('long'),
self.output_sz.astype('long') *
self.params.effective_search_area /
self.params.search_area_scale,
centered=False)
else:
self.output_window = dcf.hann2d(
self.output_sz.astype('long'), centered=False)
# Initialize some learning things
self.init_learning()
# Convert image
im = image.astype('float32')
self.im = im # For debugging only
# Setup scale bounds
self.image_sz = np.array([im.shape[0], im.shape[1]], 'float32')
self.min_scale_factor = np.max(10 / self.base_target_sz)
self.max_scale_factor = np.min(self.image_sz / self.base_target_sz)
# Extract and transform sample
x = self.generate_init_samples(im)
# Initialize iounet
if self.use_iou_net:
self.init_iou_net()
# Initialize projection matrix
self.init_projection_matrix(x)
# Transform to get the training sample
train_x = self.preprocess_sample(x)
# Generate label function
init_y = self.init_label_function(train_x)
# Init memory
self.init_memory(train_x)
# Init optimizer and do initial optimization
self.init_optimization(train_x, init_y)
self.pos_iounet = self.pos.copy()
self.time += time.time() - tic
def track(self, image):
self.frame_num += 1
# Convert image
# im = numpy_to_paddle(image)
im = image.astype('float32')
self.im = im # For debugging only
# ------- LOCALIZATION ------- #
# Get sample
sample_pos = self.pos.round()
sample_scales = self.target_scale * self.params.scale_factors
test_x = self.extract_processed_sample(im, self.pos, sample_scales,
self.img_sample_sz)
# Compute scores
scores_raw = self.apply_filter(test_x)
translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)
# Update position and scale
if flag != 'not_found':
if self.use_iou_net:
update_scale_flag = getattr(self.params,
'update_scale_when_uncertain',
True) or flag != 'uncertain'
if getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec)
self.refine_target_box(sample_pos, sample_scales[scale_ind],
scale_ind, update_scale_flag)
elif getattr(self.params, 'use_classifier', True):
self.update_state(sample_pos + translation_vec,
sample_scales[scale_ind])
# ------- UPDATE ------- #
# Check flags and set learning rate if hard negative
update_flag = flag not in ['not_found', 'uncertain']
hard_negative = (flag == 'hard_negative')
learning_rate = self.params.hard_negative_learning_rate if hard_negative else None
if update_flag:
# Get train sample
train_x = TensorList([x[scale_ind:scale_ind + 1] for x in test_x])
# Create label for sample
train_y = self.get_label_function(sample_pos,
sample_scales[scale_ind])
# Update memory
self.update_memory(train_x, train_y, learning_rate)
# Train filter
if hard_negative:
self.filter_optimizer.run(self.params.hard_negative_CG_iter)
elif (self.frame_num - 1) % self.params.train_skipping == 0:
self.filter_optimizer.run(self.params.CG_iter)
self.filter = self.filter_optimizer.x
# Set the pos of the tracker to iounet pos
if self.use_iou_net and flag != 'not_found':
self.pos = self.pos_iounet.copy()
# Return new state
yx = self.pos - (self.target_sz - 1) / 2
new_state = np.array(
[yx[1], yx[0], self.target_sz[1], self.target_sz[0]], 'float32')
return new_state.tolist()
def update_memory(self,
sample_x: TensorList,
sample_y: TensorList,
learning_rate=None):
replace_ind = self.update_sample_weights(
self.sample_weights, self.previous_replace_ind,
self.num_stored_samples, self.num_init_samples, self.fparams,
learning_rate)
self.previous_replace_ind = replace_ind
for train_samp, x, ind in zip(self.training_samples, sample_x,
replace_ind):
train_samp[ind] = x[0]
for y_memory, y, ind in zip(self.y, sample_y, replace_ind):
y_memory[ind] = y[0]
if self.hinge_mask is not None:
for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):
m[ind] = layers.cast(y >= self.params.hinge_threshold,
'float32')[0]
self.num_stored_samples += 1
def update_sample_weights(self,
sample_weights,
previous_replace_ind,
num_stored_samples,
num_init_samples,
fparams,
learning_rate=None):
# Update weights and get index to replace in memory
replace_ind = []
for sw, prev_ind, num_samp, num_init, fpar in zip(
sample_weights, previous_replace_ind, num_stored_samples,
num_init_samples, fparams):
lr = learning_rate
if lr is None:
lr = fpar.learning_rate
init_samp_weight = getattr(fpar, 'init_samples_minimum_weight',
None)
if init_samp_weight == 0:
init_samp_weight = None
s_ind = 0 if init_samp_weight is None else num_init
if num_samp == 0 or lr == 1:
sw[:] = 0
sw[0] = 1
r_ind = 0
else:
# Get index to replace
r_ind = np.argmin(sw[s_ind:], 0)
r_ind = int(r_ind + s_ind)
# Update weights
if prev_ind is None:
sw /= 1 - lr
sw[r_ind] = lr
else:
sw[r_ind] = sw[prev_ind] / (1 - lr)
sw /= sw.sum()
if init_samp_weight is not None and sw[:num_init].sum(
) < init_samp_weight:
sw /= init_samp_weight + sw[num_init:].sum()
sw[:num_init] = init_samp_weight / num_init
replace_ind.append(r_ind)
return replace_ind
def localize_target(self, scores_raw):
# Weighted sum (if multiple features) with interpolation in fourier domain
weight = self.fparams.attribute('translation_weight', 1.0)
scores_raw = weight * scores_raw
sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) *
scores_raw.size(3))
for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):
sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (
1 - np.array([ksz[0] % 2, ksz[1] % 2]) / sz))
scores_fs = fourier.sum_fs(sf_weighted)
scores = fourier.sample_fs(scores_fs, self.output_sz)
if self.output_window is not None and not getattr(
self.params, 'perform_hn_without_windowing', False):
scores *= self.output_window
if getattr(self.params, 'advanced_localization', False):
return self.localize_advanced(scores)
# Get maximum
max_score, max_disp = dcf.max2d(scores)
scale_ind = np.argmax(max_score, axis=0)[0]
max_disp = max_disp.astype('float32')
# Convert to displacements in the base scale
output_sz = self.output_sz.copy()
disp = mod((max_disp + output_sz / 2), output_sz) - output_sz / 2
# Compute translation vector and scale change factor
translation_vec = np.reshape(
disp[scale_ind].astype('float32'), [-1]) * (
self.img_support_sz / self.output_sz) * self.target_scale
translation_vec *= self.params.scale_factors[scale_ind]
# Shift the score output for visualization purposes
if self.params.debug >= 2:
sz = scores.shape[-2:]
scores = np.concatenate(
[scores[..., sz[0] // 2:, :], scores[..., :sz[0] // 2, :]], -2)
scores = np.concatenate(
[scores[..., sz[1] // 2:], scores[..., :sz[1] // 2]], -1)
return translation_vec, scale_ind, scores, None
def update_state(self, new_pos, new_scale=None):
# Update scale
if new_scale is not None:
self.target_scale = np.clip(new_scale, self.min_scale_factor,
self.max_scale_factor)
self.target_sz = self.base_target_sz * self.target_scale
# Update pos
inside_ratio = 0.2
inside_offset = (inside_ratio - 0.5) * self.target_sz
self.pos = np.maximum(
np.minimum(new_pos,
self.image_sz.astype('float32') - inside_offset),
inside_offset)
def get_label_function(self, sample_pos, sample_scale):
# Generate label function
train_y = TensorList()
target_center_norm = (self.pos - sample_pos) / (self.img_support_sz *
sample_scale)
for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):
center = sz * target_center_norm + 0.5 * np.array(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
train_y.append(dcf.label_function_spatial(sz, sig, center))
return train_y
def extract_sample(self,
im: np.ndarray,
pos: np.ndarray,
scales,
sz: np.ndarray,
debug_save_name):
return self.params.features.extract(im, pos, scales, sz,
debug_save_name)
def extract_processed_sample(self,
im: np.ndarray,
pos: np.ndarray,
scales,
sz: np.ndarray,
debug_save_name=None) -> (TensorList,
TensorList):
x = self.extract_sample(im, pos, scales, sz, debug_save_name)
return self.preprocess_sample(self.project_sample(x))
def apply_filter(self, sample_x: TensorList):
with fluid.dygraph.guard():
sample_x = sample_x.apply(n2p)
filter = self.filter.apply(n2p)
return operation.conv2d(sample_x, filter, mode='same').numpy()
def init_projection_matrix(self, x):
# Set if using projection matrix
self.params.use_projection_matrix = getattr(
self.params, 'use_projection_matrix', True)
if self.params.use_projection_matrix:
self.compressed_dim = self.fparams.attribute('compressed_dim', None)
proj_init_method = getattr(self.params, 'proj_init_method', 'pca')
if proj_init_method == 'pca':
raise NotImplementedError
elif proj_init_method == 'randn':
with fluid.dygraph.guard():
self.projection_matrix = TensorList([
None if cdim is None else layers.gaussian_random(
(cdim, ex.shape[1], 1, 1), 0.0,
1 / math.sqrt(ex.shape[1])).numpy()
for ex, cdim in zip(x, self.compressed_dim)
])
elif proj_init_method == 'np_randn':
rng = np.random.RandomState(0)
self.projection_matrix = TensorList([
None if cdim is None else rng.normal(
size=(cdim, ex.shape[1], 1, 1),
loc=0.0,
scale=1 / math.sqrt(ex.shape[1])).astype('float32')
for ex, cdim in zip(x, self.compressed_dim)
])
elif proj_init_method == 'ones':
self.projection_matrix = TensorList([
None if cdim is None else
np.ones((cdim, ex.shape[1], 1, 1),
'float32') / math.sqrt(ex.shape[1])
for ex, cdim in zip(x, self.compressed_dim)
])
else:
self.compressed_dim = x.size(1)
self.projection_matrix = TensorList([None] * len(x))
def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):
if getattr(self.params, '_feature_window', False):
x = x * self.feature_window
return x
def init_label_function(self, train_x):
# Allocate label function
self.y = TensorList([
np.zeros(
[self.params.sample_memory_size, 1, x.shape[2], x.shape[3]],
'float32') for x in train_x
])
# Output sigma factor
output_sigma_factor = self.fparams.attribute('output_sigma_factor')
self.sigma = output_sigma_factor * np.ones((2, ), 'float32') * (
self.feature_sz / self.img_support_sz *
self.base_target_sz).apply(np.prod).apply(np.sqrt)
# Center pos in normalized coords
target_center_norm = (self.pos - np.round(self.pos)) / (
self.target_scale * self.img_support_sz)
# Generate label functions
for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz,
self.kernel_size, train_x):
center_pos = sz * target_center_norm + 0.5 * np.array(
[(ksz[0] + 1) % 2, (ksz[1] + 1) % 2], 'float32')
for i, T in enumerate(self.transforms[:x.shape[0]]):
sample_center = center_pos + np.array(
T.shift, 'float32') / self.img_support_sz * sz
y[i] = dcf.label_function_spatial(sz, sig, sample_center)
# Return only the ones to use for initial training
return TensorList([y[:x.shape[0]] for y, x in zip(self.y, train_x)])
def init_memory(self, train_x):
# Initialize first-frame training samples
self.num_init_samples = train_x.size(0)
self.init_sample_weights = TensorList(
[np.ones(x.shape[0], 'float32') / x.shape[0] for x in train_x])
self.init_training_samples = train_x
# Sample counters and weights
self.num_stored_samples = self.num_init_samples.copy()
self.previous_replace_ind = [None] * len(self.num_stored_samples)
self.sample_weights = TensorList([
np.zeros(self.params.sample_memory_size, 'float32') for x in train_x
])
for sw, init_sw, num in zip(self.sample_weights,
self.init_sample_weights,
self.num_init_samples):
sw[:num] = init_sw
# Initialize memory
self.training_samples = TensorList(
[[np.zeros([cdim, x.shape[2], x.shape[3]], 'float32')] *
self.params.sample_memory_size
for x, cdim in zip(train_x, self.compressed_dim)])
def init_learning(self):
# Get window function
self.feature_window = TensorList(
[dcf.hann2d(sz) for sz in self.feature_sz])
# Filter regularization
self.filter_reg = self.fparams.attribute('filter_reg')
# Activation function after the projection matrix (phi_1 in the paper)
projection_activation = getattr(self.params, 'projection_activation',
'none')
if isinstance(projection_activation, tuple):
projection_activation, act_param = projection_activation
if projection_activation == 'none':
self.projection_activation = lambda x: x
elif projection_activation == 'relu':
self.projection_activation = layers.relu
elif projection_activation == 'elu':
self.projection_activation = layers.elu
elif projection_activation == 'mlu':
self.projection_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
# Activation function after the output scores (phi_2 in the paper)
response_activation = getattr(self.params, 'response_activation',
'none')
if isinstance(response_activation, tuple):
response_activation, act_param = response_activation
if response_activation == 'none':
self.response_activation = lambda x: x
elif response_activation == 'relu':
self.response_activation = layers.relu
elif response_activation == 'elu':
self.response_activation = layers.elu
elif response_activation == 'mlu':
self.response_activation = lambda x: layers.elu(leaky_relu(x, 1 / act_param), act_param)
else:
raise ValueError('Unknown activation')
def generate_init_samples(self, im: np.ndarray) -> TensorList:
"""Generate augmented initial samples."""
# Compute augmentation size
aug_expansion_factor = getattr(self.params,
'augmentation_expansion_factor', None)
aug_expansion_sz = self.img_sample_sz.copy()
aug_output_sz = None
if aug_expansion_factor is not None and aug_expansion_factor != 1:
aug_expansion_sz = (self.img_sample_sz *
aug_expansion_factor).astype('long')
aug_expansion_sz += (
aug_expansion_sz - self.img_sample_sz.astype('long')) % 2
aug_expansion_sz = aug_expansion_sz.astype('float32')
aug_output_sz = self.img_sample_sz.astype('long').tolist()
# Random shift operator
get_rand_shift = lambda: None
random_shift_factor = getattr(self.params, 'random_shift_factor', 0)
if random_shift_factor > 0:
get_rand_shift = lambda: ((np.random.uniform(size=[2]) - 0.5) * self.img_sample_sz * random_shift_factor).astype('long').tolist()
        # Create transformations
self.transforms = [augmentation.Identity(aug_output_sz)]
if 'shift' in self.params.augmentation:
self.transforms.extend([
augmentation.Translation(shift, aug_output_sz)
for shift in self.params.augmentation['shift']
])
if 'relativeshift' in self.params.augmentation:
get_absolute = lambda shift: (np.array(shift, 'float32') * self.img_sample_sz / 2).astype('long').tolist()
self.transforms.extend([
augmentation.Translation(get_absolute(shift), aug_output_sz)
for shift in self.params.augmentation['relativeshift']
])
if 'fliplr' in self.params.augmentation and self.params.augmentation[
'fliplr']:
self.transforms.append(
augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
if 'blur' in self.params.augmentation:
self.transforms.extend([
augmentation.Blur(sigma, aug_output_sz, get_rand_shift())
for sigma in self.params.augmentation['blur']
])
if 'scale' in self.params.augmentation:
self.transforms.extend([
augmentation.Scale(scale_factor, aug_output_sz,
get_rand_shift())
for scale_factor in self.params.augmentation['scale']
])
if 'rotate' in self.params.augmentation:
self.transforms.extend([
augmentation.Rotate(angle, aug_output_sz, get_rand_shift())
for angle in self.params.augmentation['rotate']
])
# Generate initial samples
init_samples = self.params.features.extract_transformed(
im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)
# Remove augmented samples for those that shall not have
for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
if not use_aug:
init_samples[i] = init_samples[i][0:1]
# Add dropout samples
if 'dropout' in self.params.augmentation:
num, prob = self.params.augmentation['dropout']
self.transforms.extend(self.transforms[:1] * num)
with fluid.dygraph.guard():
for i, use_aug in enumerate(
self.fparams.attribute('use_augmentation')):
if use_aug:
init_samples[i] = np.concatenate([
init_samples[i], dropout2d(
layers.expand(
n2p(init_samples[i][0:1]), (num, 1, 1, 1)),
prob,
is_train=True).numpy()
])
return init_samples
def init_optimization(self, train_x, init_y):
# Initialize filter
filter_init_method = getattr(self.params, 'filter_init_method', 'zeros')
self.filter = TensorList([
np.zeros([1, cdim, sz[0], sz[1]], 'float32')
for x, cdim, sz in zip(train_x, self.compressed_dim,
self.kernel_size)
])
if filter_init_method == 'zeros':
pass
elif filter_init_method == 'ones':
for idx, f in enumerate(self.filter):
self.filter[idx] = np.ones(f.shape,
'float32') / np.prod(f.shape)
elif filter_init_method == 'np_randn':
rng = np.random.RandomState(0)
for idx, f in enumerate(self.filter):
self.filter[idx] = rng.normal(
size=f.shape, loc=0,
scale=1 / np.prod(f.shape)).astype('float32')
elif filter_init_method == 'randn':
for idx, f in enumerate(self.filter):
with fluid.dygraph.guard():
self.filter[idx] = layers.gaussian_random(
f.shape, std=1 / np.prod(f.shape)).numpy()
else:
raise ValueError('Unknown "filter_init_method"')
# Get parameters
self.params.update_projection_matrix = getattr(
self.params, 'update_projection_matrix',
True) and self.params.use_projection_matrix
optimizer = getattr(self.params, 'optimizer', 'GaussNewtonCG')
# Setup factorized joint optimization
if self.params.update_projection_matrix:
self.joint_problem = FactorizedConvProblem(
self.init_training_samples, init_y, self.filter_reg,
self.fparams.attribute('projection_reg'), self.params,
self.init_sample_weights, self.projection_activation,
self.response_activation)
# Variable containing both filter and projection matrix
joint_var = self.filter.concat(self.projection_matrix)
# Initialize optimizer
analyze_convergence = getattr(self.params, 'analyze_convergence',
False)
if optimizer == 'GaussNewtonCG':
self.joint_optimizer = GaussNewtonCG(
self.joint_problem,
joint_var,
plotting=(self.params.debug >= 3),
analyze=True,
fig_num=(12, 13, 14))
elif optimizer == 'GradientDescentL2':
self.joint_optimizer = GradientDescentL2(
self.joint_problem,
joint_var,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
plotting=(self.params.debug >= 3),
debug=analyze_convergence,
fig_num=(12, 13))
# Do joint optimization
if isinstance(self.params.init_CG_iter, (list, tuple)):
self.joint_optimizer.run(self.params.init_CG_iter)
else:
self.joint_optimizer.run(self.params.init_CG_iter //
self.params.init_GN_iter,
self.params.init_GN_iter)
            # Get back the filter and projection matrix from the joint variable
len_x = len(self.joint_optimizer.x)
self.filter = self.joint_optimizer.x[:len_x // 2] # w2 in paper
self.projection_matrix = self.joint_optimizer.x[len_x //
2:] # w1 in paper
if analyze_convergence:
opt_name = 'CG' if getattr(self.params, 'CG_optimizer',
True) else 'GD'
for val_name, values in zip(['loss', 'gradient'], [
self.joint_optimizer.losses,
self.joint_optimizer.gradient_mags
]):
val_str = ' '.join(
['{:.8e}'.format(v.item()) for v in values])
file_name = '{}_{}.txt'.format(opt_name, val_name)
with open(file_name, 'a') as f:
f.write(val_str + '\n')
raise RuntimeError('Exiting')
# Re-project samples with the new projection matrix
compressed_samples = self.project_sample(self.init_training_samples,
self.projection_matrix)
for train_samp, init_samp in zip(self.training_samples,
compressed_samples):
for idx in range(init_samp.shape[0]):
train_samp[idx] = init_samp[idx]
self.hinge_mask = None
# Initialize optimizer
self.conv_problem = ConvProblem(self.training_samples, self.y,
self.filter_reg, self.sample_weights,
self.response_activation)
if optimizer == 'GaussNewtonCG':
self.filter_optimizer = ConjugateGradient(
self.conv_problem,
self.filter,
fletcher_reeves=self.params.fletcher_reeves,
direction_forget_factor=self.params.direction_forget_factor,
debug=(self.params.debug >= 3),
fig_num=(12, 13))
elif optimizer == 'GradientDescentL2':
self.filter_optimizer = GradientDescentL2(
self.conv_problem,
self.filter,
self.params.optimizer_step_length,
self.params.optimizer_momentum,
debug=(self.params.debug >= 3),
fig_num=12)
# Transfer losses from previous optimization
if self.params.update_projection_matrix:
self.filter_optimizer.residuals = self.joint_optimizer.residuals
self.filter_optimizer.losses = self.joint_optimizer.losses
if not self.params.update_projection_matrix:
self.filter_optimizer.run(self.params.init_CG_iter)
# Post optimization
self.filter_optimizer.run(self.params.post_init_CG_iter)
self.filter = self.filter_optimizer.x
# Free memory
del self.init_training_samples
if self.params.use_projection_matrix:
del self.joint_problem, self.joint_optimizer
def project_sample(self, x: TensorList, proj_matrix=None):
# Apply projection matrix
if proj_matrix is None:
proj_matrix = self.projection_matrix
with fluid.dygraph.guard():
return operation.conv2d(x.apply(n2p), proj_matrix.apply(n2p)).apply(
self.projection_activation).numpy()
def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
"""All inputs in original image coordinates"""
box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz
- 1) / 2
box_sz = sz / sample_scale
target_ul = box_center - (box_sz - 1) / 2
return np.concatenate([np.flip(target_ul, 0), np.flip(box_sz, 0)])
def get_iou_features(self):
return self.params.features.get_unique_attribute('iounet_features')
def get_iou_backbone_features(self):
return self.params.features.get_unique_attribute(
'iounet_backbone_features')
def init_iou_net(self):
# Setup IoU net
self.iou_predictor = self.params.features.get_unique_attribute(
'iou_predictor')
# Get target boxes for the different augmentations
self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz,
self.pos.round(),
self.target_scale)
target_boxes = TensorList()
if self.params.iounet_augmentation:
for T in self.transforms:
if not isinstance(
T, (augmentation.Identity, augmentation.Translation,
augmentation.FlipHorizontal,
augmentation.FlipVertical, augmentation.Blur)):
break
target_boxes.append(self.iou_target_box + np.array(
[T.shift[1], T.shift[0], 0, 0]))
else:
target_boxes.append(self.iou_target_box.copy())
target_boxes = np.concatenate(target_boxes.view(1, 4), 0)
# Get iou features
iou_backbone_features = self.get_iou_backbone_features()
# Remove other augmentations such as rotation
iou_backbone_features = TensorList(
[x[:target_boxes.shape[0], ...] for x in iou_backbone_features])
# Extract target feat
with fluid.dygraph.guard():
iou_backbone_features = iou_backbone_features.apply(n2p)
target_boxes = n2p(target_boxes)
target_feat = self.iou_predictor.get_filter(iou_backbone_features,
target_boxes)
self.target_feat = TensorList(
[layers.reduce_mean(x, 0).numpy() for x in target_feat])
if getattr(self.params, 'iounet_not_use_reference', False):
self.target_feat = TensorList([
                np.full_like(tf, np.linalg.norm(tf) / tf.size)
for tf in self.target_feat
])
def optimize_boxes(self, iou_features, init_boxes):
with fluid.dygraph.guard():
# Optimize iounet boxes
init_boxes = np.reshape(init_boxes, (1, -1, 4))
step_length = self.params.box_refinement_step_length
target_feat = self.target_feat.apply(n2p)
iou_features = iou_features.apply(n2p)
output_boxes = n2p(init_boxes)
for f in iou_features:
f.stop_gradient = False
for i_ in range(self.params.box_refinement_iter):
# forward pass
bb_init = output_boxes
bb_init.stop_gradient = False
outputs = self.iou_predictor.predict_iou(target_feat,
iou_features, bb_init)
if isinstance(outputs, (list, tuple)):
outputs = outputs[0]
outputs.backward()
# Update proposal
bb_init_np = bb_init.numpy()
bb_init_gd = bb_init.gradient()
output_boxes = bb_init_np + step_length * bb_init_gd * np.tile(
bb_init_np[:, :, 2:], (1, 1, 2))
output_boxes = n2p(output_boxes)
step_length *= self.params.box_refinement_step_decay
return layers.reshape(output_boxes, (
-1, 4)).numpy(), layers.reshape(outputs, (-1, )).numpy()
def refine_target_box(self,
sample_pos,
sample_scale,
scale_ind,
update_scale=True):
# Initial box for refinement
init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos,
sample_scale)
# Extract features from the relevant scale
iou_features = self.get_iou_features()
iou_features = TensorList(
[x[scale_ind:scale_ind + 1, ...] for x in iou_features])
init_boxes = np.reshape(init_box, (1, 4)).copy()
rand_fn = lambda a, b: np.random.rand(a, b).astype('float32')
if self.params.num_init_random_boxes > 0:
# Get random initial boxes
square_box_sz = np.sqrt(init_box[2:].prod())
rand_factor = square_box_sz * np.concatenate([
self.params.box_jitter_pos * np.ones(2),
self.params.box_jitter_sz * np.ones(2)
])
minimal_edge_size = init_box[2:].min() / 3
rand_bb = (rand_fn(self.params.num_init_random_boxes, 4) - 0.5
) * rand_factor
new_sz = np.clip(init_box[2:] + rand_bb[:, 2:], minimal_edge_size,
1e10)
new_center = (init_box[:2] + init_box[2:] / 2) + rand_bb[:, :2]
init_boxes = np.concatenate([new_center - new_sz / 2, new_sz], 1)
init_boxes = np.concatenate(
[np.reshape(init_box, (1, 4)), init_boxes])
# Refine boxes by maximizing iou
output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)
# Remove weird boxes with extreme aspect ratios
output_boxes[:, 2:] = np.clip(output_boxes[:, 2:], 1, 1e10)
aspect_ratio = output_boxes[:, 2] / output_boxes[:, 3]
keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * \
(aspect_ratio > 1 / self.params.maximal_aspect_ratio)
output_boxes = output_boxes[keep_ind, :]
output_iou = output_iou[keep_ind]
# If no box found
if output_boxes.shape[0] == 0:
return
# Take average of top k boxes
k = getattr(self.params, 'iounet_k', 5)
topk = min(k, output_boxes.shape[0])
inds = np.argsort(-output_iou)[:topk]
predicted_box = np.mean(output_boxes[inds, :], axis=0)
predicted_iou = np.mean(
np.reshape(output_iou, (-1, 1))[inds, :], axis=0)
# Update position
new_pos = predicted_box[:2] + predicted_box[2:] / 2 - (
self.iou_img_sample_sz - 1) / 2
new_pos = np.flip(new_pos, 0) * sample_scale + sample_pos
new_target_sz = np.flip(predicted_box[2:], 0) * sample_scale
new_scale = np.sqrt(
np.prod(new_target_sz) / np.prod(self.base_target_sz))
self.pos_iounet = new_pos.copy()
if getattr(self.params, 'use_iounet_pos_for_learning', True):
self.pos = new_pos.copy()
self.target_sz = new_target_sz
if update_scale:
self.target_scale = new_scale
def localize_advanced(self, scores):
"""Does the advanced localization with hard negative detection and target not found."""
sz = scores.shape[-2:]
if self.output_window is not None and getattr(
self.params, 'perform_hn_without_windowing', False):
scores_orig = scores.copy()
scores_orig = np.concatenate([
scores_orig[..., (sz[0] + 1) // 2:, :],
scores_orig[..., :(sz[0] + 1) // 2, :]
], -2)
scores_orig = np.concatenate([
scores_orig[..., :, (sz[1] + 1) // 2:],
scores_orig[..., :, :(sz[1] + 1) // 2]
], -1)
scores *= self.output_window
# Shift scores back
scores = np.concatenate([
scores[..., (sz[0] + 1) // 2:, :], scores[..., :(sz[0] + 1) // 2, :]
], -2)
scores = np.concatenate([
scores[..., :, (sz[1] + 1) // 2:], scores[..., :, :(sz[1] + 1) // 2]
], -1)
# Find maximum
max_score1, max_disp1 = dcf.max2d(scores)
scale_ind = np.argmax(max_score1, axis=0)[0]
max_score1 = max_score1[scale_ind]
max_disp1 = np.reshape(max_disp1[scale_ind].astype('float32'), (-1))
target_disp1 = max_disp1 - self.output_sz // 2
translation_vec1 = target_disp1 * (self.img_support_sz /
self.output_sz) * self.target_scale
if max_score1 < self.params.target_not_found_threshold:
return translation_vec1, scale_ind, scores, 'not_found'
if self.output_window is not None and getattr(
self.params, 'perform_hn_without_windowing', False):
scores = scores_orig
# Mask out target neighborhood
target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale
tneigh_top = int(max(round(max_disp1[0] - target_neigh_sz[0] / 2), 0))
tneigh_bottom = int(
min(round(max_disp1[0] + target_neigh_sz[0] / 2 + 1), sz[0]))
tneigh_left = int(max(round(max_disp1[1] - target_neigh_sz[1] / 2), 0))
tneigh_right = int(
min(round(max_disp1[1] + target_neigh_sz[1] / 2 + 1), sz[1]))
scores_masked = scores[scale_ind:scale_ind + 1, ...].copy()
scores_masked[..., tneigh_top:tneigh_bottom, tneigh_left:
tneigh_right] = 0
# Find new maximum
max_score2, max_disp2 = dcf.max2d(scores_masked)
max_disp2 = np.reshape(max_disp2.astype('float32'), (-1))
target_disp2 = max_disp2 - self.output_sz // 2
translation_vec2 = target_disp2 * (self.img_support_sz /
self.output_sz) * self.target_scale
# Handle the different cases
if max_score2 > self.params.distractor_threshold * max_score1:
disp_norm1 = np.sqrt(np.sum(target_disp1**2))
disp_norm2 = np.sqrt(np.sum(target_disp2**2))
disp_threshold = self.params.dispalcement_scale * math.sqrt(
sz[0] * sz[1]) / 2
if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:
return translation_vec1, scale_ind, scores, 'hard_negative'
if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:
return translation_vec2, scale_ind, scores, 'hard_negative'
if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:
return translation_vec1, scale_ind, scores, 'uncertain'
# If also the distractor is close, return with highest score
return translation_vec1, scale_ind, scores, 'uncertain'
if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:
return translation_vec1, scale_ind, scores, 'hard_negative'
return translation_vec1, scale_ind, scores, None
| StarcoderdataPython |
3501591 | <reponame>pratikadarsh/Algorithms
'''
* @file BinarySearch.py
* @author (original JAVA) <NAME>, <EMAIL>
* (conversion to Python) <NAME>, <EMAIL>
* @date 29 Jun 2020
* @version 0.1
* @brief BinarySearch implementation
'''
import math
class BinarySearch():
def __init__(self):
# Comparing double values directly is bad practice.
# Using a small epsilon value is the preferred approach
self.EPS = 0.00000001
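        # For example, 0.1 + 0.2 == 0.3 evaluates to False because of binary
        # floating point rounding, whereas abs((0.1 + 0.2) - 0.3) < EPS holds.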
def binarySearch(self, lo, hi, target, function):
if hi <= lo:
raise Exception("hi should be greater than lo")
mid = 0
while True:
# Find the middle point
mid = (hi + lo) / 2.0
# Compute the value of our function for the middle point
# Note that f can be any function not just the square root function
value = function(mid)
if value > target:
hi = mid
else:
lo = mid
            # Stop once the bracketing interval has shrunk below the tolerance
            if (hi - lo) <= self.EPS:
                break
return mid
# ToDo: The validity of the implementation must be checked!
if __name__ == '__main__':
"""
Example usage
"""
bs = BinarySearch()
# EXAMPLE #1
# Suppose we want to know what the square root of 875 is and
# we have no knowledge of the wonderful Math.sqrt() function.
# One approach is to use a binary search because we know that
# the square root of 875 is bounded in the region: [0, 875].
#
# We can define our function to be f(x) = x*x and our target
# value to be 875. As we binary search on f(x) approaching
# successively closer values of 875 we get better and better
# values of x (the square root of 875)
lo = 0.0
hi = 875.0
target = 875.0
sqr_fun = lambda x: x * x
sqrtVal = bs.binarySearch(lo, hi, target, sqr_fun)
print('sqrt({:.2f}) = {:.5f}, x^2 = {:.5f}\n'.format(target, sqrtVal, (sqrtVal * sqrtVal)))
# EXAMPLE #2
# Suppose we want to find the radius of a sphere with volume 100m^3 using
# a binary search. We know that for a sphere the volume is given by
# V = (4/3)*pi*r^3, so all we have to do is binary search on the radius.
#
# Note: this is a silly example because you could just solve for r, but it
# shows how binary search can be a powerful technique.
radiusLowerBound = 0
radiusUpperBound = 1000
volume = 100.0
sphereVolumeFunction = lambda r: (4.0 / 3.0) * math.pi * r * r * r
sphereRadius = bs.binarySearch(radiusLowerBound, radiusUpperBound, volume, sphereVolumeFunction)
print('Sphere radius = {:.5f}\n'.format(sphereRadius))
| StarcoderdataPython |
1851630 | <reponame>danzek/email-formality-detection
#!/usr/bin/env python
# -*- coding: utf_8 -*-
"""
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
Purdue University
CNIT499 Natural Language Technologies
Simple count features.
"""
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2014, <NAME>, Purdue University"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import re
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs', 'ms', 'prof', 'inc', 'viz', 'cf'])
def clean_up(sentences):
"""
The PunktSentenceTokenizer from NLTK incorrectly handles initial collocations such as 'e.g.' and 'i.e.'. There is a
PunktParameter for collocations, but it takes tuples containing the collocated initial word and words commonly
following it, which it learns from training data. For simplicity's sake, this method merely does a simple pattern
match on the named initial collocations and concatenates the sentences improperly divided by these.
"""
i = 0
indexes_to_concatenate = []
cleaned_up_sentence_list = []
for s in sentences:
if s.lower().endswith('e.g.') or s.lower().endswith('i.e.') or s.lower().endswith('p.s.') \
or s.lower().endswith('f.y.i.') or s.lower().endswith('m.s.') or s.lower().endswith('b.s.'):
indexes_to_concatenate.append(i)
i += 1
# circumvents bug in NLTK PunktTokenizer that separates last ! or ? in contiguous punctuation occurring at end of
# sentence as a separate sentence
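    # e.g. (hypothetical) "Really?!" may be returned as ["Really?", "!"], leaving a lone "!" element.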
if (len(sentences[-1].strip()) == 1) and (sentences[-1].strip() == '!' or sentences[-1].strip() == '?'):
del sentences[-1]
i = 0 # re-initialize index
if len(indexes_to_concatenate) > 0:
while i < len(sentences):
if i in indexes_to_concatenate:
new_sentence = sentences[i] + ' ' + sentences[i+1]
cleaned_up_sentence_list.append(new_sentence)
i += 2
else:
cleaned_up_sentence_list.append(sentences[i])
i += 1
else:
return sentences
return cleaned_up_sentence_list
def count_letters_except_first_cap(sent_list):
"""
:param sent_list:
:return (total letters - capital letters at beginning of sentences):
"""
letter_count = 0
for sent in sent_list:
#if sent[0].isalpha() is True and sent[0].islower() is True: # if the first letter is not capitalized
# letter_count += 1
for letter in sent[1:]: # count letters from the second letter
if letter.isalpha() is True: # count only alphabets
letter_count += 1
return letter_count
def count_sentences(email):
"""Returns total number of sentences in email body."""
return len(filter(None, prepare_email(email)))
def create_text_from_body(email):
"""Converts email body list object into string."""
text = ""
for line in email.enumerate_lines():
text += line + ' '
return text
def incorrect_first_person_pronoun_capitalization_count(email):
sentences = prepare_email(email)
pattern = re.compile(r"[\s]*[i]{1}[\s!?;:]+")
count = 0
for s in sentences:
hits = pattern.findall(s)
lowercase_first_person_singular_pronoun_count = len(hits)
if lowercase_first_person_singular_pronoun_count > 0:
count += lowercase_first_person_singular_pronoun_count
return count
def prepare_email(email):
"""Handles email object and returns list of sentences so that only one method must be used."""
text = create_text_from_body(email)
return split_sentences(text)
def punctuation(email):
listOutput = prepare_email(email)
punctFilter = re.compile("[\!?]{2,}")
exFilter = re.compile("[\!]{2,}")
quesFilter = re.compile("[\?]{2,}")
periodFilter = re.compile("[\.]{4,}")
punct_count = 0
for i in listOutput:
if re.search(punctFilter, i) or re.search(periodFilter, i) or re.search(exFilter, i) or re.search(quesFilter, i):
punct_count += 1
return punct_count
def punctRatio(email):
listOutput = prepare_email(email)
punct_count = punctuation(email)
sentence_count = len(listOutput)
try:
punct_Ratio = "%.1f" % ((float(punct_count) / sentence_count) * 100)
except ZeroDivisionError:
punct_Ratio = 0.0
return punct_Ratio
def ratio_cap_letters(email):
#def ratio_cap_letters(sent_list): # for testing
"""
calculates the ratio of upper case letters except for at the beginning of the sentence
# percentage = a/(b-c)
# a: total contiguously capitalized letters NOT at the beginning of sentence
# b: total letters
# c: capital letters at beginning of sentence
e.g. Instances where capital letters DO NOT naturally follow periods
--> returns 5
:param email:
:return:
"""
sent_list = prepare_email(email)
# filters the empty email from the beginning.
if len(sent_list) == 0: # if empty email --> return 0.00
return 0.00
letter_count = count_letters_except_first_cap(sent_list)
count = 0
for sent in sent_list:
word_list = sent.lstrip().split()
word_list = filter(None, word_list)
# print(word_list)
try:
first_word = str(word_list[0])
if len(first_word)>=2 and first_word.isupper() is True: # first word of the sentence
count += len(first_word) - 1 # except for the capital letter at the first
for word in word_list[1:]: # after the first word of the sentence
if len(word) >= 2 and word.isupper() is True: # e.g. don't consider "I" or "team A"
count += len(word)
except IndexError: # when the word is None value
pass
# percentage = a/(b-c)
# a: total contiguously capitalized letters NOT at the beginning of sentence
# b: total letters
# c: capital letters at beginning of sentence
try:
percentage = round(float(count)/letter_count, 2)
except ZeroDivisionError:
percentage = 0.0
return percentage
def ratio_incorrect_first_capitalization(email):
"""
:param email:
:return ratio of sentences start with incorrect capitalization:
"""
sent_list = prepare_email(email) # returns the list of sentences
not_cap = 0
for sent in sent_list:
# if the first word of the sentence is in lower case
if sent[0].isalpha() is True and sent[0].islower() is True:
not_cap += 1
count = count_sentences(email) # number of total sentences in an email
if count == 0: # if it is an empty email
percentage = 0.00
else:
percentage = round(float(not_cap)/count_sentences(email), 2) # round it to 2 decimal places
return percentage
def split_sentences(text):
"""Takes string containing email body and splits into sentences and cleans up list."""
sentence_splitter = PunktSentenceTokenizer(punkt_param)
sentences = sentence_splitter.tokenize(text)
return clean_up(sentences) | StarcoderdataPython |
3202673 | #!/usr/bin/env python
import os, signal, time, re
import unittest2 as unittest
import psi.process, subprocess
class GpsshTestCase(unittest.TestCase):
# return count of stranded ssh processes
def searchForProcessOrChildren(self):
euid = os.getuid()
count = 0
for p in psi.process.ProcessTable().values():
if p.euid != euid:
continue
if not re.search('ssh', p.command):
continue
if p.ppid != 1:
continue
count += 1
return count
def test00_gpssh_sighup(self):
"""Verify that gppsh handles sighup
and terminates cleanly.
"""
before_count = self.searchForProcessOrChildren()
p = subprocess.Popen("gpssh -h localhost", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid = p.pid
time.sleep(3)
try:
os.kill(int(pid), signal.SIGHUP)
except Exception:
pass
max_attempts = 6
for i in range(max_attempts):
after_count = self.searchForProcessOrChildren()
error_count = after_count - before_count
if error_count:
if (i + 1) == max_attempts:
self.fail("Found %d new stranded gpssh processes after issuing sig HUP" % error_count)
time.sleep(.5)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
8049522 | # Generated by Django 3.1.2 on 2020-10-07 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sim', '0009_auto_20200717_0031'),
]
operations = [
migrations.AlterField(
model_name='sim',
name='daysEcmo_cat2',
field=models.PositiveSmallIntegerField(blank=True),
),
migrations.AlterField(
model_name='sim',
name='daysEcmo_cat3',
field=models.PositiveSmallIntegerField(blank=True),
),
migrations.AlterField(
model_name='sim',
name='daysVent_cat1',
field=models.PositiveSmallIntegerField(blank=True),
),
migrations.AlterField(
model_name='sim',
name='daysVent_cat3',
field=models.PositiveSmallIntegerField(blank=True),
),
]
| StarcoderdataPython |
4995716 | #Puts objective vectors on top of video.
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import os
import neat
import sys
import pickle
#TODO: meas history -> get size. make numpy array. send to ANN -> plot.
# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
video_id = 0
scenario = "D3_battle_no_ammo"
cap = cv2.VideoCapture('vid'+scenario+str(video_id)+'.avi')
meas_file = "meas_history"+scenario+str(video_id)+".csv" #The meas recorded for each frame. ANN can convert to objectives.
meas_array = np.genfromtxt(meas_file, delimiter = " ")
objs_array = meas_array
winner_filename = scenario+"/winner_network.pickle"
with open(winner_filename, 'rb') as pickle_file:
winner_genome = pickle.load(pickle_file)
config_file = "config"
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
net = neat.nn.FeedForwardNetwork.create(winner_genome, config)
timestep_counter = 0
for timestep in meas_array:
objectives = net.activate(timestep)
o_count = 0
for o in objectives:
objs_array[timestep_counter][o_count] = o
o_count+=1
timestep_counter+=1
# Check if camera opened successfully
if (cap.isOpened() == False):
print("Error opening video stream or file")
objective_names = ["Ammunition", "Health", "Attack"]
# Plot loc and size in pixels
plot_upper_x = 0
plot_x_width = 100
plot_upper_y = 0
plot_y_width = 50
bar_width = 15
#frames_skipped_during_eval = 4 #Skipped every 4 frames during eval -> the objectives measured last 4 "real" frames.
folder = "superimposed_vid_" + scenario
if not os.path.exists(folder):
os.makedirs(folder)
def convert_plotted_values_y_axis(plotted_objective_values):
# Converts the raw values we want to plot to fit inside the image frame.
converted_values = (1 - plotted_objective_values) + 1 # Since the y-axis in images is "upside down"
    # Values now range from 0 to 2, where 2 are those that were previously -1, and 0 are those that were previously 1.
# Stretching out the y-range.
converted_values = converted_values * plot_y_width
return converted_values
def convert_plotted_values_x_axis(plotted_objective_values):
    # Stretching out the x-range.
#First making 0 the minimum, then stretching out.
converted_values = (1+plotted_objective_values) * plot_x_width
return converted_values
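    # e.g. with plot_x_width == 100, an objective value of -1 maps to 0 px,
    # 0 maps to 100 px and 1 maps to 200 px along the bar axis.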
linestyles = ["-", "--", "-."]
colors = ['blue', 'orange', 'green']
counter = 0
# Read until video is completed
#TODO Plot a line showing where obj_val = 0 goes.
while (cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
#Converting to the colors python expects.
b_channel, g_channel, r_channel = cv2.split(frame)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 50 # creating a dummy alpha channel image.
frame = cv2.merge((r_channel,g_channel,b_channel))
#Trying to superimpose graph
print("Img shape: ", frame.shape)
fig, ax = plt.subplots()
ax.imshow(frame)
#x=np.linspace(0,frame_counter)
objcounter = 0
#Testing bar chart instead of line.
y_pos = [25, 55, 85] #TODO Generalize
for obj in objective_names:
ax.barh(y_pos[objcounter], convert_plotted_values_x_axis(objs_array[counter][objcounter]), bar_width, align='center', color=colors[objcounter],
ecolor='black')
#print("lin shape:", x.shape)
#print("obj shape: ",objective_values[0:frame_counter,objcounter].shape)
# if counter < plot_x_width:
#Before we have many frames, we just plot all obj vals.
# ax.plot(convert_plotted_values(objs_array[0:counter, objcounter]), linewidth = 5, label=obj, alpha=0.7, linestyle = linestyles[objcounter])
# else:
# ax.plot(convert_plotted_values(objs_array[counter-plot_x_width:counter, objcounter]), linewidth = 5, label=obj, alpha=0.7, linestyle = linestyles[objcounter])
objcounter+=1
ax.legend(objective_names, loc='upper right',fontsize=15)
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(tick.NullLocator())
plt.gca().yaxis.set_major_locator(tick.NullLocator())
plt.savefig(folder+"/"+str(counter)+".png",bbox_inches="tight", pad_inches=0)
plt.close(fig)
        counter += 1
    else:
        # No frame could be read, so the video is finished; stop the loop.
        break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
| StarcoderdataPython |
3524715 | <reponame>DanielSBrown/osf.io
# -*- coding: utf-8 -*-
import os
from tabulate import tabulate
from framework.mongo import database
from website import settings
from .utils import mkdirp
user_collection = database['user']
TAB_PATH = os.path.join(settings.ANALYTICS_PATH, 'tables', 'features')
mkdirp(TAB_PATH)
def get_profile_counts():
users_total = user_collection.find()
query_jobs = {'jobs': {'$nin': [None, []]}}
query_schools = {'schools': {'$nin': [None, []]}}
query_social = {'social': {'$nin': [None, {}]}}
users_jobs = user_collection.find(query_jobs)
users_schools = user_collection.find(query_schools)
users_social = user_collection.find(query_social)
users_any = user_collection.find({
'$or': [
query_jobs,
query_schools,
query_social,
]
})
return {
'total': users_total.count(),
'jobs': users_jobs.count(),
'schools': users_schools.count(),
'social': users_social.count(),
'any': users_any.count(),
}
def main():
counts = get_profile_counts()
keys = ['total', 'jobs', 'schools', 'social', 'any']
table = tabulate(
[[counts[key] for key in keys]],
headers=keys,
)
with open(os.path.join(TAB_PATH, 'extended-profile.txt'), 'w') as fp:
fp.write(table)
if __name__ == '__main__':
main()
| StarcoderdataPython |
6530902 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Items schema for marshmallow loader."""
from invenio_records_rest.schemas import RecordMetadataSchemaJSONV1
from marshmallow import fields
from invenio_app_ils.records.api import EItem, Item
class ItemSchemaV1(RecordMetadataSchemaJSONV1):
"""Item schema."""
def get_pid_field(self):
"""Return pid_field value."""
return Item.pid_field
document_pid = fields.Str(required=True) # TODO: validate
internal_location_pid = fields.Str(required=True) # TODO: validate
legacy_id = fields.Str()
legacy_library_id = fields.Str()
circulation_restriction = fields.Str() # TODO: this should be an enum
barcode = fields.Str()
shelf = fields.Str()
description = fields.Str()
_internal_notes = fields.Str()
medium = fields.Str() # TODO: this should be an enum
status = fields.Str() # TODO: this should be an enum
class EItemSchemaV1(RecordMetadataSchemaJSONV1):
"""EItem schema."""
def get_pid_field(self):
"""Return pid_field value."""
return EItem.pid_field
document_pid = fields.Str(required=True) # TODO: validate
description = fields.Str()
_internal_notes = fields.Str()
open_access = fields.Bool(default=True)
| StarcoderdataPython |
8038279 | <filename>analysis/mf_grc_analysis/2share/2share_fuzz_201114.py
from collections import defaultdict
import sys
import random
import numpy as np
import compress_pickle
import importlib
import copy
sys.path.insert(0, '/n/groups/htem/Segmentation/shared-dev/cb2_segmentation/segway.graph.tmn7')
# from segway.graph.synapse_graph import SynapseGraph
# from segway.graph.plot_adj_mat import plot_adj_mat
sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
# from tools import *
# import tools_mf_graph
from my_plot import MyPlotData
def to_ng_coord(coord):
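    # Assumption: the divisors reflect a 4 x 4 x 40 nm voxel size, so this converts
    # nanometre coordinates into neuroglancer voxel coordinates.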
return (
int(coord[0]/4),
int(coord[1]/4),
int(coord[2]/40),
)
input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114_restricted_z.gz')
grcs = [k for k in input_graph.grcs.keys()]
print(f"GT unique patterns: {input_graph.count_unique_patterns()}")
print(f"GT unsampled mfs: {input_graph.count_unsampled_mfs()}")
input_graph.print_mfs_connected_summary()
mpd = MyPlotData()
def compute_mf_share():
count = defaultdict(lambda: defaultdict(list))
for i in range(len(grcs)):
i_set = set(input_graph.grcs[grcs[i]].mfs)
for j in range(len(grcs)):
if i == j:
continue
j_set = set(input_graph.grcs[grcs[j]].mfs)
common_mfs = i_set & j_set
if len(common_mfs) > 1:
count[grcs[i]][len(common_mfs)].append(grcs[j])
return count
mpd_true = MyPlotData()
mf_share_true_data = compute_mf_share()
'''doubles'''
histogram = defaultdict(int)
doubles_count = []
mf_share_threshold = 2
for grc in grcs:
sum = 0
for share_val in mf_share_true_data[grc]:
if share_val >= mf_share_threshold:
sum += len(mf_share_true_data[grc][share_val])
# doubles_count.append(sum)
histogram[sum] += 1
# mpd.add_data_point(kind='Data', num_partners=sum)
# test = mpd.to_histogram()
for k in range(max(histogram.keys())+1):
# print(f'{k}, {histogram[k]}')
mpd_true.add_data_point(kind='Data', num_partners=k, count=histogram[k])
mpd_true_cdf = copy.deepcopy(mpd_true)
mpd_true_cdf = mpd_true_cdf.to_pdf(count_var='count', cumulative=True)
mpd_true = mpd_true.to_pdf(count_var='count', cumulative=False)
import my_plot
importlib.reload(my_plot)
from my_plot import MyPlotData, my_relplot
def count_shuffle_2share(mf_share):
mpd = MyPlotData()
histogram = defaultdict(int)
mf_share_threshold = 2
for grc in grcs:
sum = 0
for share_val in mf_share[grc]:
if share_val >= mf_share_threshold:
sum += len(mf_share[grc][share_val])
histogram[sum] += 1
for k in range(max(histogram.keys())+1):
mpd.add_data_point(kind='Shuffle', num_partners=k, count=histogram[k])
mpd_cdf = copy.deepcopy(mpd)
mpd = mpd.to_pdf(count_var='count', cumulative=False)
mpd_cdf = mpd_cdf.to_pdf(count_var='count', cumulative=True)
return mpd, mpd_cdf
# def randomize_graph():
# def add_shuffle_data():
# input_graph.randomize_graph()
# count_2share()
random.seed(0)
mpd_all = MyPlotData()
mpd_all_cdf = MyPlotData()
# mf_dist_margin = 500
# mf_dist_margin = 10000
# mf_dist_margin = 4000
mf_dist_margin = 6000
mf_dist_margin = 8000
mf_dist_margin = 20000
n_random = 1000
n_random = 5
print("Generating random graphs")
# for i in range(1000):
for i in range(n_random):
print(i)
input_graph.fuzz_graph_by_grc(
mf_dist_margin=mf_dist_margin,
remove_empty=True,
)
print(f"Random graph unique patterns: {input_graph.count_unique_patterns()}")
print(f"Random graph unsampled mfs: {input_graph.count_unsampled_mfs()}/{len(input_graph.mfs)}")
input_graph.print_mfs_connected_summary()
print()
mf_share_data = compute_mf_share()
mpd, mpd_cdf = count_shuffle_2share(mf_share_data)
mpd_all.append(mpd)
mpd_all_cdf.append(mpd_cdf)
mpd_all.append(mpd_true)
mpd_all_cdf.append(mpd_true_cdf)
importlib.reload(my_plot); my_plot.my_relplot(
mpd_all_cdf, y='count', x='num_partners', hue='kind',
kind='line',
hue_order=['Data', 'Shuffle'],
context='paper',
xlim=(0, 13),
ylim=[0, 1.01],
height=4,
aspect=1.33,
y_axis_label='Cumulative Distribution',
x_axis_label='# of other similar patterns',
save_filename=f'fuzz_{mf_dist_margin}_{n_random}_cdf_201114.svg',
)
importlib.reload(my_plot); my_plot.my_catplot(
mpd_all, y='count', x='num_partners', hue='kind',
kind='bar',
context='paper',
xlim=(0, 13),
height=4,
aspect=1.33,
y_axis_label='Cumulative Distribution',
x_axis_label='# of other similar patterns',
save_filename=f'fuzz_{mf_dist_margin}_{n_random}_bar_201114.svg',
)
| StarcoderdataPython |
182120 | <reponame>2600box/harvest<gh_stars>1-10
# Generated by Django 2.1.7 on 2019-03-22 10:30
from django.db import migrations
import torrents.fields
class Migration(migrations.Migration):
dependencies = [
('redacted', '0006_auto_20190303_2008'),
]
operations = [
migrations.AlterField(
model_name='redactedtorrent',
name='info_hash',
field=torrents.fields.InfoHashField(db_index=True, max_length=40),
),
]
| StarcoderdataPython |
12827906 | #-*- coding:utf-8 -*-
"""convert_grantha.py
"""
from __future__ import print_function
import sys,re,codecs
import transcoder
transcoder.transcoder_set_dir('transcoder')
transcode = transcoder.transcoder_processString # convenience
def parse_filename(filein):
tranin_known = ['slp1','roman','hk']
m = re.search(r'^(.*)_(.*)[.]txt$',filein)
if m:
pfx,tranin = (m.group(1),m.group(2))
if (not m) or (tranin not in tranin_known):
endings = ['_%s.txt' %t for t in tranin_known]
print("filein error: require filename ending in one of %s" %endings)
exit(1)
return pfx,tranin
def generate_text(lines,tranout,classname,filein_pfx,title):
fileout = "%s_%s.txt" %(filein_pfx,classname)
with codecs.open(fileout,"w","utf-8") as f:
f.write(title+'\n')
for x in lines:
y = transcode(x,'slp1',tranout)
f.write(y+'\n')
print('%s lines written to %s' %(len(lines),fileout))
styles={
'grantha':
"""
@font-face {
src: url('../granthafonts/e-Grantamil.ttf');
font-family: grantamil;
/*https://web.archive.org/web/20070506074247/http://www.uni-hamburg.de/Wiss/FB/10/IndienS/Kniprath/INDOLIPI/Indolipi.htm
*/
}
.grantha {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil;
}
""",
'grantha-sd':
"""
@font-face {
src: url('../granthafonts/e-Grantamil-sd.ttf');
font-family: grantamil-sd;
/*$ wget --no-check-certificate https://sanskritdocuments.org/css/fonts/e-Grantamil.ttf*/
}
.grantha-sd {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil-sd;
}
""",
'grantha7':
"""
@font-face {
src: url('../granthafonts/e-Grantamil 7.ttf');
font-family: grantamil7;
/*https://www.aai.uni-hamburg.de/indtib/studium/materialien.html
*/
}
.grantha7 {
color:teal;
font-size:20px;
font-weight: normal;
font-family: grantamil7;
}
""",
'deva':
"""
@font-face {
src: url('../granthafonts/siddhanta.ttf');
font-family: devanagari;
}
.deva {
color:teal;
font-size:20px;
font-weight: normal;
font-family: devanagari;
}
"""
}
def generate_html_head(tranout,classname,filein_pfx):
parts=re.split(r'/',filein_pfx)
fileshow = parts[-1]
style = styles[classname]
outlinestr="""<html>
<head>
<title>%s:%s</title>
<meta charset="UTF-8">
<style>
%s
</style>
</head>
""" %(fileshow,classname,style)
outlines = outlinestr.splitlines()
return outlines
def generate_html(lines,tranout,classname,filein_pfx,title):
# lines assumed coded in slp1
fileout = "%s_%s.html" %(filein_pfx,classname)
head = generate_html_head(tranout,classname,filein_pfx)
def oneline(x):
y = transcode(x,'slp1',tranout)
z = "<span class='%s'>%s</span><br/>" %(classname,y)
return z
linesout = [oneline(x) for x in lines]
titleline = ['<H2>%s</H2>' % title]
body = ['<body>'] + titleline + linesout + ['</body>']
tail = ['</html>']
outlines = head + body + tail
with codecs.open(fileout,"w","utf-8") as f:
for y in outlines:
f.write(y+'\n')
print('%s lines written to %s' %(len(outlines),fileout))
if __name__ == "__main__":
filein = sys.argv[1]
filein_pfx,tranin = parse_filename(filein)
with codecs.open(filein,'r','utf-8') as f:
linesin0 = [x.rstrip('\r\n').rstrip() for x in f]
title = linesin0[0]
linesin = linesin0[1:]
# get lines_slp1 from tranin and linesin
if tranin == 'slp1':
lines_slp1 = linesin
else:
lines_slp1 = []
for x in linesin:
line = transcode(x,tranin,'slp1')
lines_slp1.append(line)
# also, write lines_slp1
fileout = '%s_slp1.txt' % filein_pfx
with codecs.open(fileout,"w","utf-8") as f:
f.write(title+'\n')
for x in lines_slp1:
f.write(x + '\n')
print('%s lines written to %s' %(len(lines_slp1),fileout))
# generate text files:
parmout = [
# (tranout, classname = fileoutsfx)
('grantha2','grantha'),
('grantha2','grantha-sd'),
('grantha3','grantha7'),
('deva','deva')
]
# get different versions of text files
for tranout, classname in parmout:
generate_text(lines_slp1,tranout,classname,filein_pfx,title)
# different versions of html files
for tranout, classname in parmout:
generate_html(lines_slp1,tranout,classname,filein_pfx,title)
| StarcoderdataPython |
6485222 | import cantoseg
def test_cut():
result = cantoseg.cut('香港喺舊石器時代就有人住')
answer = ['香港', '喺', '舊石器時代', '就', '有人', '住']
assert result == answer
def test_lcut():
result = list(cantoseg.lcut('香港喺舊石器時代就有人住'))
answer = ['香港', '喺', '舊石器時代', '就', '有人', '住']
assert result == answer
| StarcoderdataPython |
4809557 | from django.test import TestCase
import json
import jwt
from rest_framework import status
from rest_framework.test import APIClient
class TestAPI(TestCase):
def test_signUp(self):
client = APIClient()
response = client.post(
'/user/',
{
"username": "esteban",
"password": "<PASSWORD>",
"name": "esteban",
"email": "<EMAIL>",
"account": {
"lastChangeDate": "2021-09-23T10:25:43.511Z",
"balance": 20000,
"isActive": "true"
}
},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual('refresh' in response.data.keys(), True)
self.assertEqual('access' in response.data.keys(), True)
# Create your tests here.
| StarcoderdataPython |
11248507 | <gh_stars>0
"""The tests for the Modbus init."""
import pytest
import voluptuous as vol
from homeassistant.components.modbus import number
async def test_number_validator():
"""Test number validator."""
# positive tests
value = number(15)
assert isinstance(value, int)
value = number(15.1)
assert isinstance(value, float)
value = number("15")
assert isinstance(value, int)
value = number("15.1")
assert isinstance(value, float)
# exception test
try:
value = number("x15.1")
except (vol.Invalid):
return
pytest.fail("Number not throwing exception")
| StarcoderdataPython |
11356824 | <gh_stars>100-1000
"""Just print something to stdout."""
print("Hello, world.")
| StarcoderdataPython |
5146559 |
from StringIO import StringIO
from textwrap import dedent
from urlparse import urlparse
import email.utils
import httplib
import mimetypes
import os
import json
import sys
import traceback
from KASignature import KASignature
VERSION = '1.1.0'
# Configuration
UPLOAD_ENDPOINT = 'https://upload-api.kooaba.com/'
BUCKET_ID = '<enter-bucket-id>'
DATA_KEY_SECRET_TOKEN = '<enter-secret-token>'
# with KA auth, both http and https are possible
QUERY_ENDPOINT= 'https://query-api.kooaba.com/v4/query'
QUERY_KEY_SECRET_TOKEN = '<enter-secret-token>'
QUERY_KEY_ID='<enter-key-id>' ## only needed for KA authentication
class BasicAPIClient:
""" Client for kooaba API V4. """
def __init__(self, secret_token, key_id=None):
self.KA = KASignature(secret_token)
self.key_id=key_id
self.secret_token=secret_token
#### QUERY API
def query(self,filename, auth_method='Token'):
data, content_type = self.data_from_file(filename)
content_type, body=self.encode_multipart_formdata([],[('image', filename, data)])
(response, body) = self._send_request('POST', QUERY_ENDPOINT, bytearray(body), content_type, auth_method)
return json.loads(body)
#### UPLOAD API (subset of available methods)
def create_item(self, bucket_id, title, refid, json_string):
url=UPLOAD_ENDPOINT+'api/v4/buckets/'+bucket_id+'/items'
metadata=json.loads(json_string)
data= {"title":title, "reference_id":refid, "metadata":metadata}
(response, body) = self._send_request('POST', url, json.dumps(data), 'application/json')
return json.loads(body)
def attach_image(self, bucket_id, item_id, content_type, data):
url=UPLOAD_ENDPOINT+'api/v4/items/'+item_id+'/images'
(response, body) = self._send_request('POST', url, bytearray(data), content_type)
return json.loads(body)
def replace_metadata(self, item_id, json_string):
url=UPLOAD_ENDPOINT+'api/v4/items/'+item_id
metadata=json.loads(json_string)
data= {"metadata": metadata}
(response, body) = self._send_request('PUT', url, json.dumps(data), 'application/json')
return json.loads(body)
## HELPER METHODS
def data_from_file(self,filename):
content_type, _encoding = mimetypes.guess_type(filename)
with open(filename, 'rb') as f:
return f.read() , content_type
def _send_request(self, method, api_path, data=None, content_type=None, auth_method='Token'):
""" Send (POST/PUT/GET/DELETE according to the method) data to an API
node specified by api_path.
Returns tuple (response, body) as returned by the API call. The
response is a HttpResponse object describint HTTP headers and status
line.
Raises exception on error:
- IOError: Failure performing HTTP call
- RuntimeError: Unsupported transport scheme.
- RuntimeError: API call returned an error.
"""
if True:
if data is None:
sys.stderr.write( " > " + method +' '+ api_path)
elif len(data) < 4096:
print " > %s ...%s:\n > %s" % (method, api_path, data)
else:
print " > %s ...%s: %sB" % (method, api_path, len(data))
parsed_url = urlparse(api_path)
if ((parsed_url.scheme != 'https') and (parsed_url.scheme != 'http')):
raise RuntimeError("URL scheme '%s' not supported" % parsed_url.scheme)
port = parsed_url.port
        if port is None:
            # Only fall back to the scheme default when no port is given explicitly
            port = 443 if parsed_url.scheme == 'https' else 80
host = parsed_url.hostname
if (parsed_url.scheme == 'https'):
http = httplib.HTTPSConnection(host, port )
elif (parsed_url.scheme == 'http'):
http = httplib.HTTPConnection(host, port )
else:
raise RuntimeError("URL scheme '%s' not supported" % parsed_url.scheme)
try:
date = email.utils.formatdate(None, localtime=False, usegmt=True)
if auth_method=='KA':
signature = self.KA.sign(method, data, content_type, date, parsed_url.path)
headers = {'Authorization': 'KA %s:%s' % (self.key_id,signature),'Date': date}
print "signature: "+headers['Authorization']
else: # Token
headers = {'Authorization': 'Token %s' % (self.secret_token),'Date': date}
if content_type is not None:
headers['Content-Type'] = content_type
if data is not None:
headers['Content-Length'] = str(len(data))
try:
http.request(method, parsed_url.path, body=data, headers=headers)
except Exception, e:
raise #IOError("Error during request: %s: %s" % (type(e), e))
response = http.getresponse()
# we have to read the response before the http connection is closed
body = response.read()
if True:
sys.stderr.write( " < " + str(response.status) + ' ' + str(response.reason)+'\n')
sys.stderr.write( " < " + body+'\n')
if (response.status < 200) or (response.status > 299):
raise RuntimeError("API call returned status %s %s. Message: %s" % (response.status, response.reason, body))
return (response, body)
finally:
http.close()
def encode_multipart_formdata(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % self.get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(self, filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
### example usage of api
def main():
query_example()
#upload_example()
def query_example():
#simple example of KA signature
signer=KASignature('aSecretKey')
signature=signer.sign('POST', 'aBody','text/plain', 'Sun, 06 Nov 1994 08:49:37 GMT', '/v2/query')
print signature
print 'expected: '+'6XVAzB+hA9JEFWZdg+1ssZ+gfRo='
# perform an actual request
client = BasicAPIClient(QUERY_KEY_SECRET_TOKEN, QUERY_KEY_ID)
try:
result =client.query('../images/query_image.jpg', 'KA')
except:
print "call failed:" , sys.exc_info()
traceback.print_exc()
def upload_example():
client = BasicAPIClient(DATA_KEY_SECRET_TOKEN)
# loading an image into memory
data, content_type = client.data_from_file('../images/db_image.jpg')
try:
item =client.create_item(BUCKET_ID, 'aTitle', 'myRefId', '{}')
print 'created item '+item['uuid']
images=client.attach_image(BUCKET_ID, item['uuid'], content_type, data)
print 'attached image '+images[0]['sha1']
except:
print "Upload failed:", sys.exc_info()
traceback.print_exc()
if __name__ == '__main__':
import sys
sys.exit(main())
| StarcoderdataPython |
9724490 | import tensorflow as tf
import tensorflow.contrib.slim as slim
from utils import box_utils
def batch_roi_pooling(features_batch, rois_normalized_batch, params):
"""
Args:
features_batch:
rois_batch:
img_shape:
Returns:
"""
with tf.variable_scope("ROIPooling"):
rois_normalized_batch = tf.stop_gradient(rois_normalized_batch)
rois_shape = tf.shape(rois_normalized_batch)
ones_mat = tf.ones(rois_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(tf.range(start=0, limit=rois_shape[0]), 1)
box_ind_batch = tf.reshape(ones_mat * multiplier, [-1])
rois_normalized_batch = tf.map_fn(box_utils.convert_xyxy_to_yxyx_format, rois_normalized_batch)
roi_features_cropped = tf.image.crop_and_resize(features_batch,
tf.reshape(rois_normalized_batch, [-1, 4]),
box_ind = box_ind_batch,
crop_size = params.roi_crop_size
)
roi_features = slim.max_pool2d(roi_features_cropped,
[params.roi_pool_kernel_size, params.roi_pool_kernel_size],
stride=params.roi_pool_kernel_size)
return roi_features
| StarcoderdataPython |