from elasticsearchModel import elastic
from collections import defaultdict
from clustering import cluster
from PCA import pca_model
from PIL import Image
import numpy
import os
def extract_X(uuid):
response = elastic.fetch_all_images(uuid)
img_names = []
for res in response['hits']['hits']:
for file_name in res['_source']['files']:
img_names.append((file_name, res['_source']['name']))
X = numpy.zeros((len(img_names), 100 * 100 * 3))
for i, face_img_path in enumerate(img_names):
im = Image.open(face_img_path[0])
im = numpy.array(im).flatten()
X[i, :] = im
# X_red = pca_model.dim_reduction(X)
print(X.shape)
return X, img_names
def group_pics(uuid):
X, names = extract_X(uuid)
groups = cluster.cluster(X, names)
img_gps = defaultdict(list)
for gp in groups:
for img in groups[gp]:
img_gps[img].append(gp)
return img_gps
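# Sketch of the returned structure (added note; exact group labels depend on cluster.cluster):
# img_gps maps each (file_name, person_name) tuple to the list of cluster ids it was assigned to,
# e.g. {('face_001.jpg', 'alice'): [0], ('face_002.jpg', 'bob'): [0, 2]} (filenames hypothetical).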
|
###########################
# To run the interactive data explorer:
# Check that streamlit is installed in the conda env,
# or install it with $ conda install -c conda-forge streamlit
# Then run $ streamlit run data_explorer.py
###########################
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import streamlit as st
########################### Preparing Data ################################
parent_folder = os.getcwd()
data_folder = parent_folder + '/data/'
### extracts data from all .json files in the folder
### appends them into a long table format
### site name and data type attributes are extracted from the folder/file paths and added
all_data = pd.DataFrame()
folders = {
'riverdee/deeflow/': 'river',
'riverdee/deerain/': 'rain'}
for folder in folders:
for dirname, _, filenames in os.walk(data_folder + folder):
for filename in filenames:
if filename.endswith('.json'):
site = filename.rsplit('.',1)[0]
site_data = pd.read_json(data_folder + folder + filename)
site_data['Site'] = site.replace("riverdee-", "").replace("rain-", "")
site_data['Type'] = folders[folder]
site_data = site_data.rename(columns={'Timestamp':'DateTime','title':'Value'})
all_data = pd.concat([all_data, site_data])  # DataFrame.append was deprecated; pd.concat is the drop-in replacement
all_data = all_data.reset_index(drop=True)
################################ Displaying Data/ App Layout ################################
st.title('Data explorer')
### Filters and sliders
radio_type = st.sidebar.radio('Choose Type', list(set(all_data['Type'])))
selection_site = st.sidebar.multiselect('Choose Site', list(set(all_data['Site'])))
start_date = all_data['DateTime'].min().date()
end_date = all_data['DateTime'].max().date()
dummy_start = dt.date(2000,1,1)
slider_timeline_start, slider_timeline_end = st.sidebar.slider('Select a range of values',start_date, end_date, (dummy_start , end_date))
### Converting date inputs to datetime format, assuming H:M:S is always midnight.
time = dt.datetime.min.time()
slider_timeline_start = dt.datetime.combine(slider_timeline_start, time)
slider_timeline_end = dt.datetime.combine(slider_timeline_end, time)
### Filters the data based on interactive selections
chrt = plt.figure()
chrt_data = all_data[ \
(all_data['Type']==radio_type) \
& (all_data['DateTime']>slider_timeline_start) \
& (all_data['DateTime']<slider_timeline_end) \
& (all_data['Site'].isin(selection_site)) \
]
### Displays the figures
chrt = sns.FacetGrid(chrt_data, row='Site', height=1.7, aspect=4,)
chrt.map(sns.lineplot, 'DateTime', 'Value')
st.pyplot(chrt)
### You'll need this in the future if you add any more charts below.
#plt.close()
|
import unittest
import chainer
from chainer import testing
import numpy as np
from tests.helper import ONNXModelTest
@testing.parameterize(
{'in_shape': (3, 5), 'name': 'softmax_cross_entropy'},
)
@unittest.skipUnless(
int(chainer.__version__.split('.')[0]) >= 6,
"SoftmaxCrossEntropy is supported from Chainer v6")
class TestSoftmaxCrossEntropy(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
def __call__(self, x, t):
return chainer.functions.softmax_cross_entropy(x, t)
self.model = Model()
self.x = np.random.uniform(size=self.in_shape).astype('f')
self.t = np.random.randint(size=self.in_shape[0], low=0,
high=self.in_shape[1]).astype(np.int32)
def test_output(self):
self.expect(self.model, [self.x, self.t], name=self.name,
skip_opset_version=[7, 8])
|
# This is probably the weirdest LCA implementation you will ever see (the ancestor table is built with Python list appends)
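# Added note (standard binary-lifting idea, not part of the original solution): lcaArr[n][i] holds the
# 2^i-th ancestor of node n, so it can be filled with lcaArr[n][i] = lcaArr[lcaArr[n][i-1]][i-1];
# a query first lifts the deeper node up to the same depth, then lifts both nodes in parallel.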
N, Q = map(int, input().split())
adj = [[] for _ in range(N)]
for i in range(N-1):
a, b = map(int, input().split())
adj[a-1].append(b-1)
adj[b-1].append(a-1)
MAX_LCA = 18
lcaArr = [[] for _ in range(N)]
depth = [0 for _ in range(N)]
def dfs(n, p, d):
depth[n] = d
lcaArr[n].append(p)
for e in adj[n]:
if e != p:
dfs(e, n, d+1)
dfs(0, 0, 0)
for i in range(1, MAX_LCA):
for n in range(N):
lcaArr[n].append(lcaArr[lcaArr[n][i-1]][i-1])
def find_lca(a, b):
if depth[a] < depth[b]:
a, b = b, a
to_move = depth[a] - depth[b]
for i in reversed(range(0, MAX_LCA)):
if (1 << i) <= to_move:
to_move -= 1 << i
a = lcaArr[a][i]
if a == b:
return a
for i in reversed(range(0, MAX_LCA)):
if lcaArr[a][i] != lcaArr[b][i]:
a = lcaArr[a][i]
b = lcaArr[b][i]
return lcaArr[a][0]
for i in range(Q):
a, b = map(int, input().split())
print(find_lca(a-1, b-1) + 1)
|
import fnmatch
import importlib
import inspect
import sys
from dataclasses import dataclass
from enum import Enum
from functools import partial
from inspect import signature
from types import ModuleType
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Type, TypeVar, Union
from torch import nn
from .._internally_replaced_utils import load_state_dict_from_url
__all__ = ["WeightsEnum", "Weights", "get_model", "get_model_builder", "get_model_weights", "get_weight", "list_models"]
@dataclass
class Weights:
"""
This class is used to group important attributes associated with the pre-trained weights.
Args:
url (str): The location where we find the weights.
transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)
needed to use the model. The reason we attach a constructor method rather than an already constructed
object is that the specific object might have a large memory footprint, so we want to delay
initialization until it is needed.
meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be
informative attributes (for example the number of parameters/flops, recipe link/methods used in training
etc), configuration parameters (for example the `num_classes`) needed to construct the model or important
meta-data (for example the `classes` of a classification model) needed to use the model.
"""
url: str
transforms: Callable
meta: Dict[str, Any]
def __eq__(self, other: Any) -> bool:
# We need this custom implementation for correct deep-copy and deserialization behavior.
# TL;DR: After the definition of an enum, creating a new instance, i.e. by deep-copying or deserializing it,
# involves an equality check against the defined members. Unfortunately, the `transforms` attribute is often
# defined with `functools.partial` and `fn = partial(...); assert deepcopy(fn) != fn`. Without custom handling
# for it, the check against the defined members would fail and effectively prevent the weights from being
# deep-copied or deserialized.
# See https://github.com/pytorch/vision/pull/7107 for details.
if not isinstance(other, Weights):
return NotImplemented
if self.url != other.url:
return False
if self.meta != other.meta:
return False
if isinstance(self.transforms, partial) and isinstance(other.transforms, partial):
return (
self.transforms.func == other.transforms.func
and self.transforms.args == other.transforms.args
and self.transforms.keywords == other.transforms.keywords
)
else:
return self.transforms == other.transforms
class WeightsEnum(Enum):
"""
This class is the parent class of all model weights. Each model building method receives an optional `weights`
parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type
`Weights`.
Args:
value (Weights): The data class entry with the weight information.
"""
@classmethod
def verify(cls, obj: Any) -> Any:
if obj is not None:
if type(obj) is str:
obj = cls[obj.replace(cls.__name__ + ".", "")]
elif not isinstance(obj, cls):
raise TypeError(
f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}."
)
return obj
def get_state_dict(self, *args: Any, **kwargs: Any) -> Mapping[str, Any]:
return load_state_dict_from_url(self.url, *args, **kwargs)
def __repr__(self) -> str:
return f"{self.__class__.__name__}.{self._name_}"
@property
def url(self):
return self.value.url
@property
def transforms(self):
return self.value.transforms
@property
def meta(self):
return self.value.meta
def get_weight(name: str) -> WeightsEnum:
"""
Gets the weights enum value by its full name. Example: "ResNet50_Weights.IMAGENET1K_V1"
Args:
name (str): The name of the weight enum entry.
Returns:
WeightsEnum: The requested weight enum.
"""
try:
enum_name, value_name = name.split(".")
except ValueError:
raise ValueError(f"Invalid weight name provided: '{name}'.")
base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
base_module = importlib.import_module(base_module_name)
model_modules = [base_module] + [
x[1]
for x in inspect.getmembers(base_module, inspect.ismodule)
if x[1].__file__.endswith("__init__.py") # type: ignore[union-attr]
]
weights_enum = None
for m in model_modules:
potential_class = m.__dict__.get(enum_name, None)
if potential_class is not None and issubclass(potential_class, WeightsEnum):
weights_enum = potential_class
break
if weights_enum is None:
raise ValueError(f"The weight enum '{enum_name}' for the specific method couldn't be retrieved.")
return weights_enum[value_name]
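# Illustrative sketch (added note, not part of the original module): looking up weights by name and
# passing them to a registered builder; "ResNet50_Weights.IMAGENET1K_V1" is the example name from the
# docstring above, and "resnet50" is assumed here to be a registered model name.
#   weights = get_weight("ResNet50_Weights.IMAGENET1K_V1")
#   model = get_model("resnet50", weights=weights)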
def get_model_weights(name: Union[Callable, str]) -> Type[WeightsEnum]:
"""
Returns the weights enum class associated with the given model.
Args:
name (callable or str): The model builder function or the name under which it is registered.
Returns:
weights_enum (WeightsEnum): The weights enum class associated with the model.
"""
model = get_model_builder(name) if isinstance(name, str) else name
return _get_enum_from_fn(model)
def _get_enum_from_fn(fn: Callable) -> Type[WeightsEnum]:
"""
Internal method that gets the weight enum of a specific model builder method.
Args:
fn (Callable): The builder method used to create the model.
Returns:
WeightsEnum: The requested weight enum.
"""
sig = signature(fn)
if "weights" not in sig.parameters:
raise ValueError("The method is missing the 'weights' argument.")
ann = signature(fn).parameters["weights"].annotation
weights_enum = None
if isinstance(ann, type) and issubclass(ann, WeightsEnum):
weights_enum = ann
else:
# handle cases like Union[Optional, T]
# TODO: Replace ann.__args__ with typing.get_args(ann) after python >= 3.8
for t in ann.__args__: # type: ignore[union-attr]
if isinstance(t, type) and issubclass(t, WeightsEnum):
weights_enum = t
break
if weights_enum is None:
raise ValueError(
"The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct."
)
return weights_enum
M = TypeVar("M", bound=nn.Module)
BUILTIN_MODELS = {}
def register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:
def wrapper(fn: Callable[..., M]) -> Callable[..., M]:
key = name if name is not None else fn.__name__
if key in BUILTIN_MODELS:
raise ValueError(f"An entry is already registered under the name '{key}'.")
BUILTIN_MODELS[key] = fn
return fn
return wrapper
def list_models(
module: Optional[ModuleType] = None,
include: Union[Iterable[str], str, None] = None,
exclude: Union[Iterable[str], str, None] = None,
) -> List[str]:
"""
Returns a list with the names of registered models.
Args:
module (ModuleType, optional): The module from which we want to extract the available models.
include (str or Iterable[str], optional): Filter(s) for including the models from the set of all models.
Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
wildcards. If multiple filters are given, the result is the union of the individual filters.
exclude (str or Iterable[str], optional): Filter(s) applied after include_filters to remove models.
Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
wildcards. If multiple filters are given, all models that match any individual filter are removed.
Returns:
models (list): A list with the names of available models.
"""
all_models = {
k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__.rsplit(".", 1)[0] == module.__name__
}
if include:
models: Set[str] = set()
if isinstance(include, str):
include = [include]
for include_filter in include:
models = models | set(fnmatch.filter(all_models, include_filter))
else:
models = all_models
if exclude:
if isinstance(exclude, str):
exclude = [exclude]
for exclude_filter in exclude:
models = models - set(fnmatch.filter(all_models, exclude_filter))
return sorted(models)
def get_model_builder(name: str) -> Callable[..., nn.Module]:
"""
Gets the model name and returns the model builder method.
Args:
name (str): The name under which the model is registered.
Returns:
fn (Callable): The model builder method.
"""
name = name.lower()
try:
fn = BUILTIN_MODELS[name]
except KeyError:
raise ValueError(f"Unknown model {name}")
return fn
def get_model(name: str, **config: Any) -> nn.Module:
"""
Gets the model name and configuration and returns an instantiated model.
Args:
name (str): The name under which the model is registered.
**config (Any): parameters passed to the model builder method.
Returns:
model (nn.Module): The initialized model.
"""
fn = get_model_builder(name)
return fn(**config)
|
# coding: utf-8
import time
import os
def bytes2human(n):
symbols = ('K','M','G','T','P','E','Z','Y')
prefix = {}
for i,s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value,s)
return '%sB' % n
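# Example of the helper above (added note): bytes2human(1048576) -> '1.0M', while values below 1024
# fall through to the plain '%sB' format, e.g. bytes2human(512) -> '512B'.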
def get_files_info(info):
files_info=[]
for i in range(len(info.files())):
name = info.files().file_name(i)
path = info.files().file_path(i)
size = bytes2human(info.files().file_size(i))
file_info = {'name':name, 'path':path, 'size':size}
files_info.append(file_info)
return files_info
def get_torrent_info(info):
# Torrent creation time
creation_date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(info.creation_date()))
# Torrent name
name = info.name()
# info_hash
info_hash = str(info.info_hash())
# Torrent comment
comment = info.comment()
# Creator
creator = info.creator()
# Number of files
file_num = info.files().num_files()
# Total size
total_size = bytes2human(info.total_size())
# File details
files = get_files_info(info)
"""
# File list
file_list = []
for i in range(file_num):
file_name = info.files().file_name(i)
file_list.append(file_name)
"""
# trackers
trackers = []
for i in info.trackers():
tracker = i.url
trackers.append(tracker)
# Aggregate torrent info
torrent_info = {'creation_date':creation_date, 'name':name, 'comment':comment, 'creator':creator, 'file_num':file_num, 'total_size':total_size, 'files':files, 'info_hash':info_hash, 'trackers':trackers}
return torrent_info
|
import numbers
import random
import datetime
from faker import Faker
from PBD.data import *
fake = Faker()
allClients = list()
allConferences = list()
allConferenceDays = list()
allWorkshops = list()
allConferenceReservations = list()
allConferenceDayReservations = list()
allThresholds = list()
allParticipants = list()
allDayAdmissions = list()
allWorkshopReservations = list()
allWorkshopAdmissions = list()
allPayments = list()
def convertToSqlArgs(*argv):
q = '('
for arg in argv:
if isinstance(arg, numbers.Number):
q += str(arg) + ', '
elif isinstance(arg, str):
q += '\'' + arg + '\', '
q = q[:-2]
return q + ')'
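# Example of the helper above (added note): convertToSqlArgs('Acme Ltd', '555-0100', 1)
# returns "('Acme Ltd', '555-0100', 1)", ready to append to an INSERT ... VALUES statement.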
def convertToMoney(val):
# val is an integer number of cents; pad the cents to two digits so 505 becomes '5.05', not '5.5'
return str(val // 100) + '.' + str(val % 100).zfill(2)
def generateClients(number_of_clients):
cols = '(name, phone, isCompany)'
query = 'INSERT INTO [dbo].[Clients] ' + cols + ' VALUES '
for i in range(number_of_clients):
isCompany = random.randint(0, 1)
if isCompany == 1:
name = fake.company()
else:
name = fake.name()
phone = fake.phone_number()
allClients.append(Client(i + 1, name, phone, isCompany))
query += convertToSqlArgs(name, phone, isCompany) + ', '
return query[:-2]
def generateConferences(number_of_conferences, fromDate, daysDelta):
cols = '(ConferenceName, Price, StudentDiscount, StartDate, EndDate)'
query = 'INSERT INTO [dbo].[Conferences] ' + cols + ' VALUES '
for i in range(number_of_conferences):
name = fake.catch_phrase().split()[-1].capitalize() + \
random.choice([' Days', ' Conference', ' Meet-up', 'Conf', ' Congress'])
price = random.randint(4, 34) * 10 * 100
studentDiscount = random.randint(0, 100)/100
startDate = fromDate + datetime.timedelta(days=random.randint(0, daysDelta))
endDate = startDate + datetime.timedelta(days=random.randint(0, 2))
allConferences.append(Conference(i + 1, name, price, studentDiscount, startDate, endDate))
query += convertToSqlArgs(name)[:-1]+', '+ convertToMoney(price) + ', ' + convertToSqlArgs(studentDiscount, str(startDate), str(endDate))[1:] + ', '
return query[:-2]
def generateDiscountThresholds(maxThresholdsPerConference):
cols = '(ConferenceID, StartDate, EndDate, Discount)'
query = 'INSERT INTO [dbo].[DiscountThresholds] ' + cols + ' VALUES '
i = 0
for conf in allConferences:
numberOfThresholds = random.randint(0, maxThresholdsPerConference)
if numberOfThresholds == 0:
continue
d = random.randint(numberOfThresholds*3, 60)
firstDate = conf.startDate - datetime.timedelta(days=d)
splittingDays = sorted(random.sample(range(1, d-1), numberOfThresholds-1))
splittingDays.append(d)
lastDay = 0
for splitDay in splittingDays:
discount = random.choice([random.randint(0, 100), random.randint(0, 100), random.randint(0, 100), -random.randint(0, 100)])/100
startDate = firstDate + datetime.timedelta(days=lastDay)
endDate = firstDate + datetime.timedelta(days=splitDay)
lastDay = splitDay
allThresholds.append(DiscountThreshold(i+1, conf.id, startDate, endDate, discount))
query += convertToSqlArgs(conf.id, str(startDate), str(endDate), str(discount)) + ', '
i += 1
return query[:-2]
def generateConferenceDays():
cols = '(ConferenceID, Date, ParticipantsLimit)'
query = 'INSERT INTO [dbo].[ConferenceDays] ' + cols + ' VALUES '
i = 0
for conf in allConferences:
numberOfDays = random.randint(1, 4)
for j in range(numberOfDays):
date = conf.startDate + datetime.timedelta(days=j)
limit = random.randint(5, 250)
allConferenceDays.append(ConferenceDay(i + 1, conf.id, date, limit))
query += convertToSqlArgs(conf.id, str(date), limit) + ', '
i += 1
return query[:-2]
def generateWorkshops():
cols = '(DayID , Name, StartTime, EndTime, ParticipantLimit, Price)'
query = 'INSERT INTO [dbo].[Workshops] ' + cols + ' VALUES '
i = 0
for confDay in allConferenceDays:
numberOfWorkshops = random.randint(0, 7)
for j in range(numberOfWorkshops):
name = random.choice([random.choice(['How to ', 'Deciding to ', 'Why I started to ']) + fake.bs(),
random.choice(['The era of ', 'Novel approach to ', 'Pros and cons of ']) + " ".join(
fake.bs().split()[1:])])
start = datetime.time(hour=random.randint(9, 17), minute=random.randint(0, 11) * 5)
end = datetime.time(hour=start.hour + random.randint(1, 4), minute=start.minute)
limit = random.randint(3, confDay.limit)
price = random.randint(1, 25) * 10 * 100
allWorkshops.append(Workshop(i + 1, confDay.id, name, start, end, limit, price))
query += convertToSqlArgs(confDay.id, name, str(start), str(end), limit)[:-1] + ', ' + convertToMoney(price) + '), '
i += 1
return query[:-2]
def generateConferenceReservations(maxReservationsPerConference):
cols = '(ConferenceID, ClientID, ReservationDate)'
query = 'INSERT INTO [dbo].[ConferenceReservations] ' + cols + ' VALUES '
i = 0
for conf in allConferences:
reservationsNumber = random.randint(0, maxReservationsPerConference)
clientsList = random.sample(allClients, min(reservationsNumber, len(allClients)))
for client in clientsList:
reservationDate = conf.startDate - datetime.timedelta(days=random.randint(0, 60))
allConferenceReservations.append(ConferenceReservation(i + 1, conf.id, client.id, reservationDate))
query += convertToSqlArgs(conf.id, client.id, str(reservationDate)) + ', '
i += 1
return query[:-2]
def generateDayReservations(maxReservationsPerDay):
cols = '(DayID, ReservationID, ParticipantsNumber, StudentParticipantsNumber)'
queryStart = 'INSERT INTO [dbo].[DayReservations] ' + cols + ' VALUES '
query = queryStart
i = 0
for day in allConferenceDays:
conf = allConferences[day.confID - 1]
reservations = list()
for confReservation in allConferenceReservations:
if confReservation.confID == day.confID:
reservations.append(confReservation)
reservations = random.sample(reservations, min(maxReservationsPerDay, len(reservations)))
for reservation in reservations:
if day.freePlaces <= 1:
break
participants = random.randint(1, min(3 * day.limit // len(reservations), day.freePlaces))
studentParticipants = random.randint(0, participants)
thresholdDiscount = 0
for t in allThresholds:
if t.confID == conf.id and t.startDate <= reservation.registrationDate < t.endDate:
thresholdDiscount = t.discount
break
toPay = int((1 - thresholdDiscount) * conf.price*((1 - conf.studentDiscount) * studentParticipants +
(participants - studentParticipants)))
reservation.toPay += toPay
day.freePlaces -= participants
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
allConferenceDayReservations.append(
DayReservation(i+1, day.id, reservation.id, participants, studentParticipants, toPay))
query += convertToSqlArgs(day.id, reservation.id, participants, studentParticipants) + ', '
i += 1
return query[:-2]
def generateDayAdmissions():
cols = '(ParticipantID, DayReservationID, isStudent)'
queryStart = 'INSERT INTO [dbo].[DayAdmissions] ' + cols + ' VALUES '
query = queryStart
i = 0
for dayReservation in allConferenceDayReservations:
fullParticipantsNumber = dayReservation.participantsNumber - dayReservation.studentParticipantsNumber
studentParticipantsNumber = dayReservation.studentParticipantsNumber
dayParticipants = random.sample(allParticipants, dayReservation.participantsNumber)
for student in dayParticipants[:studentParticipantsNumber]:
isStudent = 1
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
allDayAdmissions.append(DayAdmission(i + 1, student.id, dayReservation.id, isStudent))
query += convertToSqlArgs(student.id, dayReservation.id, isStudent) + ', '
i += 1
if fullParticipantsNumber == 0:
continue
for adult in dayParticipants[-fullParticipantsNumber:]:
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
isStudent = 0
allDayAdmissions.append(DayAdmission(i+1, adult.id, dayReservation.id, isStudent))
query += convertToSqlArgs(adult.id, dayReservation.id, isStudent) + ', '
i += 1
return query[:-2]
def generateParticipants(participantsNumber):
cols = '(FirstName, LastName, EMailAddress)'
queryStart = 'INSERT INTO [dbo].[Participants] ' + cols + ' VALUES '
query = queryStart
for i in range(0, participantsNumber):
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
name = fake.first_name()
lastName = fake.last_name()
email = fake.ascii_email()
allParticipants.append(Participant(i+1, name, lastName, email))
query += convertToSqlArgs(name, lastName, email) + ', '
return query[:-2]
def generateWorkshopReservations():
cols = '(WorkshopID, DayReservationID, ParticipantsNumber)'
queryStart = 'INSERT INTO [dbo].[WorkshopReservations] ' + cols + ' VALUES '
query = queryStart
i = 0
for workshop in allWorkshops:
dayReservations = [dr for dr in allConferenceDayReservations if dr.dayID == workshop.dayID]
for dayReservation in dayReservations:
participantsNumber = random.randint(0, min(workshop.freePlaces, dayReservation.participantsNumber))
if participantsNumber == 0:
continue
workshop.freePlaces -= participantsNumber
allConferenceReservations[dayReservation.reservationID-1].toPay += workshop.price*participantsNumber
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
allWorkshopReservations.append(WorkshopReservation(i+1, workshop.id, dayReservation.id, participantsNumber))
query += convertToSqlArgs(workshop.id, dayReservation.id, participantsNumber) + ', '
i += 1
return query[:-2]
def generateWorkshopAdmissions():
cols = '(DayAdmissionID, WorkshopReservationID)'
queryStart = 'INSERT INTO [dbo].[WorkshopAdmissions] ' + cols + ' VALUES '
query = queryStart
i = 0
for dayAdmission in allDayAdmissions:
workshopReservations = [w for w in allWorkshopReservations if w.dayReservationID == dayAdmission.dayReservationID]
random.shuffle(workshopReservations)
enrolledForWorkshops = list()
for workshopReservation in workshopReservations:
workshop = allWorkshops[workshopReservation.workshopID-1]
canAttend = True
if workshopReservation.notEnrolled == 0:
canAttend = False
for w in enrolledForWorkshops:
if w.dayID == workshop.dayID and \
(workshop.start <= w.start <= workshop.end or workshop.start <= w.end <= workshop.end):
canAttend = False
break
if not canAttend:
continue
workshopReservation.notEnrolled -= 1
enrolledForWorkshops.append(workshop)
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
allWorkshopAdmissions.append(WorkshopAdmission(dayAdmission.id, workshopReservation.id))
query += convertToSqlArgs(dayAdmission.id, workshopReservation.id) + ', '
i += 1
return query[:-2]
def generatePayments():
cols = '(ConferenceReservationID, Amount, Date)'
queryStart = 'INSERT INTO [dbo].[Payments] ' + cols + ' VALUES '
query = queryStart
i = 0
for conferenceReservation in allConferenceReservations:
if conferenceReservation.toPay == 0:
continue
instalmentsNumber = random.randint(1, 4)
confStartDate = allConferences[conferenceReservation.confID-1].startDate
dayDiff = min((confStartDate - conferenceReservation.registrationDate).days, 7)
toPay = conferenceReservation.toPay-instalmentsNumber
for j in range(instalmentsNumber):
paymentDate = conferenceReservation.registrationDate + datetime.timedelta(days=random.randint(0, dayDiff))
if j == instalmentsNumber-1:
value = toPay
elif toPay <= 0:
value = 0
else:
value = random.randint(0, toPay)
toPay -= value
if i % 600 == 599:
query = query[:-2] + '\n' + queryStart
allPayments.append(Payment(i+1, conferenceReservation.id, value+1, paymentDate))
query += convertToSqlArgs(conferenceReservation.id)[:-1] + ', ' + convertToMoney(value) + ', \'' + str(paymentDate) + '\'), '
i += 1
return query[:-2]
totalQuery = generateClients(250) + '\n' + \
generateConferences(80, datetime.date(year=2017, month=1, day=1), 3 * 365) + '\n' + \
generateDiscountThresholds(4) + '\n' + \
generateConferenceDays() + '\n' + \
generateWorkshops() + '\n' + \
generateConferenceReservations(15) + '\n' + \
generateDayReservations(10) + '\n' + \
generateParticipants(3500) + '\n' + \
generateDayAdmissions() + '\n' + \
generateWorkshopReservations() + '\n' + \
generateWorkshopAdmissions() + '\n' + \
generatePayments()
with open('query.sql', 'w') as f:
f.write(totalQuery)
|
import util
import os
import fnmatch
def getCsvsFiles():
pattern = "*.csv"
holds = []
path = util.getPath('toscrub')
listOfFiles = os.listdir(path)
for entry in listOfFiles:
if fnmatch.fnmatch(entry, pattern):
holds.append("{}/{}".format(path,entry))
return holds
def cleanFiles():
for afile in getCsvsFiles():
with open(afile, "r") as f:
lines = f.readlines()
found = 0
for i,aline in enumerate(lines):
if "Ticker" in aline:
found = i
break
if found != 0:
with open(afile, "w") as f:
f.writelines(lines[found:])
os.system("./scrub.sh Derivatives")
dels = util.getp("deletes")
for astock in dels:
os.system("./scrub.sh {}".format(astock))
|
import uuid
from gridfs import GridFS
from pymongo import MongoClient
from datetime import datetime
from bson import Binary
from io import BytesIO
import os
class NoFileException(Exception):
pass
class WrongTypeException(Exception):
pass
class FileManager:
mongoclient = MongoClient(host='172.17.0.1', port=27017, replicaset="foo")
mongodb = mongoclient.my_db
coll = mongodb.userdata
fs = GridFS(mongodb, collection="userdata")
def saveFile(self, file, filename):
# Pass as file a io.BytesIO data type
# or bytes
id = str(uuid.uuid4())
if isinstance(file, str):
toSave = Binary(file.encode())
elif (type(file) == BytesIO):
toSave = Binary(file.getvalue())
else:
try:
toSave = Binary(file)
except Exception:
try:
toSave = Binary(file.read())
except Exception:
raise WrongTypeException("cannot save the data in the type given")
self.fs.put(toSave, _id=id, filename=filename,
uploadDate=datetime.utcnow())
ids = os.environ.get("savedIds", "")
if not ids:
os.environ["savedIds"] = id
else:
os.environ["savedIds"] = ids + "|" + id
return id
def loadFile(self, fileID):
f = self.fs.find_one(str(fileID))
if f:
return f
else:
raise NoFileException("No file with id " + str(fileID))
|
from dash_bootstrap_components import __version__
def test_version():
assert __version__ == "1.5.0-dev"
|
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
data = {'key1' : [7, 2.72, 2+3j],
'key2' : ( 'abc', 'xyz')}
else:
data = None
data = comm.bcast(data, root=0)
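# After the broadcast every rank holds its own copy of the dictionary defined on rank 0; e.g.
# launching with `mpiexec -n 4 python this_script.py` (filename hypothetical) gives all four
# processes the same `data`.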
|
import sys
import logging
import traceback
import warnings
from importlib import import_module
logger = logging.getLogger(__name__)
EXTRAS = ["h5py", "z5py", "pyn5", "PIL", "imageio"]
__all__ = ["NoSuchModule"] + EXTRAS
class NoSuchModule(object):
def __init__(self, name):
# logger.warning('Module {} is not accessible, some features may be unavailable'.format(name))
self.__name = name
self.__traceback_str = traceback.format_tb(sys.exc_info()[2])
errtype, value = sys.exc_info()[:2]
self.__exception = errtype(value)
def __getattr__(self, item):
print(self.__traceback_str, file=sys.stderr)
raise self.__exception
def __bool__(self):
return False
def import_if_available(name, namespace):
try:
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", ".*issubdtype")
module = import_module(name)
except ImportError as e:
module = NoSuchModule(name)
namespace[name] = module
return module
for extra in EXTRAS:
import_if_available(extra, locals())
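# Added note: after this loop each name in EXTRAS refers either to the real module or to a NoSuchModule
# placeholder, so a guard like `if h5py:` works (NoSuchModule is falsy) and attribute access on a missing
# module re-raises the captured ImportError along with its saved traceback.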
|
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QLabel
from PyQt5.QtGui import QPainter, QFont, QImage
from PyQt5.QtCore import Qt, QCoreApplication
from modules import gamefield as g, bomb as b
import sys
import time
from os.path import *
class Menu(QWidget):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.is_active = False
self.initUI()
def initUI(self):
self.init_labels()
self.init_buttons()
self.setFixedHeight(self.height())
self.setFixedWidth(self.width())
def init_labels(self):
label_width = self.parent().frame_width * (len(self.parent().gamefield.gamefield[0]) - 2)
label_height = self.parent().frame_height * 1.2
self.slz = (label_width, label_height)
game_over_text = "YOU " + ("WIN " if self.parent().gamefield.win else "LOST ") + \
"AND YOU GOT " + \
str(self.parent().gamefield.bomberman.points) + "POINTS"
label_pos = (self.parent().frame_width, self.parent().frame_width * 0.2)
self.game_over = Label(label_pos, self.slz, game_over_text, self)
lb_size = (self.parent().window_width, self.parent().window_height)
lb_str = self.parent().gamefield.leaderboard.str_format
self.leaderboard = Label(label_pos, lb_size, lb_str,self)
self.close_all_labels()
def close_all_labels(self):
self.game_over.close()
self.leaderboard.close()
def show_last_menu(self, event=None):
self.close_all_labels()
game_over_text = "YOU " + ("WIN " if self.parent().gamefield.win else "LOST ") + \
"AND YOU GOT " + \
str(self.parent().gamefield.bomberman.points) + " POINTS"
self.game_over.setText(game_over_text)
self.game_over.show()
self.show_start_menu()
def init_buttons(self):
sbz = (self.parent().frame_width * (self.parent().gamefield.width - 2), self.parent().frame_height * 1.2)
pos = [self.parent().frame_width, self.parent().frame_height]
text = "New game"
self.new_game_button = Button(self._start_game, (pos[0], pos[1] * 1.5), sbz, text, self)
text = "Show Leaderboard"
self.leaderboard_button = Button(self.show_leaderboard, (pos[0], pos[1] * 4.1), sbz, text, self)
text = "Exit"
self.exit_button = Button(self._exit_func, (pos[0], pos[1] * 5.7), sbz, text, self)
self.setChildNoFocus(self.new_game_button)
self.setChildNoFocus(self.leaderboard_button)
self.setChildNoFocus(self.exit_button)
self.close_all_buttons()
def setChildNoFocus(self, child):
child.setFocusPolicy(Qt.NoFocus)
def close(self):
super().close()
def close_all_buttons(self):
self.new_game_button.close()
self.leaderboard_button.close()
self.exit_button.close()
def show_leaderboard(self):
self.leaderboard.setText(
self.parent().gamefield.leaderboard.str_format)
self.leaderboard.mouseReleaseEvent = self.show_start_menu if not self.parent().gamefield.game_over\
else self.show_last_menu
self.leaderboard.setStyleSheet("background-color : black;"
"color : blue;")
self.leaderboard.show()
self.close_all_buttons()
def _start_game(self):
self.parent().start()
def _exit_func(self):
self.parent()._close()
def show_start_menu(self, event=None):
self.leaderboard.close()
self.new_game_button.show()
self.leaderboard_button.show()
self.exit_button.show()
'''def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.deactivate()
def deactivate(self):
self.is_active = False
if self.qb:
self.qb.is_active = not self.is_active
self.close()'''
class Label(QLabel):
def __init__(self, coordinates, size, text, parent=None):
super().__init__(parent)
self.setText(text)
self.setGeometry(coordinates[0], coordinates[1], size[0], size[1])
self.setFixedHeight(self.height())
self.setFixedWidth(self.width())
class QtCheatWindow(QWidget):
def __init__(self, bomberman):
super().__init__()
self.active = True
self.setGeometry(150, 150, 200, 200)
self.setWindowTitle('CHEATS')
self.bomberman = bomberman
self.create_buttons()
self.show()
def create_buttons(self):
buttons_folder = "images"
standard_button_size = (150, 50)
self.lifes_button = Button(self.infinite_lifes, (25, 20), standard_button_size, "infinite lives", self)
self.bombs_button = Button(self.infinite_bombs, (25, 80), standard_button_size, "infinite bombs", self)
self.range_button = Button(self.infinite_range, (25, 140), standard_button_size, "infinite range", self)
def closeEvent(self, *args, **kwargs):
self.active = False
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
def infinite_lifes(self):
self.bomberman.lifes = sys.maxsize
def infinite_bombs(self):
self.bomberman.bombs_count = sys.maxsize
def infinite_range(self):
self.bomberman.bomb_range = sys.maxsize
class Button(QPushButton):
def __init__(self, function, coordinates, size, text, parent=None):
super(QPushButton, self).__init__(parent)
self.setText(text)
self.function = function
self.setGeometry(coordinates[0], coordinates[1], size[0], size[1])
self.clicked.connect(self._on_click)
def _on_click(self):
self.function()
class QtBomberman(QWidget):
def __init__(self, lvlname, lvlnum=1, file_ext=".txt", lb_path="leaderbord.txt", lb_size=10):
super().__init__()
self.menu = None
self.lb_path = join("text files", lb_path)
self.lb_size = lb_size
self.lvl_names = []
self.generate_lvl_names(join("text files", lvlname), file_ext, lvlnum)
self.is_active = True
self.start()
self.menu = Menu(self)
self.menu.setFocusPolicy(Qt.NoFocus)
self.initUI()
def start(self):
self.activate()
self.frame_width = 32
self.frame_height = 32
self.tick_time = 0.05
self.images = {}
self.qc = None
self.cheat = [Qt.Key_H, Qt.Key_E, Qt.Key_S,
Qt.Key_O, Qt.Key_Y, Qt.Key_A, Qt.Key_M]
self.cheats_detector = []
self.gamefield = g.GameField(
self.lvl_names, self.tick_time, self.lb_path, self.lb_size)
if not self.gamefield.gamefield:
self._close()
self.key_moves = {
Qt.Key_D: self.gamefield.bomberman.move_right,
Qt.Key_A: self.gamefield.bomberman.move_left,
Qt.Key_W: self.gamefield.bomberman.move_up,
Qt.Key_S: self.gamefield.bomberman.move_down,
Qt.Key_Right: self.gamefield.bomberman.move_right,
Qt.Key_Left: self.gamefield.bomberman.move_left,
Qt.Key_Up: self.gamefield.bomberman.move_up,
Qt.Key_Down: self.gamefield.bomberman.move_down
}
self.key_stop_moves = {
Qt.Key_D: self.gamefield.bomberman.stop_moving_right,
Qt.Key_A: self.gamefield.bomberman.stop_moving_left,
Qt.Key_W: self.gamefield.bomberman.stop_moving_up,
Qt.Key_S: self.gamefield.bomberman.stop_moving_down,
Qt.Key_Right: self.gamefield.bomberman.stop_moving_right,
Qt.Key_Left: self.gamefield.bomberman.stop_moving_left,
Qt.Key_Up: self.gamefield.bomberman.stop_moving_up,
Qt.Key_Down: self.gamefield.bomberman.stop_moving_down
}
def activate(self):
self.is_active = True
if self.menu:
self.menu.is_active = not self.is_active
self.menu.close()
def generate_lvl_names(self, lvlname, file_ext, lvls_count):
for lvlnum in range(lvls_count):
num = str(lvlnum) if lvlnum != 0 else ""
file_path = lvlname + num + file_ext
self.lvl_names.append(file_path)
def initUI(self):
self.setGeometry(100, 100, self.frame_width *
self.game_width, self.frame_height * self.game_height)
self.setFixedHeight(self.height())
self.setFixedWidth(self.width())
self.setWindowTitle('Bomberman')
self.show()
self.game_loop()
def closeEvent(self, e):
self._close()
def _close(self):
exit()
@property
def game_width(self):
return self.gamefield.width
@property
def game_height(self):
return self.gamefield.height
@property
def window_width(self):
return self.gamefield.width * self.frame_width
@property
def window_height(self):
return self.gamefield.height * self.frame_height
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
for row in range(self.game_height):
for column in range(self.game_width):
image_name = self.gamefield.gamefield[row][column].image_name
if image_name:
image = self.find_image(image_name).scaled(
self.frame_width, self.frame_height)
qp.drawImage(column * self.frame_width,
row * self.frame_height, image)
for movable in self.gamefield.movable:
image_name = movable.image_name
if image_name:
image = self.find_image(image_name).scaled(
self.frame_width, self.frame_height)
qp.drawImage(movable.x * self.frame_width,
movable.y * self.frame_height, image)
image = self.find_image(self.gamefield.bomberman.image_name).scaled(
self.frame_width, self.frame_height)
qp.drawImage(self.gamefield.bomberman.x * self.frame_width,
self.gamefield.bomberman.y * self.frame_height, image)
bomb_image_name = b.Bomb(None, None, self.gamefield.bomberman).image_name
image = self.find_image(bomb_image_name)
image_width = (len(self.gamefield.gamefield[0]) - 1.2) * self.frame_width
image_height = (len(self.gamefield.gamefield) - 1.2) * self.frame_height
qp.drawImage(image_width,image_height, image)
font = QFont()
font.setPointSize(font.pointSize() * 3)
qp.setFont(font)
text_width = (len(self.gamefield.gamefield[0]) - 5.8) * self.frame_width
text_height = (len(self.gamefield.gamefield) - 1.2) * self.frame_height
text = str.format("POINTS: {0}", str(self.gamefield.bomberman.points))
qp.drawText(text_width,text_height,self.frame_width * 5, self.frame_height, Qt.AlignLeft, text)
self.show()
def find_image(self, image_name):
if not (image_name in self.images):
self.images[image_name] = QImage(join("images", image_name))
image = self.images[image_name]
return image
def game_loop(self):
while True:
try:
QCoreApplication.processEvents()
self.make_moves()
time.sleep(self.tick_time)
if self.qc and self.qc.active or not self.is_active:
continue
if self.gamefield.game_over:
self.change_activity_menu(self.menu.show_last_menu)
except KeyboardInterrupt:
self._close()
def make_moves(self):
self.gamefield.make_moves()
self.repaint()
def change_activity_menu(self, *funcs):
self.is_active = not self.is_active
self.menu.is_active = not self.is_active
if self.is_active:
self.menu.close()
else:
self.menu.show()
for func in funcs:
func()
self.repaint()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape and not self.gamefield.game_over:
self.change_activity_menu(self.menu.show_start_menu)
if self.gamefield.game_over or not self.is_active:
return
if e.key() == Qt.Key_Space:
self.gamefield.bomberman.place_bomb(self.gamefield)
if e.key() == Qt.Key_Tab:
self.gamefield.bomberman.change_type()
else:
try:
self.key_moves[e.key()]()
except KeyError:
pass
def detect_cheat(self, key):
self.cheats_detector.append(key)
if self.cheat[len(self.cheats_detector) - 1] != self.cheats_detector[- 1]:
self.cheats_detector = []
return
if len(self.cheat) == len(self.cheats_detector):
self.qc = QtCheatWindow(self.gamefield.bomberman)
self.cheats_detector = []
def keyReleaseEvent(self, e):
if e.isAutoRepeat():
return
self.detect_cheat(e.key())
try:
self.key_stop_moves[e.key()]()
except KeyError:
pass
self.repaint()
def main():
app = QApplication(sys.argv)
lvl_name, lvl_count, file_ext = enter_info()
qb = QtBomberman(lvl_name, lvl_count, file_ext)
sys.exit(app.exec_())
def enter_info():
print("Enter lvl names dividing by space:")
print("general lvl name, amount of lvls with this name, general file extension")
info = input().split(' ')
try:
lvl_name = info[0]
lvl_count = int(info[1])
file_ext = info[2]
return lvl_name, lvl_count, file_ext
except (IndexError, ValueError):
print("Wrong arguments, please try again")
return enter_info()
if __name__ == "__main__":
main()
|
# Necessary Imports
from django.db import models
from datetime import date
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
from django.contrib.auth.models import User # Blog author or commenter
from ckeditor.fields import RichTextField
# Create your models here.
class BlogAuthor(models.Model):
"""
Model representing a blogger.
"""
user = models.OneToOneField(User, on_delete=models.SET_NULL, null=True)
bio = models.TextField(max_length=400,
help_text="Enter your bio details here.")
class Meta:
ordering = ["user", "bio"]
def get_absolute_url(self):
"""
Return the url to access a particular blog-author instance.
"""
return reverse('posts-by-author', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return self.user.username
class Post(models.Model):
"""
Model representing a blog post.
"""
title = models.CharField(max_length=200)
header_image = models.ImageField(null=True, blank=True)
subtitle = models.CharField(max_length=200)
author = models.ForeignKey(BlogAuthor,
on_delete=models.SET_NULL,
null=True)
# Foreign Key used because a post can only have one author (BlogAuthor), but a blogger can have multiple blog posts.
body = RichTextField(blank=True, null=True)
post_date = models.DateField(default=date.today)
class Meta:
ordering = ["-post_date"]
def get_absolute_url(self):
"""
Returns the url to access a particular blog instance.
"""
return reverse('post-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
class PostComment(models.Model):
"""
Model representing a comment against a blog post.
"""
comment = models.CharField(max_length=400)
author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
# Foreign Key used because BlogComment can only have one author(User), but users can have multiple comments
post_date = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
class Meta:
ordering = ["post_date"]
def __str__(self):
"""
String for representing the Model object.
"""
len_title = 75
if len(self.comment) > len_title:
titlestring = self.comment[:len_title] + '...'
else:
titlestring = self.comment
return titlestring
# Configurations for blog Application
class Configuration(models.Model):
"""
Model representing Site Configurations
"""
name = "Configurations"
index_title = models.CharField(max_length=100)
index_subtitle = models.CharField(max_length=250)
index_header_image = models.ImageField(null=True, blank=True)
index_body = RichTextField(blank=True, null=True)
about_title = models.CharField(max_length=100)
about_subtitle = models.CharField(max_length=250)
about_header_image = models.ImageField(null=True, blank=True)
about_body = RichTextField(blank=True, null=True)
contact_title = models.CharField(max_length=100)
contact_subtitle = models.CharField(max_length=250)
contact_header_image = models.ImageField(null=True, blank=True)
contact_body = RichTextField(blank=True, null=True)
login_background = models.ImageField(null=True, blank=True)
register_background = models.ImageField(null=True, blank=True)
def __str__(self):
"""
String for representing the Model object.
"""
return self.name
|
person = {}
print(person)
print(person == {})
# person = {
# "name" : "le duc viet"
# }
# person = {
# "name" : "le duc viet",
# "age" : 16,
# }
# print(person)
# print(person == {})
# person = {
# "name" : "le duc viet",
# "age" : 16,
# }
# print(person)
# person["status"]= "single"
# print(person)
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by John DiBaggio on 2018-07-28
Find the Reverse Complement of a String
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'. Given a nucleotide p, we denote its complementary nucleotide as p*. The reverse complement of a DNA string Pattern = p1…pn is the string Pattern* = pn*…p1*, formed by taking the complement of each nucleotide in Pattern and then reversing the resulting string.
For example, the reverse complement of Pattern = "GTCA" is Pattern* = "TGAC".
Reverse Complement Problem
Find the reverse complement of a DNA string.
Given: A DNA string Pattern.
Return: Pattern*, the reverse complement of Pattern.
Sample Dataset
AAAACCCGGT
Sample Output
ACCGGGTTTT
Execute like:
python src/ba1c.py data/ba1c.txt output/ba1c.txt
"""
__author__ = 'johndibaggio'
import sys
import fileinput
COMPLEMENTARY_NUCLEOTIDE_MAP = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
def complement(dna):
"""
Return the complement of DNA string
:param dna: DNA string
:type dna: str
:return: complement of DNA string
:rtype: str
"""
dna_complement = [None]*len(dna)
i = 0
for nb in dna:
if nb in COMPLEMENTARY_NUCLEOTIDE_MAP:
dna_complement[i] = COMPLEMENTARY_NUCLEOTIDE_MAP[nb]
i += 1
else:
raise ValueError("Invalid nucleotide base \"{}\" in DNA string \"{}\"".format(nb, dna))
return "".join(dna_complement)
def reverse_complement(dna):
"""
Return the reverse complement of a DNA string
:param dna: DNA string
:type dna: str
:return: reverse complement of the DNA string
:rtype: str
"""
dna_reverse_complement = [None] * len(dna)
i = len(dna) - 1
for nb in dna:
if nb in COMPLEMENTARY_NUCLEOTIDE_MAP:
dna_reverse_complement[i] = COMPLEMENTARY_NUCLEOTIDE_MAP[nb]
i -= 1
else:
raise ValueError("Invalid nucleotide base \"{}\" in DNA string \"{}\"".format(nb, dna))
return "".join(dna_reverse_complement)
argv = list(sys.argv)
input_dna = ""
for line in fileinput.input(argv[1]):
if len(line) > 0:
input_dna += line.replace('\n', '')
try:
output_reverse_complement = reverse_complement(input_dna)
except ValueError as err:
output_reverse_complement = ""
print(err)
print("The following is the reverse complement of DNA string \"{}\":\n\"{}\"".format(input_dna,
output_reverse_complement))
output_file = open(argv[2], 'w+')
output_file.write(output_reverse_complement)
output_file.close()
|
import coreir
import os
def test_genargs():
context = coreir.Context()
mod = context.load_from_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), "genargs.json"))
for instance in mod.definition.instances:
assert instance.module.generator_args["width"].value == 4
if __name__ == "__main__":
test_genargs()
|
#!/usr/bin/env python
# Script by Steven Grove (@sigwo)
# www.sigwo.com
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Date: 09-01-13
import sys
import os
#from blessings import Terminal
# Going to set up colors for a future project, not completed yet
#term = Terminal()
def main():
pass
quit = ''
while quit != 'q':
# Get address string and CIDR string from command line
xaddr = raw_input("\n" + "IP address: ") # need to validate input of IP address
xcidr = raw_input("CIDR notation, NO / mark!: ")
addr = xaddr.split('.')
cidr = int(xcidr)
# Initialize the netmask and calculate based on CIDR mask
mask = [0, 0, 0, 0]
for i in range(cidr):
mask[i/8] = mask[i/8] + (1 << (7 - i % 8))
# Initialize net and binary and netmask (net) with addr to get network
net = []
for i in range(4):
net.append(int(addr[i]) & mask[i])
# Duplicate net into broad array, gather host bits, and generate broadcast
broad = list(net)
brange = 32 - cidr
for i in range(brange):
broad[3 - i/8] = broad[3 - i/8] + (1 << (i % 8))
# This gives you usable hosts for the given subnet
xhost = 2 ** brange - 2
host = "{:,}".format(xhost)
# Initialize o for wildcard mask (imask) with broadcast - net
o = [0, 0, 0, 0]
for i in range(4):
o[i] = broad[i] - net[i]
# This gives the wildcard mask for the given subnet
imask = []
for i in range (4):
imask.append(int(o[i]) & broad[i])
# Print information, mapping integer lists to strings for easy printing
print "\n" + 'Here are your results:'
print "Address: " , xaddr
print "Netmask: " , ".".join(map(str, mask))
print "Wildcard Mask: " , ".".join(map(str, imask))
print "Network: " , ".".join(map(str, net))
print "Usable IPs: " , host
print "Broadcast: " , ".".join(map(str, broad))
quit = raw_input("\n" + "Press Enter to try another IP or q to exit...")
|
#!/usr/bin/env python3
from ipaddress import IPv6Address
import sys
def mcast_mac(address):
sixmcast = IPv6Address(address)
x = int(sixmcast) & 0xffffffff
mac = x + 0x333300000000
return ( "{}:{}:{}:{}:{}:{}".format(hex(mac)[2:][0:2],
hex(mac)[2:][2:4],
hex(mac)[2:][4:6],
hex(mac)[2:][6:8],
hex(mac)[2:][8:10],
hex(mac)[2:][10:12]))
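# Example (added note): the multicast address "ff02::1:ff00:1" maps to "33:33:ff:00:00:01",
# i.e. the fixed 33:33 prefix followed by the low 32 bits of the IPv6 address.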
print(mcast_mac(sys.argv[1]))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 12:15:48 2017
@author: Elliott
"""
#!/usr/bin/env python
import sys
import json
from nltk.corpus import wordnet as wn
## GETTING INFO FROM TERMINAL (JSON)
## sys.argv is a list where the 1st item is the path of this file; the 2nd through nth are the passed-in arguments
#for string in sys.argv:
# print(string) # if(len(wn.synsets(mystring)) > 0)
def syns(mystring):
a = wn.synsets(mystring)
#print(a)
if (len(a) > 0):
return a[0]
else:
return wn.synsets("crepuscular")[0]
def compare(word1, word2):
result = syns(word1).path_similarity(syns(word2))
if (isinstance(result, float)):
return result
else:
return .001
## INTERPRETING JSON FROM FILE
with open('data.json') as data_file:
data = json.load(data_file)
## EXAMPLE DATA
newdata = """
{
"user": {
"_id": "0001",
"selfEntitySalience": {"ponies": 0.55, "rainbows": 0.36, "andrew": 0.10},
"selfCategories": {"technology": 0.61, "news": 0.53, "animals": 0.22},
"matchEntitySalience": {"iPhone": 0.62, "rich": 0.52, "hats": 0.21},
"matchCategories": {"fashion": 0.67, "money": 0.57, "technology": 0.27}
},
"potentialMatches": [
{
"_id": "0002",
"selfEntitySalience": {"ponies": 0.55, "rainbows": 0.36, "andrew": 0.10},
"selfCategories": {"technology": 0.61, "news": 0.53, "animals": 0.22},
"matchEntitySalience": {"iPhone": 0.62, "rich": 0.52, "hats": 0.21},
"matchCategories": {"fashion": 0.67, "money": 0.57, "technology": 0.27}
},
{
"_id": "0003",
"selfEntitySalience": {"ponies": 0.55, "rainbows": 0.36, "andrew": 0.10},
"selfCategories": {"technology": 0.61, "news": 0.53, "animals": 0.22},
"matchEntitySalience": {"iPhone": 0.62, "rich": 0.52, "hats": 0.21},
"matchCategories": {"fashion": 0.67, "money": 0.57, "technology": 0.27}
},
{
"_id": "0004",
"selfEntitySalience": {"ponies": 0.55, "rainbows": 0.36, "andrew": 0.10},
"selfCategories": {"technology": 0.61, "news": 0.53, "animals": 0.22},
"matchEntitySalience": {"iPhone": 0.62, "rich": 0.52, "hats": 0.21},
"matchCategories": {"fashion": 0.67, "money": 0.57, "technology": 0.27}
}
]
}
"""
import numpy
SCORE = numpy.zeros(len(data['potentialMatches'])) #array of scores, one for each potential match
myIDs = [] #all scores start at zero
bestMatches = []
for i in range(len(data['potentialMatches'])):
ith_person = data['potentialMatches'][i] #ITERATING OVER OTHER PEOPLE IN DB
myIDs.append(ith_person['_id'])
highest = 0
secondhighest = 0
thirdhighest = 0
bestkeywords = []
for category_string in data['user']['selfCategories']: #for each category that this person has,
#if that category is wanted by a person in the database,
if 'matchCategories' in ith_person and category_string in ith_person['matchCategories']:
x = data['user']['selfCategories'][category_string] * ith_person['matchCategories'][category_string]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
elif x >= thirdhighest:
thirdhighest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
"""
if x > highest:
usefulFunc1(thirdhighest, secondhighest, string)
usefulFunc2(secondhighest, dict3, string)
usefulFunc3(dic3, val, string
elif x > secondhighest:
usefulFunc2(thirdhighest, secondhighest, category_string)
usefulFunc3(secondhighest , x)
elif x > thirdhighest:
thirdhighest = x
"""
for category_string in data['user']['matchCategories']: #for each category that this person wants,
if 'selfCategories' in ith_person and category_string in ith_person['selfCategories']:
x = data['user']['matchCategories'][category_string] * ith_person['selfCategories'][category_string]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
elif x >= thirdhighest:
thirdhighest = x
if (category_string not in bestkeywords):
bestkeywords.append(category_string)
for my_salience in data['user']['selfEntitySalience']: #for each entity that this person has,
if len(wn.synsets(my_salience)) == 0:
if my_salience in ith_person['matchEntitySalience']:
x = data['user']['selfEntitySalience'][my_salience] * ith_person['matchEntitySalience'][my_salience]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif x >= thirdhighest:
thirdhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif len(wn.synsets(my_salience)) != 0:
for my_salience2 in ith_person['matchEntitySalience']: #if that category is had by a person in the database,
x = compare(my_salience, my_salience2) * data['user']['selfEntitySalience'][my_salience] * ith_person['matchEntitySalience'][my_salience2]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
elif x >= thirdhighest:
thirdhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
for my_salience in data['user']['matchEntitySalience']: #for each category that this person wants,
if len(wn.synsets(my_salience)) == 0:
if my_salience in ith_person['selfEntitySalience']:
x = data['user']['matchEntitySalience'][my_salience] * ith_person['selfEntitySalience'][my_salience]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif x >= thirdhighest:
thirdhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience)
elif len(wn.synsets(my_salience)) != 0:
for my_salience2 in ith_person['selfEntitySalience']: #if that category is had by a person in the database,
x = compare(my_salience, my_salience2) * data['user']['matchEntitySalience'][my_salience] * ith_person['selfEntitySalience'][my_salience2]
SCORE[i] += x
if x >= highest:
thirdhighest = secondhighest
secondhighest = highest
highest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
elif x >= secondhighest:
thirdhighest = secondhighest
secondhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
elif x >= thirdhighest:
thirdhighest = x
if (my_salience not in bestkeywords):
bestkeywords.append(my_salience2)
#for keyword in bestkeywords:
# print(keyword)
if len(bestkeywords) > 2:
bestMatches.append([bestkeywords[-1],bestkeywords[-2],bestkeywords[-3]])
elif len(bestkeywords) > 1:
bestMatches.append([bestkeywords[-1],bestkeywords[-2]])
elif len(bestkeywords) > 0:
bestMatches.append([bestkeywords[-1]])
myDict = dict(zip(myIDs, SCORE))
def point5Round(afloat):
#round to the nearest multiple of 0.5
if afloat % 1 < .25:
return afloat - afloat % 1
elif afloat % 1 < .5:
return afloat + (.5 - afloat % 1)
elif afloat % 1 < .75:
return afloat - afloat % .5
else:
#the original condition was "elif afloat % 1 > .75", which silently returned None when the fraction was exactly .75
return afloat + (1 - afloat % 1)
Matrix = [[0 for x in range(3)] for y in range(len(myDict))]
#Normalization of data:
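#Worked example of the normalization below (assuming raw scores are roughly in [0, 1]):
#  raw score 0.49 -> sqrt = 0.7   -> *10 = 7.0  -> point5Round -> 7.0
#  raw score 0.30 -> sqrt ~ 0.548 -> *10 ~ 5.48 -> point5Round -> 5.5
#  anything that normalizes above 10 is clamped to 10.0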
rownum = 0
for key in myDict:
# print(key + ":" + str(myDict[key]))
myDict[key] = point5Round(10 * (myDict[key] * 1) ** (.5))
if myDict[key] > 10:
myDict[key] = 10.0
Matrix[rownum][0] = key
Matrix[rownum][1] = myDict[key]
if (len(bestMatches) > rownum):
Matrix[rownum][2] = bestMatches[rownum]
rownum += 1
"""
Matrix looks like:
[['0001', 9, {'money','fun','tech'}]
[]
[]
]
"""
def sort2DArray(TwoDArray):
res = sorted(TwoDArray, key=lambda x: -x[1])
return res
outfile = open("results.txt", 'w') #renamed from "file", which shadows the Python 2 builtin
outfile.write(json.dumps(sort2DArray(Matrix)))
outfile.close()
print("stored in results.txt")
"""
stringRes = (str(sort2DArray(Matrix)))
realString = ""
for char in stringRes:
if char == '(':
realString += '['
elif char == ')':
realString += ']'
else:
realString += char
#print(" ")
#print("HERE IS THE RESULT:")
# print(realString)
#print(stringRes)
"""
|
# Generated by Django 3.1.4 on 2020-12-27 12:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Game', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='asd',
),
]
|
# -*- coding: utf-8 -*-
class Solution:
def missingNumber(self, nums):
return len(nums) * (len(nums) + 1) // 2 - sum(nums)
if __name__ == "__main__":
solution = Solution()
assert 2 == solution.missingNumber([3, 0, 1])
assert 8 == solution.missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1])
|
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
from tqdm import tqdm
from sklearn.svm import SVC
def rand_sampling(x, var_hist):
if np.isnan(x):
rand_idx = np.random.choice(len(var_hist))
x = var_hist.iloc[rand_idx][0]
return x
def blank_2_num_samp(T1D_features):
"""
:param T1D_features: Pandas series of T1D features
:return: A pandas dataframe containing the "clean" features
"""
T1D_features = T1D_features.replace(r'^\s*$', np.nan, regex=True) #replace blanks with NaN
T1D_features = T1D_features.replace('No', 0, regex=True) #replace 'No' with 0
T1D_features = T1D_features.replace('Yes', 1.0, regex=True) #replace 'Yes' with 1
T1D_features = T1D_features.replace('Negative', 0, regex=True) #replace 'Negative' with 0
T1D_features = T1D_features.replace('Positive', 1.0, regex=True) #replace 'Positive' with 1
T1D_features = T1D_features.replace('Male', 2.0, regex=True) #replace 'Male' with 2
T1D_features = T1D_features.replace('Female', 3.0, regex=True) #replace 'Female' with 3
T1D_features_clean = pd.DataFrame()
for key in T1D_features.keys():
feat=[key]
T1D_no_NaN=T1D_features[feat].loc[:].dropna() #no Nan's
new = T1D_features[feat].applymap(lambda x: rand_sampling(x, T1D_no_NaN))
new1 = pd.DataFrame.from_dict(new)
T1D_features_clean[feat] = new1
return T1D_features_clean
def train_test(x_train, x_test, y_train, y_test, model):
clf = model.fit(x_train, y_train)
y_pred_val = clf.predict(x_test)
ROC_log = roc_auc_score(y_test, y_pred_val)
return ROC_log
def k_fold_CV(X, Y, linear, penalty, kernel, lmbda, n_splits, solver):
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=10)
m_x_train, m_x_val, m_y_train, m_y_val = train_test_split(X, Y, test_size =0.2, random_state = 5, stratify=Y)
ROC_lamb = []
for idx, lmb in enumerate(lmbda):
C = 1/lmb
if linear:
model = LogisticRegression(random_state=5, penalty=penalty, C = C, max_iter=1000000, solver=solver)
else:
model = SVC(random_state=5, C = C, kernel = kernel, degree=3)
print(model)
with tqdm(total=n_splits, file=sys.stdout, position=0, leave=True) as pbar:
h = 0 # index per split per lambda
ROC = []
for train_index, val_index in skf.split(m_x_train, m_y_train):
clf = []
pbar.set_description('%d/%d lambda values, processed folds' % ((1 + idx), len(lmbda)))
pbar.update()
#--------------------------Implement your code here:-------------------------------------
x_train_fold = m_x_train[train_index,:]
y_train_fold = m_y_train[train_index]
x_test_fold = m_x_train[val_index,:]
y_test_fold = m_y_train[val_index]
ROC.append(train_test(x_train_fold, x_test_fold, y_train_fold, y_test_fold, model)) #append each fold's score so np.mean(ROC) below averages across folds rather than using only the last fold
#----------------------------------------------------------------------------------------
h += 1
ROC_lamb.append(np.mean(ROC))
model = []
return ROC_lamb
def best_estimator(x, y, model, n_splits):
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
if model == 'linear':
lmbda = np.linspace(1e-5, 1, num=10)
skf = StratifiedKFold(n_splits=n_splits, random_state=10, shuffle=True)
#C = 1/best_lambda
solver = 'liblinear'
log_reg = LogisticRegression(random_state=5, C = 1/lmbda, max_iter=1000000, solver=solver)
pipe = Pipeline(steps=[('logistic', log_reg)])
clf = GridSearchCV(estimator=pipe, param_grid={'logistic__C': 1/lmbda, 'logistic__penalty': ['l1', 'l2']},
scoring=['accuracy','f1','precision','recall','roc_auc'], cv=skf,
refit='roc_auc', verbose=3, return_train_score=True)
clf.fit(x, y)
lin_best = clf.best_params_
return lin_best
if model == 'svm':
lmbda = np.linspace(1e-5, 1, num=10)
C = 1/lmbda
svc = SVC(random_state=5, C = C, probability=True)
skf = StratifiedKFold(n_splits=n_splits, random_state=10, shuffle=True)
pipe = Pipeline(steps=[ ('svm', svc)])
svm_nonlin = GridSearchCV(estimator=pipe,
param_grid={'svm__kernel':['rbf','poly'], 'svm__C':C, 'svm__degree':[3]},
scoring=['accuracy','f1','precision','recall','roc_auc'],
cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
svm_nonlin.fit(x, y)
best_svm_nonlin = svm_nonlin.best_params_
return best_svm_nonlin
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core import serializers
# Create your views here.
from .models import timeSheetEntry
from .models import User
from dateutil.relativedelta import relativedelta
from dateutil import parser
@login_required
@csrf_exempt
def timesheet(request):
sql = "SELECT * FROM csb_app_timesheetentry WHERE user_id = " + str(request.user.id)
if request.GET.get('search'):
sql += " AND comment LIKE '%" + request.GET.get('search') + "%'"
print(sql)
entries = timeSheetEntry.objects.raw(sql)
return render(request, 'timesheet.html', {"entries": entries})
@login_required
def newpw(request):
from django.contrib.auth.hashers import make_password
if request.method == 'POST':
password = request.POST.get('password')
user = User.objects.get(username=request.user)
user.set_password(password)
user.save()
return redirect('/')
return render(request, 'newpw.html')
@login_required
@csrf_exempt
def newentry(request):
if request.method == 'POST':
user = request.user
start = request.POST.get('start')
end = request.POST.get('end')
print(start)
print(end)
comment = request.POST.get('comment')
time = relativedelta(parser.parse(end), parser.parse(start)).hours
print(relativedelta(parser.parse(end), parser.parse(start)))
completed = True #Only completed entries supported :S
timeSheetEntry.objects.create(
user=user,
start=start,
end=end,
time=time,
comment=comment,
completed=completed)
return redirect('/')
return render(request, 'newentry.html')
|
'''
Created on Oct 19, 2010
@author: Jason Huang
'''
import bson
import pymongo
import datetime
import tornado.web
import simplejson
import MongoEncoder.MongoEncoder
from Users.Message import MessageHandler
from BrowseTripHandler import BaseHandler
from Auth.AuthHandler import ajax_login_authentication
class MyTripsHandler(BaseHandler):
@ajax_login_authentication
def get(self):
response = self.syncdb.trips.find({'trip_id':{'$in':self.current_user['trips']}}).sort("published", pymongo.DESCENDING)
self.render("userentry.html", check=True, custom_user = self.current_user, trips = response)
class MergeTripGroupHandler(BaseHandler):
@ajax_login_authentication
def post(self):
trip_id = self.get_argument('trip_id')
#splitter = re.compile(r'test')
group_ids = self.get_argument('group_ids')
#main_group_id = self.get_argument('group_id')
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
groups = trip['groups']
main_group = groups[0]
dates = []
#for group in groups:
# if group['group_id'] == bson.ObjectId(main_group_id):
# main_group = group
for dest_place in main_group['dest_place']:
dates.append(dest_place['date'])
for index, _group in enumerate(groups):
if str(_group['group_id']) not in group_ids or index ==0:
continue;
print _group['group_id']
for member in _group['members']:
main_group['members'].append(member)
for dest_place in _group['dest_place']:
if dest_place['date'] not in dates:
main_group['dest_place'].append(dest_place)
del groups[index]
self.syncdb.trips.save(trip)
self.write('success')
class RemoveTripGroupHandler(BaseHandler):
@ajax_login_authentication
def post(self):
trip_id = self.get_argument('trip_id')
group_id = self.get_argument('group_id')
if group_id == 'new':
self.write('success')
return
groups = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})['groups']
if self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id), 'groups.group_id':bson.ObjectId(group_id)}):
member_delta = 0
for group in groups:
if group['group_id'] == bson.ObjectId(group_id):
member_delta = 0 - len(group['members']) #renamed from "len", which shadowed the builtin
self.syncdb.trips.update({'trip_id':bson.ObjectId(trip_id)},{'$pull':{ 'groups': {'group_id':bson.ObjectId(group_id)}}})
self.syncdb.trips.update({'trip_id':bson.ObjectId(trip_id)},{'$inc':{'member_count': member_delta}})
self.write('success')
class AddTripGroupHandler(BaseHandler):
@ajax_login_authentication
def post(self):
trip_id = self.get_argument('trip_id')
group_id = self.get_argument('group_id')
user_id = self.get_argument('user_id')
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
_groups = trip['groups']
_user = self.syncdb.users.find_one({'user_id':bson.ObjectId(user_id)})
if _user['slug'] not in trip['expense']:
trip['expense'][_user['slug']]=[]
for group in _groups:
for index, user in enumerate(group['members']):
if user['user_id'] == bson.ObjectId(user_id):
del group['members'][index]
break
if group_id == 'new':
group_id = bson.ObjectId()
group_template = _groups[0].copy()
group_template['group_id']=bson.ObjectId(group_id)
group_template['members'] = []
group_template['members'].append(_user)
_groups.append(group_template)
else:
# add user to existed group
for index, group in enumerate(_groups):
if group['group_id'] == bson.ObjectId(group_id):
_groups[index]['members'].append(_user)
break
self.syncdb.trips.save(trip)
self.write('success')
class GetTripGroupForMergeHandler(BaseHandler):
def post(self):
trip_id = self.get_argument('trip_id')
groups = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})['groups']
self.write(unicode(simplejson.dumps(groups, cls=MongoEncoder.MongoEncoder.MongoEncoder)))
class GetTripGroupForMapHandler(BaseHandler):
def get(self, group_id, trip_id):
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
group = None
if group_id == 'default' or group_id =='new':
group = trip['groups'][0]
else:
for _group in trip['groups']:
if _group['group_id'] == bson.ObjectId(group_id):
group = _group
break
if group == None:
return
else:
self.write(unicode(simplejson.dumps(group['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)))
class GetTripGroupForSiteHandler(BaseHandler):
def get(self, group_id, trip_id):
if group_id == 'default' or group_id =='new':
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
if trip:
group = trip['groups'][0]
for site in group['dest_place']:
self.write(self.render_string("Sites/trip_site.html", site = site, singletrip = trip ) + "||||")
else:
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
if trip:
for _group in trip['groups']:
if _group['group_id'] == bson.ObjectId(group_id):
for site in _group['dest_place']:
self.write(self.render_string("Sites/trip_site.html", site = site, singletrip = trip))
break
class AddTripTagHandler(BaseHandler):
@ajax_login_authentication
def post(self):
trip_id = self.get_argument('trip_id')
tag = self.get_argument('tag')
action = self.get_argument('action')
if action == 'remove':
self.syncdb.trips.update({'trip_id': bson.ObjectId(trip_id)}, {'$pull':{'tags': tag}})
else:
self.syncdb.trips.update({'trip_id': bson.ObjectId(trip_id)}, {'$addToSet':{'tags': tag}})
class LikeTripHandler(BaseHandler):
@ajax_login_authentication
def post(self):
id = self.get_argument('trip_id')
check = self.syncdb.users.find_one({'user_id': bson.ObjectId(self.current_user['user_id']), 'like_trip': bson.ObjectId(id)})
if check == None:
self.syncdb.users.update({'user_id': bson.ObjectId(self.current_user['user_id'])}, {'$addToSet':{'like_trip': bson.ObjectId(id)}})
self.syncdb.trips.update({'trip_id': bson.ObjectId(id)}, {'$inc':{'rating': 1}, '$addToSet':{'user_like': bson.ObjectId(self.current_user['user_id'])}})
else:
self.syncdb.users.update({'user_id': bson.ObjectId(self.current_user['user_id'])}, {'$pull':{'like_trip': bson.ObjectId(id)}})
self.syncdb.trips.update({'trip_id': bson.ObjectId(id)}, {'$inc':{'rating': -1}, '$pull':{'user_like': bson.ObjectId(self.current_user['user_id'])}})
class SaveTripHandler(BaseHandler):
@ajax_login_authentication
def post(self):
id = self.get_argument('trip_id')
check = self.syncdb.users.find_one({'user_id': bson.ObjectId(self.current_user['user_id']), 'save_trip': bson.ObjectId(id)})
if check == None:
self.syncdb.users.update({'user_id': bson.ObjectId(self.current_user['user_id'])}, {'$addToSet':{'save_trip': bson.ObjectId(id)}})
else:
self.syncdb.users.update({'user_id': bson.ObjectId(self.current_user['user_id'])}, {'$pull':{'save_trip': bson.ObjectId(id)}})
class GetTrips(BaseHandler):
def get(self):
members = []
latest_trip_ids = self.syncdb.trips.find().limit(20).sort("published", pymongo.DESCENDING)
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
print("true")
break
latest_trip_id['html'] = self.render_string("Module/tripinexportlist.html", trip = latest_trip_id) + "||||"
#self.write(json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder, ensure_ascii=False, indent=0))
self.write(latest_trip_id['html'])
class SaveTrips(BaseHandler):
slug = None
#@tornado.web.asynchronous
@ajax_login_authentication
def post(self):
tripStart = self.get_argument("startPlace")
tripDest = self.get_argument("endPlace")
tripStartPosition = self.get_argument("startPosition")
tripDestPosition = self.get_argument("endPosition")
tripPath = self.get_argument("encodedPolyline")
self.slug = self.get_argument("slug")
#wayPoints = self.get_argument("wayPoints")
#wayPoints = [1, 2, 3]
#===================================================================
# if not wayPoints:
# self.db.trips.update({'slug':self.slug}, {'$set':{'start_place':tripStart, 'dest_place':tripDest, 'dest_place':tripDest, 'trip_path':tripPath}}, callback=self._save_callback)
# else:
# self.db.trips.update({'slug':self.slug}, {'$set':{'start_place':tripStart, 'dest_place':tripDest, 'dest_place':tripDest, 'trip_path':tripPath}, '$pushAll':{'way_points': wayPoints} }, callback=self._save_callback)
#===================================================================
#self.db.trips.update({'slug':self.slug}, {'$set':{'start_place':tripStart, 'dest_place':tripDest, 'start_place_position':tripStartPosition, 'dest_place_position':tripDestPosition,'trip_path':tripPath}}, callback=self._save_callback)
self.syncdb.trips.update({'slug':self.slug}, {'$set':{'start_place':tripStart, 'dest_place':tripDest, 'start_place_position':tripStartPosition, 'dest_place_position':tripDestPosition,'trip_path':tripPath}})
self.redirect("/trip/" + str(self.slug))
def _save_callback(self, response, error):
if error:
raise tornado.web.HTTPError(500)
else:
self.redirect("/trip/" + str(self.slug))
#===================================================================
# json = tornado.escape.json_decode(response.body)
# self.write("Fetched %d entries from the FriendFeed API" %
# len(json['entries']))
# self.finish()
#===================================================================
class SubscribeTrip(BaseHandler):
@ajax_login_authentication
def get(self, trip_id):
check = False
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
for group in trip['groups']:
for member in group['members']:
if member['user_id'] == self.current_user['user_id']:
check = True
if not check:
trip['groups'][0]['members'].append(self.current_user)
trip['member_count'] += 1
self.syncdb.trips.save(trip)
self.write(trip_id)
class UnsubscribeTrip(BaseHandler):
@ajax_login_authentication
def get(self, trip_id):
trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
for index, group in enumerate(trip['groups']):
for _index, member in enumerate(group['members']):
if member['user_id'] == self.current_user['user_id']:
del trip['groups'][index]['members'][_index]
trip['member_count'] -= 1
self.syncdb.trips.save(trip)
self.write('success')
class ShowNewTrips(BaseHandler):
def post(self):
members = []
latest_trip_ids = self.syncdb.trips.find({"privacy": {"$ne": 1}}).limit(20).sort("published", pymongo.DESCENDING)
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
print("true")
break
latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id) + "||||"
#self.write(json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder, ensure_ascii=False, indent=0))
self.write(latest_trip_id['html'])
def get(self):
image_info=[]
dest_places = []
""" Get RANDOM trips to show in the map"""
trips = self.syncdb.trips.find().limit(10)
if trips.count() > 0:
for trip in trips:
trip_user = self.syncdb.users.find_one({'user_id': bson.ObjectId(trip['owner_id'])})
if (trip_user):
image_info.append(trip['title']+';'+trip_user['picture'] +';'+'/trip/'+trip['slug'])
dest_places.append(unicode(simplejson.dumps(trip['groups'][0]['dest_place'], cls=MongoEncoder.MongoEncoder.MongoEncoder)))
""" Get latest trips to show in the list"""
latest_trip_ids = self.syncdb.trips.find().sort("published", pymongo.DESCENDING).limit(10)
top_shares = self.syncdb.users.find().sort("trip_count", pymongo.DESCENDING).limit(10)
top_guides = self.syncdb.guides.find().sort("rating", pymongo.DESCENDING).limit(5)
_trips = []
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
if len(latest_trip_id['groups'])>0:
members = latest_trip_id['groups'][0]['members']
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
break
#latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id)
_trips.append(latest_trip_id)
self.render("newbeforesignin.html", guides=top_guides, dest_places = dest_places, trips=trips, image_info=image_info, latest_trip_ids=_trips, top_shares = top_shares)
class ShowMyTrips(BaseHandler):
def post(self):
members = []
t = datetime.datetime.now()
latest_trip_ids =[]
for trip_id in self.current_user['trips']:
trip = self.syncdb.trips.find_one({"trip_id":bson.ObjectId(trip_id)})
if trip:
latest_trip_ids.append(trip)
latest_trip_ids.reverse()
if len(latest_trip_ids) > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
#print("true")
break
latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id) + "||||"
#self.write(json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder, ensure_ascii=False, indent=0))
self.write(latest_trip_id['html'])
def get(self):
members = []
t = datetime.datetime.now()
latest_trip_ids =[]
for trip_id in self.current_user['trips']:
trip = self.syncdb.trips.find_one({"trip_id":bson.ObjectId(trip_id)})
if trip:
latest_trip_ids.append(trip)
latest_trip_ids.reverse()
if len(latest_trip_ids) > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
#print("true")
break
top_shares = self.syncdb.users.find().sort("trip_count", pymongo.DESCENDING).limit(10)
top_guides = self.syncdb.guides.find().sort("rating", pymongo.DESCENDING).limit(5)
image_info=[]
dest_places = []
""" Get RANDOM trips to show in the map"""
trips = []
self.render("newbeforesignin.html", guides=top_guides, dest_places = dest_places, trips=trips, image_info=image_info, latest_trip_ids=latest_trip_ids, top_shares = top_shares)
class ShowHotTrips(BaseHandler):
def get(self):
members = []
t = datetime.datetime.now()
latest_trip_ids = self.syncdb.trips.find({"end_date": {"$gt": t}}).sort("members", pymongo.DESCENDING).limit(20)
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
#print("true")
break
latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id) + "||||"
#self.write(json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder, ensure_ascii=False, indent=0))
self.write(latest_trip_id['html'])
class ShowEndTrips(BaseHandler):
def get(self):
members = []
t = datetime.datetime.now()
latest_trip_ids = self.syncdb.trips.find({"end_date": {"$lt": t}}).sort("published", pymongo.DESCENDING).limit(20)
if latest_trip_ids.count() > 0:
for latest_trip_id in latest_trip_ids:
latest_trip_id['check_join'] = False
for _group in latest_trip_id['groups']:
for _member in _group['members']:
members.append(_member)
if self.current_user:
for member in members:
if member['user_id'] == self.current_user['user_id']:
latest_trip_id['check_join'] = True
print("true")
break
latest_trip_id['html'] = self.render_string("Module/trip.html", trip = latest_trip_id) + "||||"
#self.write(json.dumps(latest_trip_id, cls=MongoEncoder.MongoEncoder, ensure_ascii=False, indent=0))
self.write(latest_trip_id['html'])
class ProcessTripRequestHandler(MessageHandler):
def post(self):
trip_slug = self.get_argument('trip_slug')
user_id = self.get_argument('user_id')
user = self.syncdb.users.find_one({"user_id":bson.ObjectId(user_id)})
type = self.get_argument('type')
if type == 'join':
trip = self.syncdb.trips.find_one({'slug':trip_slug})
print trip_slug
if trip != None:
if self.current_user not in trip['groups'][0]['members']:
trip['groups'][0]['members'].append(self.current_user)
trip['member_count'] += 1
self.syncdb.trips.save(trip)
self.Send(self.current_user['user_id'], user_id, user['username']+" has accepted your trip request.", 'system_message')
self.write('accepted')
else:
self.Send(self.current_user['user_id'], user_id, user['username']+" failed to accept your trip request.", 'system_message')
self.write('failed')
elif type == 'decline':
self.Send(self.current_user['user_id'], user_id, user['username']+" has declined your trip request.", 'system_message')
self.write('declined')
self.finish()
|
import pandas as pd
import sqlite3
# --- LOAD THE SOURCE DATA SETS TO DATA FRAMES
# get list prices data from github
df_list_prices = pd.read_csv(
'https://raw.githubusercontent.com/gygergely/PythonPandas/master/00_src_files/list_prices.csv',
parse_dates=['valid_from', 'valid_to'],
dtype={'product_id': 'str',
'list_price': 'float64'})
# get transactions data from github
df_trans = pd.read_csv(
'https://raw.githubusercontent.com/gygergely/PythonPandas/master/00_src_files/transactions.csv',
parse_dates=['sales_date'],
dtype={'sold_qty': 'int64',
'sales_price': 'float64'})
# --- QUICK LOOK AT THE DATA TYPES OF THE DATA FRAMES
print(df_list_prices.info())
print(df_trans.info())
# --- CREATE AN SQLITE DATABASE READ THE DATA AND LOOK-UP THE LIST PRICES
print(df_trans.shape)
# create an sqlite db in memory
conn = sqlite3.connect(':memory:')
# read the data to db tables
df_trans.to_sql('trans', conn, index=False)
df_list_prices.to_sql('list_prices', conn, index=False)
# sql query to run
sql_query = '''
SELECT
trans.*,
list_prices.list_price
FROM
trans
LEFT JOIN list_prices
ON trans.product_id = list_prices.product_id
AND (list_prices.valid_from <= trans.sales_date
AND list_prices.valid_to >= trans.sales_date)
'''
# re-read the sql query results to df_trans data frame
df_trans = pd.read_sql_query(sql_query, conn)
print(df_trans.shape)
# Print the first 10 items from transactions data frame
print(df_trans.head(10))
|
from setuptools import setup, find_packages
with open('README.rst','r') as f:
long_desc = f.read()
setup(
name='wikitable',
version='0.0.6',
description='Converts Wikipedia tables to dataframes and CSVs',
py_modules=["wikitable"],
packages=find_packages(),
install_requires=[
'requests',
'pandas',
'beautifulsoup4'],
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',],
long_description = long_desc,
long_description_content_type = 'text/x-rst',
url = 'https://github.com/jaredmeyers/wikitable',
author = 'Jared Meyers',
author_email = 'jaredehren@verizon.net',
)
|
#!/usr/bin/python
import sys
import eventlet
import shlex
import os
import traceback
import subprocess
ircbot = eventlet.import_patched('ircbot')
irclib = eventlet.import_patched('irclib')
from irclib import irc_lower, ServerConnectionError, ip_quad_to_numstr, ip_numstr_to_quad, nm_to_n, is_channel
import commands
import logging
log = logging.getLogger(__name__)
class SSLIrcBot(ircbot.SingleServerIRCBot):
def _connect(self):
"""[Internal]"""
password = None
ssl = False
if len(self.server_list[0]) > 2:
ssl = self.server_list[0][2]
if len(self.server_list[0]) > 3:
password = self.server_list[0][3]
try:
self.connect(self.server_list[0][0],
self.server_list[0][1],
self._nickname,
password,
ircname=self._realname,
ssl=ssl)
except ServerConnectionError:
pass
class RelengBot(SSLIrcBot):
def __init__(self, start_channels, logchannel, nickname, server, port=6667, ssl=False):
SSLIrcBot.__init__(self, [(server, port, ssl)], nickname, nickname)
self.start_channels = start_channels
self.logchannel = logchannel
self.commands = {
'disconnect': commands.Disconnect(self),
'die': commands.Die(self),
'ping': commands.Ping(self),
'reboot': commands.Reboot(self),
'join': commands.Join(self),
'leave': commands.Leave(self),
'dance': commands.Dance(self),
}
self.watchers = [
commands.BugWatcher(self),
commands.HungSlaveWatcher(self),
]
self.periodic_commands = [
(commands.HungSlaveChecker(self), 3600),
]
self._file_ages = {}
self._monitor_files()
def _monitor_files(self):
log.info("checking files")
import sys
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
# Get the age of the file
filename = module.__file__
mtime = os.path.getmtime(filename)
if filename.endswith(".pyc"):
# Check the .py file too
sourcefile = filename[:-4] + ".py"
if os.path.exists(sourcefile):
mtime = os.path.getmtime(sourcefile)
filename = sourcefile
old_mtime = self._file_ages.get(filename)
if old_mtime and mtime > old_mtime:
# Something has changed, restart!
self.do_restart("self-update")
return
self._file_ages[filename] = mtime
# Try again later
eventlet.spawn_after(10, self._monitor_files)
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
for channel in self.start_channels:
log.info("Joining %s", channel)
c.join(channel)
# Start up periodic commands
def command_loop(command, interval):
try:
command.run()
except:
traceback.print_exc()
msg = "Unhandled exception: %s" % traceback.format_exc()
self.log(msg)
self.log("Scheduling %s to run in %i seconds" % (command, interval))
eventlet.spawn_after(interval, command_loop, command, interval)
for command, interval in self.periodic_commands:
try:
eventlet.spawn(command_loop, command, interval)
except:
traceback.print_exc()
def log(self, msg):
self._sendlines(self.connection, self.logchannel, msg)
def scan_msg(self, c, e):
s = e.arguments()[0]
for w in self.watchers:
try:
w.send_input(c, e)
except:
traceback.print_exc()
msg = "Unhandled exception: %s" % traceback.format_exc()
self.log(msg)
def sendlines(self, c, e, m):
target = e.target()
if not is_channel(target):
target = nm_to_n(e.source())
self._sendlines(c, target, m)
def _sendlines(self, c, target, m):
for line in m.split("\n"):
c.privmsg(target, line)
def on_privmsg(self, c, e):
self.scan_msg(c, e)
self.do_command(e, e.arguments()[0])
def on_pubmsg(self, c, e):
self.scan_msg(c, e)
a = e.arguments()[0].split(":", 1)
if len(a) > 1 and irc_lower(a[0]) == irc_lower(self.connection.get_nickname()):
self.do_command(e, a[1].strip())
return
def do_command(self, e, cmd):
try:
c = self.connection
args = shlex.split(cmd)
cmd = args[0]
if cmd in self.commands:
self.log("executing %s" % cmd)
self.commands[cmd].run(c, e, args)
elif cmd == "restart":
self.log("restarting")
self.do_restart()
elif cmd == 'help':
self.sendlines(c, e, "Known commands: %s" % ", ".join(self.commands.keys()))
except SystemExit:
raise
except:
traceback.print_exc()
msg = "Unhandled exception: %s" % traceback.format_exc()
self.sendlines(c, e, msg)
def do_restart(self, message=None):
if message:
message = "Restarting! %s" % message
else:
message = "Restarting!"
print message
self.disconnect(message)
cmd = [sys.executable] + sys.argv
print "Starting new process", cmd
subprocess.Popen(cmd)
print "Exiting"
sys.exit(0)
if __name__ == '__main__':
b = RelengBot(['#relengbot'], '#relengbot', 'relengbot', '63.245.208.159', 6697, ssl=True)
b.start()
print "So long..."
|
#!/usr/bin/env python
import subprocess
import re
def passwd():
ignore = ('nfsnobody',) #one-element tuple needs the trailing comma; a bare string would turn the membership test into a substring check
infile = open("/etc/passwd", 'r')
lines = infile.readlines()
# declare empty dictionary
passwd = { }
for line in lines:
field=line.split(":")
user, userid, userdir = field[0], field[2], field[5]
# system and ignore user
if int(userid) < 500 or user in ignore:
continue
match = re.search(r'(.*)/(\w+)$', userdir)
if match:
homedir = match.group(1)
if homedir not in passwd:
passwd[homedir] = []
passwd[homedir].append(user)
infile.close()
return passwd
def df():
cmd = "df -B G"
f= subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
f.stdout.readline()
size = {}
for line in f.stdout.readlines():
size [ line.split()[-1] ] = line.split()[1][:-1]
return size
def du(usrdir):
cmd = "du -sBG" + " " + usrdir
f= subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
usage = f.stdout.readline().split()[0][:-1]
return usage
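# A minimal usage sketch (an assumption about intended use, not part of the original file):
# combine the helpers above to report per-user disk usage for each home directory,
# assuming this runs on a Linux host with a readable /etc/passwd.
if __name__ == '__main__':
    homedirs = passwd()
    sizes = df()
    for homedir, users in homedirs.items():
        print("%s (filesystem size: %sG)" % (homedir, sizes.get(homedir, '?')))
        for user in users:
            print("  %s: %sG used" % (user, du(homedir + "/" + user)))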
|
from src.nlpTool.contentClean import cleanArticle
from src.nlpTool.paragraphSplit import splitTextIntoParagraphList
from src.nlpTool.sentenceSplit import splitTextIntoSentences
from src.nlpTool.wordSegment import splitSentenceIntoWords
from src.entity.article import Article
import time
if __name__ == '__main__':
article = Article()
article.title = "题目"
article.content = """我爱中国。
这是我的祖国。
我出生在这里。也在这里长大。"""
t1 = time.time()
cleanArticle(article)
splitTextIntoParagraphList(article)
splitTextIntoSentences(article)
splitSentenceIntoWords(article)
t2 = time.time()
print(t2-t1)
print(article.__dict__)
|
def band_name_generator(n):
return n.capitalize() + n[1:] if n[0]==n[-1] else 'The ' + n.capitalize()
'''
My friend wants a new band name for her band. She likes bands that use the formula:
"The" + a noun with the first letter capitalized, for example:
"dolphin" -> "The Dolphin"
However, when a noun STARTS and ENDS with the same letter, she likes to repeat the
noun twice and connect them together with the first and last letter,
combined into one word (WITHOUT "The" in front), like this:
"alaska" -> "Alaskalaska"
Complete the function that takes a noun as a string,
and returns her preferred band name written as a string.
'''
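# Illustrative checks matching the description above (hypothetical test values):
assert band_name_generator("dolphin") == "The Dolphin"
assert band_name_generator("alaska") == "Alaskalaska"
assert band_name_generator("knee") == "The Knee"
assert band_name_generator("tart") == "Tartart"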
|
# coding: utf-8
import json
import watson_developer_cloud
# BEGIN of python-dotenv section
from os.path import join, dirname
from dotenv import load_dotenv
import os
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# END of python-dotenv section
discovery = watson_developer_cloud.DiscoveryV1(
'2016-12-01',
username=os.environ.get("DISCOVERY_USERNAME"),
password=os.environ.get("DISCOVERY_PASSWORD")
)
environment_id = os.environ.get("DISCOVERY_ENVIRONMENT_ID")
collection_id = os.environ.get("DISCOVERY_COLLECTION_ID")
def display_discovery_query_response(json_data):
for entry in json_data['results']:
print("*** [{}] {}".format(entry['score'],
entry['title']))
for keyword in entry['enriched-text']['keywords']:
if keyword['sentiment']['type'] == 'positive':
print("+ [{}]".format(keyword['text']))
if keyword['sentiment']['type'] == 'negative':
print("- [{}]".format(keyword['text']))
if __name__ == '__main__':
while 1:
# get some natural language query from user input
input_content = input('Discovery NLQ> ')
# use line below instead if you're in python 2
# input_content = raw_input('Discovery NLQ> ').strip()
# if you type one of these, you exit the script
if (input_content.lower() in {'exit', 'quit', 'q', 'n'}):
break
query_options = {'natural_language_query': input_content,
'count': 10}
query_results = discovery.query(environment_id,
collection_id,
query_options)
# dumping the raw JSON response
print(json.dumps(query_results, indent=2))
|
import numpy as np
import pyaudio
import effects
def sinusoid(amp: float, freq: float, phs: float, fs: float, duration: float) -> list:
return amp*np.sin(2*np.pi*np.arange(fs*duration)*(freq/fs)+phs).astype(np.float32)
def play(xn: list, fs: float):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=2, rate=fs, output=True)
stream.write(xn.tobytes())
stream.stop_stream()
stream.close()
p.terminate()
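# Minimal usage sketch (an assumption about intended use, not from the original file):
# play() opens a two-channel stream, so a mono tone is interleaved into identical
# left/right samples before writing; the sample rate is passed as an int.
if __name__ == '__main__':
    fs = 44100
    tone = sinusoid(amp=0.5, freq=440.0, phs=0.0, fs=fs, duration=1.0)
    stereo = np.repeat(tone, 2)  # duplicate each sample for the L and R channels
    play(stereo, fs)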
|
from mmvizutil.db.query import (
Query
)
def query_box(q):
query = Query()
query.parameters = q.parameters
query.value = """
select min(num_1) num_1_min,
PERCENTILE_DISC(0.25) WITHIN GROUP (ORDER BY num_1) num_1_25,
median(num_1) num_1_50,
PERCENTILE_DISC(0.75) WITHIN GROUP (ORDER BY num_1) num_1_75,
max(num_1) num_1_max,
avg(num_1) num_1_avg
from ( {query} ) query
""".format(query=q.value)
return query
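# Illustrative use (a sketch; assumes Query() can be built bare and given .parameters/.value,
# exactly as query_box itself does above; the table name is hypothetical):
if __name__ == '__main__':
    base = Query()
    base.parameters = {}
    base.value = "select num_1 from some_table"
    print(query_box(base).value)  # prints the wrapped SQL with the box-plot aggregates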
|
def decor(say_happy):
def wrapper():
print("\n")
say_happy()
return wrapper
@decor
def say_happy():
print("I am happy")
say_happy()
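# Running this module prints a blank line (added by the wrapper) and then "I am happy";
# the @decor line is equivalent to say_happy = decor(say_happy).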
|
import matplotlib.pyplot as plt
import numpy as np
x = np.random.randn(1000)
plt.title("histogram")
plt.xlabel("random data")
plt.ylabel("freqiency")
plt.hist(x,10)
plt.show()
|
from django.conf.urls import patterns,url
#from django.conf.urls import url
from app.views import *
urlpatterns = patterns('',
url(r'^login/',loginUsuario,name='loginUsuario',),
url(r'^fetch_data/', fetch_data, name='get_data'),
url(r'^home/(?P<anystring>.+)/', homeView,name='homeView',),
)
|
from enum import IntEnum
class KeyBind(IntEnum):
FileNew = 0
FileSave = 1
FileSaveAs = 2
FileOpen = 3
FileClose = 4
Undo = 5
Redo = 6
Delete = 9
Copy = 10
Paste = 11
IncGridSize = 12
DecGridSize = 13
Toggle2DGrid = 14
Toggle3DGrid = 15
ToggleGridSnap = 16
SelectTool = 17
MoveTool = 18
RotateTool = 19
ScaleTool = 20
EntityTool = 21
BlockTool = 22
ClipTool = 23
SelectGroups = 24
SelectObjects = 25
SelectFaces = 26
SelectVertices = 27
Confirm = 28
Cancel = 29
Pan2DView = 31
ZoomOut = 32
ZoomIn = 33
FlyCam = 34
Forward3DView = 35
Left3DView = 36
Right3DView = 37
Back3DView = 38
Up3DView = 39
Down3DView = 40
LookLeft3DView = 41
LookRight3DView = 42
LookUp3DView = 43
LookDown3DView = 44
Select = 45
SelectMultiple = 46
NextDocument = 47
PrevDocument = 48
Exit = 49
FileSaveAll = 50
FileCloseAll = 51
ViewQuads = 52
View3D = 53
ViewXY = 54
ViewYZ = 55
ViewXZ = 56
DNATool = 57
GroupSelected = 58
UngroupSelected = 59
Cut = 60
TieToWorld = 61
TieToEntity = 62
Run = 63
|
import os
import time
print("Howdy.")
time.sleep(2)
os.system('clear')
print("We have assumed control of your computer. Resistance is futile.")
time.sleep(3)
print(''' ```''')
time.sleep(.5)
print(''' (`/\\''')
time.sleep(.5)
print(''' `=\/\\''')
time.sleep(.5)
print(''' `=\/\\''')
time.sleep(.5)
print(''' `=\/''')
time.sleep(.5)
print(''' _\___''')
time.sleep(.5)
print(''' ) (''')
time.sleep(.5)
print(''' ( INK )''')
time.sleep(.5)
print(''' \___/''')
time.sleep(3)
print("Just kidding. All's well. Thanks for playing.")
|
'''
15. 3Sum
Given an array S of n integers, are there elements a, b, c in S such that
a + b + c = 0? Find all unique triplets in the array which gives the sum of
zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
'''
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if len(nums) < 3:
return []
sorted_nums = sorted(nums)
left, right = 0, len(sorted_nums)-1
result = []
while left <= right-2:
if 0 < left <= right-2 and sorted_nums[left-1] == sorted_nums[left]:
left = left + 1
continue
low = left
mid = left + 1
high = right
while mid < high:
target = sorted_nums[low] + sorted_nums[mid] + sorted_nums[high]
if target < 0:
mid = mid + 1
elif target > 0:
high = high - 1
else:
result.append([sorted_nums[low], sorted_nums[mid],
sorted_nums[high]])
while mid < high and sorted_nums[mid] == sorted_nums[mid+1]:
mid = mid + 1
while mid < high and sorted_nums[high] == sorted_nums[high-1]:
high = high - 1
mid = mid + 1
high = high - 1
left = left + 1
return result
if __name__ == '__main__':
nums = [-1, 0, 1, 2, -1, -4]
cs = Solution()
print cs.threeSum(nums)
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
#Admin support - card reader configuration
_chrome_path = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'
url = 'http://192.168.128.234/masi-medicare-settlement-web/web/medicare/settlement/common/homepage/index.html'
driver = webdriver.Chrome(executable_path=_chrome_path)
driver.get(url)
driver.find_element_by_xpath('//*[@id="userId"]').send_keys('ya-csyd')
driver.find_element_by_xpath('//*[@id="password"]').send_keys('123456')
driver.find_element_by_xpath('//button[@class="layui-btn masi-login-input masi-login-btn"]').click()
js = 'var q=document.documentElement.scrollTop=10000'
driver.execute_script(js)
time.sleep(2)
driver.find_element_by_xpath('//*[@id="sidebar"]/ul[1]/li[3]').click()
time.sleep(1)
driver.find_element_by_xpath('//ul[@class="layui-nav-sub-list sidebar-sublist"]/li[@data-id="20180807142350123224"]').click() #读卡器配置
xf = driver.find_element_by_xpath("//div[@id='20180807142350123224']/iframe[@frameborder='no']")
driver.switch_to_frame(xf)
# driver.find_element_by_xpath('//div[@class="layui-btn-container demoTable"]/button[@class="layui-btn layui-btn-radius btn btn-sm"]').click() #新增读卡器
# xf2 = driver.find_element_by_xpath("//div[@class='layui-layer-content']/iframe[@src='./addcard.html?type=1']")
# driver.switch_to_frame(xf2)
# driver.find_element_by_xpath('//div[@class="layui-select-title"]/input[@placeholder="请选择读卡器型号"]').click() #点击下拉框
# driver.find_element_by_xpath('//dl[@class="layui-anim layui-anim-upbit"]/dd[text()="新读卡器"]').click() #点击新读卡器
# driver.find_element_by_xpath('//div[@class="layui-select-title"]/input[@placeholder="请选择读卡器型号"]').click() #再点击下拉框
# driver.find_element_by_xpath('//dl[@class="layui-anim layui-anim-upbit"]/dd[text()="旧读卡器"]').click() #点击旧读卡器
# driver.find_element_by_xpath('//div[@class="layui-input-block"]/input[@placeholder="请输入读卡器端口"]').send_keys('1')
# driver.find_element_by_xpath('//div[@id="masiPopBtn"]/button[text()="保存"]')
driver.find_element_by_xpath('//div[@class="layui-table-cell laytable-cell-1-0-6"]/a[@title="修改"]').click()
xf3 = driver.find_element_by_xpath('//div[@class="layui-layer-content"]/iframe[@src="./addcard.html?type=2"]')
driver.switch_to_frame(xf3)
driver.find_element_by_xpath('//div[@class="layui-select-title"]/input[@placeholder="请选择读卡器型号"]').click()
driver.find_element_by_xpath('//dl[@class="layui-anim layui-anim-upbit"]/dd[text()="旧读卡器"]').click()
driver.find_element_by_xpath('//div[@class="layui-select-title"]/input[@placeholder="请选择读卡器型号"]').click()
driver.find_element_by_xpath('//dl[@class="layui-anim layui-anim-upbit"]/dd[text()="新读卡器"]').click()
driver.find_element_by_xpath('//div[@class="layui-input-block"]/input[@placeholder="请输入读卡器端口"]').send_keys(Keys.BACK_SPACE)
driver.find_element_by_xpath('//div[@class="layui-input-block"]/input[@placeholder="请输入读卡器端口"]').send_keys('2')
|
#!/usr/bin/python3
''' I/O module '''
def pascal_triangle(n):
''' Returns a list of lists of integers representing
the Pascal’s triangle of n.
'''
if n <= 0:
return []
p_tri = []
for row in range(n + 1):
row_list = []
for col in range(row):
if col == 0 or col == row - 1:
row_list.append(1)
else:
row_list.append(p_tri[row - 1][col - 1] + p_tri[row - 1][col])
p_tri.append(row_list)
return p_tri[1:]
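# Example output (from the implementation above):
# pascal_triangle(5) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]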
|
#!/usr/bin/env python
""" Example for analyse URIs for detect a Jboss attack
A detail explation of the attack is under http://www.deependresearch.org/
"""
import sys
import os
import base64
import pyaiengine
def callback_uri(flow):
""" This callback is called on every http request to the server """
print("\033[93m" + "WARNING: Potential possible Attack detected on %s" % str(flow) + "\033[0m")
uri = flow.http_info.uri
if ((uri)and(len(uri) > 500)):
""" A URI bigger than 500 bytes could be suspicious """
items = uri.split("?")
args = items[1].split("&")
for arg in args:
""" Iterate through the arguments """
idx = arg.find("=")
if (idx > -1):
value = arg[idx + 1:]
if (len(value) > 64):
""" Ummmm a value of an argument bigger than 64 """
decode = value.replace("%", "").decode("hex")
if (decode.find("Runtime.getRuntime().exec") > -1):
""" Somebody is executing remote commands """
print("\033[31m" + "ALERT: Jboss exploit detected %s" % str(flow) + "\033[0m")
# print("[WARNING]: argument value:%s" % decode)
if __name__ == '__main__':
st = pyaiengine.StackLan()
http_mng = pyaiengine.DomainNameManager()
dom = pyaiengine.DomainName("My jboss host" ,"52.90.136.228:8080")
re = pyaiengine.Regex("Set regex for Jboss uris", "^(/jmx-console/HtmlAdaptor).*")
rm = pyaiengine.RegexManager()
rm.add_regex(re)
http_mng.add_domain_name(dom)
dom.http_uri_regex_manager = rm
re.callback = callback_uri
""" Plug the DomainNameManager on the HTTPProtocol """
st.set_domain_name_manager(http_mng,"HTTPProtocol")
st.tcp_flows = 50000
st.udp_flows = 16380
source = "jexboss_attack_v4a_victim_vantage.pcap"
with pyaiengine.PacketDispatcher(source) as pd:
pd.stack = st
pd.run()
sys.exit(0)
|
"""
Tests of neo.io.hdf5io_new
"""
import unittest
import sys
import numpy as np
from numpy.testing import assert_array_equal
from quantities import kHz, mV, ms, second, nA
try:
import h5py
HAVE_H5PY = True
except ImportError:
HAVE_H5PY = False
from neo.io.hdf5io import NeoHdf5IO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
@unittest.skipUnless(HAVE_H5PY, "requires h5py")
class ReadOldNeoHdf5IOTest(BaseTestIO, unittest.TestCase):
"""
Test that data generated by NeoHdf5IO in Neo versions 0.3, 0.4 are
read correctly.
"""
ioclass = NeoHdf5IO
files_to_test = ["neo_hdf5_example.h5"]
files_to_download = files_to_test
def test_read_with_merge(self):
test_file = get_test_file_full_path(self.ioclass, filename=self.files_to_test[0],
directory=self.local_test_dir, clean=False)
io = NeoHdf5IO(test_file)
blocks = io.read_all_blocks(merge_singles=True)
# general tests, true for both blocks
for block in blocks:
for segment in block.segments:
self.assertEqual(segment.block, block)
# tests of Block #1, which is constructed from "array" (multi-channel)
# objects, so should be straightforward to convert to the version 0.5 API
block0 = blocks[0]
self.assertEqual(block0.name, "block1")
self.assertEqual(block0.index, 1234)
self.assertEqual(block0.annotations["foo"], "bar")
self.assertEqual(len(block0.segments), 3)
for segment in block0.segments:
self.assertEqual(len(segment.analogsignals), 2)
as0 = segment.analogsignals[0]
self.assertEqual(as0.shape, (1000, 4))
self.assertEqual(as0.sampling_rate, 1 * kHz)
self.assertEqual(as0.units, mV)
self.assertEqual(as0.segment, segment)
self.assertEqual(len(segment.spiketrains), 4)
st = segment.spiketrains[-1]
self.assertEqual(st.units, ms)
self.assertEqual(st.t_stop, 1000 * ms)
self.assertEqual(st.t_start, 0 * ms)
self.assertEqual(st.segment, segment)
self.assertEqual(len(segment.events), 1)
ev = segment.events[0]
assert_array_equal(ev.labels,
np.array(['trig0', 'trig1', 'trig2'],
dtype=(sys.byteorder == 'little' and '<' or '>') + 'U5'))
self.assertEqual(ev.units, second)
assert_array_equal(ev.magnitude, np.arange(0, 30, 10))
self.assertEqual(ev.segment, segment)
self.assertEqual(len(segment.epochs), 1)
ep = segment.epochs[0]
assert_array_equal(ep.labels,
np.array(['btn0', 'btn1', 'btn2'],
dtype=(sys.byteorder == 'little' and '<' or '>') + 'U4'))
assert_array_equal(ep.durations.magnitude,
np.array([10, 5, 7]))
self.assertEqual(ep.units, second)
assert_array_equal(ep.magnitude, np.arange(0, 30, 10))
self.assertEqual(ep.segment, segment)
self.assertEqual(len(segment.irregularlysampledsignals), 2)
iss0 = segment.irregularlysampledsignals[0]
self.assertEqual(iss0.shape, (3, 2))
assert_array_equal(iss0.times,
[0.01, 0.03, 0.12] * second)
assert_array_equal(iss0.magnitude,
np.array([[4, 3],
[5, 4],
[6, 3]]))
self.assertEqual(iss0.units, nA)
self.assertEqual(iss0.segment, segment)
iss1 = segment.irregularlysampledsignals[1]
self.assertEqual(iss1.shape, (3, 1))
assert_array_equal(iss1.times,
[0.02, 0.05, 0.15] * second)
self.assertEqual(iss1.units, nA)
assert_array_equal(iss1.magnitude,
np.array([[3], [4], [3]]))
# tests of Block #2, which is constructed from "singleton"
# (single-channel) objects, so is potentially tricky to convert to the
# version 0.5 API
block1 = blocks[1]
self.assertEqual(block1.name, "block2")
for segment in block1.segments:
self.assertEqual(len(segment.analogsignals), 2)
as0 = segment.analogsignals[0]
self.assertEqual(as0.shape, (1000, 4))
self.assertEqual(as0.sampling_rate, 1 * kHz)
self.assertEqual(as0.units, mV)
self.assertEqual(as0.segment, segment)
self.assertEqual(len(segment.spiketrains), 7)
st = segment.spiketrains[-1]
self.assertEqual(st.units, ms)
self.assertEqual(st.t_stop, 1000 * ms)
self.assertEqual(st.t_start, 0 * ms)
self.assertEqual(st.segment, segment)
self.assertEqual(len(segment.events), 0)
self.assertEqual(len(segment.epochs), 0)
self.assertEqual(len(block1.channel_indexes), 3)
ci0 = block1.channel_indexes[0]
self.assertEqual(ci0.name, "electrode1")
self.assertEqual(len(ci0.analogsignals), 1)
as00 = ci0.analogsignals[0]
self.assertEqual(as00.segment, segment)
self.assertEqual(as00.shape, (1000, 4))
self.assertEqual(id(as00), id(segment.analogsignals[0]))
self.assertEqual(as00.mean(), segment.analogsignals[0].mean())
self.assertEqual(as00.channel_index, ci0)
assert_array_equal(ci0.index, np.array([0, 1, 2, 3]))
assert_array_equal(ci0.channel_ids, np.array([0, 1, 2, 3]))
self.assertEqual(len(ci0.units), 2)
self.assertEqual(len(ci0.units[0].spiketrains), 2)
self.assertEqual(id(ci0.units[0].spiketrains[0]),
id(block1.segments[0].spiketrains[0]))
self.assertEqual(id(ci0.units[0].spiketrains[1]),
id(block1.segments[1].spiketrains[0]))
self.assertEqual(id(ci0.units[1].spiketrains[0]),
id(block1.segments[0].spiketrains[1]))
ci1 = block1.channel_indexes[1]
self.assertEqual(ci1.name, "electrode2")
self.assertEqual(len(ci1.analogsignals), 1)
as10 = ci1.analogsignals[0]
self.assertEqual(as10.segment, segment)
self.assertEqual(as10.shape, (1000, 4))
self.assertEqual(id(as10), id(segment.analogsignals[1]))
self.assertEqual(as10.mean(), segment.analogsignals[1].mean())
self.assertEqual(as10.channel_index, ci1)
assert_array_equal(ci1.index, np.array([0, 1, 2, 3]))
assert_array_equal(ci1.channel_ids, np.array([4, 5, 6, 7]))
self.assertEqual(len(ci1.units), 5)
self.assertEqual(id(ci1.units[0].spiketrains[0]),
id(block1.segments[0].spiketrains[2]))
self.assertEqual(id(ci1.units[3].spiketrains[1]),
id(block1.segments[1].spiketrains[5]))
ci2 = block1.channel_indexes[2]
self.assertEqual(ci2.name, "my_favourite_channels")
self.assertEqual(len(ci2.analogsignals), 1)
self.assertEqual(id(ci2.analogsignals[0]), id(as00))
assert_array_equal(ci2.index, np.array([1, 3]))
assert_array_equal(ci2.channel_ids, np.array([1, 3]))
|
#! /usr/bin/python
from flask import Flask, render_template, request, url_for, flash, redirect, jsonify
from pytrends.request import TrendReq
app = Flask(__name__)
app.secret_key = "whatever floats your boat"
# Connect to Google with pytrends
pytrend = TrendReq(hl='en-US', tz=360)
# Views
@app.route('/', methods = ['GET', 'POST'])
@app.route('/index', methods = ['GET', 'POST'])
def main():
return render_template('index.html')
@app.route('/trends', methods=["GET", "POST"])
def trends():
# reading the GET argument with the request
keyword = request.args.get('keyword')
# use pytrends to retrieve data & change df index to string
pytrend.build_payload(kw_list=[keyword], timeframe='today 5-y')
results = pytrend.interest_over_time()
results.index = results.index.astype(str)
# jsonify data for usage in chart
results = results.to_json(orient='index')
results = jsonify(results)
return results # results is Google Trends data
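# Illustrative request (hypothetical keyword): GET /trends?keyword=python returns
# five years of interest-over-time values for "python", JSON-encoded with the dates
# as keys (orient='index').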
if __name__== '__main__':
app.run(debug=True)
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Category, Item
class ItemSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Item
fields = ('id', 'name', 'categories', 'value_int', 'value_float')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Category
fields = ('id', 'name', 'description')
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import base64
import hashlib
from config import Config
import smtplib
from email.utils import parseaddr, formataddr
from email.header import Header
from email.mime.text import MIMEText
from flask import request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cache import Cache
db = SQLAlchemy()
cache = Cache(config=Config.REDIS_CONFIG)
templates = {
'phone': '字段:{column}-日志类型:手机号-原值:{source}-现值:{now}',
'job': '字段:{column}-日志类型:职位-原值:{source}-现值:{now}',
'company': '字段:{column}-日志类型:公司-原值:{source}-现值:{now}',
'department': '字段:{column}-日志类型:部门-原值:{source}-现值:{now}',
'salary': '字段:{column}-日志类型:薪资-原值:{source}-现值:{now}',
'is_leave': '字段:{column}-日志类型:是否离职-原值:否-现值:是',
}
def apiResponse(code, msg="", data=""):
"""
封装返回结果
params: int code
params: str msg
params: dict data
return: json
"""
if code == 200 and not msg:
msg = "请求成功"
if code == 204 and not msg:
msg = "未知参数"
if code == 403 and not msg:
msg = "验证失败"
return jsonify({'code': code, 'msg': msg, 'data': data})
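# Illustrative call (inside a Flask request context):
#   apiResponse(200, data={'id': 1})
# returns JSON of the form {"code": 200, "msg": "请求成功", "data": {"id": 1}}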
def format_addr(s):
"""
Format a sender/recipient address for use in email headers
params: str s
return: str
"""
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def encrypt(u_password):
"""
Hash the user password (MD5)
params: str u_password
return: str
"""
md5 = hashlib.md5()
md5.update(u_password.encode("utf-8"))
u_password = md5.hexdigest()
return u_password
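# Example: encrypt("123456") -> "e10adc3949ba59abbe56e057f20f883e"
# (the MD5 hex digest of the UTF-8 bytes; MD5 is a one-way hash, not reversible encryption)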
def authticket(func):
"""
Ticket-based verification decorator for ordinary (non-login) endpoints
params: func func
return:
"""
def wrapper(*args, **kwargs):
request_path = request.base_url.split('/')[-1]
ticket = request.args.get('ticket') or None
enstr = 'LRnS4t'
if ticket:
ticket = ticket + len(ticket) % 4 * '='
destrlist = base64.b64decode(ticket.encode()).decode().split('-')
if destrlist[0] == request_path and len(destrlist[1]) == 10 and destrlist[2] == enstr:
return func()
return apiResponse(403, '验证失败')
return wrapper
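# How a matching ticket could be constructed (a sketch; the 10-character middle field is
# only length-checked above, so e.g. a unix timestamp string would pass):
#   raw = request_path + '-' + '1600000000' + '-' + 'LRnS4t'
#   ticket = base64.b64encode(raw.encode()).decode().rstrip('=')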
def loginauth(func):
"""
Login-token verification decorator for authenticated endpoints
params: func func
return:
"""
def wrapper(*args, **kwargs):
token = request.cookies.get('token') or None
if token:
info = cache.get(token)
if info:
return func()
return apiResponse(403, '验证失败')
return wrapper
def makemail(context, to_addr, subject):
"""
Send an email
params: str context
params: email to_addr
params: subject
return: bool
"""
from_addr = Config.MAIL_USERNAME
from_pass = Config.MAIL_PASSWORD
smtp_server = Config.MAIL_SERVER
content = MIMEText(context, 'html', 'utf-8')
content['From'] = format_addr('Animekid <%s>' % from_addr)
content['To'] = format_addr('{to_addr} <{to_addr}>'.format(to_addr=to_addr))
content['Subject'] = Header(subject, 'utf-8').encode()
server = smtplib.SMTP_SSL(smtp_server, 465)
server.login(from_addr, from_pass)
if server.noop()[0] == 250:
server.sendmail(from_addr, [to_addr], content.as_string())
server.close()
return True
server.close()
return False
|
from os import system
class Leitura:
def __init__(self):
self.__dados= open('dados.txt').readlines()
self.__db = {}
try:
self.__upload()
except ValueError:
raise ValueError('dados corompidos')
def __upload(self):
if self.__dados == []:
self.__db['palavras'] = 4
self.__db['ppm'] = 200
self.__db['seg'] = self.__pal()
else:
try:
self.__db['palavras'] = int(self.__dados[1])
self.__db['ppm'] = int(self.__dados[0])
self.__db['seg'] = self.__pal()
except (ValueError,IndexError):
raise ValueError()
def __pal(self):
return (60*self.__db['palavras'])/self.__db['ppm']
def ppm(self):
return self.__db['ppm']
def palavras(self):
return self.__db['palavras']
def segs(self):
return self.__db['seg']
def setppm(self,x):
self.__db['ppm'] = x
self.__db['seg'] = self.__pal()
def setpalavra(self,x):
self.__db['palavras'] = x
self.__db['seg'] = self.__pal()
def save(self):
o = open('dados.txt','w')
o.write(str(self.__db['ppm'])+'\n')
o.write(str(self.__db['palavras']))
|
from dask.array import stats
import re
import argparse
from numba import njit, vectorize
from dklearn.pipeline import Pipeline
from dklearn.grid_search import GridSearchCV
import dask.bag as db
import dask.dataframe as dd
import dask.array as da
import dask
import os #os.walk and os.path.join are used in main() but only "from os import path" was imported
from os import path
from scipy.sparse import csr_matrix, triu
from ete3 import Tree
import tracemalloc #used at the top of main() but was never imported
import utils
u = utils #the code below refers to this module as both "utils" and "u"
import numpy as np
import pandas as pd
from numba.types import *
from scipy import stats
from functools import partial
from io import StringIO
import matplotlib.pyplot as plt
from itertools import combinations as combs
import matplotlib
from glob import glob
from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeRegressor, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, RandomForestClassifier, AdaBoostRegressor, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor, GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import ElasticNetCV, LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn import metrics as met
from sklearn.model_selection import cross_validate, StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectFromModel
from sklearn.base import clone
scaler = StandardScaler() # should we use this?
matplotlib.use('Agg')
# from sklearn.grid_search import GridSearchCV
# from sklearn.pipeline import Pipeline
dask.config.config
# one func: calc triplet topologies (gt,wag,jtt,...) -> 1*ntaxa^3 parquet
# one func: calc dmat (gt,wag,jtt,...) -> nloci*ntaxa^2 parquet
@njit
def itl(k):
"""convert a lower-triangular 1-d index back to (row, col) indices; assumed to be the
inverse of lti and reconstructed here, since the original body referenced undefined names"""
i = int((1 + (1 + 8 * k) ** 0.5) // 2)
j = int(k - i * (i - 1) // 2)
return i, j
@njit
def lti(i, j):
"""convert row,col indices to a lower triangular 1-d array index"""
i, j = max(i, j), min(i, j)
return j + (i - 1) * i // 2
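# Worked example of the triangular indexing above (hand-checked, not from the original source):
# lti(1, 0) -> 0, lti(2, 0) -> 1, lti(2, 1) -> 2, lti(3, 2) -> 5, and (with the reconstructed
# inverse) itl(5) -> (3, 2)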
# use only the <x% and >1-x% ILS percentiles
tol = .2
@vectorize([int16(int32, int32),
int16(int64, int64),
int16(float32, float32),
int16(float64, float64)])
def extremize(x, y):
"""0=no ILS, 1=ILS"""
z = x-y
if z < tol:
return 1
if z > 1-tol:
return 0
return -1
# assume 3 topos
def read_topos(fname):
"""assumes we have a header"""
return np.loadtxt(fname, dtype='int16')[1, :]
def summary_stats(x):
return [stats.moment(x, 1, nan_policy='omit'),
stats.moment(x, 2, nan_policy='omit'),
stats.skew(x, nan_policy='omit'),
]
def train_and_test(X, y, learners, metrics, outfile, nfolds=10, nprocs=1):
"""main loop"""
results = dict.fromkeys(learners)
feature_weights = {l: [] for l in learners}
with open(outfile+'.txt', 'w') as f:
npos = sum(y)
f.write('npos (ILS): %d, nneg %d, nfolds: %d\n' %
(npos, len(y)-npos, nfolds))
f.write('metric\tmean\tstd\n')
for learner_name, learner in learners.items():
f.write('\n----\n%s\n' % learner_name)
f.write('\nparams:\t%s\n' % learner.get_params())
res = []
true = []
folds = StratifiedKFold(n_splits=nfolds,
shuffle=True, random_state=12345)
# SelectFromModel(learner,threshold='mean'))
clf = make_pipeline(StandardScaler(), learner)
results[learner_name] = cross_validate(
clf, X, y, scoring=metrics, cv=folds, n_jobs=nprocs)
for k, v in results[learner_name].items():
f.write('%s\t%f\t%f\t' % (k, np.mean(v), np.std(v)))
df = u.multidict_to_df(
results,
names=['learners', 'metrics']
)
df.to_csv(outfile+'.csv')
# weights trained on WHOLE dataset
d = {learner_name: u.get_features(clone(learner).fit(
X, y)) for learner_name, learner in learners.items()}
for learner_name, learner in learners.items():
clf = clone(learner)
clf.fit(X, y)
ftrs = u.get_features(clf)
if not ftrs is None:
print(len(ftrs), X.shape, y.shape)
for k, v in d.items():
print(k, v)
feature_weights = pd.DataFrame.from_dict(
d
)
feature_weights.index = X.columns
feature_weights.to_csv(outfile+'.features.csv')
print('feature_weights', feature_weights)
def main(args):
tracemalloc.start()
@vectorize([int16(int32),
int16(int64),
int16(float32),
int16(float64)])
def extremize(z):
"""0=no ILS, 1=ILS"""
if z < args.tol+1./3:
return 1
if z > 1-args.tol:
return 0
return -1
# configuration for this dataset
if args.leafnames:
tree_config = utils.TreeConfig(leafnames=args.leafnames,
outgroup=0,
subtree_sizes=[3]) # ,4])
else:
tree_config = utils.TreeConfig(ntaxa=39,
outgroup=0,
subtree_sizes=[3]) # ,4])
if args.dirlist:
with open(args.dirlist) as f:
dirs = [s.strip() for s in f]
elif args.indir:
dirs = [os.path.join(args.indir, x)
for x in next(os.walk(args.indir))[1]]
n_procs = 4
# should include this as another column
# f=lambda s: re.sub('[();]','',s).split(',') # don't assume these are ints, return list of strs
# want this to be sorted so we can convert to cov easily
def f(s): return tuple(sorted(int(ss)
for ss in re.sub('[();]', '', s).split(',')))
# f = lambda s: re.sub('[();]','',s)
def find_polytomies(s): return re.search('\d+,\d+,\d+', s) is not None
def pq2df(x): return dd.read_parquet(
x, engine='pyarrow').repartition(npartitions=args.procs)
for d in dirs:
s_cov = pq2df(path.join(d, 's_tree.trees.covs.parquet/'))
s_top = pq2df(path.join(d, 's_tree.trees.topos.parquet/')
).rename(columns={'count': 'count_t'})
#g_cov = pq2df(path.join(d,'g_tree.all.trees.covs.parquet/'))
g_top = pq2df(path.join(d, 'g_tree.all.trees.topos.parquet/'))
y = g_top.merge(s_top, how='right', on='topo', suffixes=('_i', '_t'))[
['topo', 'count_i']]  # keep only true sp tree freq
for tops, covs in zip(
map(pq2df, sorted(glob(path.join(d, 'dataset.*topos*')))),
map(pq2df, sorted(glob(path.join(d, 'dataset.*covs*'))))
):
# TODO: for arbitrary n, need to have topological ordering
# need sp tree to get maj/minor topos. TODO: for quartets, need to distinguish between symmetric and asymmetric as well.
tops = tops.merge(s_top, how='left', on='topo')
# TODO: make sure all the df->bag->array operations are order-preserving
to_drop = tops.topo.apply(find_polytomies)
tops = tops[~to_drop]
# .to_dask_array().compute()
trio_inds = tops.topo.apply(f, meta=('trios', 'str'))
trios = trio_inds.drop_duplicates()
freq_grouped = tops.groupby(trio_inds)['count']
fmin = freq_grouped.min()
fmax = freq_grouped.max()
fsum = freq_grouped.sum()
fmask = freq_grouped.count() >= min(tree_config.subtree_sizes)
x = (fmax[fmask]-fmin[fmask]) / fsum[fmask]
x = x[x < args.tol] # .index.compute()
tops = tops.assign(tid=trio_inds).merge(x.to_frame(),
left_on='tid',
right_index=True,
suffixes=('_i', '_diff'),
how='inner')
# tops.compute().groupby(trio_inds).apply(lambda x:x['count_i'].values)
# tops.compute().groupby(trios)#.apply(lambda x: (x.count_i.max()-x.count_i.min())/x.
cov_summaries = utils.Reductions(covs)
t_c = tops.tid.apply(utils.leafset2subset_cov, meta='str').to_bag()
covariance_mat = da.stack(
[s for s in t_c.map(cov_summaries.reduce)], axis=0)
cov_summaries.get_metadata()
# each row in X consists of the tree topo counts and some summary stats derived from the 500+ inferred gene trees
concordant_topo_counts = tops[~tops.count_t.isna()][['tid', 'count_i']].rename(
columns={'count_i': 'concordant'})
ils = tops[tops.count_t.isna()]
# annoyingly, dask groupby only works on scalar outputs.
# TODO: find another workaround for > 2 ILS trees
ils_topo_counts = ils.groupby(ils.tid).count_i
counts_mat = concordant_topo_counts.merge(
ils_topo_counts.first().to_frame().rename(
columns={'count_i': 'ils1'}),
left_on='tid',
right_index=True
).merge(ils_topo_counts.last().to_frame().rename(columns={'count_i': 'ils2'}),
left_on='tid',
right_index=True
)
try:
print('fraction of runs for which the true tree is the dominant topology:',
(counts_mat.iloc[:, 1:].apply(np.argmax, axis=1) == 'concordant').mean().compute())
except:
pass
y = full_dataset[y_counts].copy().div(full_dataset[y_counts].sum(
axis=1), axis=0) # normalize, drop all but counts
sort_y = np.sort(
y.values,
axis=1
) # sort each row
# for regressing on % of non-ILS trees
y_frac = y.values[:, 0] # y[:,1:-1].sum(axis=0)
y_bin = extremize(y_frac)
keep_rows = y_bin > -1
y_bin = y_bin[keep_rows]
X_bin = X[keep_rows]
if args.true_dmat:
true_dmat = u.read_data_frames(args.true_dmat)
true_dmat['join_col'] = true_dmat.filename.map(clean_filenames)
X_true_gene_trees = pd.merge(X, true_dmat, on=join_cols, how='right')
# classify based on presence/absence of ILS
#y_bin = np.apply_along_axis(np.all,arr=y,axis=1)
# regress on % of all trees
decision_tree_params = {
'max_depth': 3,
'min_samples_leaf': 1}
# 'criterion':'gini', # by default
learners = {'Random': DummyClassifier('stratified'),
'Trivial': DummyClassifier('most_frequent'),
'RBF-SVM': SVC(kernel='rbf'),
'RF': RandomForestClassifier(bootstrap=True, n_estimators=20, **decision_tree_params),
'ExtraTrees': ExtraTreesClassifier(bootstrap=True, n_estimators=20, **decision_tree_params),
'AdaBoost': AdaBoostClassifier(n_estimators=20, base_estimator=DecisionTreeClassifier(**decision_tree_params)),
'GradBoost': GradientBoostingClassifier(n_estimators=20, criterion='friedman_mse', **decision_tree_params),
'GP': GaussianProcessClassifier(copy_X_train=False),
'LogisticReg': LogisticRegressionCV(penalty='l1', class_weight='balanced', solver='liblinear', cv=10),
'MLP': MLPClassifier(solver='sgd', batch_size=50, learning_rate='adaptive', learning_rate_init=0.01, momentum=0.9, nesterovs_momentum=True,
hidden_layer_sizes=(10, 10, 10), max_iter=500, shuffle=True)
}
metrics = {'Acc': met.accuracy_score,
'F1': met.f1_score,
'Prec': met.precision_score,
'Recall': met.recall_score,
'MCC': met.matthews_corrcoef}
# cv requires scoring fn
for m in metrics:
metrics[m] = met.make_scorer(metrics[m])
# met.roc_auc_score requires y_score
results_raxml = train_and_test(X_bin, y_bin, learners, metrics,
outfile=path.join(
args.outdir, 'results.classify'),
nfolds=args.folds,
nprocs=n_procs)
if args.true_dmat:
results_true = train_and_test(X_true_gene_trees, y_bin, learners, metrics,
outfile=path.join(
args.outdir, 'results.true_trees.classify'),
nfolds=args.folds,
nprocs=n_procs)
###### REGRESSION #######
decision_tree_params = {
'max_depth': 3,
'min_samples_leaf': 1}
# 'criterion':'mse', # by default
learners = {'Mean': DummyRegressor('mean'),
'Median': DummyRegressor('median'),
'RF': RandomForestRegressor(bootstrap=True, n_estimators=20, **decision_tree_params),
'ExtraTrees': ExtraTreesRegressor(bootstrap=True, n_estimators=20, **decision_tree_params),
'AdaBoost': AdaBoostRegressor(base_estimator=DecisionTreeRegressor(**decision_tree_params)),
'GradBoost': GradientBoostingRegressor(n_estimators=20, criterion='friedman_mse', **decision_tree_params),
'GP': GaussianProcessRegressor(copy_X_train=False),
'ElasticNet': ElasticNetCV(cv=10),
'MLP': MLPRegressor(solver='sgd', batch_size=50, learning_rate='adaptive', learning_rate_init=0.01, momentum=0.9, nesterovs_momentum=True,
hidden_layer_sizes=(20, 20, 20), max_iter=500, shuffle=True)
}
metrics = {'MSE': met.mean_squared_error,
'MAE': met.mean_absolute_error,
'EV': met.explained_variance_score
}
for m in metrics:
metrics[m] = met.make_scorer(metrics[m])
results_raxml = train_and_test(X, y_frac, learners, metrics,
outfile=path.join(
args.outdir, 'results.regress'),
nfolds=args.folds,
nprocs=n_procs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--procs', '-p', type=int, help='num procs', default=4)
parser.add_argument('--threads', '-t', type=int,
help='num threads per proc', default=4)
parser.add_argument('--mem', '-m', type=float,
help='memory (in bytes)', default=4e9)
parser.add_argument(
'--tol', type=float, default=.3, help='observed frequencies must be within tol of each other')
parser.add_argument('--indir', help='directory to search for files',
default='/N/dc2/projects/bkrosenz/deep_ils/sims/simphy/SimPhy38')
parser.add_argument('--dirlist', type=str,
help='file with list of dirs to process')
parser.add_argument('--outdir', help='directory to store results files',
default='/N/dc2/projects/bkrosenz/deep_ils/results')
parser.add_argument('--folds', '-f', type=int, help='CV folds', default=10)
parser.add_argument('--leafnames', help='file with leaf names (referenced in main; optional)')
parser.add_argument('--true_dmat', help='true distance matrix file (referenced in main; optional)')
parser.add_argument('--use_counts', action='store_true',
help='use topology frequencies of inferred trees')
# dest='accumulate', action='store_const',
# const=sum, default=max,
# help='sum the integers (default: find the max)')
args = parser.parse_args()
print('Arguments: ', args)
main(args)
|
from aiogram import types
test = types.ReplyKeyboardMarkup(
keyboard=[
[
types.KeyboardButton(text="/items"),
types.KeyboardButton(text="/herou"),
types.KeyboardButton(text="/guide"),
types.KeyboardButton(text="/help"),
types.KeyboardButton(text="/menu")
]
],resize_keyboard= True)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from impala.error import HiveServer2Error
from tests.common.environ import specific_build_type_timeout
from time import sleep
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
class TestRestart(CustomClusterTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@pytest.mark.execute_serially
def test_restart_statestore(self, cursor):
""" Regression test of IMPALA-6973. After the statestore restarts, the metadata should
eventually recover after being cleared by the new statestore.
"""
self.cluster.statestored.restart()
# We need to wait for the impalad to register to the new statestored and for a
# non-empty catalog update from the new statestored. It cannot be expressed with the
# existing metrics yet so we wait for some time here.
wait_time_s = specific_build_type_timeout(60, slow_build_timeout=100)
sleep(wait_time_s)
for retry in xrange(wait_time_s):
try:
cursor.execute("describe database functional")
return
except HiveServer2Error, e:
assert "AnalysisException: Database does not exist: functional" in e.message,\
"Unexpected exception: " + e.message
sleep(1)
assert False, "Coordinator never received non-empty metadata from the restarted " \
"statestore after {0} seconds".format(wait_time_s)
@pytest.mark.execute_serially
def test_restart_impala(self):
""" This test aims to restart Impalad executor nodes between queries to exercise
the cluster membership callback which removes stale connections to the restarted
nodes."""
self._start_impala_cluster([], num_coordinators=1, cluster_size=3)
assert len(self.cluster.impalads) == 3
client = self.cluster.impalads[0].service.create_beeswax_client()
assert client is not None
for i in xrange(5):
self.execute_query_expect_success(client, "select * from functional.alltypes")
node_to_restart = 1 + (i % 2)
self.cluster.impalads[node_to_restart].restart()
# Sleep for a bit for the statestore change in membership to propagate. The min
# update frequency for statestore is 100ms but using a larger sleep time here
# as certain builds (e.g. ASAN) can be really slow.
sleep(3)
client.close()
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import sys
import math
import random
import string
from copy import deepcopy
from itertools import combinations, permutations, product
from bisect import bisect_left, bisect_right
def input():
return sys.stdin.readline().rstrip()
def getN():
return int(input())
def getNM():
return map(int, input().split())
def getList():
return list(map(int, input().split()))
def getArray(intn):
return [int(input()) for i in range(intn)]
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
INF = float('inf')
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
#############
# Main Code #
#############
# From ABC189 F
# Probability and expected value of reaching square N starting from 0 with a die that rolls 1..M
# Overshooting the goal is allowed
N, M = getNM()
# compute the probability of reaching square i starting from 0
P = [0] * (N + M + 1)
P[0] = 1
rec = [0] * (N + M + 1)
for i in range(N + M + 1):
if 0 < i:
rec[i] += rec[i - 1]
P[i] += rec[i]
P[i] = max(0, P[i])
if i < N:
rec[i + 1] += P[i] / M
rec[i + M + 1] -= P[i] / M
# compute the expected number of rolls to reach square i starting from 0
E = [0] * (N + M + 1)
rec = [0] * (N + M + 1)
for i in range(N + M + 1):
if 0 < i:
rec[i] += rec[i - 1]
E[i] += rec[i]
E[i] = max(E[i], 0)
# multiply by the reciprocal of the probability of arriving at square i
if P[i]:
E[i] = E[i] / P[i]
if i < N:
# P[i] / M: probability of jumping here from that square
rec[i + 1] += P[i] * (E[i] + 1) / M
rec[i + M + 1] -= P[i] * (E[i] + 1) / M
print(P, E, rec)
# with a topological sort this also works without the condition s < t
# reversing the edges and scanning from the front gives the expected value from the start
def calc(edges):
# compute the probabilities
P = [0] * N
P[0] = 1
for u in range(N):
for v in edges[u]:
P[v] += P[u] / len(edges[u])
# compute the expected values, working backwards from the goal
# i.e. how many more moves are needed to reach the goal
E = [0] * N
for u in range(N - 1, -1, -1):
for v in edges[u]:
# removing the edge with the largest (E[v] + 1) would be optimal
E[u] += (E[v] + 1) / len(edges[u])
return P, E
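# Hand-worked example for calc() (hypothetical; assumes the global N is the number of nodes):
# with N = 3 and edges = [[1, 2], [2], []], the forward pass gives P = [1, 0.5, 1] and the
# backward pass gives E = [1.5, 1, 0], i.e. 1.5 expected steps from node 0 to node 2.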
|
def summation(x):
return sum(xrange(1, x + 1)) if isinstance(x, int) else 'Error 404'
|
import sqlite3
file_name = input()
con = sqlite3.connect(file_name)
cur = con.cursor()
result = cur.execute(f'''select title from films where title like "%Астерикс%"
and not title like "%Обеликс%"''')
for elem in result:
print(elem[0])
|
# -*- coding: utf-8 -*-
"""Parser for the API."""
from collections import OrderedDict
from json import loads
from django.conf import settings
from django.utils.six import text_type
from rest_framework.exceptions import ParseError
from rest_framework.parsers import JSONParser as BaseJSONParser
class JSONParser(BaseJSONParser):
dict_class = OrderedDict
def parse(self, stream, media_type=None, parser_context=None):
"""Parse JSON-serialized data.
Same as base JSONParser, but uses dict_class to preserve order.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return loads(data, object_pairs_hook=self.dict_class)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % text_type(exc))
|
import pandas as pd
import numpy as np
import csv
import os
import const
def run_benchmark_cnn():
import sys
sys.path.append("/content/drive/My Drive/capstone1/CAN/torch2trt") # https://github.com/NVIDIA-AI-IOT/torch2trt
from torch2trt import torch2trt
import model
import time
import torch
import dataset
import torch.nn as nn
test_model = model.DenseNet()
test_model.eval().cuda()
batch_size = 1
inputs = torch.ones((batch_size, 1, const.CNN_FRAME_LEN, const.CNN_FRAME_LEN))
trt_x = inputs.half().cuda() # forward error, densenet half weight layer?
trt_model = model.DenseNet()
trt_model.eval().cuda()
trt_f16_x = inputs.half().cuda()
trt_f16_model = model.DenseNet().half()
trt_f16_model.half().eval().cuda()
trt_int8_strict_x = inputs.half().cuda() # match model weight
trt_int8_strict_model = model.DenseNet()
trt_int8_strict_model.eval().cuda() # no attribute 'char'
# convert to TensorRT feeding sample data as input
print('done 0/3')
model_trt = torch2trt(trt_model, [trt_x], max_batch_size=batch_size)
print('done 1/3')
model_trt_f16 = torch2trt(trt_f16_model, [trt_f16_x], fp16_mode=True, max_batch_size=batch_size)
print('done 2/3')
model_trt_int8_strict = torch2trt(trt_int8_strict_model, [trt_int8_strict_x], fp16_mode=False, int8_mode=True, strict_type_constraints=True, max_batch_size=batch_size)
with torch.no_grad():
### test inference time
dummy_x = torch.ones((batch_size, 1, const.CNN_FRAME_LEN, const.CNN_FRAME_LEN)).half().cuda()
dummy_cnt = 10000
print('ignore data loading time, inference random data')
check_time = time.time()
for i in range(dummy_cnt):
_ = test_model(dummy_x)
print('torch model: %.6f' % ((time.time() - check_time) / dummy_cnt))
check_time = time.time()
for i in range(dummy_cnt):
_ = model_trt(dummy_x)
print('trt model: %.6f' % ((time.time() - check_time) / dummy_cnt))
dummy_x = torch.ones((batch_size, 1, const.CNN_FRAME_LEN, const.CNN_FRAME_LEN)).half().cuda()
check_time = time.time()
for i in range(dummy_cnt):
_ = model_trt_f16(dummy_x)
print('trt float 16 model: %.6f' % ((time.time() - check_time) / dummy_cnt))
dummy_x = torch.ones((batch_size, 1, const.CNN_FRAME_LEN, const.CNN_FRAME_LEN)).char().cuda()
check_time = time.time()
for i in range(dummy_cnt):
_ = model_trt_int8_strict(dummy_x)
print('trt int8 strict model: %.6f' % ((time.time() - check_time) / dummy_cnt))
### end
def run_benchmark(weight_path):
import sys
sys.path.append("/content/drive/My Drive/capstone1/CAN/torch2trt")
from torch2trt import torch2trt
import model
import time
import torch
import dataset
import torch.nn as nn
test_model = model.OneNet()
test_model.load_state_dict(torch.load(weight_path))
test_model.eval().cuda()
batch_size = 1
_, _, _, test_data_set = dataset.GetCanDataset(100, 0, "./dataset/Mixed_dataset.csv", "./dataset/Mixed_dataset_1.txt")
sampler = dataset.BatchIntervalSampler(len(test_data_set), batch_size)
testloader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
# create model and input data
for inputs, labels in testloader:
# inputs = torch.cat([inputs, inputs, inputs], 1)
trt_x = inputs.float().cuda()
trt_state = torch.zeros((batch_size, 8 * 32)).float().cuda()
trt_model = model.OneNet()
trt_model.load_state_dict(torch.load(weight_path))
trt_model.float().eval().cuda()
trt_f16_x = inputs.half().cuda()
trt_f16_state = torch.zeros((batch_size, 8 * 32)).half().cuda()
trt_f16_model = model.OneNet().half()
trt_f16_model.load_state_dict(torch.load(weight_path))
trt_f16_model.half().eval().cuda()
trt_int8_strict_x = inputs.float().cuda()
trt_int8_strict_state = torch.zeros((batch_size, 8 * 32)).float().cuda() # match model weight
trt_int8_strict_model = model.OneNet()
trt_int8_strict_model.load_state_dict(torch.load(weight_path))
trt_int8_strict_model.eval().cuda() # no attribute 'char'
break
# convert to TensorRT feeding sample data as input
model_trt = torch2trt(trt_model, [trt_x, trt_state], max_batch_size=batch_size)
model_trt_f16 = torch2trt(trt_f16_model, [trt_f16_x, trt_f16_state], fp16_mode=True, max_batch_size=batch_size)
model_trt_int8_strict = torch2trt(trt_int8_strict_model, [trt_int8_strict_x, trt_int8_strict_state], fp16_mode=False, int8_mode=True, strict_type_constraints=True, max_batch_size=batch_size)
testloader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
with torch.no_grad():
### test inference time
dummy_x = torch.ones((batch_size, 8)).cuda()
dummy_state = torch.zeros(batch_size, model.STATE_DIM).cuda()
dummy_cnt = 10000
print('ignore data loading time, inference random data')
check_time = time.time()
for i in range(dummy_cnt):
_, _ = test_model(dummy_x, dummy_state)
print('torch model: %.6f' % ((time.time() - check_time) / dummy_cnt))
check_time = time.time()
for i in range(dummy_cnt):
_, _ = model_trt(dummy_x, dummy_state)
print('trt model: %.6f' % ((time.time() - check_time) / dummy_cnt))
dummy_x = torch.ones((batch_size, 8)).half().cuda()
dummy_state = torch.zeros(batch_size, model.STATE_DIM).half().cuda()
check_time = time.time()
for i in range(dummy_cnt):
_, _ = model_trt_f16(dummy_x, dummy_state)
print('trt float 16 model: %.6f' % ((time.time() - check_time) / dummy_cnt))
dummy_x = torch.ones((batch_size, 8)).char().cuda()
dummy_state = torch.zeros(batch_size, model.STATE_DIM).char().cuda()
check_time = time.time()
for i in range(dummy_cnt):
_, _ = model_trt_int8_strict(dummy_x, dummy_state)
print('trt int8 strict model: %.6f' % ((time.time() - check_time) / dummy_cnt))
## end
criterion = nn.CrossEntropyLoss()
state_temp = torch.zeros((batch_size, 8 * 32)).cuda()
step_acc = 0.0
step_loss = 0.0
cnt = 0
loss_cnt = 0
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.float().cuda(), labels.long().cuda()
normal_outputs, state_temp = test_model(inputs, state_temp)
_, preds = torch.max(normal_outputs, 1)
edge_loss = criterion(normal_outputs, labels)
step_loss += edge_loss.item()
loss_cnt += 1
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
cnt += batch_size
print('torch', step_acc.item() / cnt, step_loss / loss_cnt)
state_temp = torch.zeros((batch_size, 8 * 32)).cuda()
step_acc = 0.0
cnt = 0
step_loss = 0.0
loss_cnt = 0
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.float().cuda(), labels.long().cuda()
normal_outputs, state_temp = model_trt(inputs, state_temp)
_, preds = torch.max(normal_outputs, 1)
edge_loss = criterion(normal_outputs, labels)
step_loss += edge_loss.item()
loss_cnt += 1
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
cnt += batch_size
print('trt', step_acc.item() / cnt, step_loss / loss_cnt)
state_temp = torch.zeros((batch_size, 8 * 32)).half().cuda()
step_acc = 0.0
cnt = 0
step_loss = 0.0
loss_cnt = 0
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.half().cuda(), labels.long().cuda()
normal_outputs, state_temp = model_trt_f16(inputs, state_temp)
_, preds = torch.max(normal_outputs, 1)
edge_loss = criterion(normal_outputs, labels)
step_loss += edge_loss.item()
loss_cnt += 1
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
cnt += batch_size
print('float16', step_acc.item() / cnt, step_loss / loss_cnt)
state_temp = torch.zeros((batch_size, 8 * 32)).float().cuda()
step_acc = 0.0
cnt = 0
step_loss = 0.0
loss_cnt = 0
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.float().cuda(), labels.long().cuda()
normal_outputs, state_temp = model_trt_int8_strict(inputs, state_temp)
_, preds = torch.max(normal_outputs, 1)
edge_loss = criterion(normal_outputs, labels)
step_loss += edge_loss.item()
loss_cnt += 1
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
cnt += batch_size
print('int8 strict', step_acc.item() / cnt, step_loss / loss_cnt)
def CsvToTextOne(csv_file):
target_csv = pd.read_csv(csv_file)
file_name, extension = os.path.splitext(csv_file)
print(file_name, extension)
target_text = open(file_name + '_1.txt', mode='wt', encoding='utf-8')
idx = 0
print(len(target_csv))
while idx < len(target_csv):
csv_row = target_csv.iloc[idx]
data_len = csv_row[1]
is_regular = (csv_row[data_len + 2] == 'R')
if is_regular:
target_text.write("%d R\n" % idx)
else:
target_text.write("%d T\n" % idx)
idx += 1
if (idx % 1000000 == 0):
print(idx)
target_text.close()
print('done')
def Mix_Four_CANDataset():
Dos_csv = pd.read_csv('./dataset/DoS_dataset.csv')
Other_csv = [pd.read_csv('./dataset/Fuzzy_dataset.csv'),
pd.read_csv('./dataset/RPM_dataset.csv'),
pd.read_csv('./dataset/gear_dataset.csv')]
Other_csv_idx = [0, 0, 0]
save_csv = open('./dataset/Mixed_dataset.csv', 'w')
save_csv_file = csv.writer(save_csv)
# change the period of the DoS attack traffic
# replace the three DoS slots following each DoS frame with other attack traffic
# DoS / 3 picks among (Fuzzy, RPM, gear), random order and random count / DoS ...
dos_idx = 0
dos_preriod = 3
while dos_idx < len(Dos_csv):
dos_row = Dos_csv.iloc[dos_idx]
number_of_data = dos_row[2]
is_regular = (dos_row[number_of_data + 3] == 'R')
dos_row.dropna(inplace=True)
if is_regular:
save_csv_file.writerow(dos_row[1:])
else:
if dos_preriod == 3:
save_csv_file.writerow(dos_row[1:])
np.random.seed(dos_idx)
selected_edge = np.random.choice([0, 1, 2], 3, replace=True)
else:
selected_idx = selected_edge[dos_preriod]
local_csv = Other_csv[selected_idx]
local_idx = Other_csv_idx[selected_idx]
while True:
local_row = local_csv.iloc[local_idx]
local_number_of_data = local_row[2]
is_injected = (local_row[local_number_of_data + 3] == 'T')
local_idx += 1
if is_injected:
local_row.dropna(inplace=True)
save_csv_file.writerow(local_row[1:])
break
Other_csv_idx[selected_idx] = local_idx
dos_preriod -= 1
if dos_preriod == -1:
dos_preriod = 3
dos_idx += 1
if dos_idx % 100000 == 0:
print(dos_idx)
# break
save_csv.close()
|
#!/usr/bin/python3.6
import requests
import sys
import json
def make_turn(turn_payload, token, count) -> bool:
turn_payload['turn_x'] = 0
turn_payload['turn_y'] = count
turn_text = requests.post(f'{API_URL}/game/turn', json=turn_payload).text
turn = json.loads(turn_text)
print('My turn: ' + turn_text)
if isinstance(turn['data'], str):
return True
print(token)
turn_text = requests.post(f'{API_URL}/game/wait', json=token).text
turn = json.loads(turn_text)
print('Enemy turn: ' + turn_text)
if isinstance(turn['data'], str):
return True
return False
START_GAME_REQUEST_PAYLOAD = {'playWithAI': False, 'ships': [
{'name': 'ship', 'coords': [[0, 0], [0, 1], [0, 2]]}]}
username = sys.argv[1]
password = sys.argv[2]
API_URL = 'http://localhost:9090/api'
login_request = requests.post(f'{API_URL}/login',
json={'userNameOrEmail': username, 'password': password})
print(login_request.text)
token_string = json.loads(login_request.text)['data']['token']
print(token_string)
START_GAME_REQUEST_PAYLOAD['token'] = token_string
start_game_request = requests.post(
f'{API_URL}/game/start', json=START_GAME_REQUEST_PAYLOAD)
print(start_game_request.text)
do_i_start = json.loads(start_game_request.text)['data'] == 'start'
print(do_i_start)
turn_payload = {'token': token_string}
token_payload = {'token': token_string, 'another_field': ""}
turn_count = 0
if not do_i_start:
print('Enemy turn:' +
requests.post(f'{API_URL}/game/wait', json=token_payload).text)
while not make_turn(turn_payload, token_payload, turn_count):
turn_count += 1
|
import re
# the function takes a log file name and the event that needs to be tracked
def parsLog(log, event):
try:
# resulting list of structures
arResult = []
# dictionary that is filled and reset each time the event appears again
objStruct = {}
with open(log, 'r') as data_source:
for row in data_source:
row = str(row)
# if the dict is empty we look for the event; otherwise we collect the SF_TEXT field using the PID
if objStruct == {}:
if row.find(event) != -1:
# store PID and SF_AT in the dict and initialise SF_TEXT for further filling
objStruct["PID"] = re.findall(r'F-(\d+):', str(row))[0]
objStruct["SF_AT"] = re.findall(r'' + event + ' at (\d+)', str(row))[0]
objStruct["SF_TEXT"] = ""
else:
if row.find("F-" + objStruct["PID"]) != -1:
# using the PID, collect the SF_TEXT field from each line
objStruct["SF_TEXT"] += re.findall(r'Dump: (.+)', str(row))[0] + " \n"
else:
# if the PID is not found => the event has ended, append the dict to the result list
arResult.append(objStruct)
# reset the dict to search for the next event
objStruct = {}
# append the last event if the log ended while it was still being collected
if objStruct != {}:
arResult.append(objStruct)
return arResult
except IOError:
return False
print(parsLog("/home/python_tests/log.txt", "Segmentation fault"))
|
from collections import defaultdict
bands = {}
bands_time = defaultdict(int)
while True:
tokens = input()
if tokens == "start of concert":
break
tokens = tokens.split("; ")
command = tokens[0]
if command == "Add":
band_name, members = tokens[1], tokens[2]
members = members.split(", ")
if band_name not in bands.keys():
bands[band_name] = []
for member in members:
if member not in bands[band_name]:
bands[band_name].append(member)
elif command == "Play":
band_name, time = tokens[1], int(tokens[2])
bands_time[band_name] += time
band_name_to_print = input()
bands_time = dict(sorted(bands_time.items(), key=lambda band: (-band[1], band[0])))
print(f'Total time: {sum(bands_time.values())}')
for band, time in bands_time.items():
print(f"{band} -> {time}")
members_to_print = bands[band_name_to_print]
print(band_name_to_print)
for member in members_to_print:
print(f"=> {member}")
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
python tasks/artefact.py ArtefactDatasetAPITask --local-scheduler
"""
import luigi
from ke2mongo import config
from ke2mongo.tasks import DATASET_LICENCE, DATASET_AUTHOR, DATASET_TYPE
from ke2mongo.tasks.dataset import DatasetTask, DatasetCSVTask, DatasetAPITask
class ArtefactDatasetTask(DatasetTask):
# CKAN Dataset params
package = {
'name': 'collection-artefacts',
'notes': u'Cultural and historical artefacts from The Natural History Museum',
'title': "Artefacts",
'author': DATASET_AUTHOR,
'license_id': DATASET_LICENCE,
'resources': [],
'dataset_category': DATASET_TYPE,
'owner_org': config.get('ckan', 'owner_org')
}
# And now save to the datastore
datastore = {
'resource': {
'name': 'Artefacts',
'description': 'Museum artefacts',
'format': 'csv'
},
'primary_key': 'GUID'
}
columns = [
('ecatalogue.AdmGUIDPreferredValue', 'GUID', 'uuid'),
('ecatalogue.ArtName', 'Name', 'string:100'),
('ecatalogue.ArtKind', 'Kind', 'string:100'),
('ecatalogue.PalArtDescription', 'Description', 'string:100'),
('ecatalogue.IdeCurrentScientificName', 'Scientific name', 'string:100'),
('ecatalogue.MulMultiMediaRef', 'Multimedia', 'json')
]
record_type = 'Artefact'
def process_dataframe(self, m, df):
"""
Process the dataframe, converting image IRNs to URIs
@param m: monary
@param df: dataframe
@return: dataframe
"""
# And update images to URLs
df = super(ArtefactDatasetTask, self).process_dataframe(m, df)
self.ensure_multimedia(df, 'Multimedia')
return df
class ArtefactDatasetCSVTask(ArtefactDatasetTask, DatasetCSVTask):
pass
class ArtefactDatasetAPITask(ArtefactDatasetTask, DatasetAPITask):
pass
if __name__ == "__main__":
luigi.run()
|
""" script to display the different interaction channels from Julia's GENIE simulation in a bar chart:
The original GENIE root file of Julia is read with "read_GENIE_file.py" and the fractions of the interaction
channels for the different isotopes are saved in file "interaction_channels_qel_NC_201617evts.txt".
This txt file is edited in "interaction_channels_edit.ods" (also the file "interaction_channels_NC_318097evts.txt"
is edited in this libre office file). The calculated fractions are defined as
number_of_events/number_of_total_events, where the total events are all NC and QEL events (channels with targets
C12, proton, electron, N14, O16, S32). In "interaction_channels_edit.ods", these fractions are converted to
fractions, which are defined as number_of_events/number_of_events_with_target_C12 (only channels with C12 as target
are taken into account).
These are the only channels that can mimic an IBD event (the other channels are just elastic scattering on the target
and therefore there is no neutron).
In the second sheet of the libre office file ('sorted information'), the interaction channels are sorted with
descending fraction.
INFO: all channels can also include Pion_0, Kaon_0, Kaon_plus, Kaon_minus (or heavier hadrons), but this is very
unlikely, because events like this should not be quasi-elastic and therefore not stored.
"""
import numpy as np
from matplotlib import pyplot as plt
def label_barh(ax, bars, text_format, is_inside=True, **kwargs):
"""
Attach a text label to each horizontal bar displaying its y value
"""
max_y_value = max(bar.get_height() for bar in bars)
if is_inside:
distance = max_y_value * 0.05
else:
distance = max_y_value * 0.1
for bar in bars:
text = text_format.format(bar.get_width())
if is_inside:
text_x = bar.get_width() - distance
else:
text_x = bar.get_width() + distance
text_y = bar.get_y() + bar.get_height() / 2
ax.text(text_x, text_y, text, va='center', **kwargs)
""" insert the fractions of the different interaction channels from 'interaction_channels_edit.ods',
sheet 'Sorted_Information'. These fractions contain 89.7480 % of all interaction channels
(fractions of nu + C12 -> ... in %): """
Frac_nu_B11_p = 29.0523
Frac_nu_C11_n = 24.9883
Frac_nu_B10_p_n = 18.2382
Frac_nu_Be10_2p = 4.1941
Frac_nu_C10_2n = 4.0271
Frac_nu_Be9_2p_n = 1.1153
Frac_nu_B9_p_2n = 1.0579
Frac_nu_Be8_2p_2n = 0.9999
Frac_nu_Li6_3p_3n = 0.8657
Frac_nu_Li7_3p_2n = 0.8510
Frac_nu_Be7_2p_3n = 0.8258
Frac_nu_Li9_3p = 0.7461
Frac_nu_Li8_3p_n = 0.7191
Frac_nu_C9_3n = 0.6975
Frac_nu_B8_p_3n = 0.6892
Frac_nu_He5_4p_3n = 0.6805
Frac_other = 10.2520
channels = ('other rare channels', '$^5He$ + $4p$ + $3n$', '$^8B$ + $p$ + $3n$', '$^9C$ + $3n$', '$^8Li$ + $3p$ + $n$',
'$^9Li$ + $3p$',
'$^7Be$ + $2p$ + $3n$', '$^7Li$ + $3p$ + $2n$', '$^6Li$ + $3p$ + $3n$', '$^8Be$ + $2p$ + $2n$',
'$^9B$ + $p$ + $2n$', '$^9Be$ + $2p$ + $n$', '$^{10}C$ + $2n$', '$^{10}Be$ + $2p$', '$^{10}B$ + $p$ + $n$',
'$^{11}C$ + $n$', '$^{11}B$ + $p$')
pos = np.arange(len(channels))
fractions = np.array([Frac_other, Frac_nu_He5_4p_3n, Frac_nu_B8_p_3n, Frac_nu_C9_3n, Frac_nu_Li8_3p_n, Frac_nu_Li9_3p,
Frac_nu_Be7_2p_3n, Frac_nu_Li7_3p_2n, Frac_nu_Li6_3p_3n, Frac_nu_Be8_2p_2n, Frac_nu_B9_p_2n,
Frac_nu_Be9_2p_n, Frac_nu_C10_2n, Frac_nu_Be10_2p, Frac_nu_B10_p_n, Frac_nu_C11_n, Frac_nu_B11_p])
""" display in bar chart """
fig, ax = plt.subplots(figsize=(11, 6))
horizontal_bars = ax.barh(pos, fractions, align='center', alpha=0.9)
label_barh(ax, horizontal_bars, "{:.4}", is_inside=False, fontsize=12)
plt.xticks(fontsize=11)
plt.yticks(pos, channels, fontsize=12)
plt.xlabel('fraction in %', fontsize=12)
plt.title('Atmospheric NC QEL neutrino interactions on $^{12}C$\n'
'(interaction channels: $\\nu_{x}$ + $^{12}C$ $\\rightarrow$ $\\nu_{x}$ + ...)', fontsize=15)
plt.grid(axis='x')
""" insert the fractions of the different interaction channels from 'interaction_channels_edit.ods',
sheet 'Sorted_Information 2'. These fractions contain 74.80581% of all interaction channels
(fractions of nu + C12 -> ... in %): """
F_nu_B11_p = 20.9711
F_nu_C11_n = 18.7591
F_nu_B10_p_n = 13.9626
F_nu_C11_p_piminus = 3.3507
F_nu_Be10_2p = 3.3229
F_nu_C10_2n = 3.2678
F_nu_B11_n_piplus = 2.6327
F_nu_B9_p_2n = 2.0312
F_nu_Be9_2p_n = 1.6938
F_nu_noiso = 1.6814
F_nu_Be8_2p_2n = 1.1800
F_nu_C10_p_n_piminus = 1.0105
F_nu_Be10_p_n_piplus = 0.9422
channels_2 = ('$^{10}Be$ + $p$ + $n$ + $\\pi^+$', '$^{10}C$ + $p$ + $n$ + $\\pi^-$', '$^8Be$ + $2p$ + $2n$',
'no isotope (only $p$, $n$, $\\pi$, ...)', '$^9Be$ + $2p$ + $n$', '$^9B$ + $p$ + $2n$',
'$^{11}B$ + $n$ + $\\pi^+$', '$^{10}C$ + $2n$', '$^{10}Be$ + $2p$',
'$^{11}C$ + $p$ + $\\pi^-$', '$^{10}B$ + $p$ + $n$', '$^{11}C$ + $n$', '$^{11}B$ + $p$')
pos_2 = np.arange(len(channels_2))
fractions_2 = np.array([F_nu_Be10_p_n_piplus, F_nu_C10_p_n_piminus, F_nu_Be8_2p_2n, F_nu_noiso, F_nu_Be9_2p_n,
F_nu_B9_p_2n, F_nu_B11_n_piplus, F_nu_C10_2n, F_nu_Be10_2p, F_nu_C11_p_piminus, F_nu_B10_p_n,
F_nu_C11_n, F_nu_B11_p])
""" display in bar chart """
fig2, ax2 = plt.subplots(figsize=(11, 6))
horizontal_bars_2 = ax2.barh(pos_2, fractions_2, align='center', alpha=0.9)
label_barh(ax2, horizontal_bars_2, "{:.4}", is_inside=False, fontsize=12)
plt.xticks(fontsize=11)
plt.yticks(pos_2, channels_2, fontsize=12)
plt.xlabel('fraction in %', fontsize=12)
plt.title('75% of all atmospheric NC neutrino interactions on $^{12}C$\n'
'(interaction channels: $\\nu_{x}$ + $^{12}C$ $\\rightarrow$ $\\nu_{x}$ + ...)', fontsize=15)
plt.grid(axis='x')
""" insert the fractions of the different interaction channels from 'interaction_channels_edit.ods',
sheet 'Interaction Channels after Generator'. These fraction are calculated with checkout_NCgen.py and saved in
file NC_onlyC12_interaction_channels_250000evts.txt.
IMPORTANT: fractions of isotopes are only included, when there exists the corresponding TALYS simulation of the
isotope! """
# total fractions of each isotope (in %), if total fraction > 2 % (sum of these fraction 77.65 %):
frac_nu_B11 = 24.55
frac_nu_C11 = 22.72
frac_nu_B10 = 16.41
frac_nu_Be10 = 4.50
frac_nu_C10 = 4.43
frac_nu_B9 = 2.68
frac_nu_Be9 = 2.36
channels_3 = ('$^{9}Be$ + ...', '$^{9}B$ + ...', '$^{10}C$ + ...', '$^{10}Be$ + ...', '$^{10}B$ + ...',
'$^{11}C$ + ...', '$^{11}B$ + ...')
pos_3 = np.arange(len(channels_3))
fractions_3 = np.array([frac_nu_Be9, frac_nu_B9, frac_nu_C10, frac_nu_Be10, frac_nu_B10, frac_nu_C11, frac_nu_B11])
""" display in bar chart """
fig3, ax3 = plt.subplots(figsize=(11, 6))
horizontal_bars_3 = ax3.barh(pos_3, fractions_3, align='center', alpha=0.9)
label_barh(ax3, horizontal_bars_3, "{:.4}", is_inside=False, fontsize=12)
plt.xticks(fontsize=11)
plt.yticks(pos_3, channels_3, fontsize=12)
plt.xlabel('fraction in %', fontsize=12)
plt.title('78% of all atmospheric NC neutrino interactions on $^{12}C$\n'
'(interaction channels: $\\nu_{x}$ + $^{12}C$ $\\rightarrow$ $\\nu_{x}$ + ...)', fontsize=15)
plt.grid(axis='x')
# leading fractions in % (sum of these fractions 70.02 %):
frac_nu_B11_p = 21.01
frac_nu_C11_n = 18.79
frac_nu_B10_p_n = 14.03
frac_nu_Be10_2p = 3.30
frac_nu_C11_p_piminus = 3.25
frac_nu_C10_2n = 3.24
frac_nu_B11_n_piplus = 2.67
frac_nu_B9_p_2n = 2.02
frac_nu_Be9_2p_n = 1.71
channels_4 = ('$^9Be$ + $2p$ + $n$', '$^9B$ + $p$ + $2n$', '$^{11}B$ + $n$ + $\\pi^+$', '$^{10}C$ + $2n$',
'$^{11}C$ + $p$ + $\\pi^-$', '$^{10}Be$ + $2p$', '$^{10}B$ + $p$ + $n$', '$^{11}C$ + $n$',
'$^{11}B$ + $p$')
pos_4 = np.arange(len(channels_4))
fractions_4 = np.array([frac_nu_Be9_2p_n, frac_nu_B9_p_2n, frac_nu_B11_n_piplus, frac_nu_C10_2n, frac_nu_C11_p_piminus,
frac_nu_Be10_2p, frac_nu_B10_p_n, frac_nu_C11_n, frac_nu_B11_p])
""" display in bar chart """
fig4, ax4 = plt.subplots(figsize=(11, 6))
horizontal_bars_4 = ax4.barh(pos_4, fractions_4, align='center', alpha=0.9)
label_barh(ax4, horizontal_bars_4, "{:.4}", is_inside=False, fontsize=12)
plt.xticks(fontsize=11)
plt.yticks(pos_4, channels_4, fontsize=12)
plt.xlabel('fraction in %', fontsize=12)
plt.title('70% of all atmospheric NC neutrino interactions on $^{12}C$\n'
'(interaction channels: $\\nu_{x}$ + $^{12}C$ $\\rightarrow$ $\\nu_{x}$ + ...)', fontsize=15)
plt.grid(axis='x')
""" insert the fractions of the different combined channels (NC interaction and deexcitation) from
'combined_channels_250000evts.ods'. These fraction are calculated with checkout_NCgen.py and saved in
file combined_channels_NC_onlyC12_250000evts.txt.
"""
# leading fractions in % (sum of these fractions 63 %):
Frac_nu_B11_p = 14.02
Frac_nu_C11_n = 12.48
Frac_nu_B10_p_n = 8.25
Frac_nu_Be9_2p_n = 5.44
Frac_nu_B9_p_2n = 4.20
Frac_nu_Be8_2p_2n = 3.60
Frac_nu_Be8_p_n_d = 3.34
Frac_nu_Li6_p_n_alpha = 2.98
Frac_nu_C11_p_piminus = 2.17
channels_5 = ('$^{11}C$ + $p$ + $\\pi^-$', '$^6Li$ + $p$ + $n$ + $\\alpha$', '$^8Be$ + $p$ + $n$ + $d$',
'$^8Be$ + $2p$ + $2n$', '$^9B$ + $p$ + $2n$', '$^9Be$ + $2p$ + $n$', '$^{10}B$ + $p$ + $n$',
'$^{11}C$ + $n$', '$^{11}B$ + $p$')
pos_5 = np.arange(len(channels_5))
fractions_5 = np.array([Frac_nu_C11_p_piminus, Frac_nu_Li6_p_n_alpha, Frac_nu_Be8_p_n_d, Frac_nu_Be8_2p_2n,
Frac_nu_B9_p_2n, Frac_nu_Be9_2p_n, Frac_nu_B10_p_n, Frac_nu_C11_n, Frac_nu_B11_p])
sum_5 = np.sum(fractions_5)
""" display in bar chart """
fig5, ax5 = plt.subplots(figsize=(11, 6))
horizontal_bars_5 = ax5.barh(pos_5, fractions_5, align='center', alpha=0.9)
label_barh(ax5, horizontal_bars_5, "{:.4}", is_inside=False, fontsize=12)
plt.xticks(fontsize=11)
plt.yticks(pos_5, channels_5, fontsize=12)
plt.xlabel('fraction in %', fontsize=12)
plt.title('{0:.1f} %'.format(sum_5) + ' of all total atmospheric NC neutrino interactions on $^{12}C$\n'
'(interaction channels: $\\nu_{x}$ + $^{12}C$ $\\rightarrow$ $\\nu_{x}$ + ..., after deexcitation)'
, fontsize=15)
plt.grid(axis='x')
plt.show()
|
bino = int(input())
cino = int(input())
print('Bino' if ((bino + cino) % 2 == 0) else 'Cino')
|
# -*- coding: utf-8 -*-
import cloud
class RTCoreRequest(object):
"""A wrapper class that requests real-time cores to PiCloud.
Usage:
>>> from picrawler.rt_cores import RTCoreRequest
>>> with RTCoreRequest(core_type='s1', num_cores=10):
... pass
:param str core_type: The PiCloud core type.
:param int num_cores: The number of cores.
:param int max_duration: (optional) The lifetime of the request, in hours.
"""
def __init__(self, core_type, num_cores, max_duration=None):
self._core_type = core_type
self._num_cores = num_cores
self._max_duration = max_duration
def __enter__(self):
self._request_id = self.request()
return self
def __exit__(self, type, value, traceback):
self.release(self._request_id)
def request(self):
"""Requests PiCloud's realtime cores.
:return: Request ID
"""
req = cloud.realtime.request(self._core_type, self._num_cores,
self._max_duration)
req_id = req['request_id']
return req_id
def release(self, req_id):
"""Releases PiCloud's realtime cores.
:param int req_id: The request ID.
"""
cloud.realtime.release(req_id)
|
n=int(input())
for i in range(n+1):
for j in range(i):
print("*",end=" ")
print()
# added a new line
#added one more line
|
import psycopg2
import numpy as np
conn = None
try:
conn = psycopg2.connect(user = "postgres",
password = "123",
host = "127.0.0.1",
port = "5432",
database = "test")
for i in np.arange(1000):
myint = i.item()
query = "INSERT INTO my_tree (id, child_id) VALUES (%s, %s);"
cursor = conn.cursor()
cursor.execute(query, (myint, myint+1))
conn.commit()
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to PostgreSQL", error)
finally:
#closing database connection.
if conn:
cursor.close()
conn.close()
|
# -*- coding: utf-8 -*-
class Solution:
def findDuplicates(self, nums):
result = []
for num in nums:
i = abs(num) - 1
if nums[i] > 0:
nums[i] = -nums[i]
else:
result.append(abs(num))
return result
if __name__ == "__main__":
solution = Solution()
assert [1, 2] == solution.findDuplicates([1, 1, 2, 3, 2])
assert [2, 3] == solution.findDuplicates([4, 3, 2, 7, 8, 2, 3, 1])
|
import os,nmap
import socket,thread,threading
import urllib2
import time
import download_dhaga
rurl='' #download link
fname='' #file to be saved as
ext='' #extension of file
myip='' #ip extension of my node
live_ips=[]
size=''
yescount=0
th=[]
def handleclient(connsocket,start,end,i):
global rurl
msg=rurl+' '+str(start)+' '+str(end)+' '+str(i)
try:
connsocket.send(ext)
connsocket.send(msg)
except:
print "URL and ext message not sent"
f=open(str(i)+ext,'wb')
while True:
l=connsocket.recv(1024)
if not l:
break
f.write(l)
f.close()
print "Recvd succesfully"
connsocket.close()
def mgfiles(ind):
f=open(fname+ext,"ab")
f1=open(str(ind)+ext,"rb")
f.write(f1.read())
f1.close()
f.close()
def acc_tcp():
#Divide file into ranges
global yescount
N=yescount
d=(size)/N
start1=0
end1=d-1
arr=[[0]*2 for i in range(N)]
arr[0]=[start1,end1]
for i in range(1,N):
if i!=N-1:
start1=end1+1
end1=start1+d-1
arr[i]=[start1,end1]
else:
start1=end1+1
end1=size-1
arr[i]=[start1,end1]
#Set server host,port and socket
global myip
host = myip
port = 50005
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Bind the socket to the port
s.bind((host, port))
s.listen(5)
print 'Server binded and ready to use for TCP'
i=0
while i<yescount:
try:
s.settimeout(80)
connsocket,addr = s.accept()
print addr
t = threading.Thread(target=handleclient, args=(connsocket,arr[i][0],arr[i][1],i,))
th.append(t)
i=i+1
except socket.timeout:
#In case the client under consideration fails to make TCP connection
print "Problem1: Server itself downloads chunk "+str(arr[i][0])+"-" +str(arr[i][1])
download_dhaga.download(rurl,arr[i][0],arr[i][1],ext)
os.rename('final'+ext,str(i)+ext)
mgfiles(i)
os.remove(str(i)+ext)
i=i+1
for i in range(len(th)):
th[i].start()
#th[i].join()
for i in range(len(th)):
th[i].join(60.0)
if th[i].isAlive():
print "Problem2: Server itself downloads chunk "+str(arr[i][0])+"-" +str(arr[i][1])
download_dhaga.download(rurl,arr[i][0],arr[i][1],ext)
os.rename('final'+ext,str(i)+ext)
for i in range(len(th)):
mgfiles(i)
for i in range(len(th)):
os.remove(str(i)+ext)
s.close()
def broad_udp():
host = myip
port = 50005
message="Can you help in download"
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind((host,port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(message,('<broadcast>',50008))
s.sendto(message,('<broadcast>',50020))
s.close()
ini=time.time()
global yescount
while time.time()-ini<= 40:
try:
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind((host,port))
s.settimeout(6)
reply,caddr=s.recvfrom(2048)
print caddr
if reply=='Yes' or reply=='yes':
yescount=yescount+1
s.close()
except socket.timeout:
print "caught timeout"
print yescount
def beg_server(rurl_val,fname_val,ext_val): #rurl url of file to be downloaded
global rurl,fname,ext,myip,live_ips,size
rurl=rurl_val
fname=fname_val
ext=ext_val
#To get ip of current node
x=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
x.connect(("gmail.com",80))
myip=x.getsockname()[0]
except:
print "Server not connected to internet !!!!!"
return
#size of file
site = urllib2.urlopen(rurl)
meta= site.info()
size= meta.getheaders("Content-Length")[0]
size=int(size)
print "Total File size to be downloaded: "+str(size)
#myip='10.42.0.1'
#Find live nodes
'''
temp = []
temp = myip.split('.')
stip=temp[0]+'.'+temp[1]+'.'+temp[2]+'.'+'0/24' #start ip/24
nm = nmap.PortScanner()
nm.scan(hosts=stip, arguments='-n -sP -PE -PA21,23,80,3389')
hosts_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
for host, status in hosts_list:
live_ips.append(str(host))
print(str(host)+":"+str(status))
'''
broad_udp()
acc_tcp()
|
from constant import INT_SIZE, INT_REPR
class LogicClock:
def __init__(self, n_instance, instance_id, zero_fill=False):
self.n_instance = n_instance
self.instance_id = instance_id
self._clock = [-1] * n_instance
if zero_fill:
self._clock = [0] * n_instance
def __repr__(self):
return str(self.get_time())
def __str__(self):
return self.__repr__()
def __eq__(self, other):
for i in range(self.n_instance):
if self._clock[i] != other.get_time()[i]:
return False
return True
def __lt__(self, other):
return self <= other and self != other
def __le__(self, other):
# A<=B if all A[i]<=B[i]
for i in range(self.n_instance):
if self._clock[i] > other.get_time()[i]:
return False
return True
def serialize(self):
data = bytes()
for i in range(self.n_instance):
data = data + self._clock[i].to_bytes(INT_SIZE, INT_REPR, signed=True)
return data
def deserialize(self, data):
new_clock = LogicClock(self.n_instance, self.instance_id)
for i in range(self.n_instance):
new_clock._clock[i] = int.from_bytes(data[INT_SIZE * i: INT_SIZE * (i + 1)], INT_REPR, signed=True)
return new_clock
def get_time(self):
return tuple(self._clock)
def increase(self):
self._clock[self.instance_id] += 1
def is_null(self):
for i in range(self.n_instance):
if self._clock[i] == -1:
return True
return False
def merge(self, other):
if self.is_null():
for i in range(self.n_instance):
self._clock[i] = other.get_time()[i]
else:
for i in range(self.n_instance):
self._clock[i] = max(self._clock[i], other.get_time()[i])
# clock = LogicClock(3, 0)
# print(clock.get_time())
# clock.increase()
# print(clock.get_time())
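# --- Minimal usage sketch (not part of the original module) ---
# Two replicas exchange clocks; merge() takes the element-wise maximum and
# increase() ticks the local slot. Expected values are noted in the comments.
if __name__ == '__main__':
a = LogicClock(3, 0, zero_fill=True)
b = LogicClock(3, 1, zero_fill=True)
a.increase()            # a = (1, 0, 0)
b.increase()            # b = (0, 1, 0)
b.merge(a)              # b = (1, 1, 0)
print(a, b, a <= b)     # (1, 0, 0) (1, 1, 0) True
restored = a.deserialize(a.serialize())
print(restored == a)    # True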
|
# num = int(input("구구단을 외자! 17 x 5 = "))
# if num == 17 * 5:
# print("정답! 똑똑해")
# else:
# print("실망이야...")
#elif를 배우고 해결해봅시다
# money = int(input("밥 뭐먹지? 돈 얼마있어? : "))
# if money >= 50000:
# print("소고기 먹으러 가자")
# if money >= 30000:
# print("돼지고기 먹자")
# if money >= 10000:
# print("쟈니로켓 먹자")
# else:
# print("한솥먹자...")
print("오늘도 지각이다... 대중교통을 탈까? 택시를 탈까?")
money = input("택시 탈 돈이 있나[y/n] : ")
if money == 'n':
print("어쩔수 없지... 대중교통을 타자")
print("지하철, 버스 뭘 타지?")
time = input("출근 시간인가[y/n] : ")
if time == 'y':
print("지하철 타고 가자")
else:
print("버스 타고 가자")
else:
print("남는게 돈이야 택시타고 얼른가자!")
|
import numpy as np
import pandas as pd
import warnings
from sklearn.base import is_regressor, is_classifier
from scipy.stats import norm
from statsmodels.stats.multitest import multipletests
from abc import ABC, abstractmethod
from .double_ml_data import DoubleMLData, DoubleMLClusterData
from ._utils_resampling import DoubleMLResampling, DoubleMLClusterResampling
from ._utils import _check_is_partition, _check_all_smpls, _check_smpl_split, _check_smpl_split_tpl, _draw_weights
class DoubleML(ABC):
"""Double Machine Learning.
"""
def __init__(self,
obj_dml_data,
n_folds,
n_rep,
score,
dml_procedure,
draw_sample_splitting,
apply_cross_fitting):
# check and pick up obj_dml_data
if not isinstance(obj_dml_data, DoubleMLData):
raise TypeError('The data must be of DoubleMLData type. '
f'{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed.')
self._is_cluster_data = False
if isinstance(obj_dml_data, DoubleMLClusterData):
if obj_dml_data.n_cluster_vars > 2:
raise NotImplementedError('Multi-way (n_ways > 2) clustering not yet implemented.')
self._is_cluster_data = True
self._dml_data = obj_dml_data
# initialize learners and parameters which are set model specific
self._learner = None
self._params = None
# initialize predictions to None which are only stored if method fit is called with store_predictions=True
self._predictions = None
# check resampling specifications
if not isinstance(n_folds, int):
raise TypeError('The number of folds must be of int type. '
f'{str(n_folds)} of type {str(type(n_folds))} was passed.')
if n_folds < 1:
raise ValueError('The number of folds must be positive. '
f'{str(n_folds)} was passed.')
if not isinstance(n_rep, int):
raise TypeError('The number of repetitions for the sample splitting must be of int type. '
f'{str(n_rep)} of type {str(type(n_rep))} was passed.')
if n_rep < 1:
raise ValueError('The number of repetitions for the sample splitting must be positive. '
f'{str(n_rep)} was passed.')
if not isinstance(apply_cross_fitting, bool):
raise TypeError('apply_cross_fitting must be True or False. '
f'Got {str(apply_cross_fitting)}.')
if not isinstance(draw_sample_splitting, bool):
raise TypeError('draw_sample_splitting must be True or False. '
f'Got {str(draw_sample_splitting)}.')
# set resampling specifications
if self._is_cluster_data:
if (n_folds == 1) | (not apply_cross_fitting):
raise NotImplementedError('No cross-fitting (`apply_cross_fitting = False`) '
'is not yet implemented with clustering.')
self._n_folds_per_cluster = n_folds
self._n_folds = n_folds ** self._dml_data.n_cluster_vars
else:
self._n_folds = n_folds
self._n_rep = n_rep
self._apply_cross_fitting = apply_cross_fitting
# check and set dml_procedure and score
if (not isinstance(dml_procedure, str)) | (dml_procedure not in ['dml1', 'dml2']):
raise ValueError('dml_procedure must be "dml1" or "dml2". '
f'Got {str(dml_procedure)}.')
self._dml_procedure = dml_procedure
self._score = score
if (self.n_folds == 1) & self.apply_cross_fitting:
warnings.warn('apply_cross_fitting is set to False. Cross-fitting is not supported for n_folds = 1.')
self._apply_cross_fitting = False
if not self.apply_cross_fitting:
assert self.n_folds <= 2, 'Estimation without cross-fitting not supported for n_folds > 2.'
if self.dml_procedure == 'dml2':
# redirect to dml1 which works out-of-the-box; dml_procedure is of no relevance without cross-fitting
self._dml_procedure = 'dml1'
# perform sample splitting
self._smpls = None
self._smpls_cluster = None
if draw_sample_splitting:
self.draw_sample_splitting()
# initialize arrays according to obj_dml_data and the resampling settings
self._psi, self._psi_a, self._psi_b,\
self._coef, self._se, self._all_coef, self._all_se, self._all_dml1_coef = self._initialize_arrays()
# also initialize bootstrap arrays with the default number of bootstrap replications
self._n_rep_boot, self._boot_coef, self._boot_t_stat = self._initialize_boot_arrays(n_rep_boot=500)
# initialize instance attributes which are later used for iterating
self._i_rep = None
self._i_treat = None
def __str__(self):
class_name = self.__class__.__name__
header = f'================== {class_name} Object ==================\n'
if self._is_cluster_data:
cluster_info = f'Cluster variable(s): {self._dml_data.cluster_cols}\n'
else:
cluster_info = ''
data_info = f'Outcome variable: {self._dml_data.y_col}\n' \
f'Treatment variable(s): {self._dml_data.d_cols}\n' \
f'Covariates: {self._dml_data.x_cols}\n' \
f'Instrument variable(s): {self._dml_data.z_cols}\n' \
+ cluster_info +\
f'No. Observations: {self._dml_data.n_obs}\n'
score_info = f'Score function: {str(self.score)}\n' \
f'DML algorithm: {self.dml_procedure}\n'
learner_info = ''
for key, value in self.learner.items():
learner_info += f'Learner {key}: {str(value)}\n'
if self._is_cluster_data:
resampling_info = f'No. folds per cluster: {self._n_folds_per_cluster}\n' \
f'No. folds: {self.n_folds}\n' \
f'No. repeated sample splits: {self.n_rep}\n' \
f'Apply cross-fitting: {self.apply_cross_fitting}\n'
else:
resampling_info = f'No. folds: {self.n_folds}\n' \
f'No. repeated sample splits: {self.n_rep}\n' \
f'Apply cross-fitting: {self.apply_cross_fitting}\n'
fit_summary = str(self.summary)
res = header + \
'\n------------------ Data summary ------------------\n' + data_info + \
'\n------------------ Score & algorithm ------------------\n' + score_info + \
'\n------------------ Machine learner ------------------\n' + learner_info + \
'\n------------------ Resampling ------------------\n' + resampling_info + \
'\n------------------ Fit summary ------------------\n' + fit_summary
return res
@property
def n_folds(self):
"""
Number of folds.
"""
return self._n_folds
@property
def n_rep(self):
"""
Number of repetitions for the sample splitting.
"""
return self._n_rep
@property
def apply_cross_fitting(self):
"""
Indicates whether cross-fitting should be applied.
"""
return self._apply_cross_fitting
@property
def dml_procedure(self):
"""
The double machine learning algorithm.
"""
return self._dml_procedure
@property
def n_rep_boot(self):
"""
The number of bootstrap replications.
"""
return self._n_rep_boot
@property
def score(self):
"""
The score function.
"""
return self._score
@property
def learner(self):
"""
The machine learners for the nuisance functions.
"""
return self._learner
@property
def learner_names(self):
"""
The names of the learners.
"""
return list(self._learner.keys())
@property
def params(self):
"""
The hyperparameters of the learners.
"""
return self._params
@property
def params_names(self):
"""
The names of the nuisance models with hyperparameters.
"""
return list(self._params.keys())
@property
def predictions(self):
"""
The predictions of the nuisance models.
"""
return self._predictions
def get_params(self, learner):
"""
Get hyperparameters for the nuisance model of DoubleML models.
Parameters
----------
learner : str
The nuisance model / learner (see attribute ``params_names``).
Returns
-------
params : dict
Parameters for the nuisance model / learner.
"""
valid_learner = self.params_names
if (not isinstance(learner, str)) | (learner not in valid_learner):
raise ValueError('Invalid nuisance learner ' + str(learner) + '. ' +
'Valid nuisance learner ' + ' or '.join(valid_learner) + '.')
return self._params[learner]
# The private function _get_params delivers the single treatment, single (cross-fitting) sample subselection.
# The slicing is based on the two properties self._i_treat, the index of the treatment variable, and
# self._i_rep, the index of the cross-fitting sample.
def _get_params(self, learner):
return self._params[learner][self._dml_data.d_cols[self._i_treat]][self._i_rep]
@property
def smpls(self):
"""
The partition used for cross-fitting.
"""
if self._smpls is None:
if self._is_cluster_data:
                err_msg = 'Sample splitting not specified. Draw samples via .draw_sample_splitting().'
else:
                err_msg = ('Sample splitting not specified. Either draw samples via .draw_sample_splitting() ' +
'or set external samples via .set_sample_splitting().')
raise ValueError(err_msg)
return self._smpls
@property
def smpls_cluster(self):
"""
The partition of clusters used for cross-fitting.
"""
if self._is_cluster_data:
if self._smpls_cluster is None:
                raise ValueError('Sample splitting not specified. Draw samples via .draw_sample_splitting().')
return self._smpls_cluster
@property
def psi(self):
"""
Values of the score function :math:`\\psi(W; \\theta, \\eta) = \\psi_a(W; \\eta) \\theta + \\psi_b(W; \\eta)`
after calling :meth:`fit`.
"""
return self._psi
@property
def psi_a(self):
"""
Values of the score function component :math:`\\psi_a(W; \\eta)` after calling :meth:`fit`.
"""
return self._psi_a
@property
def psi_b(self):
"""
Values of the score function component :math:`\\psi_b(W; \\eta)` after calling :meth:`fit`.
"""
return self._psi_b
@property
def coef(self):
"""
Estimates for the causal parameter(s) after calling :meth:`fit`.
"""
return self._coef
@coef.setter
def coef(self, value):
self._coef = value
@property
def se(self):
"""
Standard errors for the causal parameter(s) after calling :meth:`fit`.
"""
return self._se
@se.setter
def se(self, value):
self._se = value
@property
def t_stat(self):
"""
t-statistics for the causal parameter(s) after calling :meth:`fit`.
"""
t_stat = self.coef / self.se
return t_stat
@property
def pval(self):
"""
p-values for the causal parameter(s) after calling :meth:`fit`.
"""
pval = 2 * norm.cdf(-np.abs(self.t_stat))
return pval
@property
def boot_coef(self):
"""
Bootstrapped coefficients for the causal parameter(s) after calling :meth:`fit` and :meth:`bootstrap`.
"""
return self._boot_coef
@property
def boot_t_stat(self):
"""
Bootstrapped t-statistics for the causal parameter(s) after calling :meth:`fit` and :meth:`bootstrap`.
"""
return self._boot_t_stat
@property
def all_coef(self):
"""
Estimates of the causal parameter(s) for the ``n_rep`` different sample splits after calling :meth:`fit`.
"""
return self._all_coef
@property
def all_se(self):
"""
Standard errors of the causal parameter(s) for the ``n_rep`` different sample splits after calling :meth:`fit`.
"""
return self._all_se
@property
def all_dml1_coef(self):
"""
Estimates of the causal parameter(s) for the ``n_rep`` x ``n_folds`` different folds after calling :meth:`fit`
with ``dml_procedure='dml1'``.
"""
return self._all_dml1_coef
@property
def summary(self):
"""
A summary for the estimated causal effect after calling :meth:`fit`.
"""
col_names = ['coef', 'std err', 't', 'P>|t|']
if np.isnan(self.coef).all():
df_summary = pd.DataFrame(columns=col_names)
else:
summary_stats = np.transpose(np.vstack(
[self.coef, self.se,
self.t_stat, self.pval]))
df_summary = pd.DataFrame(summary_stats,
columns=col_names,
index=self._dml_data.d_cols)
ci = self.confint()
df_summary = df_summary.join(ci)
return df_summary
# The private properties with __ always deliver the single treatment, single (cross-fitting) sample subselection.
# The slicing is based on the two properties self._i_treat, the index of the treatment variable, and
# self._i_rep, the index of the cross-fitting sample.
@property
def __smpls(self):
return self._smpls[self._i_rep]
@property
def __smpls_cluster(self):
return self._smpls_cluster[self._i_rep]
@property
def __psi(self):
return self._psi[:, self._i_rep, self._i_treat]
@property
def __psi_a(self):
return self._psi_a[:, self._i_rep, self._i_treat]
@property
def __psi_b(self):
return self._psi_b[:, self._i_rep, self._i_treat]
@property
def __all_coef(self):
return self._all_coef[self._i_treat, self._i_rep]
@property
def __all_se(self):
return self._all_se[self._i_treat, self._i_rep]
def fit(self, n_jobs_cv=None, keep_scores=True, store_predictions=False):
"""
Estimate DoubleML models.
Parameters
----------
n_jobs_cv : None or int
The number of CPUs to use to fit the learners. ``None`` means ``1``.
Default is ``None``.
keep_scores : bool
Indicates whether the score function evaluations should be stored in ``psi``, ``psi_a`` and ``psi_b``.
Default is ``True``.
store_predictions : bool
            Indicates whether the predictions for the nuisance functions should be stored in ``predictions``.
Default is ``False``.
Returns
-------
self : object
"""
if n_jobs_cv is not None:
if not isinstance(n_jobs_cv, int):
raise TypeError('The number of CPUs used to fit the learners must be of int type. '
f'{str(n_jobs_cv)} of type {str(type(n_jobs_cv))} was passed.')
if not isinstance(keep_scores, bool):
raise TypeError('keep_scores must be True or False. '
f'Got {str(keep_scores)}.')
if not isinstance(store_predictions, bool):
raise TypeError('store_predictions must be True or False. '
f'Got {str(store_predictions)}.')
if store_predictions:
self._initialize_predictions()
for i_rep in range(self.n_rep):
self._i_rep = i_rep
for i_d in range(self._dml_data.n_treat):
self._i_treat = i_d
# this step could be skipped for the single treatment variable case
if self._dml_data.n_treat > 1:
self._dml_data.set_x_d(self._dml_data.d_cols[i_d])
# ml estimation of nuisance models and computation of score elements
self._psi_a[:, self._i_rep, self._i_treat], self._psi_b[:, self._i_rep, self._i_treat], preds =\
self._ml_nuisance_and_score_elements(self.__smpls, n_jobs_cv)
if store_predictions:
self._store_predictions(preds)
# estimate the causal parameter
self._all_coef[self._i_treat, self._i_rep] = self._est_causal_pars()
# compute score (depends on estimated causal parameter)
self._psi[:, self._i_rep, self._i_treat] = self._compute_score()
# compute standard errors for causal parameter
self._all_se[self._i_treat, self._i_rep] = self._se_causal_pars()
# aggregated parameter estimates and standard errors from repeated cross-fitting
self._agg_cross_fit()
if not keep_scores:
self._clean_scores()
return self
def bootstrap(self, method='normal', n_rep_boot=500):
"""
Multiplier bootstrap for DoubleML models.
Parameters
----------
method : str
A str (``'Bayes'``, ``'normal'`` or ``'wild'``) specifying the multiplier bootstrap method.
Default is ``'normal'``
n_rep_boot : int
The number of bootstrap replications.
Returns
-------
self : object
"""
if np.isnan(self.coef).all():
raise ValueError('Apply fit() before bootstrap().')
if (not isinstance(method, str)) | (method not in ['Bayes', 'normal', 'wild']):
raise ValueError('Method must be "Bayes", "normal" or "wild". '
f'Got {str(method)}.')
if not isinstance(n_rep_boot, int):
raise TypeError('The number of bootstrap replications must be of int type. '
f'{str(n_rep_boot)} of type {str(type(n_rep_boot))} was passed.')
if n_rep_boot < 1:
raise ValueError('The number of bootstrap replications must be positive. '
f'{str(n_rep_boot)} was passed.')
if self._is_cluster_data:
raise NotImplementedError('bootstrap not yet implemented with clustering.')
self._n_rep_boot, self._boot_coef, self._boot_t_stat = self._initialize_boot_arrays(n_rep_boot)
for i_rep in range(self.n_rep):
self._i_rep = i_rep
# draw weights for the bootstrap
if self.apply_cross_fitting:
n_obs = self._dml_data.n_obs
else:
# be prepared for the case of test sets of different size in repeated no-cross-fitting
smpls = self.__smpls
test_index = smpls[0][1]
n_obs = len(test_index)
weights = _draw_weights(method, n_rep_boot, n_obs)
for i_d in range(self._dml_data.n_treat):
self._i_treat = i_d
i_start = self._i_rep * self.n_rep_boot
i_end = (self._i_rep + 1) * self.n_rep_boot
self._boot_coef[self._i_treat, i_start:i_end], self._boot_t_stat[self._i_treat, i_start:i_end] =\
self._compute_bootstrap(weights)
return self
def confint(self, joint=False, level=0.95):
"""
Confidence intervals for DoubleML models.
Parameters
----------
joint : bool
Indicates whether joint confidence intervals are computed.
Default is ``False``
level : float
The confidence level.
Default is ``0.95``.
Returns
-------
df_ci : pd.DataFrame
A data frame with the confidence interval(s).
"""
if not isinstance(joint, bool):
raise TypeError('joint must be True or False. '
f'Got {str(joint)}.')
if not isinstance(level, float):
raise TypeError('The confidence level must be of float type. '
f'{str(level)} of type {str(type(level))} was passed.')
if (level <= 0) | (level >= 1):
raise ValueError('The confidence level must be in (0,1). '
f'{str(level)} was passed.')
a = (1 - level)
ab = np.array([a / 2, 1. - a / 2])
if joint:
if np.isnan(self.boot_coef).all():
raise ValueError('Apply fit() & bootstrap() before confint(joint=True).')
sim = np.amax(np.abs(self.boot_t_stat), 0)
hatc = np.quantile(sim, 1 - a)
ci = np.vstack((self.coef - self.se * hatc, self.coef + self.se * hatc)).T
else:
if np.isnan(self.coef).all():
raise ValueError('Apply fit() before confint().')
fac = norm.ppf(ab)
ci = np.vstack((self.coef + self.se * fac[0], self.coef + self.se * fac[1])).T
df_ci = pd.DataFrame(ci,
columns=['{:.1f} %'.format(i * 100) for i in ab],
index=self._dml_data.d_cols)
return df_ci
def p_adjust(self, method='romano-wolf'):
"""
Multiple testing adjustment for DoubleML models.
Parameters
----------
method : str
            A str (``'romano-wolf'``, ``'bonferroni'``, ``'holm'``, etc.) specifying the adjustment method.
            In addition to ``'romano-wolf'``, all methods implemented in
:py:func:`statsmodels.stats.multitest.multipletests` can be applied.
Default is ``'romano-wolf'``.
Returns
-------
p_val : pd.DataFrame
A data frame with adjusted p-values.
"""
if np.isnan(self.coef).all():
raise ValueError('Apply fit() before p_adjust().')
if not isinstance(method, str):
raise TypeError('The p_adjust method must be of str type. '
f'{str(method)} of type {str(type(method))} was passed.')
if method.lower() in ['rw', 'romano-wolf']:
if np.isnan(self.boot_coef).all():
raise ValueError(f'Apply fit() & bootstrap() before p_adjust("{method}").')
pinit = np.full_like(self.pval, np.nan)
p_val_corrected = np.full_like(self.pval, np.nan)
boot_t_stats = self.boot_t_stat
t_stat = self.t_stat
stepdown_ind = np.argsort(t_stat)[::-1]
ro = np.argsort(stepdown_ind)
for i_d in range(self._dml_data.n_treat):
if i_d == 0:
sim = np.max(boot_t_stats, axis=0)
pinit[i_d] = np.minimum(1, np.mean(sim >= np.abs(t_stat[stepdown_ind][i_d])))
else:
sim = np.max(np.delete(boot_t_stats, stepdown_ind[:i_d], axis=0),
axis=0)
pinit[i_d] = np.minimum(1, np.mean(sim >= np.abs(t_stat[stepdown_ind][i_d])))
for i_d in range(self._dml_data.n_treat):
if i_d == 0:
p_val_corrected[i_d] = pinit[i_d]
else:
p_val_corrected[i_d] = np.maximum(pinit[i_d], p_val_corrected[i_d - 1])
p_val = p_val_corrected[ro]
else:
_, p_val, _, _ = multipletests(self.pval, method=method)
p_val = pd.DataFrame(np.vstack((self.coef, p_val)).T,
columns=['coef', 'pval'],
index=self._dml_data.d_cols)
return p_val
def tune(self,
param_grids,
tune_on_folds=False,
scoring_methods=None, # if None the estimator's score method is used
n_folds_tune=5,
search_mode='grid_search',
n_iter_randomized_search=100,
n_jobs_cv=None,
set_as_params=True,
return_tune_res=False):
"""
Hyperparameter-tuning for DoubleML models.
The hyperparameter-tuning is performed using either an exhaustive search over specified parameter values
implemented in :class:`sklearn.model_selection.GridSearchCV` or via a randomized search implemented in
:class:`sklearn.model_selection.RandomizedSearchCV`.
Parameters
----------
param_grids : dict
A dict with a parameter grid for each nuisance model / learner (see attribute ``learner_names``).
tune_on_folds : bool
Indicates whether the tuning should be done fold-specific or globally.
Default is ``False``.
scoring_methods : None or dict
The scoring method used to evaluate the predictions. The scoring method must be set per nuisance model via
a dict (see attribute ``learner_names`` for the keys).
If None, the estimator’s score method is used.
Default is ``None``.
n_folds_tune : int
Number of folds used for tuning.
Default is ``5``.
search_mode : str
A str (``'grid_search'`` or ``'randomized_search'``) specifying whether hyperparameters are optimized via
:class:`sklearn.model_selection.GridSearchCV` or :class:`sklearn.model_selection.RandomizedSearchCV`.
Default is ``'grid_search'``.
n_iter_randomized_search : int
If ``search_mode == 'randomized_search'``. The number of parameter settings that are sampled.
Default is ``100``.
n_jobs_cv : None or int
The number of CPUs to use to tune the learners. ``None`` means ``1``.
Default is ``None``.
set_as_params : bool
Indicates whether the hyperparameters should be set in order to be used when :meth:`fit` is called.
Default is ``True``.
return_tune_res : bool
Indicates whether detailed tuning results should be returned.
Default is ``False``.
Returns
-------
self : object
Returned if ``return_tune_res`` is ``False``.
tune_res: list
A list containing detailed tuning results and the proposed hyperparameters.
            Returned if ``return_tune_res`` is ``True``.
"""
if (not isinstance(param_grids, dict)) | (not all(k in param_grids for k in self.learner_names)):
raise ValueError('Invalid param_grids ' + str(param_grids) + '. '
'param_grids must be a dictionary with keys ' + ' and '.join(self.learner_names) + '.')
if scoring_methods is not None:
if (not isinstance(scoring_methods, dict)) | (not all(k in self.learner_names for k in scoring_methods)):
raise ValueError('Invalid scoring_methods ' + str(scoring_methods) + '. ' +
'scoring_methods must be a dictionary. ' +
'Valid keys are ' + ' and '.join(self.learner_names) + '.')
if not all(k in scoring_methods for k in self.learner_names):
# if there are learners for which no scoring_method was set, we fall back to None, i.e., default scoring
for learner in self.learner_names:
if learner not in scoring_methods:
scoring_methods[learner] = None
if not isinstance(tune_on_folds, bool):
raise TypeError('tune_on_folds must be True or False. '
f'Got {str(tune_on_folds)}.')
if not isinstance(n_folds_tune, int):
raise TypeError('The number of folds used for tuning must be of int type. '
f'{str(n_folds_tune)} of type {str(type(n_folds_tune))} was passed.')
if n_folds_tune < 2:
raise ValueError('The number of folds used for tuning must be at least two. '
f'{str(n_folds_tune)} was passed.')
if (not isinstance(search_mode, str)) | (search_mode not in ['grid_search', 'randomized_search']):
raise ValueError('search_mode must be "grid_search" or "randomized_search". '
f'Got {str(search_mode)}.')
if not isinstance(n_iter_randomized_search, int):
raise TypeError('The number of parameter settings sampled for the randomized search must be of int type. '
f'{str(n_iter_randomized_search)} of type '
f'{str(type(n_iter_randomized_search))} was passed.')
if n_iter_randomized_search < 2:
raise ValueError('The number of parameter settings sampled for the randomized search must be at least two. '
f'{str(n_iter_randomized_search)} was passed.')
if n_jobs_cv is not None:
if not isinstance(n_jobs_cv, int):
raise TypeError('The number of CPUs used to fit the learners must be of int type. '
f'{str(n_jobs_cv)} of type {str(type(n_jobs_cv))} was passed.')
if not isinstance(set_as_params, bool):
raise TypeError('set_as_params must be True or False. '
f'Got {str(set_as_params)}.')
if not isinstance(return_tune_res, bool):
raise TypeError('return_tune_res must be True or False. '
f'Got {str(return_tune_res)}.')
if tune_on_folds:
            tuning_res = [[None] * self._dml_data.n_treat for _ in range(self.n_rep)]
else:
tuning_res = [None] * self._dml_data.n_treat
for i_d in range(self._dml_data.n_treat):
self._i_treat = i_d
# this step could be skipped for the single treatment variable case
if self._dml_data.n_treat > 1:
self._dml_data.set_x_d(self._dml_data.d_cols[i_d])
if tune_on_folds:
nuisance_params = list()
for i_rep in range(self.n_rep):
self._i_rep = i_rep
# tune hyperparameters
res = self._ml_nuisance_tuning(self.__smpls,
param_grids, scoring_methods,
n_folds_tune,
n_jobs_cv,
search_mode, n_iter_randomized_search)
tuning_res[i_rep][i_d] = res
nuisance_params.append(res['params'])
if set_as_params:
for nuisance_model in nuisance_params[0].keys():
params = [x[nuisance_model] for x in nuisance_params]
self.set_ml_nuisance_params(nuisance_model, self._dml_data.d_cols[i_d], params)
else:
smpls = [(np.arange(self._dml_data.n_obs), np.arange(self._dml_data.n_obs))]
# tune hyperparameters
res = self._ml_nuisance_tuning(smpls,
param_grids, scoring_methods,
n_folds_tune,
n_jobs_cv,
search_mode, n_iter_randomized_search)
tuning_res[i_d] = res
if set_as_params:
for nuisance_model in res['params'].keys():
params = res['params'][nuisance_model]
self.set_ml_nuisance_params(nuisance_model, self._dml_data.d_cols[i_d], params[0])
if return_tune_res:
return tuning_res
else:
return self
def set_ml_nuisance_params(self, learner, treat_var, params):
"""
Set hyperparameters for the nuisance models of DoubleML models.
Parameters
----------
learner : str
The nuisance model / learner (see attribute ``params_names``).
treat_var : str
The treatment variable (hyperparameters can be set treatment-variable specific).
params : dict or list
A dict with estimator parameters (used for all folds) or a nested list with fold specific parameters. The
outer list needs to be of length ``n_rep`` and the inner list of length ``n_folds``.
Returns
-------
self : object
"""
valid_learner = self.params_names
if learner not in valid_learner:
raise ValueError('Invalid nuisance learner ' + learner + '. ' +
'Valid nuisance learner ' + ' or '.join(valid_learner) + '.')
if treat_var not in self._dml_data.d_cols:
raise ValueError('Invalid treatment variable ' + treat_var + '. ' +
'Valid treatment variable ' + ' or '.join(self._dml_data.d_cols) + '.')
if params is None:
all_params = [None] * self.n_rep
elif isinstance(params, dict):
if self.apply_cross_fitting:
all_params = [[params] * self.n_folds] * self.n_rep
else:
all_params = [[params] * 1] * self.n_rep
else:
            # ToDo: Add meaningful error messages for the asserts and corresponding unit tests
assert len(params) == self.n_rep
if self.apply_cross_fitting:
assert np.all(np.array([len(x) for x in params]) == self.n_folds)
else:
assert np.all(np.array([len(x) for x in params]) == 1)
all_params = params
self._params[learner][treat_var] = all_params
return self
@abstractmethod
def _initialize_ml_nuisance_params(self):
pass
@abstractmethod
def _ml_nuisance_and_score_elements(self, smpls, n_jobs_cv):
pass
@abstractmethod
def _ml_nuisance_tuning(self, smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,
search_mode, n_iter_randomized_search):
pass
@staticmethod
def _check_learner(learner, learner_name, regressor, classifier):
err_msg_prefix = f'Invalid learner provided for {learner_name}: '
warn_msg_prefix = f'Learner provided for {learner_name} is probably invalid: '
if isinstance(learner, type):
raise TypeError(err_msg_prefix + 'provide an instance of a learner instead of a class.')
if not hasattr(learner, 'fit'):
raise TypeError(err_msg_prefix + f'{str(learner)} has no method .fit().')
if not hasattr(learner, 'set_params'):
raise TypeError(err_msg_prefix + f'{str(learner)} has no method .set_params().')
if not hasattr(learner, 'get_params'):
raise TypeError(err_msg_prefix + f'{str(learner)} has no method .get_params().')
if regressor & classifier:
if is_classifier(learner):
learner_is_classifier = True
elif is_regressor(learner):
learner_is_classifier = False
else:
warnings.warn(warn_msg_prefix + f'{str(learner)} is (probably) neither a regressor nor a classifier. ' +
'Method predict is used for prediction.')
learner_is_classifier = False
elif classifier:
if not is_classifier(learner):
warnings.warn(warn_msg_prefix + f'{str(learner)} is (probably) no classifier.')
learner_is_classifier = True
else:
assert regressor # classifier, regressor or both must be True
if not is_regressor(learner):
warnings.warn(warn_msg_prefix + f'{str(learner)} is (probably) no regressor.')
learner_is_classifier = False
# check existence of the prediction method
if learner_is_classifier:
if not hasattr(learner, 'predict_proba'):
raise TypeError(err_msg_prefix + f'{str(learner)} has no method .predict_proba().')
else:
if not hasattr(learner, 'predict'):
raise TypeError(err_msg_prefix + f'{str(learner)} has no method .predict().')
return learner_is_classifier
def _initialize_arrays(self):
psi = np.full((self._dml_data.n_obs, self.n_rep, self._dml_data.n_treat), np.nan)
psi_a = np.full((self._dml_data.n_obs, self.n_rep, self._dml_data.n_treat), np.nan)
psi_b = np.full((self._dml_data.n_obs, self.n_rep, self._dml_data.n_treat), np.nan)
coef = np.full(self._dml_data.n_treat, np.nan)
se = np.full(self._dml_data.n_treat, np.nan)
all_coef = np.full((self._dml_data.n_treat, self.n_rep), np.nan)
all_se = np.full((self._dml_data.n_treat, self.n_rep), np.nan)
if self.dml_procedure == 'dml1':
if self.apply_cross_fitting:
all_dml1_coef = np.full((self._dml_data.n_treat, self.n_rep, self.n_folds), np.nan)
else:
all_dml1_coef = np.full((self._dml_data.n_treat, self.n_rep, 1), np.nan)
else:
all_dml1_coef = None
return psi, psi_a, psi_b, coef, se, all_coef, all_se, all_dml1_coef
def _initialize_boot_arrays(self, n_rep_boot):
boot_coef = np.full((self._dml_data.n_treat, n_rep_boot * self.n_rep), np.nan)
boot_t_stat = np.full((self._dml_data.n_treat, n_rep_boot * self.n_rep), np.nan)
return n_rep_boot, boot_coef, boot_t_stat
def _initialize_predictions(self):
self._predictions = {learner: np.full((self._dml_data.n_obs, self.n_rep, self._dml_data.n_treat), np.nan)
for learner in self.params_names}
def _store_predictions(self, preds):
for learner in self.params_names:
self._predictions[learner][:, self._i_rep, self._i_treat] = preds[learner]
def draw_sample_splitting(self):
"""
Draw sample splitting for DoubleML models.
The samples are drawn according to the attributes
``n_folds``, ``n_rep`` and ``apply_cross_fitting``.
Returns
-------
self : object
"""
if self._is_cluster_data:
obj_dml_resampling = DoubleMLClusterResampling(n_folds=self._n_folds_per_cluster,
n_rep=self.n_rep,
n_obs=self._dml_data.n_obs,
apply_cross_fitting=self.apply_cross_fitting,
n_cluster_vars=self._dml_data.n_cluster_vars,
cluster_vars=self._dml_data.cluster_vars)
self._smpls, self._smpls_cluster = obj_dml_resampling.split_samples()
else:
obj_dml_resampling = DoubleMLResampling(n_folds=self.n_folds,
n_rep=self.n_rep,
n_obs=self._dml_data.n_obs,
apply_cross_fitting=self.apply_cross_fitting)
self._smpls = obj_dml_resampling.split_samples()
return self
def set_sample_splitting(self, all_smpls):
"""
Set the sample splitting for DoubleML models.
The attributes ``n_folds`` and ``n_rep`` are derived from the provided partition.
Parameters
----------
all_smpls : list or tuple
If nested list of lists of tuples:
The outer list needs to provide an entry per repeated sample splitting (length of list is set as
``n_rep``).
The inner list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as
``n_folds``). If tuples for more than one fold are provided, it must form a partition and
``apply_cross_fitting`` is set to True. Otherwise ``apply_cross_fitting`` is set to False and
``n_folds=2``.
If list of tuples:
The list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as
``n_folds``). If tuples for more than one fold are provided, it must form a partition and
``apply_cross_fitting`` is set to True. Otherwise ``apply_cross_fitting`` is set to False and
``n_folds=2``.
``n_rep=1`` is always set.
If tuple:
Must be a tuple with two elements train_ind and test_ind. No sample splitting is achieved if train_ind
            and test_ind are range(n_obs). Otherwise ``n_folds=2``.
``apply_cross_fitting=False`` and ``n_rep=1`` is always set.
Returns
-------
self : object
Examples
--------
>>> import numpy as np
>>> import doubleml as dml
>>> from doubleml.datasets import make_plr_CCDDHNR2018
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.base import clone
>>> np.random.seed(3141)
>>> learner = RandomForestRegressor(max_depth=2, n_estimators=10)
>>> ml_g = learner
>>> ml_m = learner
>>> obj_dml_data = make_plr_CCDDHNR2018(n_obs=10, alpha=0.5)
>>> dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m)
>>> # simple sample splitting with two folds and without cross-fitting
>>> smpls = ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])
>>> dml_plr_obj.set_sample_splitting(smpls)
>>> # sample splitting with two folds and cross-fitting
>>> smpls = [([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]),
>>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])]
>>> dml_plr_obj.set_sample_splitting(smpls)
>>> # sample splitting with two folds and repeated cross-fitting with n_rep = 2
>>> smpls = [[([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]),
>>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])],
>>> [([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]),
>>> ([1, 3, 5, 7, 9], [0, 2, 4, 6, 8])]]
>>> dml_plr_obj.set_sample_splitting(smpls)
"""
if self._is_cluster_data:
raise NotImplementedError('Externally setting the sample splitting for DoubleML is '
'not yet implemented with clustering.')
if isinstance(all_smpls, tuple):
if not len(all_smpls) == 2:
raise ValueError('Invalid partition provided. '
'Tuple for train_ind and test_ind must consist of exactly two elements.')
all_smpls = _check_smpl_split_tpl(all_smpls, self._dml_data.n_obs)
if (_check_is_partition([all_smpls], self._dml_data.n_obs) &
_check_is_partition([(all_smpls[1], all_smpls[0])], self._dml_data.n_obs)):
self._n_rep = 1
self._n_folds = 1
self._apply_cross_fitting = False
self._smpls = [[all_smpls]]
else:
self._n_rep = 1
self._n_folds = 2
self._apply_cross_fitting = False
self._smpls = _check_all_smpls([[all_smpls]], self._dml_data.n_obs, check_intersect=True)
else:
if not isinstance(all_smpls, list):
raise TypeError('all_smpls must be of list or tuple type. '
f'{str(all_smpls)} of type {str(type(all_smpls))} was passed.')
all_tuple = all([isinstance(tpl, tuple) for tpl in all_smpls])
if all_tuple:
if not all([len(tpl) == 2 for tpl in all_smpls]):
raise ValueError('Invalid partition provided. '
'All tuples for train_ind and test_ind must consist of exactly two elements.')
self._n_rep = 1
all_smpls = _check_smpl_split(all_smpls, self._dml_data.n_obs)
if _check_is_partition(all_smpls, self._dml_data.n_obs):
if ((len(all_smpls) == 1) &
_check_is_partition([(all_smpls[0][1], all_smpls[0][0])], self._dml_data.n_obs)):
self._n_folds = 1
self._apply_cross_fitting = False
self._smpls = [all_smpls]
else:
self._n_folds = len(all_smpls)
self._apply_cross_fitting = True
self._smpls = _check_all_smpls([all_smpls], self._dml_data.n_obs, check_intersect=True)
else:
if not len(all_smpls) == 1:
raise ValueError('Invalid partition provided. '
'Tuples for more than one fold provided that don\'t form a partition.')
self._n_folds = 2
self._apply_cross_fitting = False
self._smpls = _check_all_smpls([all_smpls], self._dml_data.n_obs, check_intersect=True)
else:
all_list = all([isinstance(smpl, list) for smpl in all_smpls])
if not all_list:
raise ValueError('Invalid partition provided. '
'all_smpls is a list where neither all elements are tuples '
'nor all elements are lists.')
all_tuple = all([all([isinstance(tpl, tuple) for tpl in smpl]) for smpl in all_smpls])
if not all_tuple:
raise TypeError('For repeated sample splitting all_smpls must be list of lists of tuples.')
all_pairs = all([all([len(tpl) == 2 for tpl in smpl]) for smpl in all_smpls])
if not all_pairs:
raise ValueError('Invalid partition provided. '
'All tuples for train_ind and test_ind must consist of exactly two elements.')
n_folds_each_smpl = np.array([len(smpl) for smpl in all_smpls])
if not np.all(n_folds_each_smpl == n_folds_each_smpl[0]):
raise ValueError('Invalid partition provided. '
'Different number of folds for repeated sample splitting.')
all_smpls = _check_all_smpls(all_smpls, self._dml_data.n_obs)
smpls_are_partitions = [_check_is_partition(smpl, self._dml_data.n_obs) for smpl in all_smpls]
if all(smpls_are_partitions):
if ((len(all_smpls) == 1) & (len(all_smpls[0]) == 1) &
_check_is_partition([(all_smpls[0][0][1], all_smpls[0][0][0])], self._dml_data.n_obs)):
self._n_rep = 1
self._n_folds = 1
self._apply_cross_fitting = False
self._smpls = all_smpls
else:
self._n_rep = len(all_smpls)
self._n_folds = n_folds_each_smpl[0]
self._apply_cross_fitting = True
self._smpls = _check_all_smpls(all_smpls, self._dml_data.n_obs, check_intersect=True)
else:
if not n_folds_each_smpl[0] == 1:
raise ValueError('Invalid partition provided. '
'Tuples for more than one fold provided '
'but at least one does not form a partition.')
self._n_rep = len(all_smpls)
self._n_folds = 2
self._apply_cross_fitting = False
self._smpls = _check_all_smpls(all_smpls, self._dml_data.n_obs, check_intersect=True)
self._psi, self._psi_a, self._psi_b, \
self._coef, self._se, self._all_coef, self._all_se, self._all_dml1_coef = self._initialize_arrays()
self._initialize_ml_nuisance_params()
return self
def _est_causal_pars(self):
dml_procedure = self.dml_procedure
smpls = self.__smpls
if not self._is_cluster_data:
if dml_procedure == 'dml1':
# Note that len(smpls) is only not equal to self.n_folds if self.apply_cross_fitting = False
thetas = np.zeros(len(smpls))
for idx, (_, test_index) in enumerate(smpls):
thetas[idx] = self._orth_est(test_index)
theta_hat = np.mean(thetas)
coef = theta_hat
self._all_dml1_coef[self._i_treat, self._i_rep, :] = thetas
else:
assert dml_procedure == 'dml2'
theta_hat = self._orth_est()
coef = theta_hat
else:
coef = self._orth_est_cluster_data()
return coef
def _se_causal_pars(self):
if not self._is_cluster_data:
se = np.sqrt(self._var_est())
else:
se = np.sqrt(self._var_est_cluster_data())
return se
def _agg_cross_fit(self):
# aggregate parameters from the repeated cross-fitting
# don't use the getter (always for one treatment variable and one sample), but the private variable
self.coef = np.median(self._all_coef, 1)
        # TODO: In the documentation of standard errors we need to clearly state what we return here, i.e.,
# the asymptotic variance sigma_hat/N and not sigma_hat (which sometimes is also called the asympt var)!
# TODO: In the edge case of repeated no-cross-fitting, the test sets might have different size and therefore
        # it would not be valid to always use the same self._var_scaling_factor
xx = np.tile(self.coef.reshape(-1, 1), self.n_rep)
self.se = np.sqrt(np.divide(np.median(np.multiply(np.power(self._all_se, 2), self._var_scaling_factor) +
np.power(self._all_coef - xx, 2), 1), self._var_scaling_factor))
def _est_causal_pars_and_se(self):
for i_rep in range(self.n_rep):
self._i_rep = i_rep
for i_d in range(self._dml_data.n_treat):
self._i_treat = i_d
# estimate the causal parameter
self._all_coef[self._i_treat, self._i_rep] = self._est_causal_pars()
# compute score (depends on estimated causal parameter)
self._psi[:, self._i_rep, self._i_treat] = self._compute_score()
# compute standard errors for causal parameter
self._all_se[self._i_treat, self._i_rep] = self._se_causal_pars()
# aggregated parameter estimates and standard errors from repeated cross-fitting
self._agg_cross_fit()
def _compute_bootstrap(self, weights):
if self.apply_cross_fitting:
J = np.mean(self.__psi_a)
boot_coef = np.matmul(weights, self.__psi) / (self._dml_data.n_obs * J)
boot_t_stat = np.matmul(weights, self.__psi) / (self._dml_data.n_obs * self.__all_se * J)
else:
# be prepared for the case of test sets of different size in repeated no-cross-fitting
smpls = self.__smpls
test_index = smpls[0][1]
J = np.mean(self.__psi_a[test_index])
boot_coef = np.matmul(weights, self.__psi[test_index]) / (len(test_index) * J)
boot_t_stat = np.matmul(weights, self.__psi[test_index]) / (len(test_index) * self.__all_se * J)
return boot_coef, boot_t_stat
def _var_est(self):
"""
Estimate the standard errors of the structural parameter
"""
psi_a = self.__psi_a
psi = self.__psi
if self.apply_cross_fitting:
self._var_scaling_factor = self._dml_data.n_obs
else:
# In case of no-cross-fitting, the score function was only evaluated on the test data set
smpls = self.__smpls
test_index = smpls[0][1]
psi_a = psi_a[test_index]
psi = psi[test_index]
self._var_scaling_factor = len(test_index)
J = np.mean(psi_a)
sigma2_hat = 1 / self._var_scaling_factor * np.mean(np.power(psi, 2)) / np.power(J, 2)
return sigma2_hat
def _var_est_cluster_data(self):
psi_a = self.__psi_a
psi = self.__psi
if self._dml_data.n_cluster_vars == 1:
this_cluster_var = self._dml_data.cluster_vars[:, 0]
clusters = np.unique(this_cluster_var)
gamma_hat = 0
j_hat = 0
for i_fold in range(self.n_folds):
test_inds = self.__smpls[i_fold][1]
test_cluster_inds = self.__smpls_cluster[i_fold][1]
I_k = test_cluster_inds[0]
const = 1 / len(I_k)
for cluster_value in I_k:
ind_cluster = (this_cluster_var == cluster_value)
gamma_hat += const * np.sum(np.outer(psi[ind_cluster], psi[ind_cluster]))
j_hat += np.sum(psi_a[test_inds]) / len(I_k)
gamma_hat = gamma_hat / self._n_folds_per_cluster
j_hat = j_hat / self._n_folds_per_cluster
self._var_scaling_factor = len(clusters)
sigma2_hat = gamma_hat / (j_hat ** 2) / self._var_scaling_factor
else:
assert self._dml_data.n_cluster_vars == 2
first_cluster_var = self._dml_data.cluster_vars[:, 0]
second_cluster_var = self._dml_data.cluster_vars[:, 1]
gamma_hat = 0
j_hat = 0
for i_fold in range(self.n_folds):
test_inds = self.__smpls[i_fold][1]
test_cluster_inds = self.__smpls_cluster[i_fold][1]
I_k = test_cluster_inds[0]
J_l = test_cluster_inds[1]
const = min(len(I_k), len(J_l)) / ((len(I_k) * len(J_l)) ** 2)
for cluster_value in I_k:
ind_cluster = (first_cluster_var == cluster_value) & np.in1d(second_cluster_var, J_l)
gamma_hat += const * np.sum(np.outer(psi[ind_cluster], psi[ind_cluster]))
for cluster_value in J_l:
ind_cluster = (second_cluster_var == cluster_value) & np.in1d(first_cluster_var, I_k)
gamma_hat += const * np.sum(np.outer(psi[ind_cluster], psi[ind_cluster]))
j_hat += np.sum(psi_a[test_inds]) / (len(I_k) * len(J_l))
gamma_hat = gamma_hat / (self._n_folds_per_cluster ** 2)
j_hat = j_hat / (self._n_folds_per_cluster ** 2)
n_first_clusters = len(np.unique(first_cluster_var))
n_second_clusters = len(np.unique(second_cluster_var))
self._var_scaling_factor = min(n_first_clusters, n_second_clusters)
sigma2_hat = gamma_hat / (j_hat ** 2) / self._var_scaling_factor
return sigma2_hat
def _orth_est(self, inds=None):
"""
Estimate the structural parameter
"""
psi_a = self.__psi_a
psi_b = self.__psi_b
if inds is not None:
psi_a = psi_a[inds]
psi_b = psi_b[inds]
theta = -np.mean(psi_b) / np.mean(psi_a)
return theta
def _orth_est_cluster_data(self):
dml_procedure = self.dml_procedure
smpls = self.__smpls
psi_a = self.__psi_a
psi_b = self.__psi_b
if dml_procedure == 'dml1':
# note that in the dml1 case we could also simply apply the standard function without cluster adjustment
thetas = np.zeros(len(smpls))
for i_fold, (_, test_index) in enumerate(smpls):
test_cluster_inds = self.__smpls_cluster[i_fold][1]
scaling_factor = 1./np.prod(np.array([len(inds) for inds in test_cluster_inds]))
thetas[i_fold] = - (scaling_factor * np.sum(psi_b[test_index])) / \
(scaling_factor * np.sum(psi_a[test_index]))
theta = np.mean(thetas)
self._all_dml1_coef[self._i_treat, self._i_rep, :] = thetas
else:
assert dml_procedure == 'dml2'
# See Chiang et al. (2021) Algorithm 1
psi_a_subsample_mean = 0.
psi_b_subsample_mean = 0.
for i_fold, (_, test_index) in enumerate(smpls):
test_cluster_inds = self.__smpls_cluster[i_fold][1]
scaling_factor = 1./np.prod(np.array([len(inds) for inds in test_cluster_inds]))
psi_a_subsample_mean += scaling_factor * np.sum(psi_a[test_index])
psi_b_subsample_mean += scaling_factor * np.sum(psi_b[test_index])
theta = -psi_b_subsample_mean / psi_a_subsample_mean
return theta
def _compute_score(self):
psi = self.__psi_a * self.__all_coef + self.__psi_b
return psi
def _clean_scores(self):
del self._psi
del self._psi_a
del self._psi_b
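# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it mirrors the example
# from the set_sample_splitting docstring above and assumes that the doubleml
# and scikit-learn packages are installed. It only illustrates the typical
# workflow fit() -> bootstrap() -> summary/confint() implemented by the base
# class in this file; it is not part of the package itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    import doubleml as dml
    from doubleml.datasets import make_plr_CCDDHNR2018
    from sklearn.ensemble import RandomForestRegressor

    np.random.seed(3141)
    ml_g = RandomForestRegressor(max_depth=2, n_estimators=10)
    ml_m = RandomForestRegressor(max_depth=2, n_estimators=10)
    obj_dml_data = make_plr_CCDDHNR2018(n_obs=500, alpha=0.5)
    dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m)
    dml_plr_obj.fit()                        # cross-fitted estimation of the causal parameter
    dml_plr_obj.bootstrap(method='normal')   # multiplier bootstrap for joint inference
    print(dml_plr_obj.summary)
    print(dml_plr_obj.confint(joint=False, level=0.95))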
|
import numpy as np
import tensorflow as tf
from game import Game
from randomBot import RandomBot
class ReinforcementBot:
def __init__(self):
self.discountFactor = 0.9
self.buildNet()
self.observeGames(10000, 100)
def buildNet(self):
self.net = tf.keras.models.Sequential()
self.net.add(tf.keras.layers.Dense(36, input_shape=(18,)))
self.net.add(tf.keras.layers.Dense(18, activation='sigmoid'))
# self.net.add(tf.keras.layers.GaussianNoise(1))
self.net.add(tf.keras.layers.Dense(9))
self.net.compile(optimizer=tf.keras.optimizers.SGD(0.5), loss='mse', metrics=['accuracy'])
def observeGames(self, numberOfGames=1000, qEpochs=100):
games = []
states = []
nextStates = []
actions = []
rewards = []
r = RandomBot()
for gameI in range(numberOfGames):
print(gameI, 'observing')
g = Game(r, r)
g.runGame()
winner = g.whoWon(g.board)
reward = 1 if winner is not None else 0
board = g.board[:]
for move in reversed(g.moveHistory):
nextStates.append(self.boardToInputs(board))
board[move] = None
states.append(self.boardToInputs(board))
actions.append(move)
rewards.append(reward)
reward = None #only reward for ending move
#None here lets us tell between `0` as:
# "game ended in draw", and
# "game still going, no reward--take -1 * max(q')"
for qEpochI in range(qEpochs):
print(qEpochI, 'qEpoch')
outputs = self.net.predict(np.array(states))
nextOutputs = self.net.predict(np.array(nextStates))
for i, o in enumerate(outputs):
r = rewards[i]
if r is None:
r = -1 * self.discountFactor * max(nextOutputs[i])
o[actions[i]] = r
            # outputs is a NumPy array, so the per-row edits above modified it in place as the training targets
self.net.fit(np.array(states), outputs, epochs=1)
def fire(self, board):
return self.net.predict(np.array([self.boardToInputs(board)]))[0]
@staticmethod
def boardToInputs(board):
inputList = []
        for space in board:  # encode each board space as two inputs, one per player
            inputList.append(1 if space == 0 else 0)
            inputList.append(1 if space == 1 else 0)
return inputList
def getMove(self, board, whichPlayerAmI):
q = self.fire(board)
maxResult = max([x for i, x in enumerate(q) if board[i] is None])
for move, r in enumerate(q):
if r == maxResult and board[move] is None:
return move
raise Exception('NO MOVE!')
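# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the Game and RandomBot
# interfaces are only inferred from how they are used above (Game(player0,
# player1), runGame(), whoWon(board)), so treat this purely as an illustration.
# Note that constructing ReinforcementBot() observes 10000 random games and
# trains the network, which can take a long time.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    trainedBot = ReinforcementBot()          # trains on construction
    opponent = RandomBot()
    evaluationGame = Game(trainedBot, opponent)
    evaluationGame.runGame()
    print('winner:', evaluationGame.whoWon(evaluationGame.board))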
|
# Import the random package to randomly select individuals
import random
# Import the numpy package for the circular list
import numpy as np
# Import the superclass (also called base class), which is an abstract class,
# to implement the subclass ThresholdSelection
from SelectionOperator import *
# Import the circular list
from CircularList import *
# The subclass that inherits of SelectionOperator
class ThresholdSelection(SelectionOperator):
# Constructor
    # aThreshold: the fitness threshold used to accept or reject an individual
# anAlternativeSelectionOperator: when the threshold operator fails to find a suitable candidate in aMaxIteration, use anAlternativeSelectionOperator instead to select the individual
# aMaxIteration: the max number of iterations
def __init__(self,
aThreshold,
anAlternativeSelectionOperator,
aMaxIteration = 50):
# Call the constructor of the superclass
super().__init__("Threshold selection");
# Store the attributes of the class
self.threshold = aThreshold;
self.alternative_selection_operator = anAlternativeSelectionOperator;
self.max_iteration = aMaxIteration;
self.max_iteration_reached_counter = 0;
self.circular_list = CircularList(50, -1);
# Get a SystemRandom instance out of random package
self.system_random = random.SystemRandom();
self.number_of_good_flies = 0
self.number_of_bad_flies = 0
# Nothing to do
def preProcess(self, anIndividualSet):
return
# Method used for print()
def __str__(self) -> str:
        return super().__str__() + "\t" + "threshold:\t" + str(self.threshold) + "\tmax_iteration:\t" + str(self.max_iteration) + "\talternative selection operator:\t" + str(self.alternative_selection_operator);
    # Select an individual
    # anIndividualSet: the set of individuals to choose from
# aFlag == True for selecting good individuals,
# aFlag == False for selecting bad individuals,
def __select__(self, anIndividualSet, aFlag):
# The max individual ID
max_ind = len(anIndividualSet) - 1;
# Run the selection for a max of self.max_iteration times
for _ in range(self.max_iteration):
selected_index = self.system_random.randint(0, max_ind)
fitness = anIndividualSet[selected_index].computeObjectiveFunction()
# Try to find a good individual (candidate for reproduction)
if aFlag == True:
# The fitness is greater than the threshold, it's a good individual
if fitness > self.threshold:
self.number_of_good_flies += 1;
return selected_index;
else:
self.number_of_bad_flies += 1;
# Try to find a bad individual (candidate for death)
else:
# The fitness is lower than or equal to the threshold, it's a bad individual
if fitness <= self.threshold:
self.max_iteration_reached_counter -= 1;
self.circular_list.append(-1);
self.number_of_bad_flies += 1;
return selected_index;
else:
self.number_of_good_flies += 1;
# The threshold selection has failed self.max_iteration times,
# use self.alternative_selection_operator instead
# Try to find a bad individual (candidate for death)
if aFlag == False:
self.max_iteration_reached_counter += 1;
self.circular_list.append(1);
return self.alternative_selection_operator.__select__(anIndividualSet, aFlag);
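# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): DummyIndividual and
# RandomFallback below are hypothetical stand-ins for the project's real
# individual class and alternative SelectionOperator; they exist only to show
# how __select__ picks good (aFlag=True) and bad (aFlag=False) individuals
# relative to the threshold. Note that, as written above, __select__ returns
# None when no good individual is found within max_iteration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class DummyIndividual:
        def __init__(self, fitness):
            self.fitness = fitness
        def computeObjectiveFunction(self):
            return self.fitness

    class RandomFallback:
        # minimal fallback operator: pick any index at random
        def __select__(self, anIndividualSet, aFlag):
            return random.randint(0, len(anIndividualSet) - 1)

    population = [DummyIndividual(f) for f in [0.1, 0.4, 0.7, 0.9]]
    selector = ThresholdSelection(0.5, RandomFallback(), aMaxIteration=20)
    good_index = selector.__select__(population, True)    # candidate for reproduction
    bad_index = selector.__select__(population, False)    # candidate for death
    print("good:", good_index, "bad:", bad_index)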
|
import pymongo
from pymongo import MongoClient
from pymongo import IndexModel, ASCENDING, DESCENDING
import time
import hashlib
import sys
class MongoUrlManager:
def __init__(self, mongo_ip='localhost', mongo_port=27017,
client=None, database_name='Zaojv', table_name='zaojv_items'):
        # Connect to MongoDB
self.client = MongoClient(host=mongo_ip, port=mongo_port)
self.db = self.client[database_name]
self.table = self.db[table_name]
        # Create an index in MongoDB so that queries can be answered quickly
        if self.table.count() == 0:
self.table.create_index([("status", ASCENDING), ("reset_num", ASCENDING)])
def enqueueUrl(self, url, depth):
try:
if self.table.find_one({'_id':hashlib.md5(url.encode('utf-8', 'ignore')).hexdigest()}):
return None
self.table.insert(
{
'_id': hashlib.md5(url.encode('utf-8', 'ignore')).hexdigest(),
'url': url,
"depth": depth,
'status': "new",
'reset_num': 0,
'queue_time': time.strftime("%Y%m%d %H%M%S"),
}
)
except Exception as e:
print("lineNum {0}: {1}".format(str(sys._getframe().f_lineno), e))
def dequeueUrl(self):
record = self.table.find_one_and_update(
{'status': 'new'},
{'$set': {'status': 'downloading'}},
upsert=False,
            return_document=pymongo.ReturnDocument.BEFORE,  # return the document as it was before the update
sort=[("depth", DESCENDING),('queue_time', ASCENDING)],
)
if record:
return record
else:
return None
def finishUrl(self, url, error_flag=False):
if error_flag == False:
record = {'status': 'done', 'done_time': time.strftime("%Y%m%d %H%M%S")}
else:
record = {'status': 'done', 'done_time': time.strftime("%Y%m%d %H%M%S"), 'error_flag': 'True'}
self.table.update({'_id': hashlib.md5(url.encode('utf-8', 'ignore')).hexdigest()}, {'$set': record},
upsert=False)
def resetUrl(self, url):
tmp = self.table.find_one({'_id': hashlib.md5(url.encode('utf-8')).hexdigest()})
num = tmp['reset_num'] + 1
record = {'status': 'new', 'queue_time':time.strftime("%Y%m%d %H%M%S"), 'reset_num':num}
self.table.update({'_id': hashlib.md5(url.encode('utf-8', 'ignore')).hexdigest()}, {'$set': record},
upsert=False)
def clear(self):
self.table.drop()
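# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): it assumes a MongoDB
# server reachable on localhost:27017 and a pymongo version matching the code
# above. It simply walks one URL through the states new -> downloading -> done
# that the methods of MongoUrlManager manage, then drops the test collection.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    manager = MongoUrlManager(database_name='Zaojv_test', table_name='url_queue_test')
    manager.enqueueUrl('http://example.com/page1', depth=0)
    record = manager.dequeueUrl()            # marks the record as 'downloading'
    if record:
        print('fetched:', record['url'], 'depth:', record['depth'])
        manager.finishUrl(record['url'])     # marks the record as 'done'
    manager.clear()                          # drop the test collection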
|
#coding:gb2312
# Write to a file
filename = 'write.txt'
with open(filename,'w') as f:
"""
    'r'  -- read mode
    'w'  -- write mode
    'a'  -- append mode
    'r+' -- read and write mode
"""
f.write("I love python!\n")
f.write("Python is very useful!")
|
def Rcalculator():
A_string=raw_input("Please Enter the area of the wall (in m2):\n ")
R_tot=0
U=0
n_layer_string=raw_input("Please Enter the total number of layers \n ")
n_parallel_string=raw_input("Please Enter the total number of parallel layers \n ")
n_layer=int(n_layer_string)
A=float(A_string)
n_parallel=int(n_parallel_string)
if n_parallel==0:
answer=raw_input("Does it contain any heat convective term? Y/N \n")
if answer=="Y":
for index in range(n_layer):
answer2=raw_input("Is this one the convective term? Y/N \n")
if answer2=="Y":
h=(raw_input("Please Enter the convective term(in W/(m2 K): "))
R=1/(float(h)*A)
elif answer2=="N":
L=raw_input("Please Enter the length of the layer (in m): ")
k=(raw_input("Please Enter the conductivity of the layer(in W/(m K): "))
R=float(L)/(float(k)*A)
R_tot=R_tot+R
elif answer=='N':
for index in range(n_layer):
print("Layer number "+str(index+1)+"\n")
L=raw_input("Please Enter the length of the layer (in m): ")
k=(raw_input("Please Enter the conductivity of the layer(in W/(m K): "))
R=float(L)/(float(k)*A)
R_tot=R_tot+R
print("This is the global wall thermal conductivity resistance "+str(R_tot))
else:
for index in range(n_layer):
print("Layer number "+str(index+1)+"\n")
answer3=raw_input("Is this one of the parallel layer? Y/N\n")
if answer3=="Y":
n_parallel_elements_string=raw_input("How many parallel elements are resent?\n")
n_parallel_elements=int(n_parallel_elements_string)
for index2 in range(n_parallel_elements):
print("Parallel element number "+str(index2+1)+"\n")
answer2=raw_input("Is this one the convective term? Y/N \n")
if answer2=="Y":
h=(raw_input("Please Enter the convective term(in W/(m2 K): "))
R=1/(float(h)*A)
U=(1/R)+U
elif answer2=="N":
L=raw_input("Please Enter the length of the layer (in m): ")
k=(raw_input("Please Enter the conductivity of the layer(in W/(m K): "))
R=float(L)/(float(k)*A)
U=(1/R)+U
R_tot=R_tot+(1/U)
else:
answer2=raw_input("Is this one the convective term? Y/N \n")
if answer2=="Y":
h=(raw_input("Please Enter the convective term(in W/(m2 K): "))
R=1/(float(h)*A)
elif answer2=="N":
L=raw_input("Please Enter the length of the layer (in m): ")
k=(raw_input("Please Enter the conductivity of the layer(in W/(m K): "))
R=float(L)/(float(k)*A)
R_tot=R_tot+R
print("This is the global wall thermal conductivity resistance "+str(R_tot))
return(R_tot)
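# Hedged usage note (not in the original file): the calculator is fully interactive,
# so running the module just starts the prompts. This is Python 2 style code
# (raw_input); under Python 3, raw_input would have to be replaced with input.
if __name__=="__main__":
    total_resistance=Rcalculator()
    print("Returned value: "+str(total_resistance))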
|
import boto.utils
# Query the EC2 instance metadata service for basic information about this host
metadata = boto.utils.get_instance_metadata()
localIP = metadata['local-ipv4']
hostname = metadata['local-hostname']
# Note: deriving the region from the private hostname assumes the form ip-x-x-x-x.<region>.compute.internal
region = hostname.split('.')[1]
|
def test_helper_clear_groups(db):
cursor = db.connection.cursor()
cursor.execute("DELETE FROM group_list")
cursor.close()
def test_helper_clear_contacts(db):
cursor = db.connection.cursor()
cursor.execute("DELETE FROM addressbook")
cursor.close()
def test_helper_clear_relations(db):
cursor = db.connection.cursor()
cursor.execute("DELETE FROM address_in_groups")
cursor.close()
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import math
from copy import deepcopy
from itertools import combinations, permutations, product, combinations_with_replacement
from bisect import bisect_left, bisect_right
import sys
def input():
return sys.stdin.readline().rstrip()
def getN():
return int(input())
def getNM():
return map(int, input().split())
def getList():
return list(map(int, input().split()))
def getArray(intn):
return [int(input()) for i in range(intn)]
mod = 10 ** 9 + 7
MOD = 998244353
sys.setrecursionlimit(1000000)
INF = float('inf')
eps = 10 ** (-10)
dy = [0, 1, 0, -1]
dx = [1, 0, -1, 0]
#############
# Main Code #
#############
# O(1)deque
# the identity element is taken to be 0
class Deque:
def __init__(self, src_arr = [], max_size = 300000):
self.N = max(max_size, len(src_arr)) + 1
        self.buf = list(src_arr) + [None] * (self.N - len(src_arr))  # fill the unused area with None
        self.head = 0  # index of the head element
        self.tail = len(src_arr)  # index of the tail (one past the last element)
def __index(self, i):
l = len(self)
if not -l <= i < l: raise IndexError('index out of range: ' + str(i))
if i < 0:
i += l
return (self.head + i) % self.N
    # Internal operation: when the preallocated size is exceeded, the buffer is extended
    # (its length grows by another block of ~300000 slots)
    # This is O(N), but it only happens about once every 300000 appends, so it can be ignored
def __extend(self):
ex = self.N - 1
self.buf[self.tail+1 : self.tail+1] = [None] * ex
self.N = len(self.buf)
if self.head > 0:
self.head += ex
def is_full(self):
return len(self) >= self.N - 1
def is_empty(self):
return len(self) == 0
    # the usual append (to the right end)
def append(self, x):
if self.is_full(): self.__extend()
self.buf[self.tail] = x
self.tail += 1
self.tail %= self.N
    # appendleft writes values at buffer indices -1, -2, ...
    # if head becomes e.g. -2, it is taken mod N and turned into a positive index
def appendleft(self, x):
if self.is_full(): self.__extend()
self.buf[(self.head - 1) % self.N] = x
self.head -= 1
self.head %= self.N
def pop(self):
if self.is_empty(): raise IndexError('pop() when buffer is empty')
ret = self.buf[(self.tail - 1) % self.N]
self.tail -= 1
self.tail %= self.N
return ret
def popleft(self):
if self.is_empty(): raise IndexError('popleft() when buffer is empty')
ret = self.buf[self.head]
self.head += 1
self.head %= self.N
return ret
def __len__(self):
return (self.tail - self.head) % self.N
def __getitem__(self, key):
return self.buf[self.__index(key)]
def __setitem__(self, key, value):
self.buf[self.__index(key)] = value
def __str__(self):
return 'Deque({0})'.format(str(list(self)))
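# ---------------------------------------------------------------------------
# Small self-check sketch (not part of the original template): it exercises the
# O(1) append / appendleft / pop / popleft operations of the Deque class above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dq = Deque([2, 3, 4], max_size=10)
    dq.appendleft(1)
    dq.append(5)
    assert list(dq) == [1, 2, 3, 4, 5]
    assert dq.popleft() == 1
    assert dq.pop() == 5
    print(dq)  # Deque([2, 3, 4])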
|
import os,sys,time
from subprocess import *
import datetime
import threading
### This script does automatic testing before releasing the code.
test_list = ["mnist_f", "mnist_q", "mbnet_f", "mbnet_q"]
# Run a shell command via the shell and return its stdout as a list of decoded, stripped lines
def runcmd(cmd):
r=Popen(cmd,stdin=PIPE,stdout=PIPE,stderr=PIPE, shell=True)
a=[]
for line in r.stdout.readlines():
a.append(line.decode("utf8").strip())
return a
print("This script only test INT8/FP32, you need change OPT0&OPT1")
t00= time.time()
print("================Step1: test MNIST")
if "mnist_f" in test_list:
t0= time.time()
print("========Step1.1: test MNIST fp32")
print("====Step1.1.1: MNIST fp32 cvt")
cmd="cd ../../tools/ && python3 h5_to_tflite.py h5/mnist_valid.h5 tflite/mnist_valid_f.tflite 0 && python3 tflite2tmdl.py tflite/mnist_valid_f.tflite tmdl/mnist_valid_f.tmdl fp32 1 28,28,1 10"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "Saved to tinymaix model header to tmdl/mnist_valid_f.h":
print("====Step1.1.1: OK~")
else:
print("====Step1.1.1: ERR!!!")
exit(-1)
print("====Step1.1.2: MNIST fp32 compile&run")
cmd="cd ../mnist && sed -i 's/#define TM_MDL_TYPE TM_MDL_INT8/#define TM_MDL_TYPE TM_MDL_FP32/g' ../../include/tm_port.h && rm -rf build && mkdir build && cd build && cmake .. && make && ./mnist"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "### Predict output is: Number 2, prob 1.000":
print("====Step1.1.2: OK~")
runcmd("rm ../../tools/tmdl/mnist_valid_f.tmdl") #clean tmdl
else:
print("====Step1.1.2: ERR!!!")
exit(-2)
t1= time.time()
print("========Step1.1: test MNIST fp32 OK~ use %.1fs"%(t1-t0))
if "mnist_q" in test_list:
t0= time.time()
print("========Step1.2: test MNIST int8")
print("====Step1.2.1: MNIST int8 cvt")
cmd="cd ../../tools/ && python3 h5_to_tflite.py h5/mnist_valid.h5 tflite/mnist_valid_q.tflite 1 quant_img_mnist/ 0to1 && python3 tflite2tmdl.py tflite/mnist_valid_q.tflite tmdl/mnist_valid_q.tmdl int8 1 28,28,1 10"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "Saved to tinymaix model header to tmdl/mnist_valid_q.h":
print("====Step1.2.1: OK~")
else:
print("====Step1.2.1: ERR!!!")
exit(-1)
print("====Step1.2.2: MNIST int8 compile&run")
cmd="cd ../mnist && sed -i 's/#define TM_MDL_TYPE TM_MDL_FP32/#define TM_MDL_TYPE TM_MDL_INT8/g' ../../include/tm_port.h && rm -rf build && mkdir build && cd build && cmake .. && make && ./mnist"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "### Predict output is: Number 2, prob 0.996":
print("====Step1.2.2: OK~")
runcmd("rm ../../tools/tmdl/mnist_valid_q.tmdl") #clean tmdl
else:
print("====Step1.2.2: ERR!!!")
exit(-2)
t1= time.time()
print("========Step1.2: test MNIST int8 OK~ use %.1fs"%(t1-t0))
print("================Step2: test MBNET")
if "mbnet_f" in test_list:
t0= time.time()
print("========Step2.1: test MBNET fp32")
print("====Step2.1.1: MBNET fp32 cvt")
cmd="cd ../../tools/ && python3 h5_to_tflite.py h5/mbnet128_0.25.h5 tflite/mbnet128_0.25_f.tflite 0 && python3 tflite2tmdl.py tflite/mbnet128_0.25_f.tflite tmdl/mbnet128_0.25_f.tmdl fp32 1 128,128,3 1000"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "Saved to tinymaix model header to tmdl/mbnet128_0.25_f.h":
print("====Step2.1.1: OK~")
else:
print("====Step2.1.1: ERR!!!")
exit(-1)
print("====Step2.1.2: MBNET fp32 compile&run")
cmd="cd ../mbnet && sed -i 's/#define TM_MDL_TYPE TM_MDL_INT8/#define TM_MDL_TYPE TM_MDL_FP32/g' ../../include/tm_port.h && rm -rf build && mkdir build && cd build && cmake .. && make && ./mbnet"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "### Predict output is: Class 292 (tiger, Panthera tigris), Prob 0.866" or \
res[-1] == "### Predict output is: Class 292 (tiger, Panthera tigris), Prob 0.891":
print("====Step2.1.2: OK~")
runcmd("rm ../../tools/tmdl/mbnet128_0.25_f.tmdl") #clean tmdl
else:
print("====Step2.1.2: ERR!!!")
exit(-2)
t1= time.time()
print("========Step2.1: test MBNET fp32 OK~ use %.1fs"%(t1-t0))
if "mbnet_q" in test_list:
t0= time.time()
print("========Step2.2: test MBNET int8")
print("====Step2.2.1: MBNET int8 cvt")
cmd="cd ../../tools/ && python3 h5_to_tflite.py h5/mbnet128_0.25.h5 tflite/mbnet128_0.25_q.tflite 1 quant_img128/ 0to1 && python3 tflite2tmdl.py tflite/mbnet128_0.25_q.tflite tmdl/mbnet128_0.25_q.tmdl int8 1 128,128,3 1000"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "Saved to tinymaix model header to tmdl/mbnet128_0.25_q.h":
print("====Step2.2.1: OK~")
else:
print("====Step2.2.1: ERR!!!")
exit(-1)
print("====Step2.2.2: MBNET int8 compile&run")
cmd="cd ../mbnet && sed -i 's/#define TM_MDL_TYPE TM_MDL_FP32/#define TM_MDL_TYPE TM_MDL_INT8/g' ../../include/tm_port.h && rm -rf build && mkdir build && cd build && cmake .. && make && ./mbnet"
res = runcmd(cmd)
print(res[-1])
if res[-1] == "### Predict output is: Class 292 (tiger, Panthera tigris), Prob 0.824":
print("====Step2.2.2: OK~")
runcmd("rm ../../tools/tmdl/mbnet128_0.25_q.tmdl") #clean tmdl
else:
print("====Step2.2.2: ERR!!!")
exit(-2)
t1= time.time()
print("========Step2.2: test MBNET int8 OK~ use %.1fs"%(t1-t0))
t11= time.time()
print("================ALL PASS~ cost %.1fs"%(t11-t00))
|
import os; dirname = os.path.abspath(os.path.dirname(__file__))
import sys; sys.path.append(os.path.join(dirname, 'image-similarity-clustering'))
from flask import Flask, flash, redirect, render_template, request, send_from_directory, url_for
from werkzeug.utils import secure_filename
from features import extract_features
from predictor import predict_location
import numpy as np
UPLOAD_FOLDER = os.path.join(dirname, 'temp')
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# flash() needs a session secret key; a fixed placeholder value is used here
app.secret_key = 'change-this-secret-key'
def allowed_file(filename):
# check if the file extension is allowed
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# Open the route for the upload_file function
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
            flash('No file part')
return redirect(request.url)
files = request.files.getlist('file')
safe_files = []
for file in files:
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
# check if the filename is malicious
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
safe_files.append(filename)
return results(safe_files)
return render_template('upload.html')
# Route for the results view. Note that it is called directly from upload_file() with the
# list of uploaded filenames; requesting /results directly would fail because Flask cannot
# supply the filenames argument.
@app.route('/results', methods=['GET', 'POST'])
def results(filenames):
print(filenames)
# extract the features from the images
features_df = extract_features(app.config['UPLOAD_FOLDER'], filenames=filenames)
# use the model to predict the location from the features
prediction_conf = predict_location(features_df, model=os.path.join(dirname, 'Model_04.h5'))
cities = ['Coimbra', 'Lisboa', 'Porto']
position_max = [np.argmax(p) for p in prediction_conf]
photo_urls = [url_for('uploaded_file',filename=filename) for filename in filenames]
# show results
html_code = ""
for i, photo in enumerate(photo_urls):
html_code += (f'''<div class="image_box">
<img src={photo} width='224' height='224' class="image_thumbnail" />
<div class="label">
<table>
<tr>
<td class="city">{cities[position_max[i]]}</td>
<td class="pred">{prediction_conf[i][position_max[i]] * 100:.2f} %</td>
</tr>
</table>
</div>
</div>''')
return render_template('upload.html', html_code=html_code)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
# prepare the photo to be shown
return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
# running the flask app
if __name__ == '__main__':
port = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=port, threaded=False)
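# A minimal smoke-test sketch (commented out; added here, not part of the original app).
# It assumes a local file named 'example.jpg' exists -- the name is only illustrative --
# and uses Flask's built-in test client to post it to the upload route:
#
# with app.test_client() as client:
#     with open('example.jpg', 'rb') as fh:
#         resp = client.post('/', content_type='multipart/form-data',
#                            data={'file': (fh, 'example.jpg')})
#     print(resp.status_code)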
|
# In the section on Functions, we looked at 2 different ways to calculate the factorial
# of a number. We used an iterative approach, and also used a recursive function.
#
# This challenge is to use the timeit module to see which performs better.
#
# The two functions appear below.
#
# Hint: change the number of iterations to 1,000 or 10,000. The default
# of one million will take a long time to run.
import timeit
def fact(n):
result = 1
if n > 1:
for f in range(2, n + 1):
result *= f
return result
def factorial(n):
# n! can also be defined as n * (n-1)!
if n <= 1:
return 1
else:
return n * factorial(n - 1)
if __name__ == "__main__":
print(timeit.timeit("x=fact(130)", setup="from __main__ import fact", number=10000))
print(timeit.timeit("x=factorial(130)", setup="from __main__ import factorial", number=10000))
print(timeit.repeat("x=fact(130)", setup="from __main__ import fact", number=10000)) #Timeit repeat repeats timeit test 3 times and returns result in a list.
print(timeit.repeat("x=factorial(130)", setup="from __main__ import factorial", number=10000))
|
# -*- coding: utf-8 -*-
"""
Practical core of application - configuration and student's choices.
"""
from datetime import datetime
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from app.accounts.models import Student, Teacher
from app.courses.models import Course, CourseCategory
# also import in Choose.clean function:
# from app.choosing.helpers import get_student_choosings
class ChoosingPhase:
PREPARING_0 = 0
COURSES_CHOOSING_1 = 1
CHOOSES_EVALUATING_2 = 2
CHOOSES_CHANING_3 = 3
GROUPS_CREATING_4 = 4
GROUPS_CHANGES_5 = 5
FINISHED_6 = 6
CHOOSING_PHASE = (
(ChoosingPhase.PREPARING_0, _("Preparation")),
(ChoosingPhase.COURSES_CHOOSING_1, _("Choosing of courses")),
(ChoosingPhase.CHOOSES_EVALUATING_2, _("Evaluating of options")),
(ChoosingPhase.CHOOSES_CHANING_3, _("Changes in choosings")),
(ChoosingPhase.GROUPS_CREATING_4, _("Creating groups")),
(ChoosingPhase.GROUPS_CHANGES_5, _("Group changes")),
(ChoosingPhase.FINISHED_6, _("Finished"))
)
class Choosing(models.Model):
"""
    Configuration of choosing - we distinguish between choosing subjects and seminars,
    and also between years / grades.
"""
class Meta:
ordering = ("priority",)
verbose_name = _("Choosing configuration")
verbose_name_plural = _("Choosing configurations")
acronym = models.CharField(max_length=10, unique=True)
name = models.CharField(max_length=100, unique=True)
description = models.TextField(blank=True, default="")
#schoolyear = models.CharField(max_length=9, null=False, blank=False)
#
# Management
#
time_start = models.DateTimeField()
time_end = models.DateTimeField()
phase = models.IntegerField(choices=CHOOSING_PHASE)
active = models.BooleanField(default=True)
# Allow requesting for teacher?
allow_teacher_requests = models.BooleanField(default=False)
# Which courses can be choosed - and how many.
course_category = models.ForeignKey(to=CourseCategory)
courses_min = models.IntegerField()
courses_max = models.IntegerField()
# Helper fields
priority = models.IntegerField()
for_grade = models.IntegerField(null=True, blank=True, default=None)
# Courses not opened because of low interest - for phase 3.
denied_courses = models.ManyToManyField(Course, blank=True)
def __str__(self):
return self.name
def get_chooses_for(self, student_id):
return self.choose_set.filter(student__id=student_id).all()
def resolve_courses(self):
courses = []
for course in self.course_category.course_set.all():
if course not in self.denied_courses.all():
courses.append(course)
return courses
@staticmethod
def get_active_for_grade(grade):
l = list()
all_active = Choosing.objects.filter(active=True, for_grade=grade,
time_start__lte=datetime.now(), time_end__gte=datetime.now()
).all()
for c in all_active:
l.append(c)
return l
CHOOSE_PHASE = (
(0, _("Waiting")),
(1, _("Approved")),
(2, _("Denied")),
(3, _("Deleted"))
)
class Choose(models.Model):
"""
    Student's course choice.
"""
class Meta:
verbose_name = _("Student's choose")
verbose_name_plural = _("Students' chooses")
unique_together = ('student', 'choosing', 'course')
student = models.ForeignKey(to=Student)
choosing = models.ForeignKey(to=Choosing)
course = models.ForeignKey(to=Course)
created_at = models.DateTimeField(auto_now_add=True)
phase = models.IntegerField(choices=CHOOSE_PHASE, default=0)
def clean(self):
from app.choosing.helpers import get_student_choosings
student_choosing_set = get_student_choosings(self.student)
if self.choosing not in student_choosing_set:
raise ValidationError(_('Student cannot choose in this choosing.'))
if self.course not in self.choosing.course_category.course_set.all():
raise ValidationError(_('Course does not belong to the choosing course category.'))
others_count = Choose.objects.filter(student=self.student, choosing=self.choosing, phase__lt=2).count()
if self.id:
others_count -= 1
if others_count >= self.choosing.courses_max:
            raise ValidationError(_('Student reached the maximum limit of chosen courses.'))
if self.phase == 3:
return
if self.choosing.phase in (0, 1):
return
if self.course in self.choosing.denied_courses.all():
if self.phase == 2:
return
raise ValidationError(_('Could not choose denied course.'))
def __repr__(self):
return "<Choose: %d>" % self.id
def __str__(self):
return "%s / %s" % (self.student, self.course.name)
@property
def phase_str(self):
for phase in CHOOSE_PHASE:
if phase[0] == self.phase:
return phase[1]
return "N/A"
@property
def cancelable(self):
if self.choosing.phase in (0,2,4,6):
return False
return True if self.phase == 0 else False
def is_owner(self, user):
return user.student and self.student == user.student
def accept(self):
self.phase = 1
self.save()
def reject(self):
self.phase = 2
        if self.teacherrequest_set.exists():
for req in self.teacherrequest_set.all():
req.phase = 2
req.save()
self.save()
TEACHER_REQUEST_PHASE = (
(0, _("Waiting")),
(1, _("Approved")),
(2, _("Denied"))
)
class TeacherRequest(models.Model):
"""
Student wants specific teacher, but there are no guarantees because
of time-table integration restrictions.
"""
class Meta:
verbose_name = _("Request for teacher")
verbose_name_plural = _("Requests for teachers")
choose = models.ForeignKey(to=Choose)
teacher = models.ForeignKey(to=Teacher)
phase = models.IntegerField(choices=TEACHER_REQUEST_PHASE, default=0)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.choose)
@property
def phase_str(self):
        for phase in TEACHER_REQUEST_PHASE:
if phase[0] == self.phase:
return phase[1]
return "N/A"
@property
def cancelable(self):
if self.choose.choosing.phase in (0,2,4,6):
return False
return True if self.phase == 0 else False
class ResolvedCourse(models.Model):
"""
    Combinations of choosing-course, with its resolving state for the subsequent phases.
And yes, it could be merged with ResolvedCombination (with teacher.nullable=True).
"""
choosing = models.ForeignKey(Choosing)
course = models.ForeignKey(Course)
accepted = models.BooleanField()
class Meta:
unique_together = ('choosing', 'course',)
def clean(self):
if self.course not in self.choosing.course_category.course_set.all():
raise ValidationError("Course is not associated with choosing.")
    def __str__(self):
        return "{} / {}".format(self.choosing, self.course)
class ResolvedCombination(models.Model):
"""
    Combinations of choosing-course-teacher, with its resolving state for
continuing phases.
"""
choosing = models.ForeignKey(Choosing)
course = models.ForeignKey(Course)
teacher = models.ForeignKey(Teacher)
accepted = models.BooleanField()
class Meta:
unique_together = ('choosing', 'course', 'teacher')
def clean(self):
if self.course not in self.choosing.course_category.course_set.all():
raise ValidationError("Course is not associated with choosing.")
if self.teacher not in self.course.teachers.all():
raise ValidationError("Selected teacher does not teach this course.")
    def __str__(self):
return "{} / {}".format(self.course, self.teacher)
|
"""
CCT modeling and optimization code
Utility collection
Author: Zhao Runxiao
Date: 2021-06-07
"""
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
PathProject = os.path.split(rootPath)[0]
sys.path.append(rootPath)
sys.path.append(PathProject)
from cctpy import *
R = 0.95
bl = (
Beamline.set_start_point()
.first_drift()
.append_agcct(
        big_r=0.95,  # bending radius
        small_rs=[130*MM, 114*MM, 98*MM, 83*MM],  # winding radii, from largest to smallest
        # bending angles of the alternating-gradient quadrupole CCT segments; the dipole CCT
        # bending angle need not be given, because it is simply the sum of these three numbers
        bending_angles=[17.05, 27.27, 23.18],
        tilt_angles=[
            [30, 88.773, 98.139, 91.748],  # dipole CCT tilt angles, i.e. the ak values (array of arbitrary length)
            [101.792, 30, 62.677, 89.705]  # quadrupole CCT tilt angles, i.e. the ak values (array of arbitrary length)
        ],
        winding_numbers=[
            [128],  # dipole CCT number of turns
            [25, 40, 34]  # quadrupole CCT numbers of turns per segment
        ],
        currents=[9409.261, -7107.359]  # dipole CCT and quadrupole CCT currents
)
)
# Extract the individual CCTs from the beamline
dicct_out = CCT.as_cct(bl.magnets[0])
dicct_in = CCT.as_cct(bl.magnets[1])
agcct3_in = CCT.as_cct(bl.magnets[2])
agcct3_out = CCT.as_cct(bl.magnets[3])
agcct4_in = CCT.as_cct(bl.magnets[4])
agcct4_out = CCT.as_cct(bl.magnets[5])
agcct5_in = CCT.as_cct(bl.magnets[6])
agcct5_out = CCT.as_cct(bl.magnets[7])
# Convert each CCT to a Wire
wdicct_out = Wire.create_by_cct(dicct_out)
wdicct_in = Wire.create_by_cct(dicct_in)
wagcct3_in = Wire.create_by_cct(agcct3_in)
wagcct3_out = Wire.create_by_cct(agcct3_out)
wagcct4_in = Wire.create_by_cct(agcct4_in)
wagcct4_out = Wire.create_by_cct(agcct4_out)
wagcct5_in = Wire.create_by_cct(agcct5_in)
wagcct5_out = Wire.create_by_cct(agcct5_out)
# Select the CCT to analyze below
if True:
if True:
        delta_angle = 10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2  # start and end ksi
        s_end = 360*128-delta_angle/2
        s_number = 36*128  # number of sample points
        current_cct = dicct_in  # the CCT (and wire) currently analyzed
        固定坐标系 = False  # fixed (global) coordinate system if True, sliding (local) frame if False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./二极CCT内层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = -10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2  # start and end ksi
        s_end = -360*128-delta_angle/2
        s_number = 36*128  # number of sample points
        current_cct = dicct_out  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./二极CCT外层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = 10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2  # start and end ksi; starting from 0 because the original value is an integer multiple of 2*pi, so it can start directly from 0
        s_end = 360*25-delta_angle/2
        s_number = 36*25  # number of sample points
        current_cct = agcct3_in  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./四极CCT第1段内层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = -10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2  # start and end ksi
        s_end = -360*25-delta_angle/2
        s_number = 36*25  # number of sample points
        current_cct = agcct3_out  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./四极CCT第1段外层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = -10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2 + 25*360  # start and end ksi
        s_end = -360*40-delta_angle/2+25*360
        s_number = 36*40  # number of sample points
        current_cct = agcct4_in  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./四极CCT第2段内层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = 10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2 - 25*360  # start and end ksi
        s_end = 360*40-delta_angle/2-25*360
        s_number = 36*40  # number of sample points
        current_cct = agcct4_out  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./四极CCT第2段外层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = 10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2 + 25*360 - 40*360  # start and end ksi
        s_end = 360*34-delta_angle/2+25*360 - 40*360
        s_number = 36*34  # number of sample points
        current_cct = agcct5_in  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = False  # Lorentz force, else pressure
file_name = f'./四极CCT第3段内层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
if False:
        delta_angle = -10  # write a negative number when the CCT is wound in the negative ksi direction
        s_start = 0+delta_angle/2 - 25*360 + 40*360  # start and end ksi
        s_end = -360*34-delta_angle/2-25*360 + 40*360
        s_number = 36*34  # number of sample points
        current_cct = agcct5_out  # the CCT (and wire) currently analyzed
        固定坐标系 = False
        洛伦兹力 = True  # Lorentz force, else pressure
file_name = f'./四极CCT第3段外层{"固定" if 固定坐标系 else "滑动"}坐标系-{"洛伦兹力" if 洛伦兹力 else "压强"}.txt'
current_wire = Wire.create_by_cct(current_cct)
other_magnet = CombinedMagnet(*bl.magnets)
other_magnet.remove(current_cct)
def task(s):
if 固定坐标系:
lcp = LocalCoordinateSystem.global_coordinate_system()
else:
lcp = LocalCoordinateSystem(
location=current_wire.function_line3.point_at_p3_function(BaseUtils.angle_to_radian(s)),
x_direction=current_wire.function_line3.direct_at_p3_function(
BaseUtils.angle_to_radian(s)),
z_direction=current_cct.bipolar_toroidal_coordinate_system.main_normal_direction_at(
current_cct.p2_function(BaseUtils.angle_to_radian(s))
)
)
if 洛伦兹力:
fon = current_wire.lorentz_force_on_wire(
s=BaseUtils.angle_to_radian(s),
delta_length=current_cct.small_r *
BaseUtils.angle_to_radian(delta_angle),
local_coordinate_point=lcp,
other_magnet=other_magnet
)
else:
fon = current_wire.pressure_on_wire_MPa(
s=BaseUtils.angle_to_radian(s),
delta_length=current_cct.small_r *
BaseUtils.angle_to_radian(delta_angle),
local_coordinate_point=lcp,
other_magnet=other_magnet,
channel_width=3.2*MM,
channel_depth=11*MM
)
print(fon)
return fon
if __name__ == "__main__":
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
ss = BaseUtils.linspace(s_start, s_end, s_number)
fons = BaseUtils.submit_process_task(
task=task, param_list=[[s] for s in ss])
data = []
for i in range(len(fons)):
p, f = fons[i]
data.append([i+1, p.x, p.y, p.z, f.x, f.y, f.z])
data = numpy.array(data)
numpy.savetxt(file_name, data)
data = numpy.loadtxt(file_name)
    if True:  # plot the results
Plot2.plot_ndarry2ds(data[:, (0, 4)], describe='r-')
Plot2.plot_ndarry2ds(data[:, (0, 5)], describe='b-')
Plot2.plot_ndarry2ds(data[:, (0, 6)], describe='y-')
if 固定坐标系:
Plot2.legend('x', 'y', 'z', font_size=18,
font_family="Microsoft YaHei")
else:
Plot2.legend('绕线方向', 'rib方向', '径向', font_size=18,
font_family="Microsoft YaHei")
if 洛伦兹力:
Plot2.info('index', 'lorentz_force/N', file_name,
font_size=18, font_family="Microsoft YaHei")
else:
Plot2.info('index', 'pressure/MPa', '',
font_size=18, font_family="Microsoft YaHei")
Plot2.show()
|
from shapely.geometry import Polygon, Point
import numpy as np
from functions.plot_manager import save_discarded_homography
from objects.constants import Constants
from functions.rgb_histogram_matching import evaluete_homography
from objects.homography import Homography
class Ratio:
def __init__(self, Hom, ratios, angles):
self.H = Hom
# ratio on side 0 in pixel/cm (left)
self.r0 = ratios[0]
# ratio on side 1 in pixel/cm (down)
self.r1 = ratios[1]
# ratio on side 2 in pixel/cm (right)
self.r2 = ratios[2]
        # ratio on side 3 in pixel/cm (up)
        self.r3 = ratios[3]
# angle 0 in degrees (up-left)
self.angle0 = angles[0]
# angle 1 in degrees (down-left)
self.angle1 = angles[1]
# angle 2 in degrees (down-right)
self.angle2 = angles[2]
# angle 3 in degrees (up-right)
self.angle3 = angles[3]
def getRatios(self):
return self.r0, self.r1, self.r2, self.r3
def getAngles(self):
return self.angle0, self.angle1, self.angle2, self.angle3
class RatioList:
def __init__(self, test_image):
self.list: [Ratio] = []
self.test_image_height, self.test_image_width = test_image.shape[0:2]
        # These are only for plotting and can be deleted in the future
self.last_centroids_plotted: [Point] = []
self.test_image = test_image
# _________________________________________________________
def __gaussian(self, x0=None, y0=None):
"""
        Create a 2D Gaussian over the image, centered at (x0, y0).
        This Gaussian is used to generate the weights of the weighted mean used to estimate the ratio.
        In this way, each of the homographies found in the image gives a contribution based on its distance
        from the point where the ratio is going to be estimated.
:param x0:
:param y0:
:return: a matrix
"""
width = self.test_image_width
height = self.test_image_height
if x0 is None and y0 is None:
x0 = width / 2
y0 = height / 2
sigma = 1
x, y = np.meshgrid(np.linspace(-1, 1, width), np.linspace(-1, 1, height))
deltax = x0 / width * 2
deltay = y0 / height * 2
d = ((x + 1) - deltax) ** 2 + ((y + 1) - deltay) ** 2
g = np.exp(-(d / (2.0 * sigma ** 2)))
return g
def __calculate_side_ratio(self, object_polygon, template):
"""
        This function, starting from the object polygon of the homography, calculates the actual ratios
:param object_polygon: the polygon associated to the homography
:return: 4 floats
"""
        # Coordinates of the vertices of the object polygon
coords = object_polygon.exterior.coords
        # sides is a list where all the sides' lengths are stored
sides = []
# Build sides list
for i in range(len(coords) - 1):
j = i + 1
# Two consecutive points A and B
A = coords[i]
B = coords[j]
# Coordinates of the two points
xA = A[0]
yA = A[1]
xB = B[0]
yB = B[1]
# Side is the distance between them
side = ((xA - xB) ** 2 + (yA - yB) ** 2) ** 0.5 # in pixel
sides.append(side)
side0, side1 = template.size
side0 = float(side0) # in cm (left)
side1 = float(side1) # in cm (down)
side2 = float(side0) # in cm (right)
side3 = float(side1) # in cm (up)
new_r0 = sides[0] / side0
new_r1 = sides[1] / side1
new_r2 = sides[2] / side2
new_r3 = sides[3] / side3
return new_r0, new_r1, new_r2, new_r3
def __sparse_representation_ratios(self):
"""
        The sparse representation consists of a matrix with the same dimensions as the image, where
        most of the elements are zeros. If a homography with centroid at (x0, y0) has an estimated
        ratio for side 0 of 23.234, the matrix referring to side 0 will contain 23.234 at position (x0, y0).
        :return: 4 matrices
"""
ratio0_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
ratio1_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
ratio2_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
ratio3_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
for ratio in self.list:
x, y = list(ratio.H.polygon.centroid.coords)[0]
x = np.round(x).astype(int)
y = np.round(y).astype(int)
r0, r1, r2, r3 = ratio.getRatios()
ratio0_sparse_rep[y][x] = r0
ratio1_sparse_rep[y][x] = r1
ratio2_sparse_rep[y][x] = r2
ratio3_sparse_rep[y][x] = r3
return ratio0_sparse_rep, ratio1_sparse_rep, ratio2_sparse_rep, ratio3_sparse_rep
def __sparse_representation_angles(self):
"""
        The sparse representation consists of a matrix with the same dimensions as the image, where
        most of the elements are zeros. If a homography with centroid at (x0, y0) has angle0 = 84,
        the matrix referring to angle0 will contain 84 at position (x0, y0).
        :return: 4 matrices
"""
angle0_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
angle1_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
angle2_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
angle3_sparse_rep = np.zeros((self.test_image_height, self.test_image_width))
for ratio in self.list:
x, y = list(ratio.H.polygon.centroid.coords)[0]
x = np.round(x).astype(int)
y = np.round(y).astype(int)
a0, a1, a2, a3 = ratio.getAngles()
angle0_sparse_rep[y][x] = a0
angle1_sparse_rep[y][x] = a1
angle2_sparse_rep[y][x] = a2
angle3_sparse_rep[y][x] = a3
return angle0_sparse_rep, angle1_sparse_rep, angle2_sparse_rep, angle3_sparse_rep
def __calculate_norm_dev(self, object_polygon, template):
"""
Calculate the normalized deviation of the actual ratios from the expected ones
:param object_polygon:
:return: array of floats representing the deviation
"""
# Build a sparse representation of the ratios
ratio0_sparse_rep, \
ratio1_sparse_rep, \
ratio2_sparse_rep, \
ratio3_sparse_rep = self.__sparse_representation_ratios()
# Build a Gaussian over the image with center in the homography considered
x, y = list(object_polygon.centroid.coords)[0]
x = np.round(x).astype(int)
y = np.round(y).astype(int)
gauss = self.__gaussian(x0=x, y0=y)
# Pick the weights where the ratio is not zero
weights = gauss[ratio0_sparse_rep != 0]
sum_of_weights = sum(weights)
# Weighted average of ratios
expected_r0 = np.multiply(gauss, ratio0_sparse_rep).sum() / sum_of_weights
expected_r1 = np.multiply(gauss, ratio1_sparse_rep).sum() / sum_of_weights
expected_r2 = np.multiply(gauss, ratio2_sparse_rep).sum() / sum_of_weights
expected_r3 = np.multiply(gauss, ratio3_sparse_rep).sum() / sum_of_weights
# Calculate the ratio of the sides
r0, r1, r2, r3 = self.__calculate_side_ratio(object_polygon, template)
# Calculate the normalized deviations of each ratio based on the estimated ratio
normalized_deviation = np.array([(r0 - expected_r0) / expected_r0,
(r1 - expected_r1) / expected_r1,
(r2 - expected_r2) / expected_r2,
(r3 - expected_r3) / expected_r3])
return normalized_deviation
def __calculate_norm_dev_angles(self, object_polygon):
"""
Calculate the normalized deviation of the actual angles from the expected ones
:param object_polygon:
:return: array of floats representing the deviation
"""
# Build a sparse representation of the angles
angle0_sparse_rep, \
angle1_sparse_rep, \
angle2_sparse_rep, \
angle3_sparse_rep = self.__sparse_representation_angles()
# Build a Gaussian over the image with center in the homography considered
x, y = list(object_polygon.centroid.coords)[0]
x = np.round(x).astype(int)
y = np.round(y).astype(int)
gauss = self.__gaussian(x0=x, y0=y)
# Pick the weights where the ratio is not zero
weights = gauss[angle0_sparse_rep != 0]
sum_of_weights = sum(weights)
# Weighted average of angles
expected_a0 = np.multiply(gauss, angle0_sparse_rep).sum() / sum_of_weights
expected_a1 = np.multiply(gauss, angle1_sparse_rep).sum() / sum_of_weights
expected_a2 = np.multiply(gauss, angle2_sparse_rep).sum() / sum_of_weights
expected_a3 = np.multiply(gauss, angle3_sparse_rep).sum() / sum_of_weights
# Calculate the ratio of the sides
a0, a1, a2, a3 = self.__calculate_angles(object_polygon)
# Calculate the normalized deviations of each ratio based on the estimated ratio
normalized_deviation = np.array([(a0 - expected_a0) / expected_a0,
(a1 - expected_a1) / expected_a1,
(a2 - expected_a2) / expected_a2,
(a3 - expected_a3) / expected_a3])
return normalized_deviation
def __root_mean_square_error(self, deviations):
"""
This method just returns the RMS error
        :param deviations: the vector of deviations
:return: a float
"""
return ((deviations ** 2).sum() / len(deviations)) ** 0.5
def __calculate_angles(self, polygon: Polygon):
"""
This function calculates the angles of a polygon (4 sides)
:param polygon:
:return: 4 angles in degrees
"""
        # Coordinates of the vertices of the object polygon
coords = polygon.exterior.coords
# a______________d
# \ /|
# \ / |
# \ / |
# \ / |
# \ / |
# \ / |
# b/_____c|
# Points
a = Point(coords[0])
b = Point(coords[1])
c = Point(coords[2])
d = Point(coords[3])
angle_0 = self.__get_angle_of_triangle(b, a, d)
angle_1 = self.__get_angle_of_triangle(a, b, d) + self.__get_angle_of_triangle(d, b, c)
angle_2 = self.__get_angle_of_triangle(b, c, d)
angle_3 = self.__get_angle_of_triangle(b, d, c) + self.__get_angle_of_triangle(b, d, a)
return angle_0, angle_1, angle_2, angle_3
def __get_angle_of_triangle(self, a: Point, b: Point, c: Point):
"""
        Given 3 points, returns the angle lying at the middle point b.
        The law of cosines (Cosine Theorem) is used:
        Notation: angle_aBc is the angle with vertex at B of the triangle a-b-c
            ca^2 = ab^2 + bc^2 - 2*ab*bc*cos(angle_aBc)
        :param a: first point
        :param b: angle vertex
        :param c: second point
        :return: angle in degrees
"""
ab = a.distance(b)
bc = b.distance(c)
ca = c.distance(a)
# ca^2 = ab^2 + bc^2 - 2*ab*bc*cos(angle_aBc)
# ca^2 - ab^2 - bc^2 = - 2*ab*bc*cos(angle_aBc)
# cos(angle_aBc) = (ab^2 + bc^2 - ca^2) / (2*ab*bc)
# angle_aBc = arccos[(ab^2 + bc^2 - ca^2) / (2*ab*bc)]
return np.degrees(np.arccos((ab ** 2 + bc ** 2 - ca ** 2) / (2 * ab * bc)))
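    # A small worked check of the law-of-cosines helper above (illustrative comment only):
    # for the 3-4-5 right triangle with a=(3,0), b=(0,0), c=(0,4) we get ab=3, bc=4, ca=5,
    # so arccos((9 + 16 - 25) / (2*3*4)) = arccos(0) = 90 degrees, the expected right angle at b.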
def add_new_ratio(self, Hom, template):
# Calculate new ratios to add
r0, r1, r2, r3 = self.__calculate_side_ratio(Hom.polygon, template)
# print("\t{}: {}".format(template.name, ratios))
# Calculate new angles to add
angles = self.__calculate_angles(Hom.polygon)
# Build the object Ratio
ratio = Ratio(Hom, (r0, r1, r2, r3), angles)
# Add the new object
self.list.append(ratio)
def is_homography_likely(self, object_polygon, template, test_image, idd,
big_window, id_hom_global, threshold=0.3):
        """
        This function estimates whether the input polygon is realistic to observe in the scene.
        :param id_hom_global:
        :param big_window:
        :param idd:
        :param template: TemplateImage
        :param test_image:
        :param object_polygon:
        :param threshold:
        :return: A boolean. True if it is likely
        """
        plot = None
        # One sample is not enough to make an acceptable estimate of the ratio
if self.list is None or len(self.list) < Constants.MIN_HOM_RATION:
return True, plot
# Calculate normalized deviation between real ratios and the estimated ones
normalized_deviation = self.__calculate_norm_dev(object_polygon, template)
normalized_deviation_angles = self.__calculate_norm_dev_angles(object_polygon)
        # All the ratios must not be too distant from the estimated ratios
        all_sides_likely = np.all(np.absolute(normalized_deviation) < threshold)
        # All the angles must not be too distant from the estimated angles
all_angles_likely = np.all(np.absolute(normalized_deviation_angles) < threshold)
# If the ratios are similar to the original one return True
if all_sides_likely:
if all_angles_likely:
return True, plot
else:
if Constants.SAVE:
new_centroid = object_polygon.centroid
not_already_plotted = True
for centroid in self.last_centroids_plotted:
not_already_plotted = not_already_plotted and np.abs(new_centroid.distance(centroid)) > 0.1
if not_already_plotted:
self.last_centroids_plotted.append(new_centroid)
plot = save_discarded_homography(object_polygon, template.name, idd, "angles not likely", str(np.absolute(normalized_deviation_angles)))
return False, plot
else:
            # Ratios are not too similar,
            # so check whether there is a scale transformation or a wrong homography.
            # A scale transformation is likely if the deviations are similar to each other,
            # so normalize the array again based on its mean
mean = np.mean(normalized_deviation)
bi_normalized_deviation = (normalized_deviation - mean) / mean
            # Then check that the deviations do not differ too much from each other
is_likely_to_be_scaled = np.all(np.absolute(bi_normalized_deviation) < threshold)
if is_likely_to_be_scaled and all_angles_likely and False:
                # no angles check! It could be another object with different angles
return True, plot
else:
if Constants.SAVE:
new_centroid = object_polygon.centroid
not_already_plotted = True
for centroid in self.last_centroids_plotted:
not_already_plotted = not_already_plotted and np.abs(new_centroid.distance(centroid)) > 0.1
if not_already_plotted:
self.last_centroids_plotted.append(new_centroid)
plot = save_discarded_homography(object_polygon, template.name, idd, "sides not likely", str(np.absolute(normalized_deviation)))
return False, plot
# def evaluete_homography(self, h1, template):
# object_polygon = h1.polygon
# normalized_deviations = self.__calculate_norm_dev(object_polygon, template)
# normalized_deviations_angles = self.__calculate_norm_dev_angles(object_polygon)
# error_sides = self.__root_mean_square_error(normalized_deviations)
# error_angles = self.__root_mean_square_error(normalized_deviations_angles)
# error1 = max(error_sides, error_angles)
# return error1
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/23 18:45
# @Author : J
# @File : 图像入门.py
# @Software: PyCharm
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Read an image
img = cv.imread("../image.jpg",0) # 1 = color image, 0 = grayscale, -1 = unchanged original
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
# Display the image
# cv.namedWindow("image",cv.WINDOW_NORMAL) # allows the window to be resized
# cv.imshow("image",img)
# cv.waitKey(0) # blocks until a key press occurs
# cv.destroyAllWindows()
# Save the image in png format
# cv.imwrite("image.png",img)
# Summary
# cv.imshow("image",img)
# k = cv.waitKey(0)
# if k & 0xFF == 27: # the ASCII code of Esc is 27, i.e. check whether the Esc key was pressed
# cv.destroyAllWindows()
# elif k == ord("s"):
# cv.imwrite("image.png",img)
# cv.destroyAllWindows()
# Using Matplotlib
# Color images loaded by OpenCV are in BGR mode,
# but Matplotlib displays images in RGB mode.
# Therefore, if a color image is read with OpenCV,
# it will not be displayed with correct colors in Matplotlib.
# plt.imshow(img,cmap = "gray",interpolation = "bicubic")
# plt.xticks([]),plt.yticks([])
# plt.show()
|
def foobar(li):
for item in li:
if item % 15 == 0:
print('foobar')
elif item % 5 == 0:
print('bar')
elif item % 3 == 0:
print('foo')
else:
print(item)
my_list = []
for i in range(1,35001):
my_list.append(i)
foobar(my_list)
|
import ROOT
from array import array
import re
import json
import os
import subprocess
import argparse
from compare import scatter
def makeNtuple(file_in, file_out):
print "Input file: {0}".format(file_in)
print "Output file: {0}".format(file_out)
with open("barcode.json", 'r') as b:
barcodes = json.load(b)
infile = ROOT.TFile.Open(file_in)
outfile = ROOT.TFile(file_out, "recreate")
tree = ROOT.TTree('t1', 't1')
array_dict = {}
branches = ["rbx", "cu", "rm", "sipm_ch", "fiber", "fib_ch", "energy"]
for key in branches:
array_dict[key] = array('f', [0.0])
name = "{0}/F".format(key)
tree.Branch(key, array_dict[key], name)
rbxList = []
rbxList += list("HEP{0:02d}".format(i) for i in xrange(1,19))
rbxList += list("HEM{0:02d}".format(i) for i in xrange(1,19))
nchannels = 0
rbx_dict = {}
for rbxName in rbxList:
m = re.search("([A-Z]*)([0-9]*)", rbxName)
rbxStem = m.group(1)
rbxNum = int(m.group(2))
if "M" in rbxStem:
rbxNum *= -1
rbx_dict[rbxName] = rbxNum
cu = barcodes["{0}-calib".format(rbxName)]
for rm in xrange(1,5):
canvas = infile.Get("Energy/{0}/{1}-{2}-Energy".format(rbxName, rbxName, rm))
pad = 1
sipm_ch = 0
for fiber in xrange(8):
for fib_ch in xrange(6):
histo = canvas.GetPad(pad).GetPrimitive("ENERGY_{0}_RM{1}_fiber{2}_channel{3}".format(rbxName, rm, fiber, fib_ch))
energy = histo.GetMean()
#print "{0} CU{1} RM{2} SiPM{3} ({4}, {5}) : {6}".format(rbxName, cu, rm, sipm_ch, fiber, fib_ch, energy)
array_dict["rbx"][0] = float(rbxNum)
array_dict["cu"][0] = float(cu)
array_dict["rm"][0] = float(rm)
array_dict["sipm_ch"][0] = float(sipm_ch)
array_dict["fiber"][0] = float(fiber)
array_dict["fib_ch"][0] = float(fib_ch)
array_dict["energy"][0] = float(energy)
tree.Fill()
pad += 1
sipm_ch += 1
nchannels += 1
outfile.Write()
outfile.Close()
infile.Close()
print "n channels: {0}".format(nchannels)
return
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--run", "-r", default="", help="run number")
options = parser.parse_args()
run = options.run
# DQM runs from ~hcaldqm/xplotdqm/gui/runs
dqm_data_dir = "/nfshome0/hcaldqm/xplotdqm/gui/runs"
my_data_dir = "Point5_Data"
my_plot_dir = "Point5_Plots"
dqm_file = ""
my_file = ""
if dqm_data_dir[-1] != "/":
dqm_data_dir += "/"
if my_data_dir[-1] != "/":
my_data_dir += "/"
if my_plot_dir[-1] != "/":
my_plot_dir += "/"
if run:
for f in os.listdir(dqm_data_dir):
if run in f:
print "Found DQM file for run {0}: {1}".format(run, f)
dqm_file = f
else:
print "Please provide run number using -r option."
return
if not dqm_file:
print "No DQM file found for run {0}".format(run)
return
my_file = my_data_dir + "processed_" + dqm_file + ".root"
full_dqm_file = dqm_data_dir + dqm_file
print "Using DQM file: {0}".format(full_dqm_file)
# rsync file from DQM directory to personal directory
# subprocess.call(["rsync", "-avz", full_dqm_file, my_file])
# process root file, output ntuple: makeNtuple(file_in, file_out)
makeNtuple(full_dqm_file, my_file)
# files for scatter plot
file_904 = "Nov17-18_Final_CU_Data/sipm.root"
file_point5 = my_file
file_out = my_plot_dir + dqm_file
# make scatter plot
scatter(file_904, file_point5, file_out, run)
if __name__ == "__main__":
main()
|
"""Search UniProt for GO codes."""
import logging
import requests
import pandas as pd
_LOGGER = logging.getLogger(__name__)
SEARCH_GO_URL = "https://www.uniprot.org/uniprot/?query=database:pdb+go:{go}&format=tab&columns=id,entry name,protein names"
SEARCH_ID_URL = "https://www.uniprot.org/uniprot/?query=database:pdb+id:{id}&format=tab&columns=id,entry name,protein names"
MAPPING_URL = "https://www.uniprot.org/uploadlists/"
def search_id(uniprot_ids, ssl_verify) -> pd.DataFrame:
"""Search UniProt by ID.
:param list uniprot_ids: list of UniProt IDs to search
:param bool ssl_verify: does SSL work?
:returns: UniProt IDs and additional information
"""
rows = []
for uni_id in uniprot_ids:
search_url = SEARCH_ID_URL.format(id=uni_id)
req = requests.get(search_url, verify=ssl_verify)
lines = req.text.splitlines()
rows += [line.split("\t") for line in lines[1:]]
df = pd.DataFrame(
data=rows,
columns=[
"UniProt entry ID",
"UniProt entry name",
"UniProt protein names",
],
)
return df.drop_duplicates(ignore_index=True)
def search_go(go_codes, ssl_verify) -> pd.DataFrame:
"""Search UniProt by GO code.
:param list go_codes: list of GO codes to search
:param bool ssl_verify: does SSL work?
:returns: UniProt IDs and additional information
"""
rows = []
for go_code in go_codes:
_, code = go_code.split(":")
search_url = SEARCH_GO_URL.format(go=code)
req = requests.get(search_url, verify=ssl_verify)
lines = req.text.splitlines()
rows += [line.split("\t") + [go_code] for line in lines[1:]]
df = pd.DataFrame(
data=rows,
columns=[
"UniProt entry ID",
"UniProt entry name",
"UniProt protein names",
"UniProt GO code",
],
)
return df.drop_duplicates(ignore_index=True)
def get_pdb_ids(uniprot_ids, ssl_verify) -> pd.DataFrame:
"""Get PDB IDs corresponding to UniProt IDs.
:param list uniprot_ids: list of UniProt IDs.
:param bool ssl_verify: does SSL work?
:returns: mapping of UniProt IDs to PDB IDs.
"""
params = {
"from": "ACC+ID",
"to": "PDB_ID",
"format": "tab",
"query": " ".join(uniprot_ids),
}
req = requests.post(MAPPING_URL, data=params, verify=ssl_verify)
lines = req.text.splitlines()
rows = [line.split("\t") for line in lines[1:]]
df = pd.DataFrame(data=rows, columns=["UniProt entry ID", "PDB ID"])
return df.drop_duplicates(ignore_index=True)
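# Hedged usage sketch (commented out; not part of the original module). The GO code below
# is only illustrative, and network access plus a working SSL setup are assumed:
#
# if __name__ == "__main__":
#     hits = search_go(["GO:0005515"], ssl_verify=True)
#     pdb_map = get_pdb_ids(hits["UniProt entry ID"].tolist(), ssl_verify=True)
#     print(pdb_map.head())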
|
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
from .views import (
IndexView,
)
app_name = 'api'
urlpatterns = [
url(r'^$', IndexView, name='index'),
]
|
import bisect
import random
def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
i = bisect.bisect(breakpoints, score)
return grades[i]
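# Illustrative mapping (added note, not in the original exercise): bisect.bisect returns
# the insertion point to the right of equal values, so boundary scores land in the higher
# grade: grade(59) -> 'F', grade(60) -> 'D', grade(89) -> 'B', grade(90) -> 'A'.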
def insort():
my_list = []
for _ in range(10):
number = random.randrange(20)
bisect.insort(my_list, number)
return my_list
if __name__ == '__main__':
print("60分的等级为:", grade(60))
print("使用insrot随机插入10个在[0,20)之间的数结果为:", insort())
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
from numba import jit, cuda
from math import exp, sqrt, floor, ceil, atan
import numpy as np
@jit(target_backend='cuda', nopython=True)
def get_avg_fold_float32(quadrants, nQuadrant, fold_height, fold_width, threshold):
result = np.zeros((fold_height, fold_width))
if nQuadrant > 0:
for x in range(fold_width):
for y in range(fold_height):
sum_val = 0.0
n_fold = 0
for i in range(nQuadrant):
fold = quadrants[i]
if fold[y,x] > threshold:
# Case where there is an edge effect near gaps creating thin lines on the image
# Neighbor pixels considered as part of the gap: 2
if 2 < y < fold_height-2 and 2 < x < fold_width-2:
not_gap_edge = True
for k in range(y-2, y+3):
for l in range(x-2, x+3):
if fold[k,l] < threshold:
not_gap_edge = False
if not_gap_edge:
sum_val += fold[y,x]
n_fold += 1
else:
sum_val += fold[y,x]
n_fold += 1
if n_fold == 0 :
result[y,x] = 0
else:
result[y,x] = sum_val/n_fold
return result
@jit(target_backend='cuda', nopython=True)
def createAngularBG(width, height, subtr, nBins):
backgound = np.zeros((height, width), dtype = np.float32)
centerX = width - 1
centerY = height - 1
theta_size = 90./nBins
for x in range(0, width):
for y in range(0, height):
rad = qfdistance(centerX, centerY, x, y)
floor_rad = floor(rad)
ceil_rad = ceil(rad)
ifloor_rad = int(floor_rad)
iceil_rad = int(ceil_rad)
if ifloor_rad == iceil_rad:
beta_rad = 0.5
alpha_rad = 0.5
else:
alpha_rad = 1. - (rad - floor_rad)
beta_rad = 1. - (ceil_rad - rad)
deltax = float(abs(x - centerX))
if deltax == 0.0:
deg = 90.0
else:
deltay = float(abs(y - centerY))
slope = deltay / deltax
deg = atan(slope)*180.0/np.pi
fbin = 1.*deg/theta_size
ibin = int(round(fbin))
if ibin == 0:
backgound[y, x] = alpha_rad*subtr[ibin, ifloor_rad] + beta_rad*subtr[ibin, iceil_rad]
elif ibin == nBins:
backgound[y, x] = alpha_rad*subtr[ibin-1, ifloor_rad] + beta_rad*subtr[ibin-1, iceil_rad]
else:
floor_bin = floor(fbin)
ceil_bin = ceil(fbin)
alpha = 1. - (fbin - floor_bin)
beta = 1. - (ceil_bin - fbin)
if alpha == 1.0 and beta == 1.0:
alpha = 0.5
beta = 0.5
ifloor = int(floor_bin - 1.0)
iceil = int(ceil_bin)
elif alpha > beta :
floor_bin = floor_bin - 0.5
ceil_bin = ceil_bin - 0.5
alpha = 1. - (fbin - floor_bin)
beta = 1. - (ceil_bin - fbin)
ifloor = int(floor(floor_bin))
iceil = int(floor(ceil_bin))
else:
floor_bin = floor_bin + 0.5
ceil_bin = ceil_bin + 0.5
alpha = 1. - (fbin - floor_bin)
beta = 1. - (ceil_bin - fbin)
ifloor = int(floor(floor_bin))
iceil = int(floor(ceil_bin))
backgound[y, x] = alpha * (alpha_rad * subtr[ifloor, ifloor_rad] + beta_rad * subtr[ifloor, iceil_rad])+ beta * (alpha_rad * subtr[iceil, ifloor_rad] + beta_rad * subtr[iceil, iceil_rad])
return backgound
@jit(target_backend='cuda', nopython=True)
def createCircularlySymBG(width, height, spline):
backgound = np.zeros((height, width), dtype = np.float32)
centerX = width - 0.5
centerY = height - 0.5
for x in range(width):
for y in range(height):
fx = float(x)
fy = float(y)
rad = sqrt((fx-centerX)**2+(fy-centerY)**2)
ffloor = floor(rad)
fceil = ceil(rad)
alpha = 1.-(rad-ffloor)
beta = 1.-(fceil-rad)
ifloor = int(ffloor)
iceil = int(fceil)
if ifloor == iceil:
alpha = 0.5
beta = 0.5
backgound[y, x] = alpha*spline[ifloor] + beta*spline[iceil]
return backgound
@jit(target_backend='cuda', nopython=True)
def replaceRmin(img, rmin, val):
height = img.shape[0]
width = img.shape[1]
centerX = width
centerY = height
frmin = float(rmin)
replace_val = float(val)
for x in range(width-rmin-1, width):
float_x = float(x)
for y in range(height-rmin-1, height):
float_y = float(y)
distance = sqrt((float_x-centerX)**2+(float_y-centerY)**2)
if distance <= frmin:
img[y, x] = replace_val
return img
@jit(target_backend='cuda', nopython=True)
def getCircularDiscreteBackground(img, rmin, start_p, end_p, radial_bin, nBin, max_pts):
height = img.shape[0]
width = img.shape[1]
xs = np.zeros(nBin, dtype = np.float32)
ys = np.zeros(nBin, dtype = np.float32)
all_pts = np.zeros(max_pts, dtype = np.float32)
centerX = width - 0.5
centerY = height - 0.5
nPoints = 0
for bin in range(0, nBin):
nPoints = 0
d1 = float(rmin + bin*radial_bin)
d2 = d1 + float(radial_bin)
# Get all points in a bin
for x in range(width):
float_x = float(x)
for y in range(height):
float_y = float(y)
distance = sqrt((float_x-centerX)**2+(float_y-centerY)**2)
if d1 <= distance and distance < d2 and nPoints < max_pts:
all_pts[nPoints] = img[y, x]
nPoints = nPoints+1
# Sort all pixels
sorted_pts = all_pts[:nPoints]
sorted_pts.sort()
# Get background value from all points (between percentage of start_p and end_p)
start_ind = int(round(float(nPoints)*start_p/100))
end_ind = int(round(float(nPoints)*end_p/100.))
if start_ind < end_ind:
sumVal = 0.0
for i in range(start_ind, end_ind):
sumVal = sumVal + sorted_pts[i]
ys[bin] = sumVal/float(end_ind-start_ind)
else:
ys[bin] = all_pts[start_ind]
xs[bin] = (d1+d2)/2.
return xs, ys
@jit(target_backend='cuda', nopython=True)
def make2DConvexhullBG2(pchipLines, width, height, centerX, centerY, rmin, rmax):
backgound = np.zeros((height, width), dtype = np.float32)
zero = 0.0
for x in range(width):
for y in range(height):
rad = qfdistance(centerX, centerY, x, y)
ceil_rad = ceil(rad)
floor_rad = floor(rad)
irad_ceil = int(ceil_rad)
irad_floor = int(floor_rad)
if irad_floor == irad_ceil:
beta_rad = 0.5
alpha_rad = 0.5
else:
alpha_rad = 1. - (rad - floor_rad)
beta_rad = 1. - (ceil_rad - rad)
deltax = float(abs(x - centerX))
if deltax == zero:
deg = 90.0
else:
deltay = float(abs(y - centerY))
slope = deltay / deltax
deg = atan(slope)*180.0/np.pi
floor_deg = floor(deg)
ceil_deg = ceil(deg)
ifloor = int(floor_deg)
iceil = int(ceil_deg)
if ifloor == iceil:
alpha = 0.5
beta = 0.5
else:
alpha = 1. - (deg - floor_deg)
beta = 1. - (ceil_deg - deg)
if irad_ceil < rmax and irad_floor >= rmin:
pos1 = irad_floor - rmin
pos2 = irad_ceil - rmin
backgound[y, x] = alpha * (alpha_rad * pchipLines[ifloor, pos1] + beta_rad * pchipLines[ifloor, pos2]) \
+ beta * (alpha_rad * pchipLines[iceil, pos1] + beta_rad * pchipLines[iceil, pos2])
return backgound
@jit(target_backend='cuda', nopython=True)
def combine_bgsub_float32(img1, img2, center_x, center_y, sigmoid_k, radius):
img_height = img1.shape[0]
img_width = img1.shape[1]
result = np.zeros((img_height, img_width), dtype = np.float32)
for x in range(img_width):
for y in range(img_height):
r = qfdistance(x, y, center_x, center_y)
tophat_ratio = sigmoid(sigmoid_k, radius, r)
radial_ratio = 1.0 - tophat_ratio
tophat_val = tophat_ratio * img2[y,x]
radial_val = radial_ratio * img1[y,x]
result[y,x] = tophat_val+radial_val
return result
@jit(target_backend='cuda', nopython=True)
def qfdistance(x1, y1, x2, y2):
return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
@jit(target_backend='cuda', nopython=True)
def sigmoid(k, x0, x):
return 1.0 / (1.0 + exp(-k * (x - x0)))
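# Quick sanity note on the blend above (added comment, not original code): with k > 0,
# sigmoid(k, x0, x) is 0.5 exactly at x == x0, tends to 1 for x >> x0 and to 0 for x << x0,
# so combine_bgsub_float32 switches smoothly from the radial background (img1) inside
# `radius` to the top-hat background (img2) outside it.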
|
"""
Models for the MVC setup of the Azzaip applications. Defines the
means by which the application interfaces with the database.
"""
import datetime
from django.db import models
class Message(models.Model):
"""
A single Azzaip post with all relevant fields.
"""
author_uri = models.CharField(max_length=40)
course_uri = models.CharField(max_length=40)
message_text = models.CharField(max_length=500)
message_title = models.CharField(max_length=120)
created_at = models.DateTimeField(default=datetime.datetime.now,
editable=False)
modified_at = models.DateTimeField(default=datetime.datetime.now)
created_by = models.CharField(max_length=40)
modified_by = models.CharField(max_length=40)
class Meta:
"""
Meta information for a Message, defining additional properties.
"""
app_label = 'azzaip'
def __str__(self):
        return self.message_title
|
from binascii import hexlify, unhexlify
from datetime import datetime, timedelta
import ecdsa
b58_digits = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
class InvalidBase58Error(Exception):
    """Raised when a string contains a character outside the base58 alphabet."""
    pass
def encode58(b):
"""Encode bytes to a base58-encoded string"""
# Convert big-endian bytes to integer
n = int('0x0' + hexlify(b).decode('utf8'), 16)
# Divide that integer into bas58
res = []
while n > 0:
n, r = divmod (n, 58)
res.append(b58_digits[r])
res = ''.join(res[::-1])
# Encode leading zeros as base58 zeros
import sys
czero = b'\x00'
if sys.version > '3':
# In Python3 indexing a bytes returns numbers, not characters.
czero = 0
pad = 0
for c in b:
if c == czero: pad += 1
else: break
return b58_digits[0] * pad + res
def decode58(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
# Convert the string to an integer
n = 0
for c in s:
n *= 58
if c not in b58_digits:
raise InvalidBase58Error('Character %r is not a valid base58 character' % c)
digit = b58_digits.index(c)
n += digit
# Convert the integer to bytes
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = unhexlify(h.encode('utf8'))
# Add padding back.
pad = 0
for c in s[:-1]:
if c == b58_digits[0]: pad += 1
else: break
return b'\x00' * pad + res
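# Round-trip sketch (added example, not part of the original module): leading zero bytes
# survive the encode/decode cycle as leading '1' characters, e.g.
#   encode58(b'\x00\x01') == '12'
#   decode58('12') == b'\x00\x01'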
# s is hexadecimal privkey
# returns a hexadecimal pubkey
def privateKeyToPublicKey(s):
    sk = ecdsa.SigningKey.from_string(s, curve=ecdsa.SECP256k1)
    vk = sk.verifying_key
    # 0x04 prefix marks an uncompressed public key; hexlify keeps this working on Python 3
    return hexlify(b'\x04' + vk.to_string()).decode('utf8')
def est_date(confirms):
now = datetime.now()
td = timedelta(minutes=10*confirms)
return now - td
def secrets_for_post():
return {}
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import itertools
import os.path
from dataclasses import dataclass
from pathlib import PurePath
from textwrap import dedent
from typing import Iterable, List, Set, Tuple, Type
import pytest
from pants.base.specs import Specs
from pants.base.specs_parser import SpecsParser
from pants.engine.addresses import Address, Addresses, AddressInput, UnparsedAddressInputs
from pants.engine.environment import EnvironmentName
from pants.engine.fs import CreateDigest, Digest, DigestContents, FileContent, Snapshot
from pants.engine.internals.graph import (
AmbiguousCodegenImplementationsException,
CycleException,
Owners,
OwnersRequest,
TransitiveExcludesNotSupportedError,
_DependencyMapping,
_DependencyMappingRequest,
_TargetParametrizations,
)
from pants.engine.internals.native_engine import AddressParseException
from pants.engine.internals.parametrize import Parametrize, _TargetParametrizationsRequest
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import Get, MultiGet, rule
from pants.engine.target import (
AllTargets,
AllUnexpandedTargets,
AlwaysTraverseDeps,
AsyncFieldMixin,
CoarsenedTargets,
Dependencies,
DependenciesRequest,
DepsTraversalBehavior,
ExplicitlyProvidedDependencies,
Field,
FieldDefaultFactoryRequest,
FieldDefaultFactoryResult,
FieldSet,
GeneratedSources,
GenerateSourcesRequest,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
InvalidFieldException,
InvalidTargetException,
MultipleSourcesField,
OverridesField,
ShouldTraverseDepsPredicate,
SingleSourceField,
SourcesPaths,
SourcesPathsRequest,
SpecialCasedDependencies,
StringField,
Tags,
Target,
TargetFilesGenerator,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.testutil.rule_runner import QueryRule, RuleRunner, engine_error
from pants.util.ordered_set import FrozenOrderedSet
class MockSingleSourceField(SingleSourceField):
pass
class MockDependencies(Dependencies):
supports_transitive_excludes = True
deprecated_alias = "deprecated_field"
deprecated_alias_removal_version = "9.9.9.dev0"
class SpecialCasedDeps1(SpecialCasedDependencies):
alias = "special_cased_deps1"
class SpecialCasedDeps2(SpecialCasedDependencies):
alias = "special_cased_deps2"
class ResolveField(StringField, AsyncFieldMixin):
alias = "resolve"
default = None
_DEFAULT_RESOLVE = "default_test_resolve"
class ResolveFieldDefaultFactoryRequest(FieldDefaultFactoryRequest):
field_type = ResolveField
@rule
def resolve_field_default_factory(
request: ResolveFieldDefaultFactoryRequest,
) -> FieldDefaultFactoryResult:
return FieldDefaultFactoryResult(lambda f: f.value or _DEFAULT_RESOLVE)
class MockMultipleSourcesField(MultipleSourcesField):
pass
class MockTarget(Target):
alias = "target"
core_fields = (
MockDependencies,
MockMultipleSourcesField,
SpecialCasedDeps1,
SpecialCasedDeps2,
ResolveField,
Tags,
)
deprecated_alias = "deprecated_target"
deprecated_alias_removal_version = "9.9.9.dev0"
class MockGeneratedTarget(Target):
alias = "generated"
core_fields = (MockDependencies, Tags, MockSingleSourceField, ResolveField)
class MockTargetGenerator(TargetFilesGenerator):
alias = "generator"
core_fields = (MockMultipleSourcesField, OverridesField)
generated_target_cls = MockGeneratedTarget
copied_fields = ()
moved_fields = (Dependencies, Tags, ResolveField)
@pytest.fixture
def transitive_targets_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
QueryRule(AllTargets, []),
QueryRule(AllUnexpandedTargets, []),
QueryRule(CoarsenedTargets, [Addresses]),
QueryRule(Targets, [DependenciesRequest]),
QueryRule(TransitiveTargets, [TransitiveTargetsRequest]),
],
target_types=[MockTarget, MockTargetGenerator, MockGeneratedTarget],
objects={"parametrize": Parametrize},
        # NB: The `graph` module masks the environment in most/all positions. We disable the
# inherent environment so that the positions which do require the environment are
# highlighted.
inherent_environment=None,
)
def test_transitive_targets(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1')
target(name='t2', dependencies=[':t1'])
target(name='d1', dependencies=[':t1'])
target(name='d2', dependencies=[':t2'])
target(name='d3')
target(name='root', dependencies=[':d1', ':d2', ':d3'])
"""
),
}
)
def get_target(name: str) -> Target:
return transitive_targets_rule_runner.get_target(Address("", target_name=name))
t1 = get_target("t1")
t2 = get_target("t2")
d1 = get_target("d1")
d2 = get_target("d2")
d3 = get_target("d3")
root = get_target("root")
direct_deps = transitive_targets_rule_runner.request(
Targets, [DependenciesRequest(root[Dependencies])]
)
assert direct_deps == Targets([d1, d2, d3])
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets, [TransitiveTargetsRequest([root.address, d2.address])]
)
assert transitive_targets.roots == (root, d2)
# NB: `//:d2` is both a target root and a dependency of `//:root`.
assert transitive_targets.dependencies == FrozenOrderedSet([d1, d2, d3, t2, t1])
assert transitive_targets.closure == FrozenOrderedSet([root, d2, d1, d3, t2, t1])
def test_transitive_targets_transitive_exclude(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='base')
target(name='intermediate', dependencies=[':base'])
target(name='root', dependencies=[':intermediate', '!!:base'])
"""
),
}
)
def get_target(name: str) -> Target:
return transitive_targets_rule_runner.get_target(Address("", target_name=name))
base = get_target("base")
intermediate = get_target("intermediate")
root = get_target("root")
intermediate_direct_deps = transitive_targets_rule_runner.request(
Targets, [DependenciesRequest(intermediate[Dependencies])]
)
assert intermediate_direct_deps == Targets([base])
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets, [TransitiveTargetsRequest([root.address, intermediate.address])]
)
assert transitive_targets.roots == (root, intermediate)
assert transitive_targets.dependencies == FrozenOrderedSet([intermediate])
assert transitive_targets.closure == FrozenOrderedSet([root, intermediate])
# Regression test that we work with deeply nested levels of excludes.
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1')
target(name='t2', dependencies=[':t1'])
target(name='t3', dependencies=[':t2'])
target(name='t4', dependencies=[':t3'])
target(name='t5', dependencies=[':t4'])
target(name='t6', dependencies=[':t5'])
target(name='t7', dependencies=[':t6'])
target(name='t8', dependencies=[':t7'])
target(name='t9', dependencies=[':t8'])
target(name='t10', dependencies=[':t9'])
target(name='t11', dependencies=[':t10'])
target(name='t12', dependencies=[':t11'])
target(name='t13', dependencies=[':t12'])
target(name='t14', dependencies=[':t13'])
target(name='t15', dependencies=[':t14', '!!:t1', '!!:t5'])
"""
),
}
)
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets, [TransitiveTargetsRequest([get_target("t15").address])]
)
assert transitive_targets.dependencies == FrozenOrderedSet(
[
get_target("t14"),
get_target("t13"),
get_target("t12"),
get_target("t11"),
get_target("t10"),
get_target("t9"),
get_target("t8"),
get_target("t7"),
get_target("t6"),
get_target("t4"),
get_target("t3"),
get_target("t2"),
]
)
def test_special_cased_dependencies(transitive_targets_rule_runner: RuleRunner) -> None:
"""Test that subclasses of `SpecialCasedDependencies` show up if requested, but otherwise are
left off.
This uses the same test setup as `test_transitive_targets`, but does not use the `dependencies`
field like normal.
"""
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1')
target(name='t2', special_cased_deps1=[':t1'])
target(name='d1', special_cased_deps1=[':t1'])
target(name='d2', special_cased_deps2=[':t2'])
target(name='d3')
target(name='root', special_cased_deps1=[':d1', ':d2'], special_cased_deps2=[':d3'])
"""
),
}
)
def get_target(name: str) -> Target:
return transitive_targets_rule_runner.get_target(Address("", target_name=name))
t1 = get_target("t1")
t2 = get_target("t2")
d1 = get_target("d1")
d2 = get_target("d2")
d3 = get_target("d3")
root = get_target("root")
direct_deps = transitive_targets_rule_runner.request(
Targets, [DependenciesRequest(root[Dependencies])]
)
assert direct_deps == Targets()
direct_deps = transitive_targets_rule_runner.request(
Targets,
[
DependenciesRequest(
root[Dependencies], should_traverse_deps_predicate=AlwaysTraverseDeps()
)
],
)
assert direct_deps == Targets([d1, d2, d3])
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets, [TransitiveTargetsRequest([root.address, d2.address])]
)
assert transitive_targets.roots == (root, d2)
assert transitive_targets.dependencies == FrozenOrderedSet()
assert transitive_targets.closure == FrozenOrderedSet([root, d2])
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets,
[
TransitiveTargetsRequest(
[root.address, d2.address],
should_traverse_deps_predicate=AlwaysTraverseDeps(),
)
],
)
assert transitive_targets.roots == (root, d2)
assert transitive_targets.dependencies == FrozenOrderedSet([d1, d2, d3, t2, t1])
assert transitive_targets.closure == FrozenOrderedSet([root, d2, d1, d3, t2, t1])
# TODO(#12871): Fix this to not be based on generated targets.
def test_transitive_targets_tolerates_generated_target_cycles(
transitive_targets_rule_runner: RuleRunner,
) -> None:
"""For certain file-level targets like `python_source`, we should tolerate cycles because the
underlying language tolerates them."""
transitive_targets_rule_runner.write_files(
{
"dep.txt": "",
"t1.txt": "",
"t2.txt": "",
"BUILD": dedent(
"""\
generator(name='dep', sources=['dep.txt'])
generator(name='t1', sources=['t1.txt'], dependencies=['dep.txt:dep', 't2.txt:t2'])
generator(name='t2', sources=['t2.txt'], dependencies=['t1.txt:t1'])
"""
),
}
)
result = transitive_targets_rule_runner.request(
TransitiveTargets,
[TransitiveTargetsRequest([Address("", target_name="t2")])],
)
assert len(result.roots) == 1
assert result.roots[0].address == Address("", target_name="t2")
assert [tgt.address for tgt in result.dependencies] == [
Address("", relative_file_path="t2.txt", target_name="t2"),
Address("", relative_file_path="t1.txt", target_name="t1"),
Address("", relative_file_path="dep.txt", target_name="dep"),
]
def test_transitive_targets_with_should_traverse_deps_predicate(
transitive_targets_rule_runner: RuleRunner,
) -> None:
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1')
target(name='t2', dependencies=[':t1'])
target(name='t3')
target(name='d1', dependencies=[':t1'])
target(name='d2', dependencies=[':t2'])
target(name='d3')
target(name='skipped', dependencies=[':t3'])
target(name='d4', tags=['skip_deps'], dependencies=[':skipped'])
target(name='root', dependencies=[':d1', ':d2', ':d3', ':d4'])
"""
),
}
)
def get_target(name: str) -> Target:
return transitive_targets_rule_runner.get_target(Address("", target_name=name))
t1 = get_target("t1")
t2 = get_target("t2")
t3 = get_target("t3")
d1 = get_target("d1")
d2 = get_target("d2")
d3 = get_target("d3")
skipped = get_target("skipped")
d4 = get_target("d4")
root = get_target("root")
direct_deps = transitive_targets_rule_runner.request(
Targets, [DependenciesRequest(root[Dependencies])]
)
assert direct_deps == Targets([d1, d2, d3, d4])
class SkipDepsTagOrTraverse(ShouldTraverseDepsPredicate):
def __call__(
self, target: Target, field: Dependencies | SpecialCasedDependencies
) -> DepsTraversalBehavior:
if "skip_deps" in (target[Tags].value or []):
return DepsTraversalBehavior.EXCLUDE
return DepsTraversalBehavior.INCLUDE
predicate = SkipDepsTagOrTraverse()
# Assert the class is frozen even though it was not decorated with @dataclass(frozen=True)
with pytest.raises(dataclasses.FrozenInstanceError):
# The dataclass(frozen=True) decorator is only needed if the subclass adds fields.
predicate._callable = SkipDepsTagOrTraverse.__call__ # type: ignore[misc] # noqa
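    # (Presumably the base `ShouldTraverseDepsPredicate` freezes subclasses itself, e.g. in
    # `__init_subclass__`, which is why the mutation above fails without an explicit decorator.)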
transitive_targets = transitive_targets_rule_runner.request(
TransitiveTargets,
[
TransitiveTargetsRequest(
[root.address, d2.address], should_traverse_deps_predicate=predicate
)
],
)
assert transitive_targets.roots == (root, d2)
# NB: `//:d2` is both a target root and a dependency of `//:root`.
assert transitive_targets.dependencies == FrozenOrderedSet([d1, d2, d3, d4, t2, t1])
assert transitive_targets.closure == FrozenOrderedSet([root, d2, d1, d3, d4, t2, t1])
# `//:d4` depends on `//:skipped` which depends on `//:t3`.
# Nothing else depends on `//:skipped` or `//:t3`, so they should not
# be present in the list of transitive deps thanks to `should_traverse_deps_predicate`.
assert skipped not in transitive_targets.dependencies
assert t3 not in transitive_targets.dependencies
assert skipped not in transitive_targets.closure
assert t3 not in transitive_targets.closure
def test_coarsened_targets(transitive_targets_rule_runner: RuleRunner) -> None:
"""CoarsenedTargets should "coarsen" a cycle into a single CoarsenedTarget instance."""
transitive_targets_rule_runner.write_files(
{
"dep.txt": "",
"t1.txt": "",
"t2.txt": "",
# Cycles are only tolerated for file-level targets like `python_source`.
# TODO(#12871): Stop relying on only generated targets having cycle tolerance.
"BUILD": dedent(
"""\
generator(name='dep', sources=['dep.txt'])
generator(name='t1', sources=['t1.txt'], dependencies=['dep.txt:dep', 't2.txt:t2'])
generator(name='t2', sources=['t2.txt'], dependencies=['t1.txt:t1'])
"""
),
}
)
def assert_coarsened(
a: Address, expected_members: List[Address], expected_dependencies: List[Address]
) -> None:
coarsened_targets = transitive_targets_rule_runner.request(
CoarsenedTargets,
[Addresses([a])],
)
assert sorted(t.address for t in coarsened_targets[0].members) == expected_members
# NB: Only the direct dependencies are compared.
assert (
sorted(d.address for ct in coarsened_targets[0].dependencies for d in ct.members)
== expected_dependencies
)
# Non-file-level targets are already validated to not have cycles, so they coarsen to
# themselves.
assert_coarsened(
Address("", target_name="dep"),
[Address("", target_name="dep")],
[Address("", relative_file_path="dep.txt", target_name="dep")],
)
assert_coarsened(
Address("", target_name="t1"),
[Address("", target_name="t1")],
[
Address("", relative_file_path="t1.txt", target_name="t1"),
Address("", relative_file_path="t2.txt", target_name="t2"),
],
)
assert_coarsened(
Address("", target_name="t2"),
[Address("", target_name="t2")],
[
Address("", relative_file_path="t1.txt", target_name="t1"),
Address("", relative_file_path="t2.txt", target_name="t2"),
],
)
# File-level targets not involved in cycles coarsen to themselves.
assert_coarsened(
Address("", relative_file_path="dep.txt", target_name="dep"),
[Address("", relative_file_path="dep.txt", target_name="dep")],
[],
)
# File-level targets involved in cycles will coarsen to the cycle, and have only dependencies
# outside of the cycle.
cycle_files = [
Address("", relative_file_path="t1.txt", target_name="t1"),
Address("", relative_file_path="t2.txt", target_name="t2"),
]
assert_coarsened(
Address("", relative_file_path="t1.txt", target_name="t1"),
cycle_files,
[Address("", relative_file_path="dep.txt", target_name="dep")],
)
assert_coarsened(
Address("", relative_file_path="t2.txt", target_name="t2"),
cycle_files,
[Address("", relative_file_path="dep.txt", target_name="dep")],
)
def assert_failed_cycle(
rule_runner: RuleRunner,
*,
root_target_name: str,
subject_target_name: str,
path_target_names: Tuple[str, ...],
) -> None:
with pytest.raises(ExecutionError) as e:
rule_runner.request(
TransitiveTargets,
[TransitiveTargetsRequest([Address("", target_name=root_target_name)])],
)
(cycle_exception,) = e.value.wrapped_exceptions
assert isinstance(cycle_exception, CycleException)
assert cycle_exception.subject == Address("", target_name=subject_target_name)
assert cycle_exception.path == tuple(Address("", target_name=p) for p in path_target_names)
def test_dep_cycle_self(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files({"BUILD": "target(name='t1', dependencies=[':t1'])"})
assert_failed_cycle(
transitive_targets_rule_runner,
root_target_name="t1",
subject_target_name="t1",
path_target_names=("t1", "t1"),
)
def test_dep_cycle_direct(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1', dependencies=[':t2'])
target(name='t2', dependencies=[':t1'])
"""
)
}
)
assert_failed_cycle(
transitive_targets_rule_runner,
root_target_name="t1",
subject_target_name="t1",
path_target_names=("t1", "t2", "t1"),
)
assert_failed_cycle(
transitive_targets_rule_runner,
root_target_name="t2",
subject_target_name="t2",
path_target_names=("t2", "t1", "t2"),
)
def test_dep_cycle_indirect(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name='t1', dependencies=[':t2'])
target(name='t2', dependencies=[':t3'])
target(name='t3', dependencies=[':t2'])
"""
)
}
)
assert_failed_cycle(
transitive_targets_rule_runner,
root_target_name="t1",
subject_target_name="t2",
path_target_names=("t1", "t2", "t3", "t2"),
)
assert_failed_cycle(
transitive_targets_rule_runner,
root_target_name="t2",
subject_target_name="t2",
path_target_names=("t2", "t3", "t2"),
)
def test_dep_no_cycle_indirect(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"t1.txt": "",
"t2.txt": "",
# TODO(#12871): Stop relying on only generated targets having cycle tolerance.
"BUILD": dedent(
"""\
generator(name='t1', dependencies=['t2.txt:t2'], sources=['t1.txt'])
generator(name='t2', dependencies=[':t1'], sources=['t2.txt'])
"""
),
}
)
result = transitive_targets_rule_runner.request(
TransitiveTargets,
[TransitiveTargetsRequest([Address("", target_name="t1")])],
)
assert len(result.roots) == 1
assert result.roots[0].address == Address("", target_name="t1")
assert {tgt.address for tgt in result.dependencies} == {
Address("", relative_file_path="t1.txt", target_name="t1"),
Address("", relative_file_path="t2.txt", target_name="t2"),
}
def test_name_explicitly_set(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"dir1/f.txt": "",
"dir1/BUILD": dedent(
"""\
generator(sources=['f.txt'])
generator(name='nombre', sources=['f.txt'])
"""
),
"dir2/BUILD": dedent(
"""\
target()
target(name='nombre')
"""
),
"dir3/BUILD": dedent(
"""\
target(resolve=parametrize('r1', 'r2'))
target(name='nombre', resolve=parametrize('r1', 'r2'))
"""
),
"same_name/BUILD": "target(name='same_name')",
}
)
def assert_is_set(addr: Address, expected: bool) -> None:
tgt = transitive_targets_rule_runner.get_target(addr)
assert tgt.name_explicitly_set is expected
assert_is_set(Address("dir1"), False)
assert_is_set(Address("dir1", target_name="nombre"), True)
# We don't bother with generated targets.
assert_is_set(Address("dir1", relative_file_path="f.txt"), True)
assert_is_set(Address("dir1", target_name="nombre", relative_file_path="f.txt"), True)
assert_is_set(Address("dir2"), False)
assert_is_set(Address("dir2", target_name="nombre"), True)
for r in ("r1", "r2"):
assert_is_set(Address("dir3", parameters={"resolve": r}), False)
assert_is_set(Address("dir3", target_name="nombre", parameters={"resolve": r}), True)
# Even if the name is the same as the default, we should recognize when it's explicitly set.
assert_is_set(Address("same_name"), True)
def test_deprecated_field_name(transitive_targets_rule_runner: RuleRunner, caplog) -> None:
transitive_targets_rule_runner.write_files({"BUILD": "target(name='t', deprecated_field=[])"})
transitive_targets_rule_runner.get_target(Address("", target_name="t"))
assert len(caplog.records) == 1
assert "Instead, use `dependencies`" in caplog.text
def test_resolve_deprecated_target_name(transitive_targets_rule_runner: RuleRunner, caplog) -> None:
transitive_targets_rule_runner.write_files({"BUILD": "deprecated_target(name='t')"})
transitive_targets_rule_runner.get_target(Address("", target_name="t"))
assert len(caplog.records) == 1
assert "Instead, use `target`" in caplog.text
def test_resolve_generated_target(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"f1.txt": "",
"f2.txt": "",
"f3.txt": "",
"no_owner.txt": "",
"BUILD": dedent(
"""\
generator(name='generator', sources=['f1.txt', 'f2.txt'])
target(name='non-generator', sources=['f1.txt'])
"""
),
}
)
generated_target_address = Address("", target_name="generator", relative_file_path="f1.txt")
assert transitive_targets_rule_runner.get_target(
generated_target_address
) == MockGeneratedTarget({SingleSourceField.alias: "f1.txt"}, generated_target_address)
# The target generator must actually generate the requested target.
with pytest.raises(ExecutionError):
transitive_targets_rule_runner.get_target(
Address("", target_name="generator", relative_file_path="no_owner.txt")
)
# TODO: Using a "file address" on a target that does not generate file-level targets will fall
# back to the target generator. See https://github.com/pantsbuild/pants/issues/14419.
non_generator_file_address = Address(
"", target_name="non-generator", relative_file_path="f1.txt"
)
assert transitive_targets_rule_runner.get_target(non_generator_file_address) == MockTarget(
{MultipleSourcesField.alias: ["f1.txt"]},
non_generator_file_address.maybe_convert_to_target_generator(),
)
def test_find_all_targets(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"f1.txt": "",
"f2.txt": "",
"f3.txt": "",
"no_owner.txt": "",
"BUILD": dedent(
"""\
generator(name='generator', sources=['f1.txt', 'f2.txt'])
target(name='non-generator', sources=['f1.txt'])
"""
),
"dir/BUILD": "target()",
}
)
all_tgts = transitive_targets_rule_runner.request(AllTargets, [])
expected = {
Address("", target_name="generator", relative_file_path="f1.txt"),
Address("", target_name="generator", relative_file_path="f2.txt"),
Address("", target_name="non-generator"),
Address("dir"),
}
assert {t.address for t in all_tgts} == expected
all_unexpanded = transitive_targets_rule_runner.request(AllUnexpandedTargets, [])
assert {t.address for t in all_unexpanded} == {*expected, Address("", target_name="generator")}
def test_invalid_target(transitive_targets_rule_runner: RuleRunner) -> None:
transitive_targets_rule_runner.write_files(
{
"path/to/BUILD": dedent(
"""\
target(name='t1', frobnot='is-unknown')
"""
),
}
)
def get_target(name: str) -> Target:
return transitive_targets_rule_runner.get_target(Address("path/to", target_name=name))
with engine_error(InvalidTargetException, contains="path/to/BUILD:1: Unrecognized field"):
get_target("t1")
@pytest.fixture
def owners_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
QueryRule(Owners, [OwnersRequest]),
],
target_types=[
MockTarget,
MockTargetGenerator,
MockGeneratedTarget,
],
        # NB: The `graph` module masks the environment in most/all positions. We disable the
        # inherent environment so that the positions which do require the environment are
        # highlighted.
inherent_environment=None,
)
def assert_owners(
rule_runner: RuleRunner,
requested: Iterable[str],
*,
expected: Set[Address],
match_if_owning_build_file_included_in_sources: bool = False,
) -> None:
result = rule_runner.request(
Owners,
[
OwnersRequest(
tuple(requested),
match_if_owning_build_file_included_in_sources=match_if_owning_build_file_included_in_sources,
)
],
)
assert set(result) == expected
def test_owners_source_file_does_not_exist(owners_rule_runner: RuleRunner) -> None:
"""Test when a source file belongs to a target, even though the file does not actually exist.
This happens, for example, when the file is deleted and we're computing `--changed-since`. In
this case, we can only use target generators rather than their generated targets.
"""
owners_rule_runner.write_files(
{
"demo/f.txt": "",
"demo/BUILD": dedent(
"""\
target(name='not-generator', sources=['*.txt'])
generator(name='generator', sources=['*.txt'])
"""
),
}
)
assert_owners(
owners_rule_runner,
["demo/deleted.txt"],
expected={
Address("demo", target_name="generator"),
Address("demo", target_name="not-generator"),
},
)
# For files that do exist, we should use generated targets when possible.
assert_owners(
owners_rule_runner,
["demo/f.txt"],
expected={
Address("demo", target_name="generator", relative_file_path="f.txt"),
Address("demo", target_name="not-generator"),
},
)
# If another generated target comes from the same target generator, then both that generated
# target and its generator should be used.
assert_owners(
owners_rule_runner,
["demo/f.txt", "demo/deleted.txt"],
expected={
Address("demo", target_name="generator", relative_file_path="f.txt"),
Address("demo", target_name="generator"),
Address("demo", target_name="not-generator"),
},
)
def test_owners_multiple_owners(owners_rule_runner: RuleRunner) -> None:
owners_rule_runner.write_files(
{
"demo/f1.txt": "",
"demo/f2.txt": "",
"demo/BUILD": dedent(
"""\
target(name='not-generator-all', sources=['*.txt'])
target(name='not-generator-f2', sources=['f2.txt'])
generator(name='generator-all', sources=['*.txt'])
generator(name='generator-f2', sources=['f2.txt'])
"""
),
}
)
assert_owners(
owners_rule_runner,
["demo/f1.txt"],
expected={
Address("demo", target_name="generator-all", relative_file_path="f1.txt"),
Address("demo", target_name="not-generator-all"),
},
)
assert_owners(
owners_rule_runner,
["demo/f2.txt"],
expected={
Address("demo", target_name="generator-all", relative_file_path="f2.txt"),
Address("demo", target_name="not-generator-all"),
Address("demo", target_name="generator-f2", relative_file_path="f2.txt"),
Address("demo", target_name="not-generator-f2"),
},
)
def test_owners_build_file(owners_rule_runner: RuleRunner) -> None:
"""A BUILD file owns every target defined in it."""
owners_rule_runner.write_files(
{
"demo/f1.txt": "",
"demo/f2.txt": "",
"demo/BUILD": dedent(
"""\
target(name='f1', sources=['f1.txt'])
target(name='f2_first', sources=['f2.txt'])
target(name='f2_second', sources=['f2.txt'])
generator(name='generated', sources=['*.txt'])
"""
),
}
)
assert_owners(
owners_rule_runner,
["demo/BUILD"],
match_if_owning_build_file_included_in_sources=False,
expected=set(),
)
assert_owners(
owners_rule_runner,
["demo/BUILD"],
match_if_owning_build_file_included_in_sources=True,
expected={
Address("demo", target_name="f1"),
Address("demo", target_name="f2_first"),
Address("demo", target_name="f2_second"),
Address("demo", target_name="generated", relative_file_path="f1.txt"),
Address("demo", target_name="generated", relative_file_path="f2.txt"),
},
)
# -----------------------------------------------------------------------------------------------
# Test file-level target generation and parameterization.
# -----------------------------------------------------------------------------------------------
@pytest.fixture
def generated_targets_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
QueryRule(Addresses, [Specs]),
QueryRule(_DependencyMapping, [_DependencyMappingRequest]),
QueryRule(_TargetParametrizations, [_TargetParametrizationsRequest, EnvironmentName]),
UnionRule(FieldDefaultFactoryRequest, ResolveFieldDefaultFactoryRequest),
resolve_field_default_factory,
],
target_types=[MockTargetGenerator, MockGeneratedTarget],
objects={"parametrize": Parametrize},
        # NB: The `graph` module masks the environment in most/all positions. We disable the
        # inherent environment so that the positions which do require the environment are
        # highlighted.
inherent_environment=None,
)
def assert_generated(
rule_runner: RuleRunner,
address: Address,
build_content: str,
files: list[str],
expected_targets: set[Target] | None = None,
*,
expected_dependencies: dict[str, set[str]] | None = None,
) -> None:
rule_runner.write_files(
{
f"{address.spec_path}/BUILD": build_content,
**{os.path.join(address.spec_path, f): "" for f in files},
}
)
parametrizations = rule_runner.request(
_TargetParametrizations,
[
_TargetParametrizationsRequest(address, description_of_origin="tests"),
EnvironmentName(None),
],
)
if expected_targets is not None:
assert expected_targets == {
t
for parametrization in parametrizations
for t in parametrization.parametrization.values()
}
if expected_dependencies is not None:
# TODO: Adjust the `TransitiveTargets` API to expose the complete mapping.
# see https://github.com/pantsbuild/pants/issues/11270
specs = SpecsParser(root_dir=rule_runner.build_root).parse_specs(
["::"], description_of_origin="tests"
)
addresses = rule_runner.request(Addresses, [specs])
dependency_mapping = rule_runner.request(
_DependencyMapping,
[
_DependencyMappingRequest(
TransitiveTargetsRequest(addresses),
expanded_targets=False,
)
],
)
assert {
k.spec: {a.spec for a in v} for k, v in dependency_mapping.mapping.items()
} == expected_dependencies
def test_generate_multiple(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generator(tags=['tag'], sources=['*.ext'])",
["f1.ext", "f2.ext"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["tag"]},
Address("demo", relative_file_path="f1.ext"),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f2.ext", Tags.alias: ["tag"]},
Address("demo", relative_file_path="f2.ext"),
residence_dir="demo",
),
},
)
def test_generate_subdir(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("src/fortran", target_name="demo"),
"generator(name='demo', sources=['**/*.f95'])",
["subdir/demo.f95"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "subdir/demo.f95"},
Address("src/fortran", target_name="demo", relative_file_path="subdir/demo.f95"),
residence_dir="src/fortran/subdir",
)
},
)
def test_generate_overrides(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("example"),
"generator(sources=['*.ext'], tags=['override_me'], overrides={'f1.ext': {'tags': ['overridden']}})",
["f1.ext"],
{
MockGeneratedTarget(
{
SingleSourceField.alias: "f1.ext",
Tags.alias: ["overridden"],
},
Address("example", relative_file_path="f1.ext"),
),
},
)
def test_generate_overrides_unused(generated_targets_rule_runner: RuleRunner) -> None:
with engine_error(
contains="Unused file paths in the `overrides` field for demo:demo: ['fake.ext']"
):
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generator(sources=['*.ext'], overrides={'fake.ext': {'tags': ['irrelevant']}})",
["f1.ext"],
set(),
)
def test_parametrize(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generator(tags=parametrize(t1=['t1'], t2=['t2']), sources=['f1.ext'])",
["f1.ext"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t1"]},
Address("demo", relative_file_path="f1.ext", parameters={"tags": "t1"}),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t2"]},
Address("demo", relative_file_path="f1.ext", parameters={"tags": "t2"}),
residence_dir="demo",
),
},
expected_dependencies={
"demo@tags=t1": {"demo/f1.ext@tags=t1"},
"demo@tags=t2": {"demo/f1.ext@tags=t2"},
"demo/f1.ext@tags=t1": set(),
"demo/f1.ext@tags=t2": set(),
},
)
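# NB on the address specs in `expected_dependencies` here and below: `demo@tags=t1` is the target
# generator parametrized with `tags=t1`, while `demo/f1.ext@tags=t1` is the generated file-level
# target for `f1.ext` under that same parametrization.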
def test_parametrize_multi(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generator(tags=parametrize(t1=['t1'], t2=['t2']), resolve=parametrize('a', 'b'), sources=['f1.ext'])",
["f1.ext"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t1"], ResolveField.alias: "a"},
Address(
"demo", relative_file_path="f1.ext", parameters={"tags": "t1", "resolve": "a"}
),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t2"], ResolveField.alias: "a"},
Address(
"demo", relative_file_path="f1.ext", parameters={"tags": "t2", "resolve": "a"}
),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t1"], ResolveField.alias: "b"},
Address(
"demo", relative_file_path="f1.ext", parameters={"tags": "t1", "resolve": "b"}
),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", Tags.alias: ["t2"], ResolveField.alias: "b"},
Address(
"demo", relative_file_path="f1.ext", parameters={"tags": "t2", "resolve": "b"}
),
residence_dir="demo",
),
},
)
def test_parametrize_overrides(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generator(overrides={'f1.ext': {'resolve': parametrize('a', 'b')}}, resolve='c', sources=['*.ext'])",
["f1.ext", "f2.ext"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", ResolveField.alias: "a"},
Address("demo", relative_file_path="f1.ext", parameters={"resolve": "a"}),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", ResolveField.alias: "b"},
Address("demo", relative_file_path="f1.ext", parameters={"resolve": "b"}),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f2.ext", ResolveField.alias: "c"},
Address("demo", relative_file_path="f2.ext"),
residence_dir="demo",
),
},
expected_dependencies={
"demo:demo": {
"demo/f1.ext@resolve=a",
"demo/f1.ext@resolve=b",
"demo/f2.ext",
},
"demo/f1.ext@resolve=a": set(),
"demo/f1.ext@resolve=b": set(),
"demo/f2.ext": set(),
},
)
def test_parametrize_atom(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo"),
"generated(resolve=parametrize('a', 'b'), source='f1.ext')",
["f1.ext"],
{
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", ResolveField.alias: "a"},
Address("demo", target_name="demo", parameters={"resolve": "a"}),
residence_dir="demo",
),
MockGeneratedTarget(
{SingleSourceField.alias: "f1.ext", ResolveField.alias: "b"},
Address("demo", target_name="demo", parameters={"resolve": "b"}),
residence_dir="demo",
),
},
expected_dependencies={
"demo@resolve=a": set(),
"demo@resolve=b": set(),
},
)
def test_parametrize_partial_atom_to_atom(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo", target_name="t2"),
dedent(
"""\
generated(
name='t1',
resolve=parametrize('a', 'b'),
source='f1.ext',
)
generated(
name='t2',
resolve=parametrize('a', 'b'),
source='f2.ext',
dependencies=[':t1'],
)
"""
),
["f1.ext", "f2.ext"],
expected_dependencies={
"demo:t1@resolve=a": set(),
"demo:t1@resolve=b": set(),
"demo:t2@resolve=a": {"demo:t1@resolve=a"},
"demo:t2@resolve=b": {"demo:t1@resolve=b"},
},
)
assert_generated(
generated_targets_rule_runner,
Address("demo", target_name="t2"),
dedent(
"""\
generated(
name='t1',
resolve=parametrize('default_test_resolve', 'b'),
source='f1.ext',
)
generated(
name='t2',
source='f2.ext',
dependencies=[':t1'],
)
"""
),
["f1.ext", "f2.ext"],
expected_dependencies={
"demo:t1@resolve=default_test_resolve": set(),
"demo:t1@resolve=b": set(),
"demo:t2": {"demo:t1@resolve=default_test_resolve"},
},
)
def test_parametrize_partial_generator_to_generator(
generated_targets_rule_runner: RuleRunner,
) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo", target_name="t2"),
dedent(
"""\
generator(
name='t1',
resolve=parametrize('a', 'b'),
sources=['f1.ext'],
)
generator(
name='t2',
resolve=parametrize('a', 'b'),
sources=['f2.ext'],
dependencies=[':t1'],
)
"""
),
["f1.ext", "f2.ext"],
expected_dependencies={
"demo/f1.ext:t1@resolve=a": set(),
"demo/f1.ext:t1@resolve=b": set(),
"demo/f2.ext:t2@resolve=a": {"demo:t1@resolve=a"},
"demo/f2.ext:t2@resolve=b": {"demo:t1@resolve=b"},
"demo:t1@resolve=a": {
"demo/f1.ext:t1@resolve=a",
},
"demo:t1@resolve=b": {
"demo/f1.ext:t1@resolve=b",
},
"demo:t2@resolve=a": {
"demo/f2.ext:t2@resolve=a",
},
"demo:t2@resolve=b": {
"demo/f2.ext:t2@resolve=b",
},
},
)
def test_parametrize_partial_generator_to_generated(
generated_targets_rule_runner: RuleRunner,
) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo", target_name="t2"),
dedent(
"""\
generator(
name='t1',
resolve=parametrize('a', 'b'),
sources=['f1.ext'],
)
generator(
name='t2',
resolve=parametrize('a', 'b'),
sources=['f2.ext'],
dependencies=['./f1.ext:t1'],
)
"""
),
["f1.ext", "f2.ext"],
expected_dependencies={
"demo/f1.ext:t1@resolve=a": set(),
"demo/f1.ext:t1@resolve=b": set(),
"demo/f2.ext:t2@resolve=a": {"demo/f1.ext:t1@resolve=a"},
"demo/f2.ext:t2@resolve=b": {"demo/f1.ext:t1@resolve=b"},
"demo:t1@resolve=a": {
"demo/f1.ext:t1@resolve=a",
},
"demo:t1@resolve=b": {
"demo/f1.ext:t1@resolve=b",
},
"demo:t2@resolve=a": {
"demo/f2.ext:t2@resolve=a",
},
"demo:t2@resolve=b": {
"demo/f2.ext:t2@resolve=b",
},
},
)
def test_parametrize_partial_exclude(generated_targets_rule_runner: RuleRunner) -> None:
assert_generated(
generated_targets_rule_runner,
Address("demo", target_name="t2"),
dedent(
"""\
generator(
name='t1',
resolve=parametrize('a', 'b'),
sources=['f1.ext', 'f2.ext'],
)
generator(
name='t2',
resolve=parametrize('a', 'b'),
sources=['f3.ext'],
dependencies=[
'./f1.ext:t1',
'./f2.ext:t1',
'!./f2.ext:t1',
],
)
"""
),
["f1.ext", "f2.ext", "f3.ext"],
expected_dependencies={
"demo/f1.ext:t1@resolve=a": set(),
"demo/f2.ext:t1@resolve=a": set(),
"demo/f1.ext:t1@resolve=b": set(),
"demo/f2.ext:t1@resolve=b": set(),
"demo/f3.ext:t2@resolve=a": {"demo/f1.ext:t1@resolve=a"},
"demo/f3.ext:t2@resolve=b": {"demo/f1.ext:t1@resolve=b"},
"demo:t1@resolve=a": {
"demo/f1.ext:t1@resolve=a",
"demo/f2.ext:t1@resolve=a",
},
"demo:t1@resolve=b": {
"demo/f1.ext:t1@resolve=b",
"demo/f2.ext:t1@resolve=b",
},
"demo:t2@resolve=a": {
"demo/f3.ext:t2@resolve=a",
},
"demo:t2@resolve=b": {
"demo/f3.ext:t2@resolve=b",
},
},
)
def test_parametrize_16190(generated_targets_rule_runner: RuleRunner) -> None:
class ParentField(Field):
alias = "parent"
help = "foo"
class ChildField(ParentField):
alias = "child"
help = "foo"
class ParentTarget(Target):
alias = "parent_tgt"
help = "foo"
core_fields = (ParentField, Dependencies)
class ChildTarget(Target):
alias = "child_tgt"
help = "foo"
core_fields = (ChildField, Dependencies)
rule_runner = RuleRunner(
rules=generated_targets_rule_runner.rules,
target_types=[ParentTarget, ChildTarget],
objects={"parametrize": Parametrize},
inherent_environment=None,
)
build_content = dedent(
"""\
child_tgt(name="child", child=parametrize("a", "b"))
parent_tgt(name="parent", parent=parametrize("a", "b"), dependencies=[":child"])
"""
)
assert_generated(
rule_runner,
Address("demo", target_name="child"),
build_content,
[],
expected_dependencies={
"demo:child@child=a": set(),
"demo:child@child=b": set(),
"demo:parent@parent=a": {"demo:child@child=a"},
"demo:parent@parent=b": {"demo:child@child=b"},
},
)
@pytest.mark.parametrize(
"field_content",
[
"tagz=['tag']",
"tagz=parametrize(['tag1'], ['tag2'])",
],
)
def test_parametrize_16910(generated_targets_rule_runner: RuleRunner, field_content: str) -> None:
with engine_error(
InvalidTargetException, contains=f"demo/BUILD:1: Unrecognized field `{field_content}`"
):
assert_generated(
generated_targets_rule_runner,
Address("demo"),
f"generator({field_content}, sources=['*.ext'])",
["f1.ext", "f2.ext"],
)
# -----------------------------------------------------------------------------------------------
# Test `SourcesField`. Also see `engine/target_test.py`.
# -----------------------------------------------------------------------------------------------
@pytest.fixture
def sources_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
QueryRule(HydratedSources, [HydrateSourcesRequest]),
QueryRule(SourcesPaths, [SourcesPathsRequest]),
],
        # NB: The `graph` module masks the environment in most/all positions. We disable the
        # inherent environment so that the positions which do require the environment are
        # highlighted.
inherent_environment=None,
)
def test_sources_normal_hydration(sources_rule_runner: RuleRunner) -> None:
addr = Address("src/fortran", target_name="lib")
sources_rule_runner.write_files(
{f"src/fortran/{f}": "" for f in ["f1.f95", "f2.f95", "f1.f03", "ignored.f03"]}
)
sources = MultipleSourcesField(["f1.f95", "*.f03", "!ignored.f03", "!**/ignore*"], addr)
hydrated_sources = sources_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(sources)]
)
assert hydrated_sources.snapshot.files == ("src/fortran/f1.f03", "src/fortran/f1.f95")
single_source = SingleSourceField("f1.f95", addr)
hydrated_single_source = sources_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(single_source)]
)
assert hydrated_single_source.snapshot.files == ("src/fortran/f1.f95",)
# Test that `SourcesPaths` works too.
sources_paths = sources_rule_runner.request(SourcesPaths, [SourcesPathsRequest(sources)])
assert sources_paths.files == ("src/fortran/f1.f03", "src/fortran/f1.f95")
sources_paths = sources_rule_runner.request(SourcesPaths, [SourcesPathsRequest(single_source)])
assert sources_paths.files == ("src/fortran/f1.f95",)
    # Also test that the Filespec is correct. Computing it does not require the engine.
assert (
sources.filespec
== {
"includes": ["src/fortran/f1.f95", "src/fortran/*.f03"],
"excludes": ["src/fortran/ignored.f03", "src/fortran/**/ignore*"],
}
== hydrated_sources.filespec
)
assert (
single_source.filespec
== {"includes": ["src/fortran/f1.f95"]}
== hydrated_single_source.filespec
)
def test_sources_output_type(sources_rule_runner: RuleRunner) -> None:
class SourcesSubclass(MultipleSourcesField):
pass
class SingleSourceSubclass(SingleSourceField):
pass
addr = Address("", target_name="lib")
sources_rule_runner.write_files({f: "" for f in ["f1.f95"]})
valid_sources = SourcesSubclass(["*"], addr)
hydrated_valid_sources = sources_rule_runner.request(
HydratedSources,
[HydrateSourcesRequest(valid_sources, for_sources_types=[SourcesSubclass])],
)
assert hydrated_valid_sources.snapshot.files == ("f1.f95",)
assert hydrated_valid_sources.sources_type == SourcesSubclass
valid_single_sources = SingleSourceSubclass("f1.f95", addr)
hydrated_valid_sources = sources_rule_runner.request(
HydratedSources,
[HydrateSourcesRequest(valid_single_sources, for_sources_types=[SingleSourceSubclass])],
)
assert hydrated_valid_sources.snapshot.files == ("f1.f95",)
assert hydrated_valid_sources.sources_type == SingleSourceSubclass
invalid_sources = MultipleSourcesField(["*"], addr)
hydrated_invalid_sources = sources_rule_runner.request(
HydratedSources,
[HydrateSourcesRequest(invalid_sources, for_sources_types=[SourcesSubclass])],
)
assert hydrated_invalid_sources.snapshot.files == ()
assert hydrated_invalid_sources.sources_type is None
invalid_single_sources = SingleSourceField("f1.f95", addr)
hydrated_invalid_sources = sources_rule_runner.request(
HydratedSources,
[HydrateSourcesRequest(invalid_single_sources, for_sources_types=[SingleSourceSubclass])],
)
assert hydrated_invalid_sources.snapshot.files == ()
assert hydrated_invalid_sources.sources_type is None
def test_sources_unmatched_globs(sources_rule_runner: RuleRunner) -> None:
sources_rule_runner.set_options(["--unmatched-build-file-globs=error"])
sources_rule_runner.write_files({f: "" for f in ["f1.f95"]})
sources = MultipleSourcesField(["non_existent.f95"], Address("", target_name="lib"))
with engine_error(contains="non_existent.f95"):
sources_rule_runner.request(HydratedSources, [HydrateSourcesRequest(sources)])
single_sources = SingleSourceField("non_existent.f95", Address("", target_name="lib"))
with engine_error(contains="non_existent.f95"):
sources_rule_runner.request(HydratedSources, [HydrateSourcesRequest(single_sources)])
def test_sources_default_globs(sources_rule_runner: RuleRunner) -> None:
class DefaultSources(MultipleSourcesField):
default = ("default.f95", "default.f03", "*.f08", "!ignored.f08")
addr = Address("src/fortran", target_name="lib")
# NB: Not all globs will be matched with these files, specifically `default.f03` will not
# be matched. This is intentional to ensure that we use `any` glob conjunction rather
# than the normal `all` conjunction.
sources_rule_runner.write_files(
{f"src/fortran/{f}": "" for f in ["default.f95", "f1.f08", "ignored.f08"]}
)
sources = DefaultSources(None, addr)
assert set(sources.value or ()) == set(DefaultSources.default)
hydrated_sources = sources_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(sources)]
)
assert hydrated_sources.snapshot.files == ("src/fortran/default.f95", "src/fortran/f1.f08")
def test_sources_expected_file_extensions(sources_rule_runner: RuleRunner) -> None:
class ExpectedExtensionsSources(MultipleSourcesField):
expected_file_extensions = (".f95", ".f03", "")
class ExpectedExtensionsSingleSource(SingleSourceField):
expected_file_extensions = (".f95", ".f03", "")
addr = Address("src/fortran", target_name="lib")
sources_rule_runner.write_files(
{f"src/fortran/{f}": "" for f in ["s.f95", "s.f03", "s.f08", "s"]}
)
def get_source(src: str) -> str:
sources = sources_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(ExpectedExtensionsSources([src], addr))]
).snapshot.files
single_source = sources_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(ExpectedExtensionsSingleSource(src, addr))]
).snapshot.files
assert sources == single_source
assert len(sources) == 1
return sources[0]
with engine_error(contains="['src/fortran/s.f08']"):
get_source("s.f08")
# Also check that we support valid sources
assert get_source("s.f95") == "src/fortran/s.f95"
assert get_source("s") == "src/fortran/s"
def test_sources_expected_num_files(sources_rule_runner: RuleRunner) -> None:
class ExpectedNumber(MultipleSourcesField):
expected_num_files = 2
class ExpectedRange(MultipleSourcesField):
# We allow for 1 or 3 files
expected_num_files = range(1, 4, 2)
sources_rule_runner.write_files({f: "" for f in ["f1.txt", "f2.txt", "f3.txt", "f4.txt"]})
def hydrate(sources_cls: Type[MultipleSourcesField], sources: Iterable[str]) -> HydratedSources:
return sources_rule_runner.request(
HydratedSources,
[
HydrateSourcesRequest(sources_cls(sources, Address("", target_name="example"))),
],
)
with engine_error(contains="must have 2 files"):
hydrate(ExpectedNumber, [])
with engine_error(contains="must have 1 or 3 files"):
hydrate(ExpectedRange, ["f1.txt", "f2.txt"])
    # Also check that we support valid numbers of files.
assert hydrate(ExpectedNumber, ["f1.txt", "f2.txt"]).snapshot.files == ("f1.txt", "f2.txt")
assert hydrate(ExpectedRange, ["f1.txt"]).snapshot.files == ("f1.txt",)
assert hydrate(ExpectedRange, ["f1.txt", "f2.txt", "f3.txt"]).snapshot.files == (
"f1.txt",
"f2.txt",
"f3.txt",
)
# -----------------------------------------------------------------------------------------------
# Test codegen. Also see `engine/target_test.py`.
# -----------------------------------------------------------------------------------------------
class SmalltalkSource(SingleSourceField):
pass
class AvroSources(MultipleSourcesField):
pass
class AvroLibrary(Target):
alias = "avro_library"
core_fields = (AvroSources,)
class GenerateSmalltalkFromAvroRequest(GenerateSourcesRequest):
input = AvroSources
output = SmalltalkSource
@rule
async def generate_smalltalk_from_avro(
request: GenerateSmalltalkFromAvroRequest,
) -> GeneratedSources:
protocol_files = request.protocol_sources.files
# Many codegen implementations will need to look up a protocol target's dependencies in their
# rule. We add this here to ensure that this does not result in rule graph issues.
_ = await Get(TransitiveTargets, TransitiveTargetsRequest([request.protocol_target.address]))
    def generate_smalltalk(fp: str) -> FileContent:
        parent = str(PurePath(fp).parent).replace("src/avro", "src/smalltalk")
        file_name = f"{PurePath(fp).stem}.st"
        return FileContent(str(PurePath(parent, file_name)), b"Generated")
    result = await Get(Snapshot, CreateDigest([generate_smalltalk(fp) for fp in protocol_files]))
return GeneratedSources(result)
@pytest.fixture
def codegen_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
generate_smalltalk_from_avro,
QueryRule(HydratedSources, [HydrateSourcesRequest, EnvironmentName]),
QueryRule(GeneratedSources, [GenerateSmalltalkFromAvroRequest, EnvironmentName]),
UnionRule(GenerateSourcesRequest, GenerateSmalltalkFromAvroRequest),
],
target_types=[AvroLibrary],
)
def setup_codegen_protocol_tgt(rule_runner: RuleRunner) -> Address:
rule_runner.write_files(
{"src/avro/f.avro": "", "src/avro/BUILD": "avro_library(name='lib', sources=['*.avro'])"}
)
return Address("src/avro", target_name="lib")
def test_codegen_generates_sources(codegen_rule_runner: RuleRunner) -> None:
addr = setup_codegen_protocol_tgt(codegen_rule_runner)
protocol_sources = AvroSources(["*.avro"], addr)
assert (
protocol_sources.can_generate(SmalltalkSource, codegen_rule_runner.union_membership) is True
)
# First, get the original protocol sources.
hydrated_protocol_sources = codegen_rule_runner.request(
HydratedSources, [HydrateSourcesRequest(protocol_sources)]
)
assert hydrated_protocol_sources.snapshot.files == ("src/avro/f.avro",)
# Test directly feeding the protocol sources into the codegen rule.
tgt = codegen_rule_runner.get_target(addr)
generated_sources = codegen_rule_runner.request(
GeneratedSources,
[GenerateSmalltalkFromAvroRequest(hydrated_protocol_sources.snapshot, tgt)],
)
assert generated_sources.snapshot.files == ("src/smalltalk/f.st",)
# Test that HydrateSourcesRequest can also be used.
generated_via_hydrate_sources = codegen_rule_runner.request(
HydratedSources,
[
HydrateSourcesRequest(
protocol_sources, for_sources_types=[SmalltalkSource], enable_codegen=True
)
],
)
assert generated_via_hydrate_sources.snapshot.files == ("src/smalltalk/f.st",)
assert generated_via_hydrate_sources.sources_type == SmalltalkSource
def test_codegen_works_with_subclass_fields(codegen_rule_runner: RuleRunner) -> None:
addr = setup_codegen_protocol_tgt(codegen_rule_runner)
class CustomAvroSources(AvroSources):
pass
protocol_sources = CustomAvroSources(["*.avro"], addr)
assert (
protocol_sources.can_generate(SmalltalkSource, codegen_rule_runner.union_membership) is True
)
generated = codegen_rule_runner.request(
HydratedSources,
[
HydrateSourcesRequest(
protocol_sources, for_sources_types=[SmalltalkSource], enable_codegen=True
)
],
)
assert generated.snapshot.files == ("src/smalltalk/f.st",)
def test_codegen_cannot_generate_language(codegen_rule_runner: RuleRunner) -> None:
addr = setup_codegen_protocol_tgt(codegen_rule_runner)
class AdaSources(MultipleSourcesField):
pass
protocol_sources = AvroSources(["*.avro"], addr)
assert protocol_sources.can_generate(AdaSources, codegen_rule_runner.union_membership) is False
generated = codegen_rule_runner.request(
HydratedSources,
[
HydrateSourcesRequest(
protocol_sources, for_sources_types=[AdaSources], enable_codegen=True
)
],
)
assert generated.snapshot.files == ()
assert generated.sources_type is None
def test_ambiguous_codegen_implementations_exception() -> None:
# This error message is quite complex. We test that it correctly generates the message.
class SmalltalkGenerator1(GenerateSourcesRequest):
input = AvroSources
output = SmalltalkSource
class SmalltalkGenerator2(GenerateSourcesRequest):
input = AvroSources
output = SmalltalkSource
class AdaSources(MultipleSourcesField):
pass
class AdaGenerator(GenerateSourcesRequest):
input = AvroSources
output = AdaSources
class IrrelevantSources(MultipleSourcesField):
pass
# Test when all generators have the same input and output.
exc = AmbiguousCodegenImplementationsException.create(
[SmalltalkGenerator1, SmalltalkGenerator2], for_sources_types=[SmalltalkSource]
)
assert "can generate SmalltalkSource from AvroSources" in str(exc)
assert "* SmalltalkGenerator1" in str(exc)
assert "* SmalltalkGenerator2" in str(exc)
# Test when the generators have different input and output, which usually happens because
# the call site used too expansive of a `for_sources_types` argument.
exc = AmbiguousCodegenImplementationsException.create(
[SmalltalkGenerator1, AdaGenerator],
for_sources_types=[SmalltalkSource, AdaSources, IrrelevantSources],
)
assert "can generate one of ['AdaSources', 'SmalltalkSource'] from AvroSources" in str(exc)
assert "IrrelevantSources" not in str(exc)
assert "* SmalltalkGenerator1 -> SmalltalkSource" in str(exc)
assert "* AdaGenerator -> AdaSources" in str(exc)
# -----------------------------------------------------------------------------------------------
# Test the Dependencies field. Also see `engine/target_test.py`.
# -----------------------------------------------------------------------------------------------
def test_transitive_excludes_error() -> None:
class Valid1(Target):
alias = "valid1"
core_fields = (MockDependencies,)
class Valid2(Target):
alias = "valid2"
core_fields = (MockDependencies,)
class InvalidDepsField(Dependencies):
pass
class Invalid(Target):
alias = "invalid"
core_fields = (InvalidDepsField,)
exc = TransitiveExcludesNotSupportedError(
bad_value="!!//:bad",
address=Address("demo"),
registered_target_types=[Valid1, Valid2, Invalid],
union_membership=UnionMembership({}),
)
assert "Bad value '!!//:bad' in the `dependencies` field for demo:demo" in exc.args[0]
assert "work with these target types: ['valid1', 'valid2']" in exc.args[0]
class SmalltalkDependencies(Dependencies):
supports_transitive_excludes = True
class CustomSmalltalkDependencies(SmalltalkDependencies):
pass
class SmalltalkLibrarySource(SmalltalkSource):
pass
class SmalltalkLibrary(Target):
alias = "smalltalk_library"
# Note that we use MockDependencies so that we support transitive excludes (`!!`).
core_fields = (MockDependencies, SmalltalkLibrarySource)
class SmalltalkLibraryGenerator(TargetFilesGenerator):
alias = "smalltalk_libraries"
core_fields = (MultipleSourcesField,)
generated_target_cls = SmalltalkLibrary
copied_fields = ()
# Note that we use MockDependencies so that we support transitive excludes (`!!`).
moved_fields = (MockDependencies,)
@dataclass(frozen=True)
class SmalltalkDependenciesInferenceFieldSet(FieldSet):
required_fields = (SmalltalkLibrarySource,)
source: SmalltalkLibrarySource
class InferSmalltalkDependencies(InferDependenciesRequest):
infer_from = SmalltalkDependenciesInferenceFieldSet
@rule
async def infer_smalltalk_dependencies(request: InferSmalltalkDependencies) -> InferredDependencies:
    # To demo an inference rule, we simply treat each `sources` file as containing a list of
    # addresses, one per line.
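    # For example, a source file containing the lines `//:util` and `!//:legacy` would, under this
    # demo convention, infer a dependency on `//:util` and an ignore of `//:legacy`.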
hydrated_sources = await Get(HydratedSources, HydrateSourcesRequest(request.field_set.source))
digest_contents = await Get(DigestContents, Digest, hydrated_sources.snapshot.digest)
all_lines = [
line.strip()
for line in itertools.chain.from_iterable(
file_content.content.decode().splitlines() for file_content in digest_contents
)
]
include = await MultiGet(
Get(Address, AddressInput, AddressInput.parse(line, description_of_origin="smalltalk rule"))
for line in all_lines
if not line.startswith("!")
)
exclude = await MultiGet(
Get(
Address,
AddressInput,
AddressInput.parse(line[1:], description_of_origin="smalltalk rule"),
)
for line in all_lines
if line.startswith("!")
)
return InferredDependencies(include, exclude=exclude)
@pytest.fixture
def dependencies_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
infer_smalltalk_dependencies,
QueryRule(Addresses, [DependenciesRequest]),
QueryRule(ExplicitlyProvidedDependencies, [DependenciesRequest]),
UnionRule(InferDependenciesRequest, InferSmalltalkDependencies),
],
target_types=[SmalltalkLibrary, SmalltalkLibraryGenerator, MockTarget],
        # NB: The `graph` module masks the environment in most/all positions. We disable the
        # inherent environment so that the positions which do require the environment are
        # highlighted.
inherent_environment=None,
)
def assert_dependencies_resolved(
rule_runner: RuleRunner,
requested_address: Address,
*,
expected: Iterable[Address],
) -> None:
target = rule_runner.get_target(requested_address)
result = rule_runner.request(Addresses, [DependenciesRequest(target.get(Dependencies))])
assert sorted(result) == sorted(expected)
def test_explicitly_provided_dependencies(dependencies_rule_runner: RuleRunner) -> None:
"""Ensure that we correctly handle `!` and `!!` ignores.
We leave the rest of the parsing to AddressInput and Address.
"""
dependencies_rule_runner.write_files(
{
"files/f.txt": "",
"files/transitive_exclude.txt": "",
"files/BUILD": "smalltalk_libraries(sources=['*.txt'])",
"a/b/c/BUILD": "smalltalk_libraries()",
"demo/subdir/BUILD": dedent(
"""\
target(
dependencies=[
'a/b/c',
'!a/b/c',
'files/f.txt',
'!files/f.txt',
'!!files/transitive_exclude.txt',
],
)
"""
),
}
)
target = dependencies_rule_runner.get_target(Address("demo/subdir"))
result = dependencies_rule_runner.request(
ExplicitlyProvidedDependencies, [DependenciesRequest(target[Dependencies])]
)
assert result.address == target.address
expected_addresses = {Address("a/b/c"), Address("files", relative_file_path="f.txt")}
assert set(result.includes) == expected_addresses
assert set(result.ignores) == {
*expected_addresses,
Address("files", relative_file_path="transitive_exclude.txt"),
}
def test_normal_resolution(dependencies_rule_runner: RuleRunner) -> None:
dependencies_rule_runner.write_files(
{
"src/smalltalk/BUILD": dedent(
"""\
target(dependencies=['//:dep1', '//:dep2', ':sibling'])
target(name='sibling')
"""
),
"no_deps/BUILD": "target()",
# An ignore should override an include.
"ignore/BUILD": (
"target(dependencies=['//:dep1', '!//:dep1', '//:dep2', '!!//:dep2'])"
),
"BUILD": dedent(
"""\
target(name='dep1')
target(name='dep2')
"""
),
}
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("src/smalltalk"),
expected=[
Address("", target_name="dep1"),
Address("", target_name="dep2"),
Address("src/smalltalk", target_name="sibling"),
],
)
assert_dependencies_resolved(dependencies_rule_runner, Address("no_deps"), expected=[])
assert_dependencies_resolved(dependencies_rule_runner, Address("ignore"), expected=[])
def test_target_error_message_for_bad_field_values(dependencies_rule_runner: RuleRunner) -> None:
dependencies_rule_runner.write_files(
{
"src/BUILD": "smalltalk_libraries(sources=['//'])",
"dep/BUILD": "target(dependencies=['//::typo#addr'])",
}
)
err = "src/BUILD:1: Invalid field value for 'sources' in target src:src: Absolute paths not supported: \"//\""
with engine_error(InvalidFieldException, contains=err):
dependencies_rule_runner.get_target(Address("src"))
err = "dep/BUILD:1: Failed to get dependencies for dep:dep: Failed to parse address spec"
with engine_error(InvalidFieldException, contains=err):
target = dependencies_rule_runner.get_target(Address("dep"))
dependencies_rule_runner.request(Addresses, [DependenciesRequest(target[Dependencies])])
def test_explicit_file_dependencies(dependencies_rule_runner: RuleRunner) -> None:
dependencies_rule_runner.write_files(
{
"src/smalltalk/util/f1.st": "",
"src/smalltalk/util/f2.st": "",
"src/smalltalk/util/f3.st": "",
"src/smalltalk/util/f4.st": "",
"src/smalltalk/util/BUILD": "smalltalk_libraries(sources=['*.st'])",
"src/smalltalk/BUILD": dedent(
"""\
target(
dependencies=[
'./util/f1.st',
'src/smalltalk/util/f2.st',
'./util/f3.st',
'./util/f4.st',
'!./util/f3.st',
'!!./util/f4.st',
]
)
"""
),
}
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("src/smalltalk"),
expected=[
Address("src/smalltalk/util", relative_file_path="f1.st", target_name="util"),
Address("src/smalltalk/util", relative_file_path="f2.st", target_name="util"),
],
)
def test_dependency_inference(dependencies_rule_runner: RuleRunner) -> None:
"""We test that dependency inference works generally and that we merge it correctly with
explicitly provided dependencies.
For consistency, dep inference does not merge generated subtargets with BUILD targets: if both
are inferred, expansion to Targets will remove the redundancy while converting to subtargets.
"""
dependencies_rule_runner.write_files(
{
"inferred1.st": "",
"inferred2.st": "",
"inferred_but_ignored1.st": "",
"inferred_but_ignored2.st": "",
"inferred_and_provided1.st": "",
"inferred_and_provided2.st": "",
"BUILD": dedent(
"""\
smalltalk_libraries(name='inferred1')
smalltalk_libraries(name='inferred2')
smalltalk_libraries(name='inferred_but_ignored1', sources=['inferred_but_ignored1.st'])
smalltalk_libraries(name='inferred_but_ignored2', sources=['inferred_but_ignored2.st'])
target(name='inferred_and_provided1')
target(name='inferred_and_provided2')
"""
),
"demo/f1.st": dedent(
"""\
//:inferred1
inferred2.st:inferred2
"""
),
"demo/f2.st": dedent(
"""\
//:inferred_and_provided1
inferred_and_provided2.st:inferred_and_provided2
inferred_but_ignored1.st:inferred_but_ignored1
//:inferred_but_ignored2
"""
),
"demo/f3.st": dedent(
"""\
//:inferred1
!:inferred_and_provided1
!//:inferred_and_provided2
"""
),
"demo/BUILD": dedent(
"""\
smalltalk_libraries(
sources=['*.st'],
dependencies=[
'//:inferred_and_provided1',
'//:inferred_and_provided2',
'!inferred_but_ignored1.st:inferred_but_ignored1',
'!//:inferred_but_ignored2',
],
)
"""
),
}
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("demo"),
expected=[
Address("demo", relative_file_path="f1.st"),
Address("demo", relative_file_path="f2.st"),
Address("demo", relative_file_path="f3.st"),
],
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("demo", relative_file_path="f1.st", target_name="demo"),
expected=[
Address("", target_name="inferred1"),
Address("", relative_file_path="inferred2.st", target_name="inferred2"),
Address("", target_name="inferred_and_provided1"),
Address("", target_name="inferred_and_provided2"),
],
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("demo", relative_file_path="f2.st", target_name="demo"),
expected=[
Address("", target_name="inferred_and_provided1"),
Address("", target_name="inferred_and_provided2"),
Address(
"",
relative_file_path="inferred_and_provided2.st",
target_name="inferred_and_provided2",
),
],
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("demo", relative_file_path="f3.st", target_name="demo"),
expected=[
Address("", target_name="inferred1"),
],
)
def test_depends_on_generated_targets(dependencies_rule_runner: RuleRunner) -> None:
"""If the address is a target generator, then it depends on all of its generated targets."""
dependencies_rule_runner.write_files(
{
"src/smalltalk/f1.st": "",
"src/smalltalk/f2.st": "",
"src/smalltalk/BUILD": "smalltalk_libraries(sources=['*.st'])",
"src/smalltalk/util/BUILD": "smalltalk_libraries()",
}
)
assert_dependencies_resolved(
dependencies_rule_runner,
Address("src/smalltalk"),
expected=[
Address("src/smalltalk", relative_file_path="f1.st"),
Address("src/smalltalk", relative_file_path="f2.st"),
],
)
def test_depends_on_atom_via_14419(dependencies_rule_runner: RuleRunner) -> None:
"""See #14419."""
dependencies_rule_runner.write_files(
{
"src/smalltalk/f1.st": "",
"src/smalltalk/f2.st": "",
"src/smalltalk/BUILD": dedent(
"""\
smalltalk_library(source='f1.st')
smalltalk_library(
name='t2',
source='f2.st',
dependencies=['./f1.st'],
)
"""
),
}
)
# Due to the accommodation for #14419, the file address `./f1.st` resolves to the atom target
# with the default name.
assert_dependencies_resolved(
dependencies_rule_runner,
Address("src/smalltalk", target_name="t2"),
expected=[
Address("src/smalltalk"),
],
)
def test_resolve_unparsed_address_inputs() -> None:
rule_runner = RuleRunner(
rules=[QueryRule(Addresses, [UnparsedAddressInputs])], target_types=[MockTarget]
)
rule_runner.write_files(
{
"project/BUILD": dedent(
"""\
target(name="t1")
target(name="t2")
target(name="t3")
"""
)
}
)
def resolve(addresses: list[str], skip_invalid_addresses: bool = False) -> set[Address]:
return set(
rule_runner.request(
Addresses,
[
UnparsedAddressInputs(
addresses,
owning_address=Address("project", target_name="t3"),
description_of_origin="from my tests",
skip_invalid_addresses=skip_invalid_addresses,
)
],
)
)
t1 = Address("project", target_name="t1")
assert resolve(["project:t1", ":t2"]) == {t1, Address("project", target_name="t2")}
invalid_addresses = ["project:t1", "bad::", "project/fake.txt:tgt"]
assert resolve(invalid_addresses, skip_invalid_addresses=True) == {t1}
with engine_error(AddressParseException, contains="from my tests"):
resolve(invalid_addresses)
|
# This script implements the *fast feature extraction without data augmentation* approach from Section 5.3.1
from keras.applications import VGG16
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt
conv_base = VGG16(weights='imagenet',
include_top=False,
input_shape=(150,150,3))
conv_base.summary()
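# Shape sketch (confirmed by the summary above): VGG16's five pooling stages downsample a
# 150x150 input roughly as 150 -> 75 -> 37 -> 18 -> 9 -> 4, and its last block has 512 channels,
# which is why the extracted feature arrays below are shaped (n, 4, 4, 512).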
train_dir = 'train'
validation_dir = 'validation'
test_dir = 'test'
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20
def extract_features(directory, sample_count):
features = np.zeros(shape=(sample_count, 4, 4, 512))
labels = np.zeros(shape=(sample_count))
generator = datagen.flow_from_directory(
directory,
target_size=(150,150),
batch_size=batch_size,
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i * batch_size >= sample_count:
break
return features, labels
# Run the images through the convolutional base and store the extracted features in numpy.ndarrays
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
type(train_features) # numpy.ndarray
train_features.shape # (2000, 4, 4, 512)
train_features_res = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features_res = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features_res = np.reshape(test_features, (1000, 4 * 4 * 512))
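# The Dense classifier below expects flat 2-D input, so the (n, 4, 4, 512) feature maps are
# reshaped to (n, 4 * 4 * 512) = (n, 8192) before training.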
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim= 4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features_res, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features_res, validation_labels))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# The `val` prefix stands for validation, so val_loss is the loss on the validation set
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
|
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
def web_submit(submit,chrome_driver,debug=0):
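    """Fill in a two-page offer form using the personal data in `submit['health']`.
    The page structure (element ids such as `vote`, `email`, `zip`, `fname`, ...) is assumed
    from the XPaths used below; passing `debug=1` overrides the target URL with a test site.
    """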
# test
if debug == 1:
site = 'https://www.cpagrip.com/show.php?l=0&u=218456&id=26188'
submit['Site'] = site
chrome_driver.get(submit['Site'])
chrome_driver.maximize_window()
chrome_driver.refresh()
sleep(3)
# page1
# select
num_ = random.randint(1,2)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="vote"]'))
s1.select_by_index(num_)
sleep(1)
# input
chrome_driver.find_element_by_xpath('//*[@id="email"]').send_keys(submit['health']['email'])
sleep(1)
# checkbox
chrome_driver.find_element_by_xpath('//*[@id="email-checkbox"]/label/span[1]').click()
sleep(1)
# click
chrome_driver.find_element_by_xpath('//*[@id="button-row"]/div/button').click()
sleep(3)
# page2
# zipcode
zipcode = submit['health']['zip'].split('.')[0]
chrome_driver.find_element_by_xpath('//*[@id="zip"]').send_keys(zipcode)
# firstname
chrome_driver.find_element_by_xpath('//*[@id="fname"]').send_keys(submit['health']['firstname'])
    # lastname
chrome_driver.find_element_by_xpath('//*[@id="lname"]').send_keys(submit['health']['lastname'])
# address
chrome_driver.find_element_by_xpath('//*[@id="address"]').send_keys(submit['health']['address'])
# dateofbirth
chrome_driver.find_element_by_xpath('//*[@id="dob_holder"]').click()
sleep(1)
# if 'dateofbirth' in submit['health']:
# date_of_birth = Submit_handle.get_auto_birthday(submit['health']['dateofbirth'])
# else:
date_of_birth = Submit_handle.get_auto_birthday('')
for key in date_of_birth[0]:
chrome_driver.find_element_by_xpath('//*[@id="dob_month_digit"]').send_keys(key)
for key in date_of_birth[1]:
chrome_driver.find_element_by_xpath('//*[@id="dob_day"]').send_keys(key)
for key in date_of_birth[2]:
chrome_driver.find_element_by_xpath('//*[@id="dob_year"]').send_keys(key)
# Mobile phone number
home_phone = submit['health']['homephone'].split('.')[0]
chrome_driver.find_element_by_xpath('//*[@id="phone"]').send_keys(home_phone)
# male female
num_ = random.randint(0,1)
if num_ == 0:
chrome_driver.find_element_by_xpath('//*[@id="reg-form"]/div[12]/div/div/div[1]/label/span').click()
else:
chrome_driver.find_element_by_xpath('//*[@id="gender-f"]/label/span').click()
# continue
chrome_driver.find_element_by_xpath('//*[@id="reg-form"]/div[13]/div/button').click()
sleep(120)
return 1
def test():
# db.email_test()
# date_of_birth = Submit_handle.get_auto_birthday('')
Mission_list = ['10026']
Excel_name = ['health','']
Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
print(submit)
submit['Mission_Id'] = '10026'
chrome_driver = Chrome_driver.get_chrome(submit)
web_submit(submit,chrome_driver,1)
if __name__=='__main__':
test()
|
#-*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
class Autor(models.Model):
usuario = models.OneToOneField(User, verbose_name = 'Usuário')
class Meta:
verbose_name_plural = 'Autores'
db_table = 'blog_autor'
def __unicode__(self):
return '%s' % self.usuario
class Publicacao(models.Model):
titulo = models.CharField('Título', max_length = 100)
texto = models.TextField('Texto')
url = models.URLField(verbose_name = 'URL', blank = True)
data = models.DateTimeField(auto_now_add = True)
autor = models.ForeignKey(Autor)
class Meta:
verbose_name = 'Publicação'
verbose_name_plural = 'Publicações'
db_table = 'blog_publicacao'
def __unicode__(self):
return '%s' % self.titulo
class Comentario(models.Model):
texto = models.TextField()
autor = models.CharField(max_length = 100)
data = models.DateTimeField(auto_now_add = True)
publicacao = models.ForeignKey(Publicacao, verbose_name = 'Publicação')
class Meta:
verbose_name = 'Comentário'
verbose_name_plural = 'Comentários'
def __unicode__(self):
return '%s' % self.texto
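# A minimal sketch of a companion admin.py (hypothetical file, not part of the
# original models module) that registers these models with the Django admin;
# the relative import assumes the models live in the same app package:
#
#     from django.contrib import admin
#     from .models import Autor, Publicacao, Comentario
#
#     admin.site.register(Autor)
#     admin.site.register(Publicacao)
#     admin.site.register(Comentario)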
|
from heapq import *
def solution(operations):
answer = []
heapify(answer)
check = 0
for i in operations:
if i[0] == 'D' and check == 0:
continue
else:
if i[0] == 'I':
heappush(answer, int(i[2:]))
check += 1
elif i[0] == 'D' and i[2] == '-':
heappop(answer)
check -= 1
            else:
                # "D 1" deletes the maximum; list.pop() would drop an arbitrary
                # heap element, so remove the max explicitly and re-heapify
                answer.remove(max(answer))
                heapify(answer)
                check -= 1
if len(answer) == 0:
return [0, 0]
else:
return [max(answer), min(answer)]
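# Quick sanity checks (illustrative inputs; expected outputs worked out by hand):
print(solution(["I 16", "I -5643", "D -1", "D 1", "D 1", "I 123", "D -1"]))  # [0, 0]
print(solution(["I -45", "I 653", "D 1", "I -642", "I 45", "I 97", "D 1", "D -1", "I 333"]))  # [333, -45]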
|
#!/usr/bin/env python3
import time
import mpd
def connect_client():
"""Connect to MPD Client"""
client = mpd.MPDClient()
client.connect("localhost", 6600)
return client
def get_pl_tuples(client):
pl_tuples = []
for song in client.playlistinfo():
mytuple = (song["id"], song["album"])
pl_tuples.append(mytuple)
return pl_tuples
def get_id_of_album_start(client, album):
pl_tuples = get_pl_tuples(client)
for song in pl_tuples:
if song[1] == album:
return song[0]
def check_for_new_album(album, client):
"""See if a new album is playing and if so reset the variable"""
try:
        currentalbum = client.currentsong()['album']
    except KeyError:
        currentalbum = 'Not Sure'
if currentalbum == album:
print( "no new album.")
return False
else:
print( "new album.")
return True
client = connect_client()
#get current album
album = client.currentsong()['album']
print(album)
#get id of of current album's first song
firstAlbumSong = get_id_of_album_start(client, album)
print(str(firstAlbumSong))
#while loop to stay on that album
while True:
for line in client.idle():
if (client.currentsong()['album']) == album:
print("same album")
else:
print("different album")
client.playid(firstAlbumSong)
|
from GameLogic.Character import *
from GameLogic.MapHelpers import getAroundingTiles
def getUnitPrice(unitType, character):
from GameLogic.Unit import Soldier, Robot, Tank, Boat
if unitType is Soldier:
if type(character) is IceCharacter:
return 120
else:
return 150
elif unitType is Robot:
if type(character) is ForestCharacter:
return 240
else:
return 300
elif unitType is Tank:
if type(character) is DesertCharacter:
return 600
else:
return 750
elif unitType is Boat:
if type(character) is SwampCharacter:
return 800
else:
return 1000
else:
raise Exception("This is not a valid unit type")
def BuyUnit(gameLogic, unitType, tile, player):
price = getUnitPrice(unitType, player.Character)
if player.Money < price:
return None
    # a unit may only be bought adjacent to one of the player's own buildings:
    # the generator yields False as soon as a surrounding tile holds a building
    # owned by the player; otherwise next() falls back to True and we abort
    if next((False for tile in getAroundingTiles(tile, gameLogic.Map) if
             tile.Building is not None and tile.Building.Owner == player), True):
        return None
from GameLogic.Map import SeaTile
import GameLogic.Unit
if type(tile) is not SeaTile:
if unitType is GameLogic.Unit.Boat:
return
elif tile.Unit is None:
player.Money -= price
unit = unitType(tile, player, gameLogic)
tile.Unit = unit
return unit
elif type(tile.Unit) is GameLogic.Unit.UnitGroup:
if tile.Unit.CountUnits > 3:
return None
else:
player.Money -= price
unit = unitType(tile, player, gameLogic)
tile.Unit.AddUnit(unit)
return unit
elif isinstance(tile.Unit, GameLogic.Unit.Unit):
existingUnit = tile.Unit
group = GameLogic.Unit.UnitGroup(tile, player, gameLogic)
group.AddUnit(existingUnit)
player.Money -= price
unit = unitType(tile, player, gameLogic)
group.AddUnit(unit)
tile.Unit = group
return unit
elif unitType is GameLogic.Unit.Boat:
if tile.Unit is None:
player.Money -= price
unit = unitType(tile, player, gameLogic)
tile.Unit = unit
return unit
|