| prompt (string, 45-59.5k chars) | completion (string, 6-228 chars) | api (string, 12-68 chars) |
|---|---|---|
#!/usr/bin/env python
r"""
Parallel assembling and solving of Poisson's equation, using commands for
interactive use.
Find :math:`u` such that:
.. math::
    \int_{\Omega} \nabla v \cdot \nabla u
    = \int_{\Omega} v f
    \;, \quad \forall v \;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have the ``_i`` suffix.
- This example does not use a nonlinear solver.
- This example can serve as a template for solving a linear single-field scalar
problem - just replace the equations in :func:`create_local_problem()`.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/diffusion/poisson_parallel_interactive.py -h
See PETSc options::
$ python examples/diffusion/poisson_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/diffusion/poisson_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 5 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --verify --metis -ksp_monitor -ksp_converged_reason
View the results using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.timing import Timer
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.evaluate import apply_ebc_to_matrix
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
import sfepy.parallel.parallel as pl
import sfepy.parallel.plot_parallel_dofs as ppd
def create_local_problem(omega_gi, order):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
field_i = Field.from_args('fu', nm.float64, 1, omega_i,
approx_order=order)
output('number of local field DOFs:', field_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field_i)
v_i = FieldVariable('v_i', 'test', field_i, primary_var_name='u_i')
integral = Integral('i', order=2*order)
mat = Material('m', lam=10, mu=5)
t1 = Term.new('dw_laplace(m.lam, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
def _get_load(coors):
val = nm.ones_like(coors[:, 0])
for coor in coors.T:
val *= nm.sin(4 * nm.pi * coor)
return val
def get_load(ts, coors, mode=None, **kwargs):
if mode == 'qp':
return {'val' : _get_load(coors).reshape(coors.shape[0], 1, 1)}
load = Material('load', function=Function('get_load', get_load))
t2 = Term.new('dw_volume_lvf(load.val, v_i)',
integral, omega_i, load=load, v_i=v_i)
eq = Equation('balance', t1 - 100 * t2)
eqs = Equations([eq])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.all' : 0.1})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2]))
pb.update_materials()
return pb
def verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options,
verbose=False):
vec = pl.verify_task_dof_maps(dof_maps, id_map, field, verbose=verbose)
order = options.order
mesh = field.domain.mesh
sfield = Field.from_args('aux', nm.float64, 'scalar', field.region,
approx_order=order)
aux = FieldVariable('aux', 'parameter', sfield,
primary_var_name='(set-to-None)')
out = aux.create_output(vec,
linearization=Struct(kind='adaptive',
min_level=order-1,
max_level=order-1,
eps=1e-8))
filename = os.path.join(options.output_dir,
'para-domains-dofs.h5')
if field.is_higher_order():
out['aux'].mesh.write(filename, out=out)
else:
mesh.write(filename, out=out)
out = Struct(name='cells', mode='cell',
data=cell_tasks[:, None, None, None])
filename = os.path.join(options.output_dir,
'para-domains-cells.h5')
mesh.write(filename, out={'cells' : out})
def solve_problem(mesh_filename, options, comm):
order = options.order
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
stats = Struct()
timer = Timer('solve_timer')
timer.start()
mesh = Mesh.from_file(mesh_filename)
stats.t_read_mesh = timer.stop()
timer.start()
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
stats.t_partition_mesh = timer.stop()
output('creating global domain and field...')
timer.start()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
stats.t_create_global_fields = timer.stop()
output('...done in', timer.dt)
output('distributing field %s...' % field.name)
timer.start()
distribute = pl.distribute_fields_dofs
lfds, gfds = distribute([field], cell_tasks,
is_overlap=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
lfd = lfds[0]
stats.t_distribute_fields_dofs = timer.stop()
output('...done in', timer.dt)
if rank == 0:
dof_maps = gfds[0].dof_maps
id_map = gfds[0].id_map
if options.verify:
verify_save_dof_maps(field, cell_tasks,
dof_maps, id_map, options, verbose=True)
if options.plot:
ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0],
options.output_dir, size)
output('creating local problem...')
timer.start()
omega_gi = Region.from_cells(lfd.cells, field.domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, order)
variables = pb.get_variables()
eqs = pb.equations
u_i = variables['u_i']
field_i = u_i.field
stats.t_create_local_problem = timer.stop()
output('...done in', timer.dt)
if options.plot:
ppd.plot_local_dofs([None, None], field, field_i, omega_gi,
options.output_dir, rank)
output('allocating global system...')
timer.start()
sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1)
output('sizes:', sizes)
output('drange:', drange)
pdofs = pl.get_local_ordering(field_i, lfd.petsc_dofs_conn)
output('pdofs:', pdofs)
pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
is_overlap=True, comm=comm,
verbose=True)
stats.t_allocate_global_system = timer.stop()
|
output('...done in', timer.dt)
|
sfepy.base.base.output
|
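The target API here is sfepy's print-like logging helper, used throughout the prompt above; a minimal usage sketch::

    from sfepy.base.base import output

    output('rank', 0, 'of', 4)  # joins its arguments like print()
    output.set_output(filename='log.txt', quiet=True)  # optionally log to a file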
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
all_anchors = all_anchors.reshape(-1, c).detach()
return all_anchors
def forward(self, fpn_fms):
all_anchors_list = []
fm_stride = [8, 16, 32, 64, 128]
fm_stride.reverse()
for i, fm_3x3 in enumerate(fpn_fms):
anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
config.anchor_aspect_ratios, base_size = 4)
all_anchors_list.append(all_anchors)
return all_anchors_list
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
# -------------------------- build the anchor generator -------------- #
self.anchor_generator = RetinaNetAnchorV2()
# -------------------------- build the criteria ---------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = F.concat(res, 0)
return result
def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
anchors_list, rpn_iou_list, boxes, im_info):
all_anchors_list = [F.concat([a, i*
|
F.ones([a.shape[0], 1])
|
megengine.functional.ones
|
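The completion supplies a column of ones so that each anchor row can be tagged with its pyramid-level index ``i``; a sketch of the pattern (tensor names follow the truncated comprehension above)::

    import megengine.functional as F

    # append a constant column holding the level index i to an (n, 4) anchor tensor
    tagged = F.concat([a, i * F.ones([a.shape[0], 1])], axis=1)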
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap":
|
mge.tensor(dtype="float32")
|
megengine.tensor
|
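The completion fills the ``heatmap`` placeholder the same way as the other inputs; ``megengine.tensor`` also accepts a concrete NumPy array, as in this sketch (the shape is illustrative)::

    import numpy as np
    import megengine as mge

    heatmap = mge.tensor(np.zeros((2, 17, 64, 48), dtype=np.float32), dtype="float32")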
"""Anime CRUD controller."""
import sqlmodel
from sqlmodel.ext.asyncio import session as aio_session
from app.crud import base
from app.models import anime
class AnimeCRUD(base.BaseCRUD[anime.Anime, anime.AnimeCreate,
anime.AnimeUpdate]):
"""CRUD controller for anime.
It contains Create, Read, Update, and Delete methods.
"""
@classmethod
async def get_by_title(cls, session: aio_session.AsyncSession,
title: str) -> anime.Anime | None:
"""Gets an anime by their title.
Args:
session: The database session.
title: The anime's title.
"""
anime_list = await session.exec(
|
sqlmodel.select(anime.Anime)
|
sqlmodel.select
|
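A plausible continuation of ``get_by_title``, assuming ``Anime`` has a ``title`` column (the ``.where`` filter and ``.first()`` call are illustrative, not part of the prompt)::

    anime_list = await session.exec(
        sqlmodel.select(anime.Anime).where(anime.Anime.title == title))
    return anime_list.first()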
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode use cpu only")
if world_size > 1:
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") /
|
dist.get_world_size()
|
megengine.distributed.get_world_size
|
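The completed line implements the all-reduce-mean mentioned in the comment: sum the loss across workers, then divide by the world size::

    if dist.is_distributed():  # all_reduce_mean
        loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()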
#!/usr/bin/env python
"""
Convert a mesh file from one SfePy-supported format to another.
Examples::
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s2.5
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1 -c 0
"""
import sys
sys.path.append('.')
from optparse import OptionParser
from sfepy.base.base import nm, output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import (output_mesh_formats, MeshIO,
supported_cell_types)
usage = '%prog [options] filename_in filename_out\n' + __doc__.rstrip()
help = {
'scale' : 'scale factor (float or comma-separated list for each axis)'
' [default: %default]',
'center' : 'center of the output mesh (0 for origin or'
' comma-separated list for each axis) applied after scaling'
' [default: %default]',
'refine' : 'uniform refinement level [default: %default]',
'format' : 'output mesh format (overrides filename_out extension)',
'list' : 'list supported readable/writable output mesh formats',
}
def _parse_val_or_vec(option, name, parser):
if option is not None:
try:
try:
option = float(option)
except ValueError:
option = [float(ii) for ii in option.split(',')]
option = nm.array(option, dtype=nm.float64, ndmin=1)
except Exception:
output('bad %s! (%s)' % (name, option))
parser.print_help()
sys.exit(1)
return option
def main():
parser = OptionParser(usage=usage)
parser.add_option('-s', '--scale', metavar='scale',
action='store', dest='scale',
default=None, help=help['scale'])
parser.add_option('-c', '--center', metavar='center',
action='store', dest='center',
default=None, help=help['center'])
parser.add_option('-r', '--refine', metavar='level',
action='store', type=int, dest='refine',
default=0, help=help['refine'])
parser.add_option('-f', '--format', metavar='format',
action='store', type='string', dest='format',
default=None, help=help['format'])
parser.add_option('-l', '--list', action='store_true',
dest='list', help=help['list'])
(options, args) = parser.parse_args()
if options.list:
output('Supported readable mesh formats:')
output('--------------------------------')
output_mesh_formats('r')
output('')
output('Supported writable mesh formats:')
output('--------------------------------')
output_mesh_formats('w')
sys.exit(0)
if len(args) != 2:
parser.print_help()
sys.exit(1)
scale = _parse_val_or_vec(options.scale, 'scale', parser)
center = _parse_val_or_vec(options.center, 'center', parser)
filename_in, filename_out = args
mesh = Mesh.from_file(filename_in)
if scale is not None:
if len(scale) == 1:
tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
elif len(scale) == mesh.dim:
tr = nm.diag(scale)
else:
raise ValueError('bad scale! (%s)' % scale)
mesh.transform_coors(tr)
if center is not None:
cc = 0.5 * mesh.get_bounding_box().sum(0)
shift = center - cc
tr = nm.c_[nm.eye(mesh.dim, dtype=nm.float64), shift[:, None]]
mesh.transform_coors(tr)
if options.refine > 0:
domain =
|
FEDomain(mesh.name, mesh)
|
sfepy.discrete.fem.FEDomain
|
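The refinement branch presumably wraps the mesh in a domain and refines it ``options.refine`` times, following the standard sfepy pattern; a sketch::

    domain = FEDomain(mesh.name, mesh)
    for ii in range(options.refine):
        output('refine %d...' % (ii + 1))
        domain = domain.refine()
    mesh = domain.mesh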
import random
from datetime import datetime
from typing import List, Optional
from sqlmodel import or_, select
from config.notif_config import NotifConfig
from src.api.fixtures_client import FixturesClient
from src.db.db_manager import NotifierDBManager
from src.db.notif_sql_models import Fixture as DBFixture
from src.db.notif_sql_models import League as DBLeague
from src.db.notif_sql_models import Team as DBTeam
from src.emojis import Emojis
from src.entities import Fixture, TeamStanding
from src.senders.email_sender import send_email_html
from src.senders.telegram_sender import send_telegram_message
from src.utils.date_utils import get_date_spanish_text_format
from src.utils.fixtures_utils import (
get_image_search,
get_last_fixture,
get_last_fixture_db,
get_next_fixture,
get_next_fixture_db,
get_youtube_highlights_videos,
)
from src.utils.message_utils import (
get_first_phrase_msg,
get_team_intro_message,
is_subscripted_for_team,
)
class TeamFixturesManager:
def __init__(self, season: str, team_id: str) -> None:
self._season = season
self._team_id = team_id
self._fixtures_client = FixturesClient()
self._notifier_db_manager = NotifierDBManager()
def get_next_team_fixture_text(self, user: str = "") -> tuple:
next_team_fixture = self.get_next_team_fixture()
return (
self._telegram_next_fixture_notification(next_team_fixture, True, user)
if next_team_fixture
else ("Fixture para el equipo no encontrado", "")
)
def get_last_team_fixture_text(self, user: str = "") -> tuple:
last_team_fixture = self.get_last_team_fixture()
return (
self._telegram_last_fixture_notification(last_team_fixture, user)
if last_team_fixture
else ("Fixture para el equipo no encontrado", "")
)
def get_next_team_fixture(self) -> Optional[Fixture]:
fixtures_statement = select(DBFixture).where(
or_(
DBFixture.home_team == self._team_id,
DBFixture.away_team == self._team_id,
)
)
team_fixtures = self._notifier_db_manager.select_records(fixtures_statement)
next_team_fixture = None
if len(team_fixtures):
next_team_fixture = get_next_fixture_db(team_fixtures)
return next_team_fixture
def notify_next_fixture_db(self) -> None:
next_team_fixture = self.get_next_team_fixture()
if next_team_fixture:
if next_team_fixture.remaining_time().days < NotifConfig.NEXT_MATCH_THRESHOLD:
self._perform_fixture_notification(next_team_fixture)
def notify_next_fixture(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
next_team_fixture = None
if "response" in team_fixtures.as_dict:
next_team_fixture = get_next_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if next_team_fixture:
if next_team_fixture.remaining_time().days < 500:
self._perform_fixture_notification(next_team_fixture)
def notify_fixture_line_up_update(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
next_team_fixture = None
if "response" in team_fixtures.as_dict:
next_team_fixture = get_next_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if next_team_fixture:
if (
next_team_fixture.remaining_time().days < 1
and next_team_fixture.remaining_time().hours < 6
and next_team_fixture.line_up
):
self._perform_line_up_confirmed_notification(next_team_fixture)
else:
print(
f"There is still no line up for the match of {next_team_fixture.home_team} vs {next_team_fixture.away_team}"
)
print(str(next_team_fixture.remaining_time()))
def get_last_team_fixture(self) -> Optional[Fixture]:
fixtures_statement = select(DBFixture).where(
or_(
DBFixture.home_team == self._team_id,
DBFixture.away_team == self._team_id,
)
)
team_fixtures = self._notifier_db_manager.select_records(fixtures_statement)
last_team_fixture = None
if team_fixtures:
last_team_fixture = get_last_fixture_db(team_fixtures)
return last_team_fixture
def notify_last_fixture_db(self) -> None:
fixtures_statement = select(DBFixture).where(
or_(
DBFixture.home_team == self._team_id,
DBFixture.away_team == self._team_id,
)
)
team_fixtures = self._notifier_db_manager.select_records(fixtures_statement)
last_team_fixture = None
if team_fixtures:
last_team_fixture = get_last_fixture_db(team_fixtures)
if last_team_fixture:
if (
NotifConfig.LAST_MATCH_THRESHOLD_DAYS
<= last_team_fixture.remaining_time().days
<= 0
):
self._perform_last_fixture_notification(last_team_fixture)
def notify_last_fixture(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
last_team_fixture = get_last_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if last_team_fixture:
if (
-1
<= last_team_fixture.remaining_time().days
<= NotifConfig.LAST_MATCH_THRESHOLD_DAYS
):
last_team_fixture.highlights = get_youtube_highlights_videos(
last_team_fixture.home_team, last_team_fixture.away_team
)
self._perform_last_fixture_notification(last_team_fixture)
def _telegram_last_fixture_notification(
self, team_fixture: Fixture, user: str = ""
) -> tuple:
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["last_match"]
highlights_yt_url = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
highlights_text = f"{Emojis.FILM_PROJECTOR.value} <a href='{highlights_yt_url}'>HIGHLIGHTS</a>"
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {user}!\n\n"
f"{team_intro_message} "
f"jugó el {spanish_format_date}! \nEste fue el resultado: \n\n"
f"{team_fixture.matched_played_telegram_like_repr()}"
f"{highlights_text}"
)
return (telegram_message, match_image_url)
def _telegram_next_fixture_notification(
self, team_fixture: Fixture, is_on_demand: bool = False, user: str = ""
) -> tuple:
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
date_text = (
"es HOY!"
if team_fixture.bsas_date.day == datetime.today().day
else f"es el {Emojis.SPIRAL_CALENDAR.value} {spanish_format_date}."
)
first_phrase = get_first_phrase_msg(True, is_on_demand)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["next_match"]
intro_message = f"{first_phrase} {team_intro_message}"
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {user}! "
f"\n\n{intro_message} {date_text}\n\n{team_fixture.telegram_like_repr()}"
)
return (telegram_message, match_image_url)
def _perform_last_fixture_notification(
self, team_fixture: Fixture, team_standing: TeamStanding = None
) -> None:
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
# telegram
team_standing_msg = (
f"{Emojis.RED_EXCLAMATION_MARK.value} Situación actual en el campeonato: \n\n{team_standing.telegram_like_repr()}\n"
if team_standing
else ""
)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["last_match"]
highlights_yt_url = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
highlights_text = f"{Emojis.FILM_PROJECTOR.value} <a href='{highlights_yt_url}'>HIGHLIGHTS</a>"
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
if is_subscripted_for_team(recipient, self._team_id):
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n"
f"{team_intro_message} "
f"jugó ayer! \nEste fue el resultado: \n\n"
f"{team_fixture.matched_played_telegram_like_repr()}"
f"\n{highlights_text}"
)
send_telegram_message(
recipient.telegram_id,
telegram_message,
match_image_url,
)
# email
team_standing_email_msg = (
f"Situación actual en el campeonato: \n\n{team_standing.email_like_repr()}"
if team_standing
else ""
)
match_image_text = f"<img src='{match_image_url}'>"
email_standing_message = (
f"{Emojis.RED_EXCLAMATION_MARK.value}{team_standing_email_msg}\n"
)
highlights_text = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = (
f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{team_intro_message} "
f"jugó ayer!<br /><br />{match_image_text}<br /><br />Este fue el resultado: \n\n{team_fixture.matched_played_email_like_repr()}"
f"<br /><br />{email_standing_message}<br /><br />{highlights_text}"
)
send_email_html(
f"{team_fixture.home_team.name} ({team_fixture.match_score.home_score}) - "
f"({team_fixture.match_score.away_score}) {team_fixture.away_team.name}",
message,
recipient.email,
)
def _perform_fixture_notification(self, team_fixture: Fixture) -> None:
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
match_image_text = f"<img width='100%' height='100%' src='{match_image_url}'>"
date_text = (
"es HOY!"
if team_fixture.bsas_date.day == datetime.today().day
else f"es el {Emojis.SPIRAL_CALENDAR.value} {spanish_format_date}."
)
first_phrase = get_first_phrase_msg(True)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["next_match"]
intro_message = f"{first_phrase} {team_intro_message}"
# telegram
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
if is_subscripted_for_team(recipient, self._team_id):
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola "
f"{recipient.name}!\n\n{intro_message} {date_text}\n\n{team_fixture.telegram_like_repr()}"
)
send_telegram_message(
recipient.telegram_id,
telegram_message,
photo=match_image_url,
)
# email
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message} {date_text}\n\n<br /><br />{match_image_text}<br /><br />{team_fixture.email_like_repr()}"
send_email_html(
f"{team_fixture.home_team.name} vs. {team_fixture.away_team.name}",
message,
recipient.email,
)
def _perform_line_up_confirmed_notification(self, team_fixture: Fixture) -> None:
match_teams = f"{team_fixture.home_team.name} vs {team_fixture.away_team.name}"
match_image_url = get_image_search(match_teams)
match_image_text = f"<img src='{match_image_url}'>"
# telegram
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
intro_message = f"Se actualizó la alineación para {match_teams}:"
telegram_message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message}\n\n{team_fixture.telegram_like_repr()}"
send_telegram_message(
recipient.telegram_id,
telegram_message,
photo=match_image_url,
)
# email
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message}\n\n<br /><br />{match_image_text}<br /><br />{team_fixture.email_like_repr()}"
send_email_html(
f"{team_fixture.home_team.name} vs. {team_fixture.away_team.name}",
message,
recipient.email,
)
def _get_match_images(self, league_id: int) -> List[str]:
match_image_url_team_statement = select(DBTeam).where(
DBTeam.id == self._team_id
)
match_image_url_league_statement =
|
select(DBLeague)
|
sqlmodel.select
|
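The league statement presumably mirrors the team statement just above it; a sketch, assuming ``DBLeague`` has an ``id`` column matching ``league_id``::

    match_image_url_league_statement = select(DBLeague).where(
        DBLeague.id == league_id
    )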
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema": Q.ema_fakequant_qconfig,
"ema_lowbit": Q.ema_lowbit_fakequant_qconfig,
"sync_ema": Q.sync_ema_fakequant_qconfig,
"min_max": Q.min_max_fakequant_qconfig,
"tqt": Q.tqt_qconfig,
}
def get_qconfig(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
type=float,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, type=float, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema requires ngpus > 1, got ngpus={}".format(args.ngpus)
qconfig = get_qconfig(args.qat_config)
model = Q.quantize_qat(module=model, qconfig=qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: symbolic=True requires trace=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image, label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image, label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label =
|
megengine.tensor(label, dtype="int32")
|
megengine.tensor
|
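The warm-up loop converts the NumPy batch to device tensors each step; the completed pair of lines reads::

    image = megengine.tensor(image, dtype="float32")
    label = megengine.tensor(label, dtype="int32")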
from datetime import datetime
from sqlmodel import Session, SQLModel, create_engine, text
import sqlite3
database_loc = "backend/database.sqlite"
con_str = f"sqlite:///{database_loc}"
engine = create_engine(con_str, echo=True)
sqlite3_engine = sqlite3.connect(f"{database_loc}")
def get_session():
session = Session(engine)
return session
def create_db():
SQLModel.metadata.create_all(engine)
def execute_sample_sql(session):
"""Read sample sql database and import it."""
with open("backend/tests/sample.sql") as f:
content = f.read()
queries = filter(None, content.split(";\n"))
queries = [text(query) for query in queries]
for query in queries:
session.exec(query)
session.commit()
session.expire_all()
session =
|
Session(engine)
|
sqlmodel.Session
|
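``Session`` also works as a context manager, which closes the connection automatically; a small sketch using the helpers defined above::

    create_db()
    with Session(engine) as session:
        execute_sample_sql(session)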
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: symbolic=True requires trace=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size *
|
dist.get_world_size()
|
megengine.distributed.get_world_size
|
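Prompt and completion together scale the per-epoch step count by the number of workers::

    tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())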
from typing import Optional
from loguru import logger
from sqlmodel import Field, Session, SQLModel, create_engine, or_, select
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str = Field(max_length=30)
age: Optional[int] = None
def test_database_with_sqlmodel():
hero_1 = Hero(name='Deadpond', secret_name='<NAME>')
hero_2 = Hero(name='Spider-Boy', secret_name='<NAME>')
hero_3 = Hero(name='Rusty-Man', secret_name='<NAME>', age=48)
# engine = create_engine('sqlite:///temp.db')
engine = create_engine('sqlite:///:memory:')
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
for hero in [hero_1, hero_2, hero_3]:
session.add(hero)
session.commit()
with Session(engine) as session:
statement = select(Hero).where(Hero.name == 'Spider-Boy')
hero = session.exec(statement).first()
logger.info(hero)
# Or statement
statement = select(Hero).where((Hero.name == 'Spider-Boy') | (Hero.name == 'Rusty-Man'))
heroes = session.exec(statement)
for hero in heroes:
logger.info(hero)
# Or statement, alternative way
statement =
|
select(Hero)
|
sqlmodel.select
|
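The "alternative way" the last comment refers to is presumably the ``or_`` helper already imported at the top; a sketch::

    statement = select(Hero).where(
        or_(Hero.name == 'Spider-Boy', Hero.name == 'Rusty-Man'))
    for hero in session.exec(statement):
        logger.info(hero)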
from sqlite3.dbapi2 import Timestamp, adapt
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import validator
from datetime import datetime, date
class Rate(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = Field(foreign_key="user.id")
client_id: int =
|
Field(foreign_key="client.id")
|
sqlmodel.Field
|
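``Field(foreign_key=...)`` ties a column to another table's primary key, so the completed model reads::

    class Rate(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        user_id: int = Field(foreign_key="user.id")
        client_id: int = Field(foreign_key="client.id")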
"""
dayong.operations
~~~~~~~~~~~~~~~~~
Data model operations which include retrieval and update commands.
"""
from typing import Any
import tanjun
from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
from sqlmodel import SQLModel, select
from sqlmodel.engine.result import ScalarResult
from sqlmodel.ext.asyncio.session import AsyncSession
from dayong.abc import Database
from dayong.core.configs import DayongConfig, DayongDynamicLoader
class DatabaseImpl(Database):
"""Implementaion of a database connection for transacting and interacting with
database tables —those that derive from SQLModel.
"""
_conn: AsyncEngine
@staticmethod
async def update(instance: Any, update: Any) -> Any:
"""Overwrite value of class attribute.
Args:
instance (Any): A Class instance.
update (Any): A dictionary containing the attributes to be overwritten.
Returns:
Any: A class instance with updated attribute values.
"""
for key, value in update.items():
setattr(instance, key, value)
return instance
async def connect(
self, config: DayongConfig = tanjun.injected(type=DayongConfig)
) -> None:
self._conn = create_async_engine(
config.database_uri
if config.database_uri
else DayongDynamicLoader().load().database_uri
)
async def create_table(self) -> None:
async with self._conn.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async def add_row(self, table_model: SQLModel) -> None:
async with AsyncSession(self._conn) as session:
session.add(table_model)
await session.commit()
async def remove_row(self, table_model: SQLModel, attribute: str) -> None:
model = type(table_model)
async with AsyncSession(self._conn) as session:
# Temp ignore incompatible type passed to `exec()`. See:
# https://github.com/tiangolo/sqlmodel/issues/54
# https://github.com/tiangolo/sqlmodel/pull/58
row: ScalarResult[Any] = await session.exec(
select(model).where(
getattr(model, attribute) == getattr(table_model, attribute)
) # type: ignore
)
await session.delete(row.one())
await session.commit()
async def get_row(self, table_model: SQLModel, attribute: str) -> ScalarResult[Any]:
model = type(table_model)
async with AsyncSession(self._conn) as session:
# Temp ignore incompatible type passed to `exec()`. See:
# https://github.com/tiangolo/sqlmodel/issues/54
# https://github.com/tiangolo/sqlmodel/pull/58
row: ScalarResult[Any] = await session.exec(
select(model).where(
getattr(model, attribute) == getattr(table_model, attribute)
) # type: ignore
)
return row
async def get_all_row(self, table_model: type[SQLModel]) -> ScalarResult[Any]:
async with AsyncSession(self._conn) as session:
return await session.exec(select(table_model)) # type: ignore
async def update_row(self, table_model: SQLModel, attribute: str) -> None:
model = type(table_model)
table = table_model.__dict__
async with
|
AsyncSession(self._conn)
|
sqlmodel.ext.asyncio.session.AsyncSession
|
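``update_row`` presumably opens a session the same way as the sibling methods; a sketch of one plausible body, reusing the class's ``update`` helper (illustrative only, not the original code)::

    async with AsyncSession(self._conn) as session:
        result = await session.exec(
            select(model).where(
                getattr(model, attribute) == table[attribute]
            )  # type: ignore
        )
        row = result.one()
        # skip SQLAlchemy's internal keys such as _sa_instance_state
        updates = {k: v for k, v in table.items() if not k.startswith("_")}
        session.add(await self.update(row, updates))
        await session.commit()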
#!/usr/bin/env python
# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from __future__ import absolute_import
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import sys
import six
sys.path.append('.')
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.base.plotutils import plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
from functools import partial
def define(K=8.333, mu_nh=3.846, mu_mr=1.923, kappa=1.923, lam=5.769, mu=3.846):
"""Define the problem to solve."""
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import stiffness_from_lame
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
options = {
'nls' : 'newton',
'ls' : 'ls',
'ts' : 'ts',
'save_times' : 'all',
}
functions = {
'linear_pressure' : (linear_pressure,),
'empty' : (lambda ts, coor, mode, region, ig: None,),
}
fields = {
'displacement' : ('real', 3, 'Omega', 1),
}
# Coefficients are chosen so that the tangent stiffness is the same for all
# material for zero strains.
materials = {
'solid' : ({
'K' : K, # bulk modulus
'mu_nh' : mu_nh, # shear modulus of neoHookean term
'mu_mr' : mu_mr, # shear modulus of Mooney-Rivlin term
'kappa' : kappa, # second modulus of Mooney-Rivlin term
# elasticity for LE term
'D' : stiffness_from_lame(dim=3, lam=lam, mu=mu),
},),
'load' : 'empty',
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Bottom' : ('vertices in (z < 0.1)', 'facet'),
'Top' : ('vertices in (z > 2.9)', 'facet'),
}
ebcs = {
'fixb' : ('Bottom', {'u.all' : 0.0}),
'fixt' : ('Top', {'u.[0,1]' : 0.0}),
}
integrals = {
'i' : 1,
'isurf' : 2,
}
equations = {
'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
+ dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 5,
'eps_a' : 1e-10,
'eps_r' : 1.0,
}),
'ts' : ('ts.simple', {
't0' : 0,
't1' : 1,
'dt' : None,
'n_step' : 26, # has precedence over dt!
'verbose' : 1,
}),
}
return locals()
##
# Pressure tractions.
def linear_pressure(ts, coor, mode=None, coef=1, **kwargs):
if mode == 'qp':
val = np.tile(coef * ts.step, (coor.shape[0], 1, 1))
return {'val' : val}
def store_top_u(displacements):
"""Function _store() will be called at the end of each loading step. Top
displacements will be stored into `displacements`."""
def _store(problem, ts, state):
top = problem.domain.regions['Top']
top_u = problem.get_variables()['u'].get_state_in_region(top)
displacements.append(np.mean(top_u[:,-1]))
return _store
def solve_branch(problem, branch_function, material_type):
eq = problem.conf.equations[material_type]
problem.set_equations({material_type : eq})
load = problem.get_materials()['load']
load.set_function(branch_function)
out = []
problem.solve(save_results=False, step_hook=store_top_u(out))
displacements = np.array(out, dtype=np.float64)
return displacements
helps = {
'no_plot' : 'do not show plot window',
}
def plot_mesh(pb):
# plot mesh for macro problem
coors = pb.domain.mesh.coors
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
fig2 = plt.figure(figsize=(5,6))
ax = fig2.add_subplot(111, projection='3d')
for e in range(graph.shape[0]):
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors='white',
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_xlim3d(-1.2, 1.2)
ax.set_ylim3d(-1.2, 1.2)
ax.set_zlim3d(-0.01, 3.2)
ax.set_title('3D plot of macro system')
plt.show()
return None
def one_simulation(material_type, define_args, coef_tension=0.25, coef_compression=-0.25,
plot_mesh_bool=False, return_load=False):
#parser = ArgumentParser(description=__doc__,
# formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument('--version', action='version', version='%(prog)s')
#options = parser.parse_args()
output.set_output(filename='sfepy_log.txt', quiet=True)
required, other =
|
get_standard_keywords()
|
sfepy.base.conf.get_standard_keywords
|
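In the standard sfepy pattern the next steps build a ``ProblemConf`` from the ``define()`` dict and instantiate the problem; a sketch, assuming that API::

    required, other = get_standard_keywords()
    conf = ProblemConf.from_dict(define(**define_args), sys.modules[__name__])
    pb = Problem.from_conf(conf)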
import io
import numpy as np
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.module as M
import megengine.utils.network_node as N
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net
from megengine.utils.network import as_oprnode, set_symbolic_shape
from megengine.utils.network_node import Host2DeviceCopy, VarNode
def test_metadata():
x = Tensor(0)
@trace(symbolic=True, capture_as_const=True)
def fwd(x):
return x * 2
fwd(x)
orig_model = io.BytesIO()
fwd.dump(orig_model, user_info="test", optimize_for_inference=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": "test",
"graph_modified": False, # False: tracing.dump
"optimized_for_inference": False,
}
orig_model.seek(0)
graph.dump(
orig_model,
user_info={"str": "x", "tensor": x, "module": M.Module, "none": None},
optimize_for_inference=True,
enable_nchw4=True,
enable_ioc16=True,
)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None},
"graph_modified": True, # True: Network.dump
"optimized_for_inference": True,
"enable_nchw4": True,
"enable_ioc16": True,
}
orig_model.seek(0)
fwd.dump(orig_model, enable_metadata=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata is None
def test_replace_var():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out = F.mul(vara, varb)
out = F.relu(out)
opnode = list(graph.opr_filter.has_input(vara))
repl_dict = {opnode[0].outputs[0]: out}
graph.replace_vars(repl_dict)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [6, 16])
def test_replace_opr():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out1 = F.sub(vara, varb)
out1 = F.relu(out1)
out1 = graph.add_dep_oprs(out1)
orig_opr = graph.opr_filter.has_input(vara).as_unique()
repl_dict = {orig_opr: out1[0].owner}
graph.replace_oprs(repl_dict)
modified_model1 = io.BytesIO()
graph.dump(modified_model1)
modified_model1.seek(0)
load_graph = GraphInference(modified_model1)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [0, 0])
def test_splice_network():
x = F.ones((2,))
y = F.ones((2,))
@trace(symbolic=True, capture_as_const=True)
def fun1(a, b):
return (a + b) * 2
@trace(symbolic=True, capture_as_const=True)
def fun2(a):
return a * 2 - 1
model = io.BytesIO()
fun1(x, y)
fun2(x)
fun1.dump(
model,
arg_names=["net1_i0", "net1_i1"],
output_names=["net1_o0"],
optimize_for_inference=False,
)
model.seek(0)
net1 = Net.load(model)
model.seek(0)
fun2.dump(
model,
arg_names=["net2_i0"],
output_names=["net2_o0"],
optimize_for_inference=False,
)
model.seek(0)
net2 = Net.load(model)
net1.add_output(*net2.output_vars)
var = net1.var_filter.name("net1_i0").as_unique()
repl_var = net2.var_filter.name("net2_o0").as_unique()
net1.replace_vars({var: repl_var})
assert "net1_i0" not in [var.name for var in net1.all_vars]
assert "net2_i0" in [var.name for var in net1.all_vars]
model.seek(0)
net1.dump(model, keep_var_name=2, optimize_for_inference=False)
model.seek(0)
net = Net.load(model)
assert "net1_i0" not in [var.name for var in net.all_vars]
assert "net2_i0" in [var.name for var in net.all_vars]
def test_modify_params():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
param_const = graph.params_filter.as_unique()
param_const.set_value(3)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [12, 18])
def test_make_const():
a = Tensor([1, 2])
b = Tensor([3, 4])
    @trace(symbolic=True, capture_as_const=True)  # megengine.jit.tracing.trace
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
    gm = ad.GradManager()  # megengine.autodiff.GradManager
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import re
import subprocess
import sys
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit, tensor
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
    except Exception:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
    except Exception:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=False):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
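# (Added note) Shape bookkeeping for the 1x28x28 MNIST input: conv0 (k=5)
# gives 20x24x24, pool0 -> 20x12x12, conv1 (k=5) -> 20x8x8, pool1 -> 20x4x4,
# which is why fc0 consumes 20 * 4 * 4 = 320 features.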
def train(data, label, net, opt):
pred = net(data)
loss = F.cross_entropy_with_softmax(pred, label)
opt.backward(loss)
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
data = tensor(dtype=np.float32)
label = tensor(dtype=np.int32)
data.set_value(checkpoint["data"])
label.set_value(checkpoint["label"])
opt.zero_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.save(checkpoint, model_path)
def run_test(model_path, use_jit, use_symbolic):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
data = tensor(dtype=np.float32)
    label = tensor(dtype=np.int32)  # megengine.tensor
|
from datetime import date
from typing import List
from sqlmodel import select
from config.config_utils import get_managed_teams_config
from src.api.fixtures_client import FixturesClient
from src.db.db_manager import NotifierDBManager
from src.db.notif_sql_models import Fixture as DBFixture
from src.db.notif_sql_models import League as DBLeague
from src.db.notif_sql_models import Team as DBTeam
from src.entities import Championship, Team
from src.utils.fixtures_utils import convert_fixture_response_to_db
NOTIFIER_DB_MANAGER = NotifierDBManager()
def insert_league(fixture_league: Championship) -> DBLeague:
league_statement = select(DBLeague).where(DBLeague.id == fixture_league.league_id)
retrieved_league = NOTIFIER_DB_MANAGER.select_records(league_statement)
if not len(retrieved_league):
league = DBLeague(
id=fixture_league.league_id,
name=fixture_league.name,
logo=fixture_league.logo,
country=fixture_league.country,
)
NOTIFIER_DB_MANAGER.insert_record(league)
retrieved_league = NOTIFIER_DB_MANAGER.select_records(league_statement)
return retrieved_league
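# (Added note) insert_league() is idempotent: it inserts only when the league
# id is absent and then re-selects, so callers always receive the stored row;
# insert_team() below follows the same select-insert-select pattern.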
def insert_team(fixture_team: Team) -> DBTeam:
    team_statement = select(DBTeam)  # sqlmodel.select
|
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine.core.tensor import dtype
from megengine.quantization import min_max_fakequant_qconfig
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
"""
Calculate testing scales based on ``min_max_fakequant_qconfig``
"""
inp_scale = np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
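# (Added note) weight_scale maps the recorded extrema onto the symmetric qint8
# range [-127, 127] (max/254*2 == max/127), while act_scale uses the full
# 255-step span (max/255*2 == max/127.5).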
def quant(x, scale):
inp_dtype = dtype.qint8(scale)
return x.astype(inp_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
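# Quick numeric check of fake_quant(), added for illustration: with scale=0.5,
# round(0.26/0.5)=1 -> 0.5, and -100/0.5=-200 clips to -128 -> -64.0.
def _check_fake_quant():
    out = fake_quant(mge.tensor([0.26, -100.0]), 0.5)
    np.testing.assert_allclose(out.numpy(), [0.5, -64.0])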
def init_qat_net(net):
if net.with_weight:
net.weight_observer.min_val.set_value(min_val[0])
net.weight_observer.max_val.set_value(max_val[0])
if net.with_act:
net.act_observer.min_val.set_value(min_val[1])
net.act_observer.max_val.set_value(max_val[1])
def test_quant_stub():
normal_net = Float.QuantStub()
normal_net.eval()
qat_from_float = QAT.QuantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.QuantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.QuantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_dequant_stub():
normal_net = Float.DequantStub()
normal_net.eval()
qat_from_float = QAT.DequantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
qat_net = QAT.DequantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.DequantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x = fake_quant(x, inp_scale)
x.q_dict["scale"] = inp_scale
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = normal_net(x)
qat = qat_net(x)
q = q_net(quant(x, inp_scale)).numpy()
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("kind", ["COS", "RELU", "ADD", "MUL", "FUSE_ADD_RELU"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
    disable_observer(qat_from_float)  # megengine.quantization.quantize.disable_observer
|
import os, sys
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr
import megengine as mge
from megengine import functional as F
import pdb
def _compute_center(boxes):
ptrx = 0.5 * (boxes[:, 0] + boxes[:, 2])
ptry = 0.5 * (boxes[:, 1] + boxes[:, 3])
centre = F.stack([ptrx, ptry], axis=1)
return centre
def _compute_pos_area(gtboxes, ratio = 0.3):
H, W = gtboxes[:, 3] - gtboxes[:, 1], gtboxes[:, 2] - gtboxes[:, 0]
centres = _compute_center(gtboxes)
l = centres[:, 0] - ratio * W
r = centres[:, 0] + ratio * W
t = centres[:, 1] - ratio * H
b = centres[:, 1] + ratio * H
boundary = F.stack([l, t, r, b], axis = 1)
return boundary
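# (Added note) _compute_pos_area() shrinks each gt box to a centered region of
# size (2*ratio*W) x (2*ratio*H); anchors must land inside it to qualify as
# positives in the matching below.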
def _anchor_double_target(gt_boxes, im_info, all_anchors):
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
all_anchors = all_anchors.detach()
gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
dummy = -F.ones([1, gt_boxes.shape[1]]).to(gt_boxes.device)
gt_boxes = F.concat([gt_boxes, dummy], axis=0)
valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
anchor_centers = _compute_center(all_anchors)
gtboxes_centers = _compute_center(gt_boxes)
# gtboxes_centers = gtboxes_centers * valid_mask.unsqueeze(1)
gtboxes_centers = gtboxes_centers * F.expand_dims(valid_mask, axis=1)
N, K = all_anchors.shape[0], gt_boxes.shape[0]
an_centers = F.expand_dims(anchor_centers, axis=1)
gt_centers = F.expand_dims(gtboxes_centers, axis=0)
# an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
# gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
distance = F.abs(an_centers - gt_centers)
distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
start = 0
end = 5
overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
overlaps *= F.expand_dims(valid_mask, axis=0)
default_num = 16
ious_list = []
for l in range(start, end):
_, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
level_dist = distance[index, :].transpose(1, 0)
ious = overlaps[index, :].transpose(1, 0)
sorted_index = F.argsort(level_dist, descending=False)
n = min(sorted_index.shape[1], default_num)
ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
ious_list.append(ious)
ious = F.concat(ious_list, axis=0)
mean_var = F.mean(ious, axis = 0)
std_var = F.std(ious, 0)
iou_thresh_per_gt = mean_var + std_var
iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.2)
# limits the anchor centers in the gtboxes
N, K = all_anchors.shape[0], gt_boxes.shape[0]
anchor_points = an_centers
pos_area = _compute_pos_area(gt_boxes, 0.3)
# pos_area = pos_area.unsqueeze(0).repeat(N, 1, 1)
pos_area = F.broadcast_to(F.expand_dims(pos_area, axis=0), (N, K, pos_area.shape[-1]))
l = anchor_points[:, :, 0] - pos_area[:, :, 0]
r = pos_area[:, :, 2] - anchor_points[:, :, 0]
t = anchor_points[:, :, 1] - pos_area[:, :, 1]
b = pos_area[:, :, 3] - anchor_points[:, :, 1]
is_in_gt = F.stack([l, r, t, b], axis=2)
is_in_gt = is_in_gt.min(axis = 2) > 0.1
valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
ious = overlaps * valid_mask
sorted_index = F.argsort(ious, 1)
sorted_overlaps = F.gather(ious, 1, sorted_index)
max_overlaps = sorted_overlaps[:, :2].flatten()
argmax_overlaps = sorted_index[:, :2].flatten()
n, c = all_anchors.shape
device = all_anchors.device
labels = -F.ones(2 * n).to(device)
positive_mask = (max_overlaps >= 0.2).to(device).astype(np.float32)
negative_mask = (max_overlaps < 0.2).to(device).astype(np.float32)
labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
bbox_targets = gt_boxes[argmax_overlaps, :4]
all_anchors = F.broadcast_to(F.expand_dims(all_anchors, axis=1), (n,2, c)).reshape(-1, c)
bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
labels_cat = gt_boxes[argmax_overlaps, 4]
labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(labels, -1).astype(np.float32)
return labels, bbox_targets, labels_cat
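# (Added note) The matching above follows an ATSS-style rule: per FPN level
# the distance-nearest anchors are taken as candidates, and each gt gets an
# IoU threshold of mean + std of those candidates' IoUs, floored at 0.2.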
def _anchor_target(gt_boxes, im_info, all_anchors):
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
all_anchors = all_anchors.detach()
    gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
    valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
    anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes) * F.expand_dims(valid_mask, axis=1)
N, K = all_anchors.shape[0], gt_boxes.shape[0]
# an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
an_centers = F.expand_dims(anchor_centers, axis=1)
gt_centers = F.expand_dims(gtboxes_centers, axis=0)
# gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
distance = F.abs(an_centers - gt_centers)
distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
start = 0
end = 5
overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps = overlaps * F.expand_dims(valid_mask, axis=0)
default_num = 9
ious_list = []
for l in range(start, end):
        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(default_num, sorted_index.shape[1])
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
ious_list.append(ious)
ious = F.concat(ious_list, axis=0)
    mean_var = F.mean(ious, axis=0)
    std_var = F.std(ious, 0)
iou_thresh_per_gt = mean_var + std_var
    iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.35)
n = iou_thresh_per_gt.shape[0]
# limits the anchor centers in the gtboxes
N, K = all_anchors.shape[0], gt_boxes.shape[0]
anchor_points = an_centers
    proxies = F.broadcast_to(F.expand_dims(gt_boxes, axis=0), (N, K, gt_boxes.shape[-1]))
l = anchor_points[:, :, 0] - proxies[:, :, 0]
r = proxies[:, :, 2] - anchor_points[:, :, 0]
t = anchor_points[:, :, 1] - proxies[:, :, 1]
b = proxies[:, :, 3] - anchor_points[:, :, 1]
is_in_gt = F.stack([l, r, t, b], axis=2)
is_in_gt = is_in_gt.min(axis = 2) > 0.1
    valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
ious = overlaps * valid_mask
    argmax_overlaps = F.argmax(ious, axis=1)
    max_overlaps = F.gather(ious, 1, F.expand_dims(argmax_overlaps, axis=1)).flatten()
n = all_anchors.shape[0]
labels = -F.ones(n)
    positive_mask = (max_overlaps > 0).astype(np.float32)
    negative_mask = (max_overlaps < config.rpn_negative_overlap).astype(np.float32)
labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
bbox_targets = gt_boxes[argmax_overlaps, :4]
bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, 0).astype(np.float32))
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(labels, -1).astype(np.float32)
return labels, bbox_targets, labels_cat
def rpn_anchor_target_opr(gt_boxes, im_info, anchors):
rpn_label_list, rpn_target_boxes_list, iou_thresh_list = [], [], []
for i in range(config.train_batch_per_gpu):
rpn_labels, rpn_target_boxes, _ = _anchor_double_target(gt_boxes[i], im_info[i], anchors)
rpn_labels = rpn_labels.reshape(-1, 2)
c = rpn_target_boxes.shape[1]
rpn_target_boxes = rpn_target_boxes.reshape(-1, 2, c)
# mask the anchors overlapping with ignore regions
ignore_label = mask_anchor_opr(gt_boxes[i], im_info[i], anchors, rpn_labels[:, 0])
rpn_labels = rpn_labels - F.equal(rpn_labels, 0).astype(np.float32) * F.expand_dims(ignore_label < 0, 1).astype(np.float32)
# rpn_labels = rpn_labels - rpn_labels.eq(0).astype(np.float32) * (ignore_label < 0).unsqueeze(1).astype(np.float32)
rpn_label_list.append(F.expand_dims(rpn_labels, 0))
rpn_target_boxes_list.append(F.expand_dims(rpn_target_boxes, 0))
rpn_labels = F.concat(rpn_label_list, axis = 0)
rpn_target_boxes = F.concat(rpn_target_boxes_list, axis = 0)
return rpn_labels, rpn_target_boxes
def mask_anchor_opr(gtboxes, im_info, anchors, labels):
eps = 1e-6
gtboxes = gtboxes[:im_info[5].astype(np.int32), :]
ignore_mask = (gtboxes[:, 4] < 0).astype(np.float32)
mask_flag = F.zeros(labels.shape[0])
N, K = anchors.shape[0], gtboxes.shape[0]
p_pred = F.broadcast_to(F.expand_dims(anchors, 1), (N, K, anchors.shape[1]))
p_gt = F.broadcast_to(F.expand_dims(gtboxes, 0), (N, K, gtboxes.shape[1]))
max_off = F.concat([F.maximum(p_pred[:,:, :2], p_gt[:,:,:2]),
F.minimum(p_pred[:, :, 2:4], p_gt[:, :, 2:4])],
axis = 2)
I = F.maximum(max_off[:, :, 2] - max_off[:, :, 0] + 1, 0) * F.maximum(
max_off[:, :, 3] - max_off[:, :, 1] + 1, 0)
A = F.maximum(p_pred[:, :, 2] - p_pred[:, :, 0] + 1, 0) * F.maximum(
p_pred[:, :, 3] - p_pred[:, :, 1] + 1, 0)
# I = F.maximum(I, 0)
# A = F.maximum(A, 0)
IoA = I / (A + eps)
IoA = IoA * F.expand_dims(ignore_mask, 0)
mask_flag = (IoA > 0.5).sum(axis=1) > 0
    labels = labels - F.equal(labels, 0)  # megengine.functional.equal
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.traced_module.test_tflite import _test_convert_result
from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .tm_utils import get_traced_module
max_error = 1e-4
tmp_file = "test_model"
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def test_qat_conv_qint8():
class QConvOpr(M.Module):
def __init__(self):
super().__init__()
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), padding=(3, 1), dilation=(2, 2),
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.normal_conv(x)
return x
net = QConvOpr()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
print(traced_module.flatten().graph)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convrelu():
net = ConvRelu2dOpr()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convbn():
net = ConvBn2dOpr()
net.eval()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convbnrelu():
net = ConvBnRelu2dOpr()
net.eval()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_deconv_qint8():
net = ConvOpr("tflite_transpose")
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 64, 64))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
print(traced_module.flatten().graph)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
        data1 = self.quant_stub1(mge.tensor(self.data1))  # megengine.tensor
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
        self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
        )
        self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
        )
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
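# Minimal usage sketch (added, not part of the original file): dump one graph
# per elemwise mode with dump_mge_model() defined above; file names are
# illustrative only.
def _dump_elemwise_models():
    for mode in ("add", "sub", "mul"):
        opr = ElemwiseOpr(mode)
        dump_mge_model(opr, opr.data, fpath="elemwise_" + mode)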
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
        x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")  # megengine.functional.vision.interpolate
|
from collections import OrderedDict
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Tensor
from megengine.core._imperative_rt.core2 import apply
from megengine.core.ops import builtin
from megengine.module import Module
from megengine.traced_module import TracedModule, enable_expr_checker, trace_module
from megengine.traced_module.expr import Apply, CallFunction, Constant
class MyModule1(M.Module):
def forward(self, x):
y = Tensor(x)
y += 1
x = x + 2
return x, y
class MyModule2(M.Module):
def forward(self, x):
y = Tensor([1, x, 1])
y += 1
x = x + 2
return x, y
class MyModule3(M.Module):
def __init__(self):
super().__init__()
self.modules = [
M.Elemwise("ADD"),
M.Elemwise("ADD"),
OrderedDict([("a", M.Elemwise("ADD")), ("b", M.Elemwise("ADD"))]),
M.Elemwise("RELU"),
M.Elemwise("RELU"),
]
def forward(self, a, b):
x = self.modules[0](a, b)
y = self.modules[1](a, b)
assert list(self.modules[2].keys()) == ["a", "b"]
for _, m in self.modules[2].items():
y = m(x, y)
for m in self.modules[3:]:
y = m(y)
return y
class MyModule4(M.Module):
def __init__(self):
super().__init__()
self.add = F.add
def forward(self, x, y):
return self.add(x, y)
def test_trace_module():
enable_expr_checker()
x = Tensor(1)
m1 = MyModule1()
tm1 = trace_module(m1, x)
m2 = MyModule2()
tm2 = trace_module(m2, x)
inp = Tensor(2)
gt = m1(inp)
output = tm1(inp)
for a, b in zip(output, gt):
np.testing.assert_equal(a.numpy(), b.numpy())
gt1 = m2(inp)
output1 = tm2(inp)
for a, b in zip(output1, gt1):
np.testing.assert_equal(a.numpy(), b.numpy())
a, b = Tensor(1), Tensor(2)
m3 = MyModule3()
gt = m3(a, b)
tm3 = trace_module(m3, a, b)
out = tm3(a, b)
np.testing.assert_equal(out.numpy(), gt.numpy())
assert isinstance(tm3.modules.__dict__["0"], M.Elemwise)
assert isinstance(tm3.modules.__dict__["2"], TracedModule)
assert isinstance(tm3.modules.__dict__["2"].a, M.Elemwise)
assert isinstance(tm3.modules.__dict__["3"], M.Elemwise)
m4 = MyModule4()
tm4 = trace_module(m4, a, b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, x=a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, a, b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
    tm5 = trace_module(tm4, a, y=b)  # megengine.traced_module.trace_module
|
|
# MegFlow is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2019-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#!/usr/bin/env python
# coding=utf-8
from math import log
from loguru import logger
import megengine as mge
import cv2
import megengine.functional as F
import numpy as np
from .model import Model
if __name__ == "__main__":
import sys
if len(sys.argv) < 5:
print("usage: python3 -m reid_alignedreid/demo reid.pkl positive1.png positive2.png negtive.jpg")
sys.exit(0)
model = Model()
    sd = mge.load(sys.argv[1])  # megengine.load
|
|
from datetime import datetime
from typing import TYPE_CHECKING, List, Optional
from uuid import UUID, uuid4
from pydantic import EmailStr, constr, validator
from sqlmodel import Column, Field, Relationship, SQLModel
from sqlmodel.sql.sqltypes import GUID
from ...utils.date import now_datetime
if TYPE_CHECKING:
from .order import Order
from .user import User
class BaseClient(SQLModel):
name: str = Field(description="Client name")
email: Optional[EmailStr] = Field(description="Client email", nullable=True)
phone: Optional[constr(regex=r"^\d{2}9\d{8}$")] = Field(description="Client cellphone", nullable=True) # noqa
zip_code: Optional[str] = Field(description="Postal code", nullable=True)
address: Optional[str] = Field(description="Address of Client", nullable=True)
@validator("name")
def validate_name(cls, value: str) -> str:
return value.title()
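    # (Added note) the validator normalizes capitalization, e.g.
    # CreateClient(name="ada lovelace").name == "Ada Lovelace".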
class CreateClient(BaseClient):
pass
class UpdateClient(BaseClient):
id: UUID = Field(description="Client ID")
class QueryClient(SQLModel):
    name: Optional[str] = Field(description="Name of client for query")  # sqlmodel.Field
|
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
out2 = F.vision.interpolate(inp, 4, mode="linear")
np.testing.assert_allclose(
out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
np.testing.assert_allclose(
out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
def many_batch_interpolate():
inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4])
out2 = F.vision.interpolate(inp, scale_factor=2.0)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def assign_corner_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4], align_corners=True)
out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def error_shape_linear_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
def inappropriate_scale_linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
linear_interpolate()
many_batch_interpolate()
assign_corner_interpolate()
error_shape_linear_interpolate()
inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
inp_feat = np.random.randn(2, 32, 256, 256)
rois = np.zeros((4, 5))
rois[:, 0] = [0, 0, 1, 1]
rois[:, 1:3] = np.random.rand(4, 2) * 100
rois[:, 3:] = np.random.rand(4, 2) * 100 + 150
inp_feat = tensor(inp_feat)
rois = tensor(rois)
return inp_feat, rois
def test_roi_align():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_align(
inp_feat,
rois,
output_shape=output_shape,
mode="average",
spatial_scale=1.0 / 4,
sample_points=2,
aligned=True,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
if random:
inp_feat1 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
inp_feat2 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
else:
inp_feat1 = np.ones(image_shape) * constant
inp_feat2 = np.ones(image_shape) * constant
return tensor(inp_feat1), tensor(inp_feat2)
def test_correlation():
##test case 0 check the grad shape
data1, data2 = _gen_correlation()
grad = Grad().wrt(data1, callback=_save_to(data1))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=5,
max_displacement=4,
stride1=2,
stride2=2,
pad_size=2,
is_multiply=True,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
##test case 1 from https://github.com/NVIDIA/flownet2-pytorch/issues/194
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=True,
)
assert abs(out_feat.sum() - 1) < 1e-9
##test case 2 check same image subduction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
##test case 3 check same image subduction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
##test case 4 check correlation
data1, _ = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=2.0
)
_, data2 = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=1.0
)
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=2,
stride1=1,
stride2=2,
pad_size=0,
is_multiply=False,
)
assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_pooling(
inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_avg_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
]
]
],
dtype=np.float32,
),
)
def test_adaptive_max_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_max_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
]
]
],
dtype=np.float32,
),
)
def test_one_hot():
def onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp, num_classes=4)
np.testing.assert_allclose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
dtype=np.int32,
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
onehot_low_dimension()
onehot_high_dimension()
def test_interpolate_fastpath():
# check shape
test_cases = [
[(1, 1, 10, 10), (5, 5)],
[(1, 3, 10, 10), (20, 20)],
[(10, 1, 10, 10), (1, 1)],
# [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
]
for inp_shape, target_shape in test_cases:
x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
out = F.vision.interpolate(x, target_shape, mode="bilinear")
assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
# check value
x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
out = F.vision.interpolate(x, (15, 5), mode="bilinear")
np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
np_x = np.arange(32)
x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
out = F.vision.interpolate(x, (1, 1), mode="bilinear")
np.testing.assert_equal(out.item(), np_x.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
outp = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
inp_shape = (2, 1, 4, 4)
x = tensor(np.arange(32, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
M = F.concat([M,] * 4, 0)
outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
np.testing.assert_equal(
outp.numpy(),
np.array(
[
[[[5, 6], [9, 10]]],
[[[21, 22], [25, 26]]],
[[[21, 22], [25, 26]]],
[[[5, 6], [9, 10]]],
],
dtype=dt,
),
)
def test_warp_affine():
inp_shape = (1, 3, 3, 3)
x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
res = np.array(
[
[
[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
[[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
]
],
dtype=np.float32,
)
if not is_cuda_available():
np.testing.assert_almost_equal(outp.numpy(), res, 5)
def test_remap():
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(
np.array(
[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
).reshape(map_xy_shape)
)
outp = F.vision.remap(inp, map_xy)
np.testing.assert_equal(
outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, atol=5e-4)
np.random.seed(123)
data1 = np.random.uniform(size=data1_shape).astype(np.float32)
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = np.random.uniform(size=data2_shape).astype(np.float32)
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)
cases = [
{"input": [sigmoid(data1), label1], "output": expect1,},
{"input": [sigmoid(data2), label2], "output": expect2,},
]
opr_test(
cases,
partial(F.nn.binary_cross_entropy, with_logits=False),
compare_fn=compare_fn,
)
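# Hedged NumPy reference (a sketch of the quantity the `expect` arrays above
# encode, not MegEngine internals): binary cross entropy with logits is the
# mean of -(t * log(sigmoid(p)) + (1 - t) * log(1 - sigmoid(p))).
def _np_bce_with_logits(pred, label):
    prob = 1.0 / (1.0 + np.exp(-pred))
    return -(label * np.log(prob) + (1 - label) * np.log(1 - prob)).mean()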
def test_hinge_loss():
np.random.seed(123)
# case with L1 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
cases.append({"input": [data, label], "output": expect})
opr_test(cases, F.nn.hinge_loss)
# cases with L2 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = ((np.clip(1 - data * label, 0, np.inf) ** 2).sum(axis=1)).mean()
cases.append({"input": [data, label], "output": expect})
def hinge_loss_with_l2_norm(pred, label):
return F.nn.hinge_loss(pred, label, "L2")
opr_test(cases, hinge_loss_with_l2_norm)
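# Note: per sample the hinge loss is sum_j max(0, 1 - pred_j * label_j) for
# the default "L1" norm and the sum of its squares for "L2", averaged over
# the batch -- exactly what the `expect` expressions above compute.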
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_nms(is_symbolic):
def fn(inp, scores):
return F.vision.nms(
inp,
scores=scores,
iou_thresh=0.5,
max_output=None if is_symbolic is None else 4,
)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
x = np.array(
[
[0, 0, 100, 100],
[10, 10, 100, 100],
[50, 50, 100, 100],
[100, 100, 150, 150],
],
dtype=np.float32,
)
inp = tensor(x)
scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
x = np.array([], dtype=np.float32,).reshape(0, 4)
inp = tensor(x)
scores = tensor([], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([], dtype=np.int32))
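# Note on the expectation above: boxes are visited in descending score order
# (indices 2, 1, 3, 0); box 0 is suppressed because its IoU with the already
# kept box 1 (~0.81) exceeds the 0.5 threshold, so [2, 1, 3] survive.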
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype =
|
dtype.qint32(inp_scale * w_scale)
|
megengine.core.tensor.dtype.qint32
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=
|
RandomSampler(dataset, batch_size=6, drop_last=False)
|
megengine.data.sampler.RandomSampler
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
poo5, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
poo5 = F.flatten(poo5, start_axis=1)
fc1 = F.relu(self.fc1(poo5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
        if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
c = a.shape[1]
prob = F.stack([a, b], axis = 1).reshape(-1, c)
pred_bbox, cls_scores = prob[:,:-self.n], prob[:,-self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = F.stack([loss0, loss1], axis=1)
vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
return emd_loss
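# Note: compute_gemini_loss above evaluates the set-based EMD loss for both
# assignments of the paired predictions (a, b) to the two targets and keeps
# the cheaper permutation per RoI, normalised by the number of valid RoIs.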
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [256, 512, 1024, 2048]
fpn_dim = 256
        use_bias = True
lateral_convs, output_convs = [], []
        for idx, in_ch in enumerate(in_channels):
            lateral_conv = M.Conv2d(
                in_ch, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
|
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
|
megengine.module.init.msra_normal_
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
        net.eval()
        mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
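# Hedged usage sketch (module and path below are illustrative, not part of
# this file's test flow):
#   net = ConvOpr("normal")
#   result = dump_mge_model(net, net.data, fpath="/tmp/conv_test")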
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return
|
F.transpose(x, self.perm)
|
megengine.functional.transpose
|
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.domain import Domain
from joj.horse.schemas.domain_invitation import DomainInvitationDetail
class DomainInvitation(DomainURLORMModel, DomainInvitationDetail, table=True): # type: ignore[call-arg]
__tablename__ = "domain_invitations"
__table_args__ = (
UniqueConstraint("domain_id", "url"),
UniqueConstraint("domain_id", "code"),
)
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" =
|
Relationship(back_populates="invitations")
|
sqlmodel.Relationship
|
import os, sys
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr
import megengine as mge
from megengine import functional as F
import pdb
def _compute_center(boxes):
ptrx = 0.5 * (boxes[:, 0] + boxes[:, 2])
ptry = 0.5 * (boxes[:, 1] + boxes[:, 3])
centre = F.stack([ptrx, ptry], axis=1)
return centre
def _compute_pos_area(gtboxes, ratio = 0.3):
H, W = gtboxes[:, 3] - gtboxes[:, 1], gtboxes[:, 2] - gtboxes[:, 0]
centres = _compute_center(gtboxes)
l = centres[:, 0] - ratio * W
r = centres[:, 0] + ratio * W
t = centres[:, 1] - ratio * H
b = centres[:, 1] + ratio * H
boundary = F.stack([l, t, r, b], axis = 1)
return boundary
def _anchor_double_target(gt_boxes, im_info, all_anchors):
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
all_anchors = all_anchors.detach()
gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
dummy = -F.ones([1, gt_boxes.shape[1]]).to(gt_boxes.device)
gt_boxes = F.concat([gt_boxes, dummy], axis=0)
valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
anchor_centers = _compute_center(all_anchors)
gtboxes_centers = _compute_center(gt_boxes)
# gtboxes_centers = gtboxes_centers * valid_mask.unsqueeze(1)
gtboxes_centers = gtboxes_centers * F.expand_dims(valid_mask, axis=1)
N, K = all_anchors.shape[0], gt_boxes.shape[0]
an_centers = F.expand_dims(anchor_centers, axis=1)
gt_centers = F.expand_dims(gtboxes_centers, axis=0)
# an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
# gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
distance = F.abs(an_centers - gt_centers)
distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
start = 0
end = 5
overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
overlaps *= F.expand_dims(valid_mask, axis=0)
default_num = 16
ious_list = []
for l in range(start, end):
_, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
level_dist = distance[index, :].transpose(1, 0)
ious = overlaps[index, :].transpose(1, 0)
sorted_index = F.argsort(level_dist, descending=False)
n = min(sorted_index.shape[1], default_num)
ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
ious_list.append(ious)
ious = F.concat(ious_list, axis=0)
mean_var = F.mean(ious, axis = 0)
std_var = F.std(ious, 0)
iou_thresh_per_gt = mean_var + std_var
iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.2)
# limits the anchor centers in the gtboxes
N, K = all_anchors.shape[0], gt_boxes.shape[0]
anchor_points = an_centers
pos_area = _compute_pos_area(gt_boxes, 0.3)
# pos_area = pos_area.unsqueeze(0).repeat(N, 1, 1)
pos_area = F.broadcast_to(F.expand_dims(pos_area, axis=0), (N, K, pos_area.shape[-1]))
l = anchor_points[:, :, 0] - pos_area[:, :, 0]
r = pos_area[:, :, 2] - anchor_points[:, :, 0]
t = anchor_points[:, :, 1] - pos_area[:, :, 1]
b = pos_area[:, :, 3] - anchor_points[:, :, 1]
is_in_gt = F.stack([l, r, t, b], axis=2)
is_in_gt = is_in_gt.min(axis = 2) > 0.1
valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
ious = overlaps * valid_mask
sorted_index = F.argsort(ious, 1)
sorted_overlaps = F.gather(ious, 1, sorted_index)
max_overlaps = sorted_overlaps[:, :2].flatten()
argmax_overlaps = sorted_index[:, :2].flatten()
n, c = all_anchors.shape
device = all_anchors.device
labels = -
|
F.ones(2 * n)
|
megengine.functional.ones
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
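# e.g. for shape (8, 10) on two ranks: both inputs are summed and rank 0
# receives z[:4] while rank 1 receives z[4:], matching `expect` above.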
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp =
|
tensor(data[rank])
|
megengine.tensor
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
helps = {
'show' : 'show the results figure',
}
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
region_3 = domain.create_region(name='Outlet', select='vertices in (x > 0.499)', kind='facet')
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})
viscosity = Material(name='viscosity', value=1.25e-3)
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')
integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
integral=integral_1, region=omega, u=variable_1, q=variable_4)
eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])
ls =
|
ScipyDirect({})
|
sfepy.solvers.ls.ScipyDirect
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
|
Expr._set_next_id(1024)
|
megengine.traced_module.expr.Expr._set_next_id
|
import os
from sqlmodel import create_engine, Session, select, update
from functools import lru_cache
from typing import Union
from sqlalchemy.exc import NoResultFound
engine = create_engine(os.environ.get('DB_CONN'))
# Grim hack to get the imports working with crawler and main.
# TODO: Split poke models and other common functions out into a separate package the api+crawler can share.
# TODO: After split crawler code out into a separate part of the repo and create an individual Docker image for it.
try:
from poke.poke_model import pokemon as pokemon_model
except ImportError:
from poke_model import pokemon as pokemon_model
@lru_cache(maxsize=16)
def get_pokemon(poke_id: int) -> pokemon_model:
""" Get a pokemon's data from the database from its ID.
Args:
poke_id: ID of the pokemon you want the data for.
Returns:
pokemon_model object containing the data for the pokemon found in the DB.
Raises:
NoResultFound: If there isn't a pokemon in the DB with the passed in ID.
"""
with
|
Session(engine)
|
sqlmodel.Session
|
import megengine as mge
import megengine.functional as F
import numpy as np
def bilinear_sampler(img, coords, mode="bilinear", mask=False):
"""Wrapper for grid_sample, uses pixel coordinates"""
H, W = img.shape[-2:]
img = F.remap(img, coords, border_mode="constant")
if mask:
mask = (
(coords[:, :, :, 0:1] < 0)
| (coords[:, :, :, 0:1] > W - 1)
| (coords[:, :, :, 1:2] < 0)
| (coords[:, :, :, 1:2] > H - 1)
)
mask = F.logical_not(mask)
return img, mask.astype("float32")
return img
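# Hedged note: `coords` holds pixel coordinates with layout (N, H, W, 2)
# ordered as (x, y); samples falling outside the image are zero-filled by
# the "constant" border mode, and the optional mask flags in-range points.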
def coords_grid(batch, ht, wd):
x_grid, y_grid = np.meshgrid(np.arange(wd), np.arange(ht))
y_grid, x_grid = mge.tensor(y_grid, dtype="float32"), mge.tensor(
x_grid, dtype="float32"
)
coords = F.stack([x_grid, y_grid], axis=0)
coords = F.repeat(F.expand_dims(coords, axis=0), batch, axis=0)
return coords
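# e.g. coords_grid(2, 4, 5) yields a (2, 2, 4, 5) tensor whose channel 0
# holds the x indices 0..4 and channel 1 the y indices 0..3 for each batch.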
def manual_pad(x, pady, padx):
if pady > 0:
u = F.repeat(x[:, :, 0:1, :], pady, axis=2)
d = F.repeat(x[:, :, -1:, :], pady, axis=2)
x = F.concat([u, x, d], axis=2)
if padx > 0:
l = F.repeat(x[:, :, :, 0:1], padx, axis=3)
r =
|
F.repeat(x[:, :, :, -1:], padx, axis=3)
|
megengine.functional.repeat
|
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run simulation:
#
# ./simple.py example_poropiezo-1/poropiezo_macro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.micmac import get_homog_coefs_linear
from sfepy.homogenization.recovery import recover_micro_hook_eps
data_dir = 'example_poropiezo-1'
def set_grad(ts, coors, mode=None, problem=None, **kwargs):
if mode == 'qp':
out = problem.data.reshape((coors.shape[0], 1, 1))
return {'cs': out}
# projection of values from integration points into mesh vertices
def linear_projection(pb, cval):
from sfepy.discrete import (FieldVariable, Material, Integral,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.base.base import IndexedStruct
mesh = Mesh.from_file(pb.conf.filename_mesh)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('scf', nm.float64, 'scalar', omega,
approx_order=1)
g = FieldVariable('g', 'unknown', field)
f = FieldVariable('f', 'test', field, primary_var_name='g')
integral = Integral('i', order=2)
m = Material('m', function=set_grad)
t1 = Term.new('dw_volume_dot(f, g)', integral, omega, f=f, g=g)
t2 = Term.new('dw_volume_lvf(m.cs, f)',
integral, omega, m=m, f=f)
eq = Equation('balance', t1 - t2)
eqs =
|
Equations([eq])
|
sfepy.discrete.Equations
|
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import validator
from datetime import datetime
import numpy as np
class Forecast(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int =
|
Field(foreign_key="app_db.appuser.id")
|
sqlmodel.Field
|
from typing import Optional, Dict, List, Any, Union
import datetime as dt
from sqlmodel import Field, Session, SQLModel, create_engine, select
import threading as th
import queue
# ~~~ Database ~~~~~~~~~~~~~~~
class Database:
def __init__(self, uri: str):
self.engine = create_engine(uri)
SQLModel.metadata.create_all(self.engine)
def create_all(self, items: List[SQLModel]):
with Session(self.engine) as session:
for item in items:
session.add(item)
session.commit()
def get_by_id(self, id: Union[str, int], model: SQLModel):
with Session(self.engine) as session:
stmt = select(model).where(model.id == id)
return session.exec(stmt).first()
    def get_by_field(self, key: str, value: Any, model: SQLModel):
        stmt = select(model).where(getattr(model, key) == value)
        return self.exec(stmt)
def exec(self, stmt: str, params = {}):
with Session(self.engine) as session:
return session.exec(stmt, params=params).all()
class DatabaseWorker(th.Thread):
def __init__(self,
uri: str,
queue: queue.Queue,
        batch: Optional[int] = None,
timeout: int = 10
):
super().__init__()
self.q = queue
self.db = None
self.uri = uri
self.timeout = timeout
self.batch = batch
    def run(self):
        self.db = Database(self.uri)
        cache = []
        while True:
            try:
                cache.append(self.q.get(timeout=self.timeout))
                if self.batch:
                    # flush only once a full batch has accumulated
                    if len(cache) >= self.batch:
                        self.db.create_all(cache)
                        cache = []
                else:
                    # no batching: write each item as it arrives
                    self.db.create_all(cache)
                    cache = []
            except queue.Empty:
                # flush any remainder before shutting down
                if cache:
                    self.db.create_all(cache)
                break
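# Hedged usage sketch (URI and payload are illustrative):
#   q = queue.Queue()
#   worker = DatabaseWorker("sqlite:///example.db", q, batch=32)
#   worker.start()
#   q.put(Document(id="d1", name="doc", href="http://example.com",
#                  date=dt.datetime.now(), date_collected=dt.datetime.now(),
#                  collected_by="crawler"))
#   worker.join()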
# ~~~ Models ~~~~~~~~~~~~~~~~~
class Document(SQLModel, table=True):
id: str = Field(primary_key=True)
name: str
href: str
date: dt.datetime
text: Optional[str] = None
date_collected: dt.datetime
collected_by: str
class Paragraph(SQLModel, table=True):
id: str = Field(primary_key=True)
text: str
document_id: str = Field(foreign_key="document.id")
sentiment: str
sent_score: float
class Entity(SQLModel, table=True):
id: str = Field(primary_key=True)
name: str
description: Optional[str]
class EntityMention(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
text: str
score: Optional[float]
label: str
start: int
end: int
paragraph_id: str =
|
Field(foreign_key="paragraph.id")
|
sqlmodel.Field
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
|
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
|
megengine.module.Conv2d
|
"""
Finite element reference mappings.
"""
import numpy as nm
from sfepy import Config
from sfepy.base.base import get_default, output
from sfepy.base.mem_usage import raise_if_too_large
from sfepy.discrete.common.mappings import Mapping
from sfepy.discrete.common.extmods.mappings import CMapping
from sfepy.discrete import PolySpace
class FEMapping(Mapping):
"""
Base class for finite element mappings.
"""
def __init__(self, coors, conn, poly_space=None, gel=None, order=1):
self.coors = coors
self.conn = conn
try:
nm.take(self.coors, self.conn)
except IndexError:
output('coordinates shape: %s' % list(coors.shape))
output('connectivity: min: %d, max: %d' % (conn.min(), conn.max()))
msg = 'incompatible connectivity and coordinates (see above)'
raise IndexError(msg)
self.n_el, self.n_ep = conn.shape
self.dim = self.coors.shape[1]
if poly_space is None:
poly_space = PolySpace.any_from_args(None, gel, order,
base='lagrange',
force_bubble=False)
self.poly_space = poly_space
self.indices = slice(None)
def get_geometry(self):
"""
Return reference element geometry as a GeometryElement instance.
"""
return self.poly_space.geometry
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
return bf
def get_physical_qps(self, qp_coors):
"""
Get physical quadrature points corresponding to given reference
element quadrature points.
Returns
-------
qps : array
The physical quadrature points ordered element by element,
i.e. with shape (n_el, n_qp, dim).
"""
bf = self.get_base(qp_coors)
qps = nm.dot(nm.atleast_2d(bf.squeeze()), self.coors[self.conn])
# Reorder so that qps are really element by element.
qps = nm.ascontiguousarray(nm.swapaxes(qps, 0, 1))
return qps
class VolumeMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the same space
dimension.
"""
def get_mapping(self, qp_coors, weights, poly_space=None, ori=None,
transform=None):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The volume mapping.
"""
poly_space = get_default(poly_space, self.poly_space)
bf_g = self.get_base(qp_coors, diff=True)
ebf_g = poly_space.eval_base(qp_coors, diff=True, ori=ori,
force_axis=True, transform=transform)
size = ebf_g.nbytes * self.n_el
site_config = Config()
raise_if_too_large(size, site_config.refmap_memory_factor())
flag = (ori is not None) or (ebf_g.shape[0] > 1)
cmap = CMapping(self.n_el, qp_coors.shape[0], self.dim,
poly_space.n_nod, mode='volume', flag=flag)
cmap.describe(self.coors, self.conn, bf_g, ebf_g, weights)
return cmap
class SurfaceMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the space
dimension higher by one.
"""
def set_basis_indices(self, indices):
"""
Set indices to cell-based basis that give the facet-based basis.
"""
self.indices = indices
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
ii = max(self.dim - 1, 1)
return nm.ascontiguousarray(bf[..., :ii:, self.indices])
def get_mapping(self, qp_coors, weights, poly_space=None, mode='surface'):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The surface mapping.
"""
poly_space =
|
get_default(poly_space, self.poly_space)
|
sfepy.base.base.get_default
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
import torch
import torch.nn as nn
from basecls.configs import BaseConfig
from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss
@pytest.mark.parametrize("name", [CrossEntropy, "BinaryCrossEntropy", "CrossEntropy"])
def test_build_loss(name):
cfg = BaseConfig(loss=dict(name=name))
m = build_loss(cfg)
assert isinstance(m, M.Module)
def test_bce():
x = np.random.rand(2, 8, 4).astype("float32")
y = np.random.rand(2, 8, 4).astype("float32")
ml = BinaryCrossEntropy()(mge.Tensor(x), mge.Tensor(y)).numpy()
tl = nn.BCEWithLogitsLoss()(torch.tensor(x), torch.tensor(y)).numpy()
np.testing.assert_allclose(ml, tl, rtol=1e-4, atol=1e-6)
def test_ce():
K = 4
x = np.random.rand(2, 8, K).astype("float32")
y = np.random.randint(K, size=(2, 8)).astype("int32")
oy = np.eye(K, dtype="int32")[y]
ml = CrossEntropy(axis=2)(
|
mge.Tensor(x)
|
megengine.Tensor
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from sqlmodel import Session, select
from sqlalchemy.exc import NoResultFound
from ..models.client import Client
from ..models.epic import Epic
from datetime import datetime
router = APIRouter(prefix="/api/clients", tags=["client"])
@router.post("/")
async def post_client(*, client: Client, session: Session = Depends(get_session)):
"""
Post a new client.
Parameters
----------
client : Client
Client that is to be added to the database.
session : Session
SQL session that is to be used to add the client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.name == client.name)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(client)
session.commit()
session.refresh(client)
return client
@router.get("/")
async def read_clients(session: Session = Depends(get_session)):
"""
Get a list of all clients.
Parameters
----------
session : Session
SQL session that is to be used to get a list of the clients.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client)
results = session.exec(statement).all()
return results
@router.get("/active")
async def read_active_clients(session: Session = Depends(get_session)):
"""
Get a list of all active clients.
Parameters
----------
session : Session
SQL session that is to be used to get a list of all of the active clients.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.is_active == True).order_by(Client.id.asc())
results = session.exec(statement).all()
return results
@router.get("/{client_id}")
async def read_client(
*, client_id: int = None, session: Session = Depends(get_session)
):
"""
Get a client by client_id.
Parameters
----------
client_id : int
ID of client that is to be read.
session : Session
SQL session that is to be used to read a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
try:
result = session.exec(statement).one()
return result
except NoResultFound:
msg = f"""There is no client with id = {client_id}"""
return msg
@router.get("/names/{name}")
async def read_clients_by_name(
*, name: str = None, session: Session = Depends(get_session)
):
"""
Get a client by client_name.
Parameters
----------
name : str
Name of client to be read.
session : Session
SQL session that is to be used to read a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.name == name)
result = session.exec(statement).one()
return result
@router.get("/{client_id}/epics/")
async def read_clients_epics(
client_id: int = None, session: Session = Depends(get_session)
):
"""
Get epics from a client_id.
Parameters
----------
client_id : int
ID of client that is to be used to pull epics from.
session : Session
SQL session that is to be used to pull the epics.
Defaults to creating a dependency on the running SQL model session.
"""
statement = (
select(Client.id, Client.name, Epic.name)
.select_from(Client)
.join(Epic)
.where(Client.id == client_id)
)
results = session.exec(statement).all()
return results
@router.put("/{client_id}/deactivate-client")
async def update_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""Deactivate a client"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
    client_to_update.is_active = False
    client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/activate")
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
SQL session that is to be used to activate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = True
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/deactivate")
async def deactivate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Deactivate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be deactivated.
session : Session
SQL session that is to be used to deactivate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = False
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/deactivate-epics")
async def update_clients_and_epics(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""Deactivate a client and its epics"""
"""
Deactivate a client and its epics using the client's ID as a key.
Parameters
----------
client_id : int
ID of the client to deactivate.
session : Session
SQL session that is to be used to deactivate the client and its respective epics.
Defaults to creating a dependency on the running SQL model session.
"""
statement1 = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement1).one()
client_to_update.is_active = False
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
statement2 =
|
select(Epic)
|
sqlmodel.select
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
    except Exception:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
    except Exception:
pass
return cpu_info
def get_xpu_name():
if
|
mge.is_cuda_available()
|
megengine.is_cuda_available
|
import typing as t
if t.TYPE_CHECKING:
from .other import DB_AccessToken, DB_APIKey
from .discussions import DB_Discussion
from datetime import datetime
from sqlmodel import SQLModel, Field, Relationship, Column, JSON
from ..extensions.tags import DB_Tag, DB_TagUser
class DB_User(SQLModel, table=True):
__tablename__ = 'users'
id: t.Optional[int] = Field(default=None, primary_key=True)
"""The ID of the user. This is handled by the database."""
username: str = Field(max_length=100, sa_column_kwargs={"unique": True})
"""The user's username."""
email: str = Field(max_length=150, sa_column_kwargs={"unique": True})
"""The user's E-mail address."""
is_email_confirmed: bool = Field(default=False)
"""Whether or not the user confirmed their E-mail address."""
password: str = Field(max_length=100)
"""The user's password (<PASSWORD>)."""
avatar_url: t.Optional[str] = Field(max_length=100)
"""The file name of user's avatar. Avatars are located in the `public/assets/avatars` directory of your forum root."""
    preferences: t.Dict[str, bool] = Field(sa_column=Column(JSON), default={
        "notify_discussionRenamed_alert": True,
        "notify_postLiked_alert": True,
        "notify_discussionLocked_alert": True,
        "notify_postMentioned_alert": True,
        "notify_postMentioned_email": False,
        "notify_userMentioned_alert": True,
        "notify_userMentioned_email": False,
        "notify_newPost_alert": True,
        "notify_newPost_email": True,
        "notify_userSuspended_alert": True,
        "notify_userUnsuspended_alert": True,
        "followAfterReply": True,
        "discloseOnline": True,
        "indexProfile": True,
        "locale": None,
    })
"""The user's preferences (e. g.: for notifications)."""
joined_at: t.Optional[datetime] = Field(default=None)
"""When did the user join the forum."""
last_seen_at: t.Optional[datetime] = Field(default=None)
"""When was the user last seen at."""
marked_all_as_read_at: t.Optional[datetime] = Field(default=None)
"""When did the user mark all discussions as read."""
read_notifications_at: t.Optional[datetime] = Field(default=None)
"""When did the user read their notifications."""
discussion_count: int = Field(default=0)
"""The user's discussion count."""
comment_count: int = Field(default=0)
"""The user's comment (post) count."""
access_tokens: t.List['DB_AccessToken'] = Relationship(back_populates='user')
"""List of access tokens belonging to this user."""
api_keys: t.List['DB_APIKey'] = Relationship(back_populates='user')
"""List of API keys that perform actions on behalf of this user."""
discussions: t.List['DB_Discussion'] = Relationship(back_populates='author')
"""List of discussions that this user made."""
tags: t.List['DB_Tag'] =
|
Relationship(back_populates='users', link_model=DB_TagUser)
|
sqlmodel.Relationship
|
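# A self-contained sketch (with illustrative names, not the source's) of the
# link_model pattern the completion above uses: a many-to-many relation where
# the link table carries both foreign keys and each side declares
# Relationship(..., link_model=...).
from typing import List, Optional
from sqlmodel import SQLModel, Field, Relationship

class TagUserLink(SQLModel, table=True):
    # Composite primary key made of the two foreign keys.
    tag_id: Optional[int] = Field(default=None, foreign_key="tag.id", primary_key=True)
    user_id: Optional[int] = Field(default=None, foreign_key="user.id", primary_key=True)

class Tag(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    users: List["User"] = Relationship(back_populates="tags", link_model=TagUserLink)

class User(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    tags: List[Tag] = Relationship(back_populates="users", link_model=TagUserLink)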
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core._imperative_rt.core2 import config_async_level, get_async_level
def test_basic():
config_async_level(2)
assert get_async_level() == 2
with pytest.raises(RuntimeError):
config_async_level(3)
def test_level1_infer_value():
config_async_level(1)
a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")
b = mge.tensor([1, 1], dtype="float32")
# make DepType::VALUE unknown
c = b * 2
with pytest.raises(RuntimeError):
d = F.reshape(a, c)
def test_level1_infer_shape_with_unknown():
config_async_level(2)
a = mge.tensor([[1, 2, 2, 3]], dtype="float32")
b = mge.tensor([1, 1])
c = b * 2
# make DepType::SHAPE unknown
d =
|
F.reshape(a, c)
|
megengine.functional.reshape
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt.core2 import (
_set_drop_flag,
_set_swap_flag,
get_option,
set_option,
)
from megengine.module import Linear, Module
from megengine.optimizer import SGD
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
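# Note: the label is 1 exactly when x0 and x1 share a sign (their product is
# non-negative), i.e. the classic XOR-by-quadrant toy classification problem.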
def calculate_precision(data: np.ndarray, pred: np.ndarray) -> float:
""" Calculate precision for given data and prediction.
:type data: [[x, y], ...]
:param data: Input data
:type pred: [[x_pred, y_pred], ...]
:param pred: Network output data
"""
correct = 0
assert len(data) == len(pred)
for inp_data, pred_output in zip(data, pred):
label = 0 if np.prod(inp_data) < 0 else 1
pred_label = np.argmax(pred_output)
if pred_label == label:
correct += 1
return float(correct) / len(data)
class XORNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
def forward(self, x):
y = self.fc0(x)
x._swap_out()
x = F.tanh(y)
y = self.fc1(x)
x = F.tanh(y)
x = self.fc2(x)
y = (x + x) / 2 # in order to test drop()
y._drop()
return y
def test_training_converge_with_swap_and_drop():
_set_swap_flag(True)
_set_drop_flag(True)
old_buffer_length = get_option("buffer_length")
set_option("buffer_length", 0)
net = XORNet()
opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
gm = ad.GradManager().attach(net.parameters())
def train(data, label):
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
return loss
def infer(data):
return net(data)
train_dataset = minibatch_generator()
losses = []
for data, label in itertools.islice(train_dataset, 2000):
data = Tensor(data, dtype=np.float32)
label = Tensor(label, dtype=np.int32)
opt.clear_grad()
loss = train(data, label)
opt.step()
losses.append(loss.numpy())
assert np.mean(losses[-100:]) < 0.1, "Final training Loss must be low enough"
ngrid = 10
x = np.linspace(-1.0, 1.0, ngrid)
xx, yy = np.meshgrid(x, x)
xx = xx.reshape((ngrid * ngrid, 1))
yy = yy.reshape((ngrid * ngrid, 1))
data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))
pred = infer(Tensor(data)).numpy()
precision = calculate_precision(data.numpy(), pred)
assert precision == 1.0, "Test precision must be high enough, get {}".format(
precision
)
_set_swap_flag(False)
_set_drop_flag(False)
|
set_option("buffer_length", old_buffer_length)
|
megengine.core._imperative_rt.core2.set_option
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
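# Scale-gradient derivation for backward() above: with t = 2 ** scale we have
# dt/dscale = t * ln(2). Treating round() as straight-through, elements inside
# the clip range contribute grad * (inp_rounded - inp_scaled) to dL/dt, while
# clipped elements contribute grad * inp_rounded; summing both and multiplying
# by t * ln(2) yields grad_s exactly as computed.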
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.normal(size=(1, 2, 3, 4))
s = np.random.rand(1) + 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
grad = Grad().wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad =
|
Grad()
|
megengine.core.autodiff.grad.Grad
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
# DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
# TFLite slice do not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# Caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
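# match() below walks the pattern graph and the operator graph in lockstep
# (BFS over paired queues): a pattern node of type "*" matches any operator,
# constant inputs are checked via check_const_value, and every matched
# operator's other consumers must also be matched unless the pattern node is
# flagged as an output.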
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
if cur_node.type != get_type(cur_opr) and cur_node.type != "*" or cur_opr.skip:
return False
if cur_node.op is None:
cur_node.op = cur_opr
if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
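# _fuse_softmax below matches the numerically stable softmax expansion
# exp(x - reduce_max(x)) / reduce_sum(exp(x - reduce_max(x))) as the chain
# ReduceOpr(MAX) -> SubOpr -> ExpOpr -> ReduceOpr(SUM) -> TrueDivOpr and
# collapses the five operators into a single SoftmaxOpr.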
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 /
|
sqrt(bn_var + eps)
|
megengine.functional.sqrt
|
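# For reference, a minimal numpy sketch (standard algebra, not the verbatim
# MegEngine implementation) of the conv-BN folding that the truncated
# fold_conv_bn above is computing, assuming groups == 1 and per-channel
# BN statistics of shape (C,):
import numpy as np

def fold_conv_bn_np(w, b, gamma, beta, mean, var, eps=1e-5):
    # BN(conv(x, w) + b) = gamma * (conv(x, w) + b - mean) * istd + beta
    istd = 1.0 / np.sqrt(var + eps)                    # per-channel inverse std
    w_fold = w * (gamma * istd).reshape(-1, 1, 1, 1)   # scale output channels
    b_fold = beta + gamma * istd * (b - mean)          # folded bias
    return w_fold, b_fold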
from datetime import datetime
from typing import Optional
import typer
from sqlalchemy.orm.exc import UnmappedInstanceError
from sqlmodel import Session, select
from .database import engine
from .functions_aux import Status
from .tables import ToDo, Timer
app = typer.Typer()
@app.command()
def task(id: str, task: str = None,
status: Optional[Status] = typer.Option(None),
tag: str = None, remarks: str = None, project: str = None,
due_date: datetime = typer.Option(None, formats=['%Y-%m-%d']),
reminder: datetime = typer.Option(None, formats=['%Y-%m-%d'])):
"""Edit record from to-do list"""
with Session(engine) as session:
try:
query = session.get(ToDo, id)
if task is not None:
query.task = task
if tag is not None:
query.tag = tag
if remarks is not None:
query.remarks = remarks
if project is not None:
query.project = project
if status is None or status == query.status:
pass
elif status == 'done':
query.status = status
query.date_end = datetime.now().date()
elif status == 'doing' and query.status == 'done':
query.status = status
query.date_end = None
elif status == 'to do':
timer = session.exec(select(Timer).where(
Timer.id_todo == id)).all()
if len(timer) > 0:
typer.secho(f'\nTask already started\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
else:
query.status = status
query.date_end = None
else:
query.status = status
today = datetime.today()
if due_date is not None and reminder \
is not None and reminder >= due_date:
typer.secho(
f'\nreminder must be smaller than {due_date.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif due_date is not None and due_date <= today:
typer.secho(f'\ndue date must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None and reminder <= today:
typer.secho(
f'\nreminder must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif due_date is not None and query.reminder \
is not None and due_date < query.reminder:
typer.secho(
f'\ndue date must be greater than {query.reminder.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None and query.due_date \
is not None and reminder >= query.due_date:
typer.secho(
f'\nreminder must be smaller than {query.due_date.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None:
query.reminder = reminder
elif due_date is not None:
query.due_date = due_date
session.add(query)
edit = typer.confirm(f"""Are you sure you want to edit:
{query}""")
if not edit:
typer.secho("Not editing",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("Editing it!",
fg=typer.colors.RED)
session.commit()
except AttributeError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
except UnmappedInstanceError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def project(project: str, new_project: str):
"""Edit project name in tasks"""
with Session(engine) as session:
tasks = session.exec(
|
select(ToDo)
|
sqlmodel.select
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a =
|
tensor(av)
|
megengine.core.tensor
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from tempfile import TemporaryFile
import numpy as np
from megengine.core import Buffer, Parameter, tensor
from megengine.test import assertTensorClose
def test_tensor_serialization():
def tensor_eq(a, b):
assert a.dtype == b.dtype
assert a.device == b.device
assert a.requires_grad == b.requires_grad
assertTensorClose(a, b)
with TemporaryFile() as f:
data = np.random.randint(low=0, high=7, size=[233])
a =
|
tensor(data, device="xpux", dtype=np.int32)
|
megengine.core.tensor
|
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
with pytest.raises(TypeError):
Observer("qint8")
def test_min_max_observer():
x = np.random.rand(3, 3, 3, 3).astype("float32")
np_min, np_max = x.min(), x.max()
x = mge.tensor(x)
m =
|
MinMaxObserver()
|
megengine.quantization.observer.MinMaxObserver
|
# Copyright (c) Megvii, Inc. and its affiliates.
"""do the evaluation work with single gpu
"""
import argparse
import os
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.functional as F
import numpy as np
from tqdm.auto import tqdm
from recognition.datasets import get_eval_dataset
from recognition.models import FaceRecognitionModel
from recognition.tools.utils import load_config_from_path
logger = mge.get_logger(__name__)
def get_inference_func(configs):
"""load checkpoint and construct inference function
Args:
configs (dict): configuration, required fields include:
base_dir: base directory of experiment outputs
evaluate_epoch: model of evaluate_epoch to evaluate
Raises:
FileNotFoundError: model of given epoch is not found
Returns:
inference_func (function): inference function mapping image to embedding
"""
model = FaceRecognitionModel(configs)
evaluate_epoch = configs["evaluate_epoch"]
checkpoint_path = os.path.join(configs["base_dir"], f"epoch-{evaluate_epoch}-checkpoint.pkl")
if os.path.exists(checkpoint_path):
checkpoint_data = mge.load(checkpoint_path)
model.load_state_dict(checkpoint_data["state_dict"], strict=False)
else:
raise FileNotFoundError(f"{checkpoint_path} not found!!!")
def inference_func(images):
model.eval()
# classic test-time mirror augment
embedding_origin = model.forward_embedding_only(images)
embedding_mirror = model.forward_embedding_only(images[:, :, :, ::-1])
embedding = embedding_origin + embedding_mirror
embedding = F.normalize(embedding, axis=1)
return embedding
return inference_func
def extract_feature_and_clean_noise(configs, inference_func):
"""extract feature and clean noise. the noise cleaning algorithm is proposed in
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
please refer to https://github.com/deepinsight/insightface/blob/master/Evaluation/Megaface/remove_noises.py for
more detail. this implement does basicly the same thing as the above, but with much higher speed
Args:
configs (dict): configuration, required fields include:
batch_size: inference batch size
feature_dim: model output feature dimension
base_dir: base directory of experiment outputs
dataset_dir: directory of dataset root
inference_func (function): constructed inference function
Returns:
facescrub_feature (np.array): noise-cleaned feature of facescrub (shape: n * (feature_dim + 1))
facescrub_label (np.array): label of facescrub (shape: n)
megaface_feature (np.array): noise-cleaned feature of megaface (shape: m * (feature_dim + 1))
"""
def prepare_dataset(name):
"""prepare dataset
Args:
name (str): name of the dataset, should be one of {facescrub, megaface}
Returns:
dataset (data.Dataset): required dataset
queue (data.DataLoader): corresponding dataloader
"""
preprocess = T.Compose([T.Normalize(mean=127.5, std=128), T.ToMode("CHW")])
dataset = get_eval_dataset(name, dataset_dir=configs["dataset_dir"])
sampler = data.SequentialSampler(dataset, batch_size=configs["batch_size"])
queue =
|
data.DataLoader(dataset, sampler=sampler, transform=preprocess)
|
megengine.data.DataLoader
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
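# For comparison: the exact GELU is x * 0.5 * (1 + erf(x / sqrt(2))); the
# tanh expression above is the fast approximation from Hendrycks & Gimpel
# (https://arxiv.org/abs/1606.08415).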
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
class BertLayerNorm(Module):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
self.variance_epsilon = eps
def forward(self, x):
u = F.mean(x, len(x.shape) - 1, True)
s = F.mean((x - u) ** 2, len(x.shape) - 1, True)
x = (x - u) / ((s + self.variance_epsilon) ** 0.5)
return self.weight * x + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
position_ids = F.broadcast_to(
|
F.expand_dims(position_ids, 0)
|
megengine.functional.expand_dims
|
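# A small numpy illustration (not MegEngine code) of the broadcast the
# completion above performs: expanding (seq_length,) position ids to
# (batch_size, seq_length) so each sequence in the batch shares them.
import numpy as np
seq_length, batch_size = 4, 2
position_ids = np.arange(seq_length, dtype=np.int32)                 # (4,)
position_ids = np.broadcast_to(position_ids[None, :], (batch_size, seq_length))  # (2, 4)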
import os
from fastapi import *
from psycopg2.errors import UndefinedTable
from sqlmodel import Session, select, text
from sqlalchemy.exc import ProgrammingError
from .models.timelog import TimeLog
from .models.calendar import Calendar
from .utils import (
engine,
create_db,
tags_metadata,
execute_sample_sql,
)
from .api import (
user,
timelog,
forecast,
epic,
epic_area,
client,
rate,
team,
role,
sponsor,
capacity,
demand,
)
import csv
app = FastAPI(title="timeflow app API", openapi_tags=tags_metadata)
app.include_router(timelog.router)
app.include_router(forecast.router)
app.include_router(user.router)
app.include_router(epic.router)
app.include_router(epic_area.router)
app.include_router(client.router)
app.include_router(rate.router)
app.include_router(team.router)
app.include_router(role.router)
app.include_router(sponsor.router)
app.include_router(capacity.router)
app.include_router(demand.router)
@app.on_event("startup")
def on_startup():
with Session(engine) as session:
if os.getenv("TIMEFLOW_DEV") == "true":
try:
statement = select(TimeLog)
results = session.exec(statement)
except ProgrammingError:
create_db()
execute_sample_sql()
elif os.getenv("TIMEFLOW_DEV") == "false":
try:
statement =
|
select(TimeLog)
|
sqlmodel.select
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a +
|
mge.tensor(self.data1)
|
megengine.tensor
|
#!/usr/bin/env python
r"""
Parallel assembling and solving of a Poisson's equation, using commands for
interactive use.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u
= \int_{\Omega} v f
\;, \quad \forall v \;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example does not use a nonlinear solver.
- This example can serve as a template for solving a linear single-field scalar
problem - just replace the equations in :func:`create_local_problem()`.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/diffusion/poisson_parallel_interactive.py -h
See PETSc options::
$ python examples/diffusion/poisson_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/diffusion/poisson_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 5 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --verify --metis -ksp_monitor -ksp_converged_reason
View the results using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import csv
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.timing import Timer
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.evaluate import apply_ebc_to_matrix
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
import sfepy.parallel.parallel as pl
import sfepy.parallel.plot_parallel_dofs as ppd
def create_local_problem(omega_gi, order):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
field_i = Field.from_args('fu', nm.float64, 1, omega_i,
approx_order=order)
output('number of local field DOFs:', field_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field_i)
v_i = FieldVariable('v_i', 'test', field_i, primary_var_name='u_i')
integral = Integral('i', order=2*order)
mat = Material('m', lam=10, mu=5)
t1 = Term.new('dw_laplace(m.lam, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
def _get_load(coors):
val = nm.ones_like(coors[:, 0])
for coor in coors.T:
val *= nm.sin(4 * nm.pi * coor)
return val
def get_load(ts, coors, mode=None, **kwargs):
if mode == 'qp':
return {'val' : _get_load(coors).reshape(coors.shape[0], 1, 1)}
load = Material('load', function=Function('get_load', get_load))
t2 = Term.new('dw_volume_lvf(load.val, v_i)',
integral, omega_i, load=load, v_i=v_i)
eq = Equation('balance', t1 - 100 * t2)
eqs = Equations([eq])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.all' : 0.1})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2]))
pb.update_materials()
return pb
def verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options,
verbose=False):
vec = pl.verify_task_dof_maps(dof_maps, id_map, field, verbose=verbose)
order = options.order
mesh = field.domain.mesh
sfield = Field.from_args('aux', nm.float64, 'scalar', field.region,
approx_order=order)
aux = FieldVariable('aux', 'parameter', sfield,
primary_var_name='(set-to-None)')
out = aux.create_output(vec,
linearization=Struct(kind='adaptive',
min_level=order-1,
max_level=order-1,
eps=1e-8))
filename = os.path.join(options.output_dir,
'para-domains-dofs.h5')
if field.is_higher_order():
out['aux'].mesh.write(filename, out=out)
else:
mesh.write(filename, out=out)
out = Struct(name='cells', mode='cell',
data=cell_tasks[:, None, None, None])
filename = os.path.join(options.output_dir,
'para-domains-cells.h5')
mesh.write(filename, out={'cells' : out})
def solve_problem(mesh_filename, options, comm):
order = options.order
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
stats = Struct()
timer = Timer('solve_timer')
timer.start()
mesh = Mesh.from_file(mesh_filename)
stats.t_read_mesh = timer.stop()
timer.start()
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
stats.t_partition_mesh = timer.stop()
output('creating global domain and field...')
timer.start()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
stats.t_create_global_fields = timer.stop()
output('...done in', timer.dt)
output('distributing field %s...' % field.name)
timer.start()
distribute = pl.distribute_fields_dofs
lfds, gfds = distribute([field], cell_tasks,
is_overlap=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
lfd = lfds[0]
stats.t_distribute_fields_dofs = timer.stop()
output('...done in', timer.dt)
if rank == 0:
dof_maps = gfds[0].dof_maps
id_map = gfds[0].id_map
if options.verify:
verify_save_dof_maps(field, cell_tasks,
dof_maps, id_map, options, verbose=True)
if options.plot:
ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0],
options.output_dir, size)
output('creating local problem...')
timer.start()
omega_gi = Region.from_cells(lfd.cells, field.domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, order)
variables = pb.get_variables()
eqs = pb.equations
u_i = variables['u_i']
field_i = u_i.field
stats.t_create_local_problem = timer.stop()
output('...done in', timer.dt)
if options.plot:
ppd.plot_local_dofs([None, None], field, field_i, omega_gi,
options.output_dir, rank)
output('allocating global system...')
timer.start()
sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1)  # completion; API: sfepy.parallel.parallel.get_sizes
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU()  # completion; API: megengine.module.ReLU
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)  # completion; API: megengine.functional.loss.cross_entropy
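# Typical continuation (a sketch, not the verbatim script): feed a handful of
# batches through calculate_scale() so the enabled observers record value
# ranges, then convert the calibrated QAT model with quantize(); valid_queue,
# model and quantize are the objects imported/defined above.
for step, (image, label) in enumerate(valid_queue):
    calculate_scale(mge.tensor(image), mge.tensor(label))
    if step >= 20:  # a few batches suffice for calibration
        break
quantize(model)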
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
def test_bn_no_track_stat2():
nchannel = 3
m = BatchNorm2d(nchannel) # Init with track_running_stat = True
m.track_running_stats = False
# m.running_var and m.running_mean created during init time
saved_var = m.running_var.numpy()
assert saved_var is not None
saved_mean = m.running_mean.numpy()
assert saved_mean is not None
gm = ad.GradManager()  # completion; API: megengine.autodiff.GradManager
from typing import TYPE_CHECKING, List, Optional, Type
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.link_tables import ProblemProblemSetLink
from joj.horse.schemas.problem import ProblemDetail, WithLatestRecordType
from joj.horse.services.db import db_session
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
ProblemConfig,
ProblemGroup,
ProblemSet,
Record,
User,
)
class Problem(DomainURLORMModel, ProblemDetail, table=True): # type: ignore[call-arg]
__tablename__ = "problems"
__table_args__ = (UniqueConstraint("domain_id", "url"),)
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" = Relationship(back_populates="problems")
owner_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
)
)
owner: Optional["User"] = Relationship(back_populates="owned_problems")
problem_group_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("problem_groups.id", ondelete="SET NULL"), nullable=True
)
)
problem_group: Optional["ProblemGroup"] = Relationship(back_populates="problems")  # completion; API: sqlmodel.Relationship
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, get_default, assert_
from sfepy.base.base import Struct
from sfepy.discrete.common.fields import parse_shape, Field
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.discrete.fem.utils import (extend_cell_data, prepare_remap,
invert_remap, get_min_value)
from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.fe_surface import FESurface
from sfepy.discrete.integrals import Integral
from sfepy.discrete.fem.linearizer import (get_eval_dofs, get_eval_coors,
create_output)
import six
def set_mesh_coors(domain, fields, coors, update_fields=False, actual=False,
clear_all=True, extra_dofs=False):
if actual:
if not hasattr(domain.mesh, 'coors_act'):
domain.mesh.coors_act = nm.zeros_like(domain.mesh.coors)
domain.mesh.coors_act[:] = coors[:domain.mesh.n_nod]
else:
domain.cmesh.coors[:] = coors[:domain.mesh.n_nod]
if update_fields:
for field in six.itervalues(fields):
field.set_coors(coors, extra_dofs=extra_dofs)
field.clear_mappings(clear_all=clear_all)
def eval_nodal_coors(coors, mesh_coors, region, poly_space, geom_poly_space,
econn, only_extra=True):
"""
Compute coordinates of nodes corresponding to `poly_space`, given
mesh coordinates and `geom_poly_space`.
"""
if only_extra:
iex = (poly_space.nts[:,0] > 0).nonzero()[0]
if iex.shape[0] == 0: return
qp_coors = poly_space.node_coors[iex, :]
econn = econn[:, iex].copy()
else:
qp_coors = poly_space.node_coors
##
# Evaluate geometry interpolation base functions in (extra) nodes.
bf = geom_poly_space.eval_base(qp_coors)
bf = bf[:,0,:].copy()
##
# Evaluate extra coordinates with 'bf'.
cmesh = region.domain.cmesh
conn = cmesh.get_incident(0, region.cells, region.tdim)
conn.shape = (econn.shape[0], -1)
ecoors = nm.dot(bf, mesh_coors[conn])
coors[econn] = nm.swapaxes(ecoors, 0, 1)
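# Shape note for the interpolation above (an annotation, not new behaviour):
# bf is (n_extra_nodes, n_cell_vertices) and mesh_coors[conn] is
# (n_cell, n_cell_vertices, dim), so nm.dot() contracts the vertex axis and
# yields (n_extra_nodes, n_cell, dim); nm.swapaxes() then stores the result
# per cell as (n_cell, n_extra_nodes, dim) through the connectivity econn.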
def _interp_to_faces(vertex_vals, bfs, faces):
dim = vertex_vals.shape[1]
n_face = faces.shape[0]
n_qp = bfs.shape[0]
faces_vals = nm.zeros((n_face, n_qp, dim), nm.float64)
for ii, face in enumerate(faces):
vals = vertex_vals[face,:dim]
faces_vals[ii,:,:] = nm.dot(bfs[:,0,:], vals)
return faces_vals
def get_eval_expression(expression,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Get the function for evaluating an expression given a list of elements,
and reference element coordinates.
"""
from sfepy.discrete.evaluate import eval_in_els_and_qp
def _eval(iels, coors):
val = eval_in_els_and_qp(expression, iels, coors,
fields, materials, variables,
functions=functions, mode=mode,
term_mode=term_mode,
extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
return val[..., 0]
return _eval
def create_expression_output(expression, name, primary_field_name,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None,
min_level=0, max_level=1, eps=1e-4):
"""
Create output mesh and data for the expression using the adaptive
linearizer.
Parameters
----------
expression : str
The expression to evaluate.
name : str
The name of the data.
primary_field_name : str
The name of field that defines the element groups and polynomial
spaces.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
out : dict
The output dictionary.
"""
field = fields[primary_field_name]
vertex_coors = field.coors[:field.n_vertex_dof, :]
ps = field.poly_space
gps = field.gel.poly_space
vertex_conn = field.econn[:, :field.gel.n_vertex]
eval_dofs = get_eval_expression(expression,
fields, materials, variables,
functions=functions,
mode=mode, extra_args=extra_args,
verbose=verbose, kwargs=kwargs)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
field.domain.mesh.descs)
out = {}
out[name] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=name, dofs=None,
mesh=mesh, level=level)
out = convert_complex_output(out)
return out
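# Usage sketch (hypothetical argument values, following the signature above):
#     out = create_expression_output('ev_cauchy_strain.2.Omega(u)', 'e', 'fu',
#                                    fields, materials, variables, mode='qp')
# out['e'] is then a Struct carrying the adaptively linearized mesh and the
# vertex data, ready to be written together with other output.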
class FEField(Field):
"""
Base class for finite element fields.
Notes
-----
- interps and hence node_descs are per region (must have single
geometry!)
Field shape information:
- ``shape`` - the shape of the base functions in a point
- ``n_components`` - the number of DOFs per FE node
- ``val_shape`` - the shape of field value (the product of DOFs and
base functions) in a point
"""
def __init__(self, name, dtype, shape, region, approx_order=1):
"""
Create a finite element field.
Parameters
----------
name : str
The field name.
dtype : numpy.dtype
The field data type: float64 or complex128.
shape : int/tuple/str
The field shape: 1 or (1,) or 'scalar', space dimension (2, or (2,)
or 3 or (3,)) or 'vector', or a tuple. The field shape determines
the shape of the FE base functions and is related to the number of
components of variables and to the DOF per node count, depending
on the field kind.
region : Region
The region where the field is defined.
approx_order : int or tuple
The FE approximation order. The tuple form is (order, has_bubble),
e.g. (1, True) means order 1 with a bubble function.
Notes
-----
Assumes one cell type for the whole region!
"""
shape = parse_shape(shape, region.domain.shape.dim)
if not self._check_region(region):
raise ValueError('unsuitable region for field %s! (%s)' %
(name, region.name))
Struct.__init__(self, name=name, dtype=dtype, shape=shape,
region=region)
self.domain = self.region.domain
self._set_approx_order(approx_order)
self._setup_geometry()
self._setup_kind()
self._setup_shape()
self.surface_data = {}
self.point_data = {}
self.ori = None
self._create_interpolant()
self._setup_global_base()
self.setup_coors()
self.clear_mappings(clear_all=True)
self.clear_qp_base()
self.basis_transform = None
self.econn0 = None
self.unused_dofs = None
self.stored_subs = None
def _set_approx_order(self, approx_order):
"""
Set a uniform approximation order.
"""
if isinstance(approx_order, tuple):
self.approx_order = approx_order[0]
self.force_bubble = approx_order[1]
else:
self.approx_order = approx_order
self.force_bubble = False
def get_true_order(self):
"""
Get the true approximation order depending on the reference
element geometry.
For example, for P1 (linear) approximation the true order is 1,
while for Q1 (bilinear) approximation in 2D the true order is 2.
"""
gel = self.gel
if (gel.dim + 1) == gel.n_vertex:
order = self.approx_order
else:
order = gel.dim * self.approx_order
if self.force_bubble:
bubble_order = gel.dim + 1
order = max(order, bubble_order)
return order
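# Worked example (illustrative): on a 2D quadrilateral, gel.dim + 1 == 3
# differs from gel.n_vertex == 4, so a Q1 field (approx_order=1) has true
# order gel.dim * 1 == 2, while P1 on a triangle (3 vertices) keeps order 1.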
def is_higher_order(self):
"""
Return True, if the field's approximation order is greater than one.
"""
return self.force_bubble or (self.approx_order > 1)
def _setup_global_base(self):
"""
Setup global DOF/base functions, their indices and connectivity of the
field. Called methods implemented in subclasses.
"""
self._setup_facet_orientations()
self._init_econn()
self.n_vertex_dof, self.vertex_remap = self._setup_vertex_dofs()
self.vertex_remap_i = invert_remap(self.vertex_remap)
aux = self._setup_edge_dofs()
self.n_edge_dof, self.edge_dofs, self.edge_remap = aux
aux = self._setup_face_dofs()
self.n_face_dof, self.face_dofs, self.face_remap = aux
aux = self._setup_bubble_dofs()
self.n_bubble_dof, self.bubble_dofs, self.bubble_remap = aux
self.n_nod = self.n_vertex_dof + self.n_edge_dof \
+ self.n_face_dof + self.n_bubble_dof
self._setup_esurface()
def _setup_esurface(self):
"""
Setup extended surface entities (edges in 2D, faces in 3D),
i.e. indices of surface entities into the extended connectivity.
"""
node_desc = self.node_desc
gel = self.gel
self.efaces = gel.get_surface_entities().copy()
nd = node_desc.edge
if nd is not None:
efs = []
for eof in gel.get_edges_per_face():
efs.append(nm.concatenate([nd[ie] for ie in eof]))
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
efs = node_desc.face
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
if gel.dim == 3:
self.eedges = gel.edges.copy()
efs = node_desc.edge
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.eedges = nm.hstack((self.eedges, efs))
def set_coors(self, coors, extra_dofs=False):
"""
Set coordinates of field nodes.
"""
# Mesh vertex nodes.
if self.n_vertex_dof:
indx = self.vertex_remap_i
self.coors[:self.n_vertex_dof] = nm.take(coors,
indx.astype(nm.int32),
axis=0)
n_ex_dof = self.n_bubble_dof + self.n_edge_dof + self.n_face_dof
# extra nodes
if n_ex_dof:
if extra_dofs:
if self.n_nod != coors.shape[0]:
raise NotImplementedError
self.coors[:] = coors
else:
gps = self.gel.poly_space
ps = self.poly_space
eval_nodal_coors(self.coors, coors, self.region,
ps, gps, self.econn)
def setup_coors(self):
"""
Setup coordinates of field nodes.
"""
mesh = self.domain.mesh
self.coors = nm.empty((self.n_nod, mesh.dim), nm.float64)
self.set_coors(mesh.coors)
def get_vertices(self):
"""
Return indices of vertices belonging to the field region.
"""
return self.vertex_remap_i
def _get_facet_dofs(self, rfacets, remap, dofs):
facets = remap[rfacets]
return dofs[facets[facets >= 0]].ravel()
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom'
The term integration type.
region_name : str
The name of the region of the integral.
Returns
-------
data_shape : 4 ints
The `(n_el, n_qp, dim, n_en)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn)` for surface shape kind and
`(n_nod, 0, 0, 1)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_nod` = number of element nodes
"""
region = self.domain.regions[region_name]
shape = region.shape
dim = region.dim
if integration in ('surface', 'surface_extra'):
sd = self.surface_data[region_name]
# This works also for surface fields.
key = sd.face_type
weights = self.get_qp(key, integral).weights
n_qp = weights.shape[0]
if integration == 'surface':
data_shape = (sd.n_fa, n_qp, dim, sd.n_fp)
else:
data_shape = (sd.n_fa, n_qp, dim, self.econn.shape[1])
elif integration in ('volume', 'custom'):
_, weights = integral.get_qp(self.gel.name)
n_qp = weights.shape[0]
data_shape = (shape.n_cell, n_qp, dim, self.econn.shape[1])
elif integration == 'point':
dofs = self.get_dofs_in_region(region, merge=True)
data_shape = (dofs.shape[0], 0, 0, 1)
else:
raise NotImplementedError('unsupported integration! (%s)'
% integration)
return data_shape
def get_dofs_in_region(self, region, merge=True):
"""
Return indices of DOFs that belong to the given region and group.
"""
node_desc = self.node_desc
dofs = []
vdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.vertex is not None:
vdofs = self.vertex_remap[region.vertices]
vdofs = vdofs[vdofs >= 0]
dofs.append(vdofs)
edofs = nm.empty((0,), dtype=nm.int32)
if node_desc.edge is not None:
edofs = self._get_facet_dofs(region.edges,
self.edge_remap,
self.edge_dofs)
dofs.append(edofs)
fdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.face is not None:
fdofs = self._get_facet_dofs(region.faces,
self.face_remap,
self.face_dofs)
dofs.append(fdofs)
bdofs = nm.empty((0,), dtype=nm.int32)
if (node_desc.bubble is not None) and region.has_cells():
els = self.bubble_remap[region.cells]
bdofs = self.bubble_dofs[els[els >= 0]].ravel()
dofs.append(bdofs)
if merge:
dofs = nm.concatenate(dofs)
return dofs
def clear_qp_base(self):
"""
Remove cached quadrature points and base functions.
"""
self.qp_coors = {}
self.bf = {}
def get_qp(self, key, integral):
"""
Get quadrature points and weights corresponding to the given key
and integral. The key is 'v' or 's#', where # is the number of
face vertices.
"""
qpkey = (integral.order, key)
if qpkey not in self.qp_coors:
if (key[0] == 's') and not self.is_surface:
dim = self.gel.dim - 1
n_fp = self.gel.surface_facet.n_vertex
geometry = '%d_%d' % (dim, n_fp)
else:
geometry = self.gel.name
vals, weights = integral.get_qp(geometry)
self.qp_coors[qpkey] = Struct(vals=vals, weights=weights)
return self.qp_coors[qpkey]
def substitute_dofs(self, subs, restore=False):
"""
Perform facet DOF substitutions according to `subs`.
Modifies `self.econn` in-place and sets `self.econn0`,
`self.unused_dofs` and `self.basis_transform`.
"""
if restore and (self.stored_subs is not None):
self.econn0 = self.econn
self.econn, self.unused_dofs, basis_transform = self.stored_subs
else:
if subs is None:
self.econn0 = self.econn
return
else:
self.econn0 = self.econn.copy()
self._substitute_dofs(subs)
self.unused_dofs = nm.setdiff1d(self.econn0, self.econn)
basis_transform = self._eval_basis_transform(subs)
self.set_basis_transform(basis_transform)
def restore_dofs(self, store=False):
"""
Undoes the effect of :func:`FEField.substitute_dofs()`.
"""
if self.econn0 is None:
raise ValueError('no original DOFs to restore!')
if store:
self.stored_subs = (self.econn,
self.unused_dofs,
self.basis_transform)
else:
self.stored_subs = None
self.econn = self.econn0
self.econn0 = None
self.unused_dofs = None
self.basis_transform = None
def set_basis_transform(self, transform):
"""
Set local element basis transformation.
The basis transformation is applied in :func:`FEField.get_base()` and
:func:`FEField.create_mapping()`.
Parameters
----------
transform : array, shape `(n_cell, n_ep, n_ep)`
The array with `(n_ep, n_ep)` transformation matrices for each cell
in the field's region, where `n_ep` is the number of element DOFs.
"""
self.basis_transform = transform
def restore_substituted(self, vec):
"""
Restore values of the unused DOFs using the transpose of the applied
basis transformation.
"""
if (self.econn0 is None) or (self.basis_transform is None):
raise ValueError('no original DOF values to restore!!')
vec = vec.reshape((self.n_nod, self.n_components)).copy()
evec = vec[self.econn]
vec[self.econn0] = nm.einsum('cji,cjk->cik', self.basis_transform, evec)
return vec.ravel()
def get_base(self, key, derivative, integral, iels=None,
from_geometry=False, base_only=True):
qp = self.get_qp(key, integral)
if from_geometry:
ps = self.gel.poly_space
else:
ps = self.poly_space
_key = key if not from_geometry else 'g' + key
bf_key = (integral.order, _key, derivative)
if bf_key not in self.bf:
if (iels is not None) and (self.ori is not None):
ori = self.ori[iels]
else:
ori = self.ori
self.bf[bf_key] = ps.eval_base(qp.vals, diff=derivative, ori=ori,
transform=self.basis_transform)
if base_only:
return self.bf[bf_key]
else:
return self.bf[bf_key], qp.weights
def create_bqp(self, region_name, integral):
gel = self.gel
sd = self.surface_data[region_name]
bqpkey = (integral.order, sd.bkey)
if bqpkey not in self.qp_coors:
qp = self.get_qp(sd.face_type, integral)
ps_s = self.gel.surface_facet.poly_space
bf_s = ps_s.eval_base(qp.vals)
coors, faces = gel.coors, gel.get_surface_entities()
vals = _interp_to_faces(coors, bf_s, faces)
self.qp_coors[bqpkey] = Struct(name='BQP_%s' % sd.bkey,
vals=vals, weights=qp.weights)
def extend_dofs(self, dofs, fill_value=None):
"""
Extend DOFs to the whole domain using the `fill_value`, or the
smallest value in `dofs` if `fill_value` is None.
"""
if fill_value is None:
if nm.isrealobj(dofs):
fill_value = get_min_value(dofs)  # completion; API: sfepy.discrete.fem.utils.get_min_value
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)  # completion; API: megengine.functional.indexing_one_hot
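# Equivalence sketch (hypothetical shapes, plain numpy): the gather above
# selects, for every ROI, the four box offsets of its labelled class along
# axis 1.
import numpy as np
pred = np.arange(2 * 3 * 4).reshape(2, 3, 4)      # (n_roi, n_classes, 4)
vlabels = np.array([[1, 1, 1, 1], [2, 2, 2, 2]])  # per-ROI class, repeated 4x
picked = np.take_along_axis(pred, vlabels[:, None, :], axis=1)[:, 0, :]  # (2, 4)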
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshapes the tensor x from (N, C, H, W) into (N, C*H*W),
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)  # completion; API: megengine.module.Linear
#!/usr/bin/env python
"""
Diametrically point loaded 2-D disk, using commands for interactive use. See
:ref:`sec-primer`.
The script combines the functionality of all the ``its2D_?.py`` examples and
allows setting various simulation parameters, namely:
- material parameters
- displacement field approximation order
- uniform mesh refinement level
The example shows also how to probe the results as in
:ref:`linear_elasticity-its2D_4`, and how to display the results using Mayavi.
Using :mod:`sfepy.discrete.probes` allows correct probing of fields with the
approximation order greater than one.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/linear_elasticity/its2D_interactive.py -h
Notes
-----
The ``--probe`` and ``--show`` options work simultaneously only if Mayavi and
Matplotlib use the same backend type (for example wx).
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Integrals,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.discrete.fem.geometry_element import geometry_data
from sfepy.discrete.probes import LineProbe
from sfepy.discrete.projections import project_by_component
from examples.linear_elasticity.its2D_2 import stress_strain
from examples.linear_elasticity.its2D_3 import nodal_stress
def gen_lines(problem):
"""
Define two line probes.
Additional probes can be added by appending to `ps0` (start points) and
`ps1` (end points) lists.
"""
ps0 = [[0.0, 0.0], [0.0, 0.0]]
ps1 = [[75.0, 0.0], [0.0, 75.0]]
# Use enough points for higher order approximations.
n_point = 1000
labels = ['%s -> %s' % (p0, p1) for p0, p1 in zip(ps0, ps1)]
probes = []
for ip in range(len(ps0)):
p0, p1 = ps0[ip], ps1[ip]
probes.append(LineProbe(p0, p1, n_point))
return probes, labels
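# Example (hypothetical coordinates): a third, diagonal probe is added by
# extending both point lists inside gen_lines() before the loop:
#     ps0.append([0.0, 0.0])
#     ps1.append([75.0, 75.0])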
def probe_results(u, strain, stress, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(u)
results['u'] = (pars, vals)
pars, vals = probe(strain)
results['cauchy_strain'] = (pars, vals)
pars, vals = probe(stress)
results['cauchy_stress'] = (pars, vals)
fig = plt.figure()
plt.clf()
fig.subplots_adjust(hspace=0.4)
plt.subplot(311)
pars, vals = results['u']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$u_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('displacements')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
sym_indices = ['11', '22', '12']
plt.subplot(312)
pars, vals = results['cauchy_strain']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$e_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy strain')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
plt.subplot(313)
pars, vals = results['cauchy_stress']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$\sigma_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy stress')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
return fig, results
helps = {
'young' : "the Young's modulus [default: %(default)s]",
'poisson' : "the Poisson's ratio [default: %(default)s]",
'load' : "the vertical load value (negative means compression)"
" [default: %(default)s]",
'order' : 'displacement field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--young', metavar='float', type=float,
action='store', dest='young',
default=2000.0, help=helps['young'])
parser.add_argument('--poisson', metavar='float', type=float,
action='store', dest='poisson',
default=0.4, help=helps['poisson'])
parser.add_argument('--load', metavar='float', type=float,
action='store', dest='load',
default=-1000.0, help=helps['load'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
options = parser.parse_args()
assert_((0.0 < options.poisson < 0.5),
"Poisson's ratio must be in ]0, 0.5[!")
assert_((0 < options.order),
'displacement approximation order must be at least 1!')
output('using values:')
output(" Young's modulus:", options.young)
output(" Poisson's ratio:", options.poisson)
output(' vertical load:', options.load)
output('uniform mesh refinement level:', options.refine)
# Build the problem definition.
mesh = Mesh.from_file(data_dir + '/meshes/2d/its2D.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.001', 'facet')
bottom = domain.create_region('Bottom',
'vertices in y < 0.001', 'facet')
top = domain.create_region('Top', 'vertex 2', 'vertex')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=options.order)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
D = stiffness_from_youngpoisson(2, options.young, options.poisson)
asphalt = Material('Asphalt', D=D)
load = Material('Load', values={'.val' : [0.0, options.load]})
integral = Integral('i', order=2*options.order)
integral0 = Integral('i', order=0)
t1 = Term.new('dw_lin_elastic(Asphalt.D, v, u)',
integral, omega, Asphalt=asphalt, v=v, u=u)
t2 = Term.new('dw_point_load(Load.val, v)',
integral0, top, Load=load, v=v)
eq = Equation('balance', t1 - t2)
eqs = Equations([eq])
xsym = EssentialBC('XSym', bottom, {'u.1' : 0.0})
ysym = EssentialBC('YSym', left, {'u.0' : 0.0})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
pb.time_update(ebcs=Conditions([xsym, ysym]))
# Solve the problem.
state = pb.solve()
output(nls_status)
# Postprocess the solution.
out = state.create_output_dict()
out = stress_strain(out, pb, state, extend=True)
pb.save_state('its2D_interactive.vtk', out=out)
gdata = geometry_data['2_3']
nc = len(gdata.coors)
integral_vn = Integral('ivn', coors=gdata.coors,
weights=[gdata.volume / nc] * nc)
nodal_stress(out, pb, state, integrals=Integrals([integral_vn]))
if options.probe:
# Probe the solution.
probes, labels = gen_lines(pb)
sfield = Field.from_args('sym_tensor', nm.float64, 3, omega,
approx_order=options.order - 1)
stress = FieldVariable('stress', 'parameter', sfield,
primary_var_name='(set-to-None)')
strain = FieldVariable('strain', 'parameter', sfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('component', nm.float64, 1, omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
ev = pb.evaluate
order = 2 * (options.order - 1)
strain_qp = ev('ev_cauchy_strain.%d.Omega(u)' % order, mode='qp')
stress_qp = ev('ev_cauchy_stress.%d.Omega(Asphalt.D, u)' % order,
mode='qp', copy_materials=False)
project_by_component(strain, strain_qp, component, order)
project_by_component(stress, stress_qp, component, order)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(u, strain, stress, probe, labels[ii])
fig.savefig('its2D_interactive_probe_%d.png' % ii)
all_results.append(results)
for ii, results in enumerate(all_results):
output('probe %d:' % ii)
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
if options.show:
# Show the solution. If the approximation order is greater than 1, the
# extra DOFs are simply thrown away.
from sfepy.postprocess.viewer import Viewer
view = Viewer('its2D_interactive.vtk')  # completion; API: sfepy.postprocess.viewer.Viewer
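# Likely continuation (a sketch with assumed arguments, not the verbatim
# script): Viewer instances are callable and open the Mayavi window directly:
#     view(vector_mode='warp_norm', rel_scaling=1,
#          is_scalar_bar=True, is_wireframe=True)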
from typing import Optional
import strawberry
from sqlmodel import (
SQLModel,
Field,
create_engine,
select,
Session
)
engine = create_engine('sqlite:///database.db')
class Person(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
nome: str
idade: int
SQLModel.metadata.create_all(engine)
def create_app(nome: str, idade:int):
person = Person(nome=nome, idade=idade)
with Session(engine)  # completion; API: sqlmodel.Session
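# Likely continuation (a sketch following standard sqlmodel usage; the source
# is truncated at the with statement):
#     with Session(engine) as session:
#         session.add(person)
#         session.commit()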
from typing import List, Optional
from fastapi import Depends, FastAPI, HTTPException, Query
from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select
class TeamBase(SQLModel):
name: str = Field(index=True)
headquarters: str
class Team(TeamBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
heroes: List["Hero"] = Relationship(back_populates="team")
class TeamCreate(TeamBase):
pass
class TeamRead(TeamBase):
id: int
class TeamUpdate(SQLModel):
id: Optional[int] = None
name: Optional[str] = None
headquarters: Optional[str] = None
class HeroBase(SQLModel):
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
class Hero(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
team: Optional[Team] = Relationship(back_populates="heroes")  # completion; API: sqlmodel.Relationship
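# Usage sketch (hypothetical data, standard sqlmodel patterns, assuming an
# engine as in the snippet further above): the paired back_populates
# declarations keep both sides of the link in sync.
#     with Session(engine) as session:
#         team = Team(name="Preventers", headquarters="Sharp Tower")
#         hero = Hero(name="Deadpond", secret_name="Dive Wilson", team=team)
#         session.add(hero)
#         session.commit()
#         session.refresh(hero)
#         print(hero.team.name)  # "Preventers"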
# Copyright (c) Megvii, Inc. and its affiliates.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .resnet import BasicBlock
class STN(M.Module):
"""spatial transformer networks from
`"Spatial Transformer Networks" <https://arxiv.org/pdf/1506.02025.pdf>`_
some implementation details are greatly simplified while good performance is maintained
"""
def __init__(self, input_size=112):
assert input_size == 112, f"expected input_size == 112, got {input_size}"
super().__init__()
self.input_size = input_size
self.stem = M.Sequential(
M.Conv2d(3, 8, kernel_size=3, stride=2, padding=1, bias=False),
M.BatchNorm2d(8)  # completion; API: megengine.module.BatchNorm2d
"""
Functions for a mesh refinement with hanging nodes.
Notes
-----
Using LCBCs with hanging nodes is not supported.
"""
from __future__ import absolute_import
from six.moves import range, zip
import numpy as nm
from sfepy.base.base import assert_
from sfepy.discrete import Functions, Function
from sfepy.discrete.fem import Mesh, FEDomain
# Rows = facets of reference cell, columns = [sub_cell_i, local facet_i]
refine_edges_2_4 = nm.array([[[0, 0], [1, 3]],
[[1, 0], [2, 3]],
[[2, 0], [3, 3]],
[[3, 0], [0, 3]]])
refine_faces_3_8 = nm.array([[[0, 0], [1, 0], [2, 0], [3, 0]],
[[0, 1], [3, 2], [4, 2], [7, 1]],
[[0, 2], [1, 1], [4, 1], [5, 2]],
[[4, 0], [5, 0], [6, 0], [7, 0]],
[[1, 2], [2, 1], [5, 1], [6, 2]],
[[2, 2], [3, 1], [6, 1], [7, 2]]])
refine_edges_3_8 = nm.array([[[0, 0], [1, 3]],
[[1, 0], [2, 3]],
[[2, 0], [3, 3]],
[[3, 0], [0, 3]],
[[4, 3], [5, 0]],
[[5, 3], [6, 0]],
[[6, 3], [7, 0]],
[[7, 3], [4, 0]],
[[0, 8], [4, 8]],
[[1, 8], [5, 8]],
[[2, 8], [6, 8]],
[[3, 8], [7, 8]]])
def find_level_interface(domain, refine_flag):
"""
Find facets of the coarse mesh that are on the coarse-refined cell
boundary.
ids w.r.t. current mesh:
- facets: global, local w.r.t. cells[:, 0], local w.r.t. cells[:, 1]
- interface cells:
- cells[:, 0] - cells to refine
- cells[:, 1] - their facet sharing neighbors (w.r.t. both meshes)
- cells[:, 2] - facet kind: 0 = face, 1 = edge
"""
if not refine_flag.any():
facets = nm.zeros((0, 3), dtype=nm.uint32)
cells = nm.zeros((0, 3), dtype=nm.uint32)
return facets, cells, 0, None, None
def _get_refine(coors, domain=None):
return nm.nonzero(refine_flag)[0]
def _get_coarse(coors, domain=None):
return nm.nonzero(1 - refine_flag)[0]
get_refine = Function('get_refine', _get_refine)
get_coarse = Function('get_coarse', _get_coarse)
functions = Functions([get_refine, get_coarse])
region0 = domain.create_region('coarse', 'cells by get_coarse',
functions=functions, add_to_regions=False,
allow_empty=True)
region1 = domain.create_region('refine', 'cells by get_refine',
functions=functions, add_to_regions=False)
cmesh = domain.mesh.cmesh
dim = cmesh.dim
if dim == 2:
oe = 0
facets = nm.intersect1d(region0.facets, region1.facets)
cmesh.setup_connectivity(dim - 1, dim)
cells, offs = cmesh.get_incident(dim, facets, dim - 1,
ret_offsets=True)
assert_((nm.diff(offs) == 2).all())
ii = cmesh.get_local_ids(facets, dim - 1, cells, offs, dim)
ii = ii.reshape((-1, 2))
cells = cells.reshape((-1, 2))
ii = nm.where(refine_flag[cells], ii[:, :1], ii[:, 1:])
cells = nm.where(refine_flag[cells], cells[:, :1], cells[:, 1:])
facets = nm.c_[facets, ii]
cells = nm.c_[cells, nm.zeros_like(cells[:, 1])]
else: # if dim == 3:
gel = domain.geom_els['3_8']
epf = gel.get_edges_per_face()
cmesh.setup_connectivity(dim, dim)
fc, coffs = cmesh.get_incident(dim, region1.cells, dim,
ret_offsets=True)
cc = nm.repeat(region1.cells, nm.diff(coffs))
aux = nm.c_[cc, fc]
"""
nnn[:, 0] cells to refine, nnn[:, 1] non-refined neighbours, nnn[:, 2]
neighbour kind : 0 face, 1 edge.
"""
nn = aux[refine_flag[fc] == 0]
cf = nn[:, 0].copy().astype(nm.uint32)
cc = nn[:, 1].copy().astype(nm.uint32)
vc, vco = cmesh.get_incident(0, cc, dim, ret_offsets=True)
vf, vfo = cmesh.get_incident(0, cf, dim, ret_offsets=True)
vc = vc.reshape((-1, 8))
vf = vf.reshape((-1, 8))
nnn = []
oe = 0
ov = nn.shape[0]
for ii in range(vc.shape[0]):
aux = set(vc[ii]).intersection(vf[ii])
nc = len(aux)
if nc == 1:
nnn.append((0, 0, 2))
ov -= 1
elif nc == 4:
nnn.append((nn[ii, 0], nn[ii, 1], 0))
oe += 1
else:
nnn.append((nn[ii, 0], nn[ii, 1], 1))
nnn = nm.array(nnn)
if nnn.shape[0] == 0:
facets = nm.zeros((0, 3), dtype=nm.uint32)
cells = nm.zeros((0, 4), dtype=nm.uint32)
return facets, cells, 0, region0, region1
# Sort by neighbour kind, skip vertex-only neighbours.
ii = nm.argsort(nnn[:, 2])
nnn = nnn[ii][:ov]
cf = cf[ii][:ov]
cc = cc[ii][:ov]
ec, eco = cmesh.get_incident(1, cc, dim, ret_offsets=True)
ef, efo = cmesh.get_incident(1, cf, dim, ret_offsets=True)
ec = ec.reshape((-1, 12))
ef = ef.reshape((-1, 12))
fc, fco = cmesh.get_incident(2, cc, dim, ret_offsets=True)
ff, ffo = cmesh.get_incident(2, cf, dim, ret_offsets=True)
fc = fc.reshape((-1, 6))
ff = ff.reshape((-1, 6))
emask = nm.zeros((domain.shape.n_el, 12), dtype=bool)
ffs = []
for ii in range(oe):
facet = nm.intersect1d(fc[ii], ff[ii])[0]
i1 = nm.where(ff[ii] == facet)[0][0]
i0 = nm.where(fc[ii] == facet)[0][0]
ffs.append((facet, i1, i0))
emask[nnn[ii, 0], epf[i1]] = True
for ii in range(oe, nnn.shape[0]):
facet = nm.intersect1d(ec[ii], ef[ii])[0]
i1 = nm.where(ef[ii] == facet)[0][0]
i0 = nm.where(ec[ii] == facet)[0][0]
ffs.append((facet, i1, i0))
ffs = nm.array(ffs)
ie = nm.where(nnn[:, 2] == 1)[0]
ennn = nnn[ie]
effs = ffs[ie]
omit = ie[emask[ennn[:, 0], effs[:, 1]]]
valid = nm.ones(nnn.shape[0], dtype=bool)
valid[omit] = False
cells = nnn[valid]
facets = ffs[valid]
return facets, cells, oe, region0, region1
def refine_region(domain0, region0, region1):
"""
Coarse cell sub_cells[ii, 0] in mesh0 is split into sub_cells[ii, 1:] in
mesh1.
The new fine cells are interleaved among the original coarse cells so that
the indices of the coarse cells do not change.
The cell groups are preserved. The vertex groups are preserved only in the
coarse (non-refined) cells.
"""
if region1 is None:
return domain0, None
mesh0 = domain0.mesh
mesh1 = Mesh.from_region(region1, mesh0)
domain1 = FEDomain('d', mesh1)
domain1r = domain1.refine()
mesh1r = domain1r.mesh
n_cell = region1.shape.n_cell
n_sub = 4 if mesh0.cmesh.tdim == 2 else 8
sub_cells = nm.empty((n_cell, n_sub + 1), dtype=nm.uint32)
sub_cells[:, 0] = region1.cells
sub_cells[:, 1] = region1.cells
aux = nm.arange((n_sub - 1) * n_cell, dtype=nm.uint32)
sub_cells[:, 2:] = mesh0.n_el + aux.reshape((n_cell, -1))
coors0, vgs0, conns0, mat_ids0, descs0 = mesh0._get_io_data()
coors, vgs, _conns, _mat_ids, descs = mesh1r._get_io_data()
# Preserve vertex groups of non-refined cells.
vgs[:len(vgs0)] = vgs0
def _interleave_refined(c0, c1):
if c1.ndim == 1:
c0 = c0[:, None]
c1 = c1[:, None]
n_row, n_col = c1.shape
n_new = region0.shape.n_cell + n_row
out = nm.empty((n_new, n_col), dtype=c0.dtype)
out[region0.cells] = c0[region0.cells]
out[region1.cells] = c1[::n_sub]
aux = c1.reshape((-1, n_col * n_sub))
out[mesh0.n_el:] = aux[:, n_col:].reshape((-1, n_col))
return out
conn = _interleave_refined(conns0[0], _conns[0])
mat_id = _interleave_refined(mat_ids0[0], _mat_ids[0]).squeeze()
mesh = Mesh.from_data('a', coors, vgs, [conn], [mat_id], descs)
domain = FEDomain('d', mesh)  # completion; API: sfepy.discrete.fem.FEDomain
"""init database
Revision ID: 60e58d3a26fa
Revises:
Create Date: 2021-11-24 18:06:53.935899
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '60e58d3a26fa'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('address',
sa.Column('street_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('house_number', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('city', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('zip_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_address_city'), 'address', ['city'], unique=False)
op.create_index(op.f('ix_address_house_number'), 'address', ['house_number'], unique=False)
op.create_index(op.f('ix_address_id'), 'address', ['id'], unique=False)
op.create_index(op.f('ix_address_street_name'), 'address', ['street_name'], unique=False)
op.create_index(op.f('ix_address_zip_code'), 'address', ['zip_code'], unique=False)
op.create_table('product',
sa.Column('name', sa.String(), nullable=True),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_product_id'), 'product', ['id'], unique=False)
op.create_table('customer',
sa.Column('mobile_number', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.Column('first_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('last_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('birth_date', sqlmodel.sql.sqltypes.AutoString()  # completion; API: sqlmodel.sql.sqltypes.AutoString
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistorySummaryTreatmsummaryConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id_order: int
history_id_conference: int
summary_treatmsummary_conference_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class SummaryTreatmsummaryConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
problem: str
question: str
summary_plan: str
surgeon_summary: str
pre_operation_abg: bool
post_operation_abg: bool
pre_operation_redo_abg: bool
pre_operation_jaw_surgery: bool
pre_operation_computing_design: bool
pre_operation_3d_print: bool
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class SummaryTreatmsummaryConferenceDoctorMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)  # completion; API: sqlmodel.Field
# Copyright (c) Megvii, Inc. and its affiliates.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .resnet import BasicBlock
class STN(M.Module):
"""spatial transformer networks from
`"Spatial Transformer Networks" <https://arxiv.org/pdf/1506.02025.pdf>`_
some implementation details are greatly simplified while good performance is maintained
"""
def __init__(self, input_size=112):
assert input_size == 112, f"expected input_size == 112, got {input_size}"
super().__init__()
self.input_size = input_size
self.stem = M.Sequential(
M.Conv2d(3, 8, kernel_size=3, stride=2, padding=1, bias=False),
M.BatchNorm2d(8),
M.ReLU(),
M.MaxPool2d(kernel_size=2, stride=2),
BasicBlock(8, 16),
BasicBlock(16, 32, stride=2),
BasicBlock(32, 64, stride=2),
)
self.fc = M.Linear(64, 9)
def _get_transformed_image(self, image, mat3x3):
"""apply perspective transform to the image
note: there is NO need to guarantee the bottom right element equals 1
Args:
image (Tensor): input images (shape: n * 3 * 112 * 112)
mat3x3 (Tensor): perspective matrix (shape: n * 3 * 3)
Returns:
transformed_image (Tensor): perspectively transformed image
"""
s = self.input_size
transformed_image = F.warp_perspective(image, mat3x3, [s, s])
return transformed_image
def _get_mat3x3(self, image):
"""get perspective matrix used in the transformation
note: there are only 8 degrees of freedom in a perspective matrix, while the output matrix has 9 variables.
Args:
image (Tensor): input images (shape: n * 3 * 112 * 112)
Returns:
mat3x3 (Tensor): perspective matrix (shape: n * 3 * 3)
"""
x = self.stem(image)
x = F.avg_pool2d(x, 7)
x = F.flatten(x, 1)
x = self.fc(x)
s = self.input_size
        # 0.01 is a magic number: it keeps the transform close to identity at the early stage of training
residual = x.reshape(-1, 3, 3) * 0.01
base = mge.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).astype("float32")
base =
|
F.broadcast_to(base, residual.shape)
|
megengine.functional.broadcast_to
|
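The completed line above broadcasts the 3x3 identity to the batch shape before adding the learned residual; a small sketch of just that step, assuming only ``megengine`` and ``numpy``:
import numpy as np
import megengine as mge
import megengine.functional as F

residual = mge.tensor(np.random.randn(4, 3, 3).astype("float32")) * 0.01
base = mge.tensor(np.eye(3, dtype="float32"))
# (3, 3) identity -> (4, 3, 3), one identity matrix per batch element
mat3x3 = F.broadcast_to(base, residual.shape) + residual
print(mat3x3.shape)  # (4, 3, 3)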
import io
import numpy as np
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.module as M
import megengine.utils.network_node as N
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net
from megengine.utils.network import as_oprnode, set_symbolic_shape
from megengine.utils.network_node import Host2DeviceCopy, VarNode
def test_metadata():
x = Tensor(0)
@trace(symbolic=True, capture_as_const=True)
def fwd(x):
return x * 2
fwd(x)
orig_model = io.BytesIO()
fwd.dump(orig_model, user_info="test", optimize_for_inference=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": "test",
"graph_modified": False, # False: tracing.dump
"optimized_for_inference": False,
}
orig_model.seek(0)
graph.dump(
orig_model,
user_info={"str": "x", "tensor": x, "module": M.Module, "none": None},
optimize_for_inference=True,
enable_nchw4=True,
enable_ioc16=True,
)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None},
"graph_modified": True, # True: Network.dump
"optimized_for_inference": True,
"enable_nchw4": True,
"enable_ioc16": True,
}
orig_model.seek(0)
fwd.dump(orig_model, enable_metadata=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata is None
def test_replace_var():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out = F.mul(vara, varb)
out = F.relu(out)
opnode = list(graph.opr_filter.has_input(vara))
repl_dict = {opnode[0].outputs[0]: out}
graph.replace_vars(repl_dict)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [6, 16])
def test_replace_opr():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out1 = F.sub(vara, varb)
out1 = F.relu(out1)
out1 = graph.add_dep_oprs(out1)
orig_opr = graph.opr_filter.has_input(vara).as_unique()
repl_dict = {orig_opr: out1[0].owner}
graph.replace_oprs(repl_dict)
modified_model1 = io.BytesIO()
graph.dump(modified_model1)
modified_model1.seek(0)
load_graph = GraphInference(modified_model1)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [0, 0])
def test_splice_network():
x = F.ones((2,))
y = F.ones((2,))
@trace(symbolic=True, capture_as_const=True)
def fun1(a, b):
return (a + b) * 2
@trace(symbolic=True, capture_as_const=True)
def fun2(a):
return a * 2 - 1
model = io.BytesIO()
fun1(x, y)
fun2(x)
fun1.dump(
model,
arg_names=["net1_i0", "net1_i1"],
output_names=["net1_o0"],
optimize_for_inference=False,
)
model.seek(0)
net1 = Net.load(model)
model.seek(0)
fun2.dump(
model,
arg_names=["net2_i0"],
output_names=["net2_o0"],
optimize_for_inference=False,
)
model.seek(0)
net2 = Net.load(model)
net1.add_output(*net2.output_vars)
var = net1.var_filter.name("net1_i0").as_unique()
repl_var = net2.var_filter.name("net2_o0").as_unique()
net1.replace_vars({var: repl_var})
assert "net1_i0" not in [var.name for var in net1.all_vars]
assert "net2_i0" in [var.name for var in net1.all_vars]
model.seek(0)
net1.dump(model, keep_var_name=2, optimize_for_inference=False)
model.seek(0)
net = Net.load(model)
assert "net1_i0" not in [var.name for var in net.all_vars]
assert "net2_i0" in [var.name for var in net.all_vars]
def test_modify_params():
a = Tensor([1, 2])
b =
|
Tensor([3, 4])
|
megengine.tensor.Tensor
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_TQT():
f = TQT_Function(-127, 127)
nf = numpy_TQT_Function(-127, 127)
def check_inp(a, b, c, a_np, b_np, c_np):
np.testing.assert_allclose(
f.forward(a, b).numpy(),
nf.forward(a_np, b_np).astype("float32"),
rtol=1e-6,
atol=1e-6,
)
c1, c2 = f.backward(c)
c1_np, c2_np = nf.backward(c_np)
np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=5e-5)
a_np = np.random.random((4, 3)).astype("float32")
b_np = np.random.random((1)).astype("float32")
a = tensor(a_np)
b =
|
tensor(b_np)
|
megengine.tensor
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr =
|
TensorAttr()
|
megengine.core._imperative_rt.TensorAttr
|
from sqlmodel import create_engine
engine =
|
create_engine("sqlite:///database.db")
|
sqlmodel.create_engine
|
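A typical follow-up to ``create_engine`` in a SQLModel project is creating the tables; a short sketch (the ``echo`` and ``check_same_thread`` settings are optional choices, not requirements):
from sqlmodel import SQLModel, create_engine

engine = create_engine(
    "sqlite:///database.db",
    echo=True,  # log the generated SQL while developing
    connect_args={"check_same_thread": False},  # allow use across FastAPI threads
)

def init_db() -> None:
    # create all tables registered on SQLModel.metadata
    SQLModel.metadata.create_all(engine)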
from typing import Optional, List
from sqlmodel import SQLModel, Field, Relationship
class SongBase(SQLModel):
name: str
artist: str
year: Optional[int] = None
class Song(SongBase, table=True):
id: int = Field(primary_key=True)
class SongRead(SongBase):
id: int
class SongCreate(SongBase):
pass
class Increment(SQLModel, table=True):
id: int = Field(primary_key=True)
# #############################################################################
class ListingBase(SQLModel):
url: str
class Listing(ListingBase, table=True):
__tablename__ = 'listings'
id: int = Field(primary_key=True)
images: List["Image"] = Relationship(back_populates="listing",
sa_relationship_kwargs={'lazy': 'selectin'})
class ListingRead(ListingBase):
    id: int
# #############################################################################
class ImageBase(SQLModel):
url: str
size_x: float
size_y: float
listing_id: Optional[int] =
|
Field(default=None, foreign_key="listings.id")
|
sqlmodel.Field
|
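The completed ``foreign_key="listings.id"`` above is one half of the link; the other half is the child-side relationship that ``Listing.images`` points back to. A plausible sketch of the matching ``Image`` model, reusing ``ImageBase`` and the imports from the snippet above:
class Image(ImageBase, table=True):
    __tablename__ = 'images'
    id: int = Field(primary_key=True)
    # back_populates must name the attribute on the other side: Listing.images
    listing: Optional["Listing"] = Relationship(back_populates="images")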
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs', f=[1e5, 1e-2], c=[0.0, 0.0, 1.2], r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls =
|
ScipyDirect({})
|
sfepy.solvers.ls.ScipyDirect
|
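To actually run the contact problem sketched above, the linear solver is normally wrapped in a Newton solver and attached to a ``Problem``; a hedged sketch following the usual sfepy interactive pattern (reusing ``ls``, ``eqs`` and ``fix_u`` from the snippet; solver tolerances are illustrative):
nls_status = IndexedStruct()
nls = Newton({'i_max': 10, 'eps_a': 1e-10}, lin_solver=ls, status=nls_status)

pb = Problem('contact_sphere', equations=eqs)
pb.set_bcs(ebcs=Conditions([fix_u]))
pb.set_solver(nls)

status = IndexedStruct()
variables = pb.solve(status=status)
print('Newton status:', nls_status)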
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 =
|
F.nn.dropout(data, rate, training=True)
|
megengine.functional.nn.dropout
|
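The truncated ``test_dropout_seed`` relies on the fact that resetting the global RNG seed reproduces the same dropout mask; a self-contained sketch of that property, using only names already imported in the test above:
import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.core._imperative_rt.ops import set_global_rng_seed

data = tensor(np.ones((4, 8), dtype=np.float32))
set_global_rng_seed(111)
out1 = F.nn.dropout(data, 0.5, training=True)
set_global_rng_seed(111)
out2 = F.nn.dropout(data, 0.5, training=True)
# identical seed -> identical mask -> identical outputs
np.testing.assert_allclose(out1.numpy(), out2.numpy())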
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Iterable, List, Optional, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal import CompGraph, CompNode
from ..core import zeros
from ..core.graph import _use_default_if_none
from ..core.tensor import Tensor, wrap_io_tensor
from .elemwise import ceil
from .utils import _decide_comp_node_and_comp_graph
@wrap_io_tensor
def broadcast_to(inp: Tensor, shape: Union[int, Iterable[int]]) -> Tensor:
"""
Broadcast a tensor to ``shape``
:param inp: The input tensor
:param shape: The target shape
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.broadcast_to(data, (4, 2, 3))
print(out.numpy())
Outputs:
.. testoutput::
[[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]]
"""
if isinstance(shape, int):
shape = (shape,)
return mgb.opr.broadcast(inp, shape)
def _get_idx(index, axis):
index_dims = len(index.imm_shape)
idx = []
comp_node, comp_graph = _decide_comp_node_and_comp_graph(index)
for i in range(index_dims):
if i != axis:
shape = [1] * index_dims
shape[i] = index.axis_shape(i)
arange = mgb.opr.linspace(
0,
index.axis_shape(i) - 1,
index.axis_shape(i),
comp_node=comp_node,
comp_graph=comp_graph,
)
arange = (
arange.reshape(*shape)
.broadcast(index.shape)
.reshape(-1)
.astype(np.int32)
)
idx.append(arange)
else:
idx.append(index.reshape(-1))
return tuple(idx)
@wrap_io_tensor
def gather(inp: Tensor, axis: int, index: Tensor) -> Tensor:
r"""
Gather data from :attr:`inp` on :attr:`axis` using :attr:`index`.
For a 3-D tensor, the output is specified by::
out[i][j][k] = inp[index[i][j][k]][j][k] # if axis == 0
out[i][j][k] = inp[i][index[i][j][k]][k] # if axis == 1
out[i][j][k] = inp[i][j][index[i][j][k]] # if axis == 2
if :attr:`inp` is an n-dimensional tensor with size
:math:`(x_0,x_1,...,x_{i-1},x_i,x_{i+1},...,x_{n-1})` and axis=i,
then :attr:`index` must be an n-dimensional tensor with size
:math:`(x_0,x_1,...,x_{i-1},y,x_{i+1},...,x_{n-1})` where :math:`y\ge 1` and
output will have the same size as :attr:`index`.
:param inp: the source tensor
:param axis: the axis along which to index
:param index: the indices of elements to gather
Examples:
.. testcode::
import megengine.functional as F
from megengine.core import tensor
inp = tensor([
[1,2], [3,4], [5,6],
])
index = tensor([[0,2], [1,0]])
oup = F.gather(inp, 0, index)
print(oup.numpy())
Outputs:
.. testoutput::
[[1 6]
[3 2]]
"""
input_shape = inp.imm_shape
index_shape = index.imm_shape
input_dims = len(input_shape)
index_dims = len(index_shape)
if input_dims != index_dims:
raise ValueError(
"The index tensor must have same dimensions as input tensor, "
"But the input dims:{}, the index dims:{}".format(input_dims, index_dims)
)
if axis < 0 or axis >= input_dims:
raise ValueError(
"Index axis {} is output of bounds, should in range [0 {})".format(
axis, input_dims
)
)
for i in range(input_dims):
if i != axis and input_shape[i] != index_shape[i]:
raise ValueError(
"The input {} and index {} must have the same size apart from axis {}".format(
input_shape, index_shape, axis
)
)
idx = _get_idx(index, axis)
return mgb.opr.advanced_indexing(inp)[idx].reshape(
index.shape
) # pylint: disable=no-member
@wrap_io_tensor
def concat(
inps: Iterable[Tensor],
axis: int = 0,
device: Optional[CompNode] = None,
comp_graph: Optional[CompGraph] = None,
) -> Tensor:
r"""
Concat some tensors
:param inps: Input tensors to concat
:param axis: the dimension over which the tensors are concatenated. Default: 0
:param device: The comp node output on. Default: None
:param comp_graph: The graph in which output is. Default: None
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
data2 = tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
out = F.concat([data1, data2])
print(out.numpy())
Outputs:
.. testoutput::
[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]]
"""
# Output buffer not supported
return mgb.opr.concat(
*list(inps), axis=axis, comp_node=device, comp_graph=comp_graph
)
@wrap_io_tensor
def scatter(inp: Tensor, axis: int, index: Tensor, source: Tensor) -> Tensor:
r"""
Writes all values from the tensor :attr:`source` into :attr:`inp` at the indices specified in the :attr:`index` tensor.
For each value in :attr:`source`, its output index is specified by its index
in :attr:`source` for ``axis != dimension`` and by the corresponding value in
:attr:`index` for ``axis = dimension``.
For a 3-D tensor, :attr:`inp` is updated as::
inp[index[i][j][k]][j][k] = source[i][j][k] # if axis == 0
inp[i][index[i][j][k]][k] = source[i][j][k] # if axis == 1
inp[i][j][index[i][j][k]] = source[i][j][k] # if axis == 2
:attr:`inp`, :attr:`index` and :attr:`source` should have same number of dimensions.
It is also required that ``source.shape(d) <= inp.shape(d)`` and ``index.shape(d) == source.shape(d)``
for all dimensions ``d``.
Moreover, the values of :attr:`index` must be between ``0`` and ``inp.shape(axis) - 1`` inclusive.
.. note::
        Please note that, due to performance issues, the result is undefined on the GPU
        when the index tensor maps several source positions to the same destination
        position. In the example below, if index[1][2] were changed from 1 to 0,
        oup[0][2] could come either from source[0][2] (value 0.2256) or from
        source[1][2] (value 0.5339).
:param inp: the inp tensor which to be scattered
:param axis: the axis along which to index
:param index: the indices of elements to scatter
:param source: the source element(s) to scatter
Examples:
.. testcode::
import numpy as np
import megengine.functional as F
from megengine.core import tensor
inp = tensor(np.zeros(shape=(3,5),dtype=np.float32))
source = tensor([[0.9935,0.9465,0.2256,0.8926,0.4396],[0.7723,0.0718,0.5939,0.357,0.4576]])
index = tensor([[0,2,0,2,1],[2,0,1,1,2]])
oup = F.scatter(inp, 0, index,source)
print(oup.numpy())
Outputs:
.. testoutput::
[[0.9935 0.0718 0.2256 0. 0. ]
[0. 0. 0.5939 0.357 0.4396]
[0.7723 0.9465 0. 0.8926 0.4576]]
"""
input_shape = inp.imm_shape
index_shape = index.imm_shape
source_shape = source.imm_shape
input_dims = len(input_shape)
index_dims = len(index_shape)
source_dims = len(source_shape)
if input_dims != index_dims or input_dims != source_dims:
raise ValueError("The input, source and index tensor must have same dimensions")
if axis < 0 or axis >= input_dims:
raise ValueError(
"Index axis {} is output of bounds, should in range [0 {})".format(
axis, input_dims
)
)
for i in range(source_dims):
if source_shape[i] > input_shape[i]:
raise ValueError(
"The each shape size for source {} must be less than or equal to input {} ".format(
source_shape, input_shape
)
)
for i in range(index_dims):
if index_shape[i] != source_shape[i]:
raise ValueError(
"The each shape size for index {} must be equal to source {} ".format(
index_shape, source_shape
)
)
for i in range(index_dims):
if i != axis and index_shape[i] > input_shape[i]:
raise ValueError(
"The index {} must be less than or equal to input {} size apart from axis {}".format(
index_shape, input_shape, axis
)
)
idx = _get_idx(index, axis)
return mgb.opr.set_advanced_indexing(inp, source.flatten())[idx]
@wrap_io_tensor
def where(mask: Tensor, x: Tensor, y: Tensor) -> Tensor:
r"""
Select elements either from Tensor x or Tensor y, according to mask.
.. math::
\textrm{out}_i = x_i \textrm{ if } \textrm{mask}_i \textrm{ is True else } y_i
:param mask: a mask used for choosing x or y
:param x: the first choice
:param y: the second choice
Examples:
.. testcode::
from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
y = tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
out = F.where(mask, x, y)
print(out.numpy())
Outputs:
.. testoutput::
[[1. 6.]
[7. 4.]]
"""
v0, index0 = mgb.opr.cond_take(
x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=1
)
v1, index1 = mgb.opr.cond_take(
y, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=0
)
out = x.flatten()
index = mgb.opr.concat(index0, index1, axis=0)
v = mgb.opr.concat(v0, v1, axis=0)
out = mgb.opr.set_advanced_indexing(out, v)[index]
out = out.reshape(x.shape)
return out
@wrap_io_tensor
def cond_take(mask: Tensor, x: Tensor, val=1) -> Tensor:
r"""
    Take elements from data where the condition on mask is satisfied. This operator has two outputs: the first is the elements taken and the second is the corresponding indices; both are 1-dimensional. High-dimensional input is flattened first.
:param mask: condition param; must be the same shape with data
:param x: input tensor from which to take elements
:param val: value to be compared to by mode
Examples:
.. testcode::
from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
v, index = F.cond_take(mask, x, 1)
print(v, index)
Outputs:
.. testoutput::
Tensor([1. 4.]) Tensor([0 3], dtype=int32)
"""
v, index = mgb.opr.cond_take(
x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=val
)
return v, index
def shapeof(x: Tensor, axis=None):
r"""
The shape of input tensor.
"""
return x.shapeof(axis=axis)
@wrap_io_tensor
def dimshuffle(inp: Tensor, pattern: Iterable[int]) -> Tensor:
r"""
Swap shapes and strides according to given pattern
:param inp: Input tensor
    :param pattern: a list of integers including 0, 1, ..., ``ndim``-1, and any number of ``'x'`` characters in dimensions where this tensor should be broadcast. For example:
* (``'x'``) -> make a 0d (scalar) into a 1d vector
* (0, 1) -> identity for 2d vectors
* (1, 0) -> inverts the first and second dimensions
* (``'x'``, 0) -> make a row out of a 1d vector (N to 1xN)
* (0, ``'x'``) -> make a column out of a 1d vector (N to Nx1)
* (2, 0, 1) -> AxBxC to CxAxB
* (0, ``'x'``, 1) -> AxB to Ax1xB
* (1, ``'x'``, 0) -> AxB to Bx1xA
    * (1,) -> removes dimension 0; it must be a broadcastable dimension (1xA to A)
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([[1, 1], [0, 0]], dtype=np.int32))
out = F.dimshuffle(x, (1, 0))
print(out.numpy())
Outputs:
.. testoutput::
[[1 0]
[1 0]]
"""
return
|
mgb.opr.dimshuffle(inp, pattern)
|
megengine._internal.opr.dimshuffle
|
"""
Quadratic eigenvalue problem solvers.
"""
from __future__ import absolute_import
from sfepy.base.timing import Timer
import numpy as nm
import scipy.sparse as sps
from sfepy.base.base import output, get_default
from sfepy.linalg.utils import max_diff_csr
from sfepy.solvers.solvers import QuadraticEVPSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for quadratic
eigensolvers.
"""
def _standard_call(self, mtx_m, mtx_d, mtx_k, n_eigs=None,
eigenvectors=None, status=None, conf=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx_m = get_default(mtx_m, self.mtx_m)
mtx_d = get_default(mtx_d, self.mtx_d)
mtx_k = get_default(mtx_k, self.mtx_k)
n_eigs = get_default(n_eigs, self.n_eigs)
eigenvectors = get_default(eigenvectors, self.eigenvectors)
status = get_default(status, self.status)
result = call(self, mtx_m, mtx_d, mtx_k,
n_eigs, eigenvectors, status, conf,
**kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
return result
return _standard_call
class LQuadraticEVPSolver(QuadraticEVPSolver):
"""
Quadratic eigenvalue problem solver based on the problem linearization.
(w^2 M + w D + K) x = 0.
"""
name = 'eig.qevp'
_parameters = [
('method', "{'companion', 'cholesky'}", 'companion', False,
'The linearization method.'),
('solver', 'dict', {'kind': 'eig.scipy', 'method': 'eig'}, False,
"""The configuration of an eigenvalue solver for
the linearized problem (A - w B) x = 0."""),
('mode', "{'normal', 'inverted'}", 'normal', False,
'Solve either A - w B (normal), or B - 1/w A (inverted).'),
('debug', 'bool', False, False,
'If True, print debugging information.'),
]
@standard_call
def __call__(self, mtx_m, mtx_d, mtx_k, n_eigs=None,
eigenvectors=None, status=None, conf=None):
if conf.debug:
ssym = status['matrix_info'] = {}
ssym['|M - M^T|'] = max_diff_csr(mtx_m, mtx_m.T)
ssym['|D - D^T|'] = max_diff_csr(mtx_d, mtx_d.T)
ssym['|K - K^T|'] = max_diff_csr(mtx_k, mtx_k.T)
ssym['|M - M^H|'] =
|
max_diff_csr(mtx_m, mtx_m.H)
|
sfepy.linalg.utils.max_diff_csr
|
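The debug block above uses ``max_diff_csr`` to quantify how far each matrix is from (Hermitian) symmetry; a small sketch of the same check on a toy CSR matrix, assuming (as the usage above suggests) that it returns the maximum absolute entry-wise difference:
import numpy as nm
import scipy.sparse as sps
from sfepy.linalg.utils import max_diff_csr

mtx = sps.csr_matrix(nm.array([[2.0, 1.0], [0.0, 3.0]]))
print(max_diff_csr(mtx, mtx.T))  # 1.0 -> mtx is not symmetric
sym = 0.5 * (mtx + mtx.T)
print(max_diff_csr(sym, sym.T))  # 0.0 -> symmetric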
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
all_anchors = all_anchors.reshape(-1, c).detach()
return all_anchors
def forward(self, fpn_fms):
all_anchors_list = []
fm_stride = [8, 16, 32, 64, 128]
fm_stride.reverse()
for i, fm_3x3 in enumerate(fpn_fms):
anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
config.anchor_aspect_ratios, base_size = 4)
all_anchors_list.append(all_anchors)
return all_anchors_list
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = F.concat(res, 0)
return result
def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
anchors_list, rpn_iou_list, boxes, im_info):
all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
for i, a in enumerate(anchors_list)]
all_anchors_final = F.concat(all_anchors_list, axis = 0)
rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
rpn_bbox_offset_final)
n = rpn_labels.shape[0]
target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
rpn_cls_prob_final = rpn_cls_prob_final
offsets_final = rpn_bbox_offset_final
target_boxes = target_boxes[0]
rpn_labels = rpn_labels.transpose(2, 0, 1)
labels = rpn_labels[0]
cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
rpn_labels = F.expand_dims(labels, axis=2)
rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
loss_dict = {}
loss_dict['rpn_cls_loss'] = cls_loss
loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, std=0.01)
M.init.fill_(layer.bias, 0)
prior_prob = 0.01
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - prior_prob) / prior_prob))
M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features):
cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
for feature in features:
rpn_cls_conv = self.cls_subnet(feature)
cls_score = self.cls_score(rpn_cls_conv)
rpn_num_prob = self.num_pred(rpn_cls_conv)
cls_prob = F.sigmoid(cls_score)
rpn_box_conv = self.bbox_subnet(feature)
offsets = self.bbox_pred(rpn_box_conv)
rpn_iou_prob = self.iou_pred(rpn_box_conv)
cls_prob_list.append(cls_prob)
pred_bbox_list.append(offsets)
rpn_iou_prob_list.append(rpn_iou_prob)
rpn_num_prob_list.append(rpn_num_prob)
assert cls_prob_list[0].ndim == 4
pred_cls_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in cls_prob_list]
pred_reg_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
for _ in pred_bbox_list]
rpn_iou_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_iou_prob_list]
rpn_num_prob_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_num_prob_list]
return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
        use_bias = True
lateral_convs, output_convs = [], []
        for idx, in_ch in enumerate(in_channels):  # avoid shadowing the in_channels list
            lateral_conv = M.Conv2d(
                in_ch, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
|
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
|
megengine.module.init.msra_normal_
|
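The completed line initializes the FPN lateral convolutions with MSRA (He) normal initialization; a minimal sketch of the same pattern in isolation (layer sizes are illustrative):
import megengine.module as M

lateral_conv = M.Conv2d(512, 256, kernel_size=1, bias=True)
# He/MSRA normal init, fan-in mode, as used for the FPN convs above
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.fill_(lateral_conv.bias, 0)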
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine import Parameter, Tensor
from megengine.core.tensor import dtype
from megengine.quantization import (
FakeQuantize,
MinMaxObserver,
QConfig,
QuantMode,
create_qparams,
)
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
min_max_fakequant_qconfig = QConfig(
weight_observer=partial(MinMaxObserver, dtype="qint8_narrow"),
act_observer=partial(MinMaxObserver, dtype="qint8"),
weight_fake_quant=partial(FakeQuantize, dtype="qint8_narrow"),
act_fake_quant=partial(FakeQuantize, dtype="qint8"),
)
def gen_inp_scale():
return np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
def quant(x, scale):
inp_dtype = dtype.qint8(scale)
return x.astype(inp_dtype)
def fake_quant(x, scale, qmin, qmax):
x = x / scale
x = F.round(x)
x = F.clip(x, qmin, qmax)
x = x * scale
return x
fake_quant_act = partial(fake_quant, qmin=-128, qmax=127)
fake_quant_weight = partial(fake_quant, qmin=-127, qmax=127)
fake_quant_bias = partial(fake_quant, qmin=-(2 ** 31), qmax=2 ** 31 - 1)
def init_qat_net(net):
if net.with_weight:
net.weight_observer.min_val[...] = Tensor(min_val[0])
net.weight_observer.max_val[...] = Tensor(max_val[0])
if net.with_act:
net.act_observer.min_val[...] = Tensor(min_val[1])
net.act_observer.max_val[...] = Tensor(max_val[1])
def test_quant_stub():
normal_net = Float.QuantStub()
normal_net.eval()
qat_from_float = QAT.QuantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.QuantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.QuantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_dequant_stub():
normal_net = Float.DequantStub()
normal_net.eval()
qat_from_float = QAT.DequantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
qat_net = QAT.DequantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.DequantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.scale = inp_scale
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = normal_net(x)
qat = qat_net(x)
q = q_net(quant(x, inp_scale)).numpy()
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("kind", ["cos", "relu", "add", "mul", "fuse_add_relu"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.Elemwise(kind)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.Elemwise.from_qat_module(qat_net)
q_net.eval()
x1_scale = np.float32(np.random.rand() + 1)
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1 = fake_quant_act(x1, x1_scale)
x1.qparams.scale = x1_scale
x2_scale = np.float32(np.random.rand() + 1)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2 = fake_quant_act(x2, x2_scale)
x2.qparams.scale = x2_scale
x1_int8 = quant(x1, x1_scale)
x2_int8 = quant(x2, x2_scale)
# test correctness of `Float`, `QAT` and `Quantized`
if kind in ("add", "mul", "fuse_add_relu"):
normal = normal_net(x1, x2)
qat_without_fakequant = qat_from_float(x1, x2)
fake_quant_normal = fake_quant_act(normal_net(x1, x2), act_scale)
qat = qat_net(x1, x2)
q = q_net(x1_int8, x2_int8).numpy() * act_scale
else:
normal = normal_net(x1)
qat_without_fakequant = qat_from_float(x1)
fake_quant_normal = fake_quant_act(normal_net(x1), act_scale)
qat = qat_net(x1)
q = q_net(x1_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_linear():
normal_net = Float.Linear(3, 3, bias=True)
normal_net.eval()
qat_net = QAT.Linear(3, 3, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3)).astype("float32")
bias = np.random.normal(size=(3,)).astype("float32")
normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.weight[...] = Parameter(weight)
qat_net.bias[...] = Parameter(bias)
qat_from_float = QAT.Linear.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
q_net = Q.Linear.from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal.numpy())
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
def test_conv(module):
normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
normal_net.eval()
qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")
if module in ("ConvBn2d", "ConvBnRelu2d"):
normal_net.conv.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.conv.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.conv.weight[...] = Parameter(weight)
qat_net.conv.bias[...] = Parameter(bias)
else:
normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.weight[...] = Parameter(weight)
qat_net.bias[...] = Parameter(bias)
qat_from_float = getattr(QAT, module).from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
q_net = getattr(Q, module).from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-5)
np.testing.assert_allclose(qat, fake_quant_normal, atol=act_scale)
np.testing.assert_allclose(q, fake_quant_normal.numpy(), atol=act_scale)
def test_concat():
normal_net = Float.Concat()
normal_net.eval()
qat_net = QAT.Concat()
qat_net.eval()
disable_observer(qat_net)
|
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
|
megengine.quantization.quantize.propagate_qconfig
|
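The completed call is part of a recipe these tests repeat for every module; a condensed sketch of that recipe, reusing the ``min_max_fakequant_qconfig`` and ``init_qat_net`` helpers defined above (the module choice is illustrative):
import megengine.module.qat as QAT
from megengine.quantization.quantize import disable_observer, propagate_qconfig

qat_net = QAT.Linear(3, 3, bias=True)
qat_net.eval()
disable_observer(qat_net)                              # freeze observer statistics
propagate_qconfig(qat_net, min_max_fakequant_qconfig)  # attach observers/fake-quant
init_qat_net(qat_net)                                  # seed the min/max ranges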
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
if mge_version <= "0.6.0":
cg = oup[0].owner_graph
outputs = list(map(mgb.copy_output, oup))
f = cg.compile(inputs, outputs)
result = f(inp)
else:
if not isinstance(inp, (list, tuple)):
inp = (inp,)
replace_dict = {}
inp_node_list = []
for i in inputs:
inp_node = G.InputNode(
device="xpux", dtype=inputs[0].dtype, graph=inputs[0].graph
)
replace_dict[i] = inp_node.outputs[0]
inp_node_list.append(inp_node)
new_out = cgtools.replace_vars(oup, replace_dict)
out_node_list = [
|
G.OutputNode(i)
|
megengine.core.tensor.megbrain_graph.OutputNode
|
import numpy as nm
from sfepy.base.base import assert_, Struct
import sfepy.linalg as la
class ContactPlane(Struct):
def __init__(self, anchor, normal, bounds):
Struct.__init__(self, anchor=nm.array(anchor, dtype=nm.float64),
bounds=nm.asarray(bounds, dtype=nm.float64))
self.normal = nm.asarray(normal, dtype=nm.float64)
norm = nm.linalg.norm
self.normal /= norm(self.normal)
e3 = [0.0, 0.0, 1.0]
dd = nm.dot(e3, self.normal)
rot_angle = nm.arccos(dd)
if nm.abs(rot_angle) < 1e-14:
mtx = nm.eye(3, dtype=nm.float64)
bounds2d = self.bounds[:, :2]
else:
rot_axis = nm.cross([0.0, 0.0, 1.0], self.normal)
mtx = la.make_axis_rotation_matrix(rot_axis, rot_angle)
mm = la.insert_strided_axis(mtx, 0, self.bounds.shape[0])
rbounds = la.dot_sequences(mm, self.bounds)
bounds2d = rbounds[:, :2]
assert_(nm.allclose(nm.dot(mtx, self.normal), e3,
rtol=0.0, atol=1e-12))
self.adotn = nm.dot(self.anchor, self.normal)
self.rot_angle = rot_angle
self.mtx = mtx
self.bounds2d = bounds2d
def mask_points(self, points):
mm = la.insert_strided_axis(self.mtx, 0, points.shape[0])
points2d = la.dot_sequences(mm, points)[:, :2]
return la.flag_points_in_polygon2d(self.bounds2d, points2d)
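# mask_points reuses the rotation computed in __init__: rotating the query
# points into the plane's frame makes z constant, so the containment test
# reduces to a 2D point-in-polygon query against bounds2d.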
def get_distance(self, points):
dist =
|
la.dot_sequences(points, self.normal)
|
sfepy.linalg.dot_sequences
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode use cpu only")
if world_size > 1:
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
|
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
|
megengine.quantization.quantize_qat
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine import Parameter, Tensor
from megengine.core.tensor import dtype
from megengine.quantization import (
FakeQuantize,
MinMaxObserver,
QConfig,
QuantMode,
create_qparams,
)
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
min_max_fakequant_qconfig = QConfig(
weight_observer=partial(MinMaxObserver, dtype="qint8_narrow"),
act_observer=partial(MinMaxObserver, dtype="qint8"),
weight_fake_quant=partial(FakeQuantize, dtype="qint8_narrow"),
act_fake_quant=partial(FakeQuantize, dtype="qint8"),
)
def gen_inp_scale():
return np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
def quant(x, scale):
inp_dtype = dtype.qint8(scale)
return x.astype(inp_dtype)
def fake_quant(x, scale, qmin, qmax):
x = x / scale
x = F.round(x)
x = F.clip(x, qmin, qmax)
x = x * scale
return x
fake_quant_act = partial(fake_quant, qmin=-128, qmax=127)
fake_quant_weight = partial(fake_quant, qmin=-127, qmax=127)
fake_quant_bias = partial(fake_quant, qmin=-(2 ** 31), qmax=2 ** 31 - 1)
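# A minimal sketch (not part of the original tests) of the fake-quant round
# trip: values are snapped to the scale grid and clipped to the integer range,
# so fake_quant(x) == x whenever x is already an in-range multiple of scale.
def _demo_fake_quant(scale=np.float32(0.5)):
x = mge.tensor(np.linspace(-2.0, 2.0, 9, dtype="float32"))
y = fake_quant_act(x, scale)  # qmin=-128, qmax=127
np.testing.assert_allclose(y.numpy(), x.numpy())  # grid points survive intact
return y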
def init_qat_net(net):
if net.with_weight:
net.weight_observer.min_val[...] = Tensor(min_val[0])
net.weight_observer.max_val[...] = Tensor(max_val[0])
if net.with_act:
net.act_observer.min_val[...] = Tensor(min_val[1])
net.act_observer.max_val[...] = Tensor(max_val[1])
def test_quant_stub():
normal_net = Float.QuantStub()
normal_net.eval()
qat_from_float = QAT.QuantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.QuantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.QuantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_dequant_stub():
normal_net = Float.DequantStub()
normal_net.eval()
qat_from_float = QAT.DequantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
qat_net = QAT.DequantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.DequantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.scale = inp_scale
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = normal_net(x)
qat = qat_net(x)
q = q_net(quant(x, inp_scale)).numpy()
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("kind", ["cos", "relu", "add", "mul", "fuse_add_relu"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
|
disable_fake_quant(qat_from_float)
|
megengine.quantization.quantize.disable_fake_quant
|
#!/usr/bin/env python
"""
Convert a mesh file from one SfePy-supported format to another.
Examples::
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s2.5
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1 -c 0
"""
import sys
sys.path.append('.')
from optparse import OptionParser
from sfepy.base.base import nm, output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import (output_mesh_formats, MeshIO,
supported_cell_types)
usage = '%prog [options] filename_in filename_out\n' + __doc__.rstrip()
help = {
'scale' : 'scale factor (float or comma-separated list for each axis)'
' [default: %default]',
'center' : 'center of the output mesh (0 for origin or'
' comma-separated list for each axis) applied after scaling'
' [default: %default]',
'refine' : 'uniform refinement level [default: %default]',
'format' : 'output mesh format (overrides filename_out extension)',
'list' : 'list supported readable/writable output mesh formats',
}
def _parse_val_or_vec(option, name, parser):
if option is not None:
try:
try:
option = float(option)
except ValueError:
option = [float(ii) for ii in option.split(',')]
option = nm.array(option, dtype=nm.float64, ndmin=1)
except ValueError:
output('bad %s! (%s)' % (name, option))
parser.print_help()
sys.exit(1)
return option
def main():
parser = OptionParser(usage=usage)
parser.add_option('-s', '--scale', metavar='scale',
action='store', dest='scale',
default=None, help=help['scale'])
parser.add_option('-c', '--center', metavar='center',
action='store', dest='center',
default=None, help=help['center'])
parser.add_option('-r', '--refine', metavar='level',
action='store', type=int, dest='refine',
default=0, help=help['refine'])
parser.add_option('-f', '--format', metavar='format',
action='store', type='string', dest='format',
default=None, help=help['format'])
parser.add_option('-l', '--list', action='store_true',
dest='list', help=help['list'])
(options, args) = parser.parse_args()
if options.list:
output('Supported readable mesh formats:')
output('--------------------------------')
output_mesh_formats('r')
output('')
output('Supported writable mesh formats:')
output('--------------------------------')
output_mesh_formats('w')
sys.exit(0)
if len(args) != 2:
parser.print_help()
sys.exit(1)
scale = _parse_val_or_vec(options.scale, 'scale', parser)
center = _parse_val_or_vec(options.center, 'center', parser)
filename_in, filename_out = args
mesh = Mesh.from_file(filename_in)
if scale is not None:
if len(scale) == 1:
tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
elif len(scale) == mesh.dim:
tr =
|
nm.diag(scale)
|
sfepy.base.base.nm.diag
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
out2 = F.vision.interpolate(inp, 4, mode="linear")
np.testing.assert_allclose(
out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
np.testing.assert_allclose(
out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
def many_batch_interpolate():
inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4])
out2 = F.vision.interpolate(inp, scale_factor=2.0)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def assign_corner_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4], align_corners=True)
out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def error_shape_linear_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
def inappropriate_scale_linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
linear_interpolate()
many_batch_interpolate()
assign_corner_interpolate()
error_shape_linear_interpolate()
inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
inp_feat = np.random.randn(2, 32, 256, 256)
rois = np.zeros((4, 5))
rois[:, 0] = [0, 0, 1, 1]
rois[:, 1:3] = np.random.rand(4, 2) * 100
rois[:, 3:] = np.random.rand(4, 2) * 100 + 150
inp_feat = tensor(inp_feat)
rois = tensor(rois)
return inp_feat, rois
def test_roi_align():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_align(
inp_feat,
rois,
output_shape=output_shape,
mode="average",
spatial_scale=1.0 / 4,
sample_points=2,
aligned=True,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
if random:
inp_feat1 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
inp_feat2 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
else:
inp_feat1 = np.ones(image_shape) * constant
inp_feat2 = np.ones(image_shape) * constant
return tensor(inp_feat1), tensor(inp_feat2)
def test_correlation():
## test case 0: check the grad shape
data1, data2 = _gen_correlation()
grad = Grad().wrt(data1, callback=_save_to(data1))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=5,
max_displacement=4,
stride1=2,
stride2=2,
pad_size=2,
is_multiply=True,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
## test case 1: from https://github.com/NVIDIA/flownet2-pytorch/issues/194
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=True,
)
assert abs(out_feat.sum() - 1) < 1e-9
## test case 2: check same-image subtraction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
## test case 3: check same-image subtraction (duplicates case 2)
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
## test case 4: check correlation
data1, _ = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=2.0
)
_, data2 = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=1.0
)
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=2,
stride1=1,
stride2=2,
pad_size=0,
is_multiply=False,
)
assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_pooling(
inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_avg_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
]
]
],
dtype=np.float32,
),
)
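# Gradient sanity for the adaptive pools: with a 4x4 input and a 2x2 output,
# each input cell feeds exactly one window, so avg-pool backprop spreads
# 1/window_size = 0.25 to every cell, while max-pool (below) routes each
# window's full gradient to its argmax (the 1.0 entries).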
def test_adaptive_max_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_max_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
]
]
],
dtype=np.float32,
),
)
def test_one_hot():
def onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp, num_classes=4)
np.testing.assert_allclose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
dtype=np.int32,
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
onehot_low_dimension()
onehot_high_dimension()
def test_interpolate_fastpath():
# check shape
test_cases = [
[(1, 1, 10, 10), (5, 5)],
[(1, 3, 10, 10), (20, 20)],
[(10, 1, 10, 10), (1, 1)],
# [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
]
for inp_shape, target_shape in test_cases:
x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
out = F.vision.interpolate(x, target_shape, mode="bilinear")
assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
# check value
x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
out = F.vision.interpolate(x, (15, 5), mode="bilinear")
np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
np_x = np.arange(32)
x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
out = F.vision.interpolate(x, (1, 1), mode="bilinear")
np.testing.assert_equal(out.item(), np_x.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
# M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
outp = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
inp_shape = (2, 1, 4, 4)
x = tensor(np.arange(32, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
# M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
M = F.concat([M,] * 4, 0)
outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
np.testing.assert_equal(
outp.numpy(),
np.array(
[
[[[5, 6], [9, 10]]],
[[[21, 22], [25, 26]]],
[[[21, 22], [25, 26]]],
[[[5, 6], [9, 10]]],
],
dtype=dt,
),
)
def test_warp_affine():
inp_shape = (1, 3, 3, 3)
x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
res = np.array(
[
[
[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
[[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
]
],
dtype=np.float32,
)
if not is_cuda_available():
np.testing.assert_almost_equal(outp.numpy(), res, 5)
def test_remap():
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(
np.array(
[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
).reshape(map_xy_shape)
)
outp = F.vision.remap(inp, map_xy)
np.testing.assert_equal(
outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, atol=5e-4)
np.random.seed(123)
data1 = np.random.uniform(size=data1_shape).astype(np.float32)
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = np.random.uniform(size=data2_shape).astype(np.float32)
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)
cases = [
{"input": [sigmoid(data1), label1], "output": expect1,},
{"input": [sigmoid(data2), label2], "output": expect2,},
]
opr_test(
cases,
partial(F.nn.binary_cross_entropy, with_logits=False),
compare_fn=compare_fn,
)
def test_hinge_loss():
np.random.seed(123)
# case with L1 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
# hinge loss: mean over the batch of sum_j max(0, 1 - pred_j * label_j)
expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
cases.append({"input": [data, label], "output": expect})
opr_test(cases, F.nn.hinge_loss)
# cases with L2 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
expect = ((np.clip(1 - data * label, 0, np.inf) ** 2).sum(axis=1)).mean()
cases.append({"input": [data, label], "output": expect})
def hinge_loss_with_l2_norm(pred, label):
return F.nn.hinge_loss(pred, label, "L2")
opr_test(cases, hinge_loss_with_l2_norm)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_nms(is_symbolic):
def fn(inp, scores):
return F.vision.nms(
inp,
scores=scores,
iou_thresh=0.5,
max_output=None if is_symbolic is None else 4,
)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
x = np.array(
[
[0, 0, 100, 100],
[10, 10, 100, 100],
[50, 50, 100, 100],
[100, 100, 150, 150],
],
dtype=np.float32,
)
inp = tensor(x)
scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
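# Expected order: indices sorted by score (0.9 -> box 2, 0.8 -> box 1,
# 0.6 -> box 3); box 0 is suppressed because its IoU with box 1 (~0.81)
# exceeds the 0.5 threshold.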
x = np.array([], dtype=np.float32,).reshape(0, 4)
inp = tensor(x)
scores = tensor([], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([], dtype=np.int32))
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
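# NCHW4 packs channels in groups of 4 into a trailing axis,
# (N, C, H, W) -> (N, C//4, H, W, 4), the layout the quantized int8 conv
# kernels expect on CUDA.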
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(get_device_count("gpu") > 0, reason="no int8 algorithm on cuda")
def test_batch_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N, IC, OC, IH, IW, KH, KW, PH, PW, SH, SW, has_bias=True,
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(N, OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_batch_conv_bias(inp, w, b):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
result = F.quantized.batch_conv_bias_activation(
inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=out_dtype,
)
return result.astype("float32")
expected = F.conv2d(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)[0]
expected = expected.astype(out_dtype).astype("float32")
expected = F.flatten(expected)
result = run_batch_conv_bias(inp_int8, w_int8, b_int32)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 5, 5, 3, 3, 0, 0, 1, 1, True)
def test_conv2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float32)
weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
amp.enabled = False
expected = F.conv2d(
inp.astype("float16"),
weight.astype("float16"),
None,
(2, 2),
(3, 3),
(1, 1),
1,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv2d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
def test_conv3d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3, 3), dtype=np.float32)
out = F.conv3d(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)
out.numpy()
def test_conv1d():
inp = tensor(np.ones((2, 2, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2), dtype=np.float32))
out = F.conv1d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(),
np.array(
[[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]], dtype=np.float32
),
)
def test_batchnorm2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
tshape = (1, 3, 224, 224)
pshape = (1, 3, 1, 1)
inp = tensor(np.random.randn(*tshape), dtype=np.float32)
weight = tensor(np.ones(pshape, dtype=np.float32))
bias = tensor(np.zeros(pshape, dtype=np.float32))
out = F.batch_norm(inp, weight=weight, bias=bias, training=True, inplace=False)
amp.enabled = False
expected = F.batch_norm(
inp.astype("float16"),
weight=weight,
bias=bias,
training=True,
inplace=False,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv3d():
inp = tensor(np.ones((2, 2, 4, 4, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2, 2, 2), dtype=np.float32))
out = F.conv3d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(), np.ones((2, 3, 2, 2, 2), dtype=np.float32) * 16
)
def test_condtake_basic():  # renamed so the parametrized test_condtake below does not shadow it
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[True, False, True], [False, True, True]])
xx = tensor(x)
yy = tensor(y)
val, idx = F.cond_take(yy, xx)
np.testing.assert_equal(val.numpy(), x[y])
np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_condtake(is_symbolic):
shapes = [
(3, 3, 3),
(0,),
(3, 0, 3),
]
def fn(mask, data):
return F.cond_take(mask, data)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
for shp in shapes:
x_np = np.random.randn(*shp).astype("float32")
mask_np = x_np > 0
x = tensor(x_np)
mask = tensor(mask_np)
ref_out = x_np[mask_np]
ref_idx = mask_np.flatten().nonzero()[0]
for i in range(3):
out, idx = fn(mask, x)
np.testing.assert_equal(out.numpy(), ref_out)
np.testing.assert_equal(idx.numpy(), ref_idx)
if is_symbolic is None:
break
def test_condtake_is_same():
op1 = builtin.CondTake()
op2 = builtin.CondTake()
assert op1 == op2
def test_nms_is_same():
op1 = builtin.NMSKeep(0.7, 100)
op2 = builtin.NMSKeep(0.7, 100)
op3 = builtin.NMSKeep(0.8, 100)
op4 = builtin.NMSKeep(0.7, 200)
assert op1 == op2
assert op1 != op3
assert op1 != op4
assert op3 != op4
def test_argmxx_on_inf():
def run_argmax():
x = F.zeros((100, 100))
x[:] = -float("inf")
idxs = F.argmax(x, axis=0)
return idxs
def run_argmin():
x = F.zeros((100, 100))
x[:] = float("inf")
idxs = F.argmin(x, axis=0)
return idxs
assert all(run_argmax() >= 0)
assert all(run_argmin() >= 0)
def test_deformable_psroi_pooling():
inp = np.random.random((1, 256, 64, 64)).astype("float32")
rois = np.random.random((1, 5)).astype("float32")
trans = np.random.random((24, 2, 7, 7)).astype("float32")
pooled_h = 7
pooled_w = 7
sample_per_part = 4
no_trans = False
part_size = 7
spatial_scale = 1.0 / 64
trans_std = 0.1
y = F.deformable_psroi_pooling(
tensor(inp),
tensor(rois),
tensor(trans),
no_trans,
part_size,
pooled_h,
pooled_w,
sample_per_part,
spatial_scale,
trans_std,
)
def test_cvt_color():
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def bgr2gray(bgr):
return np.dot(bgr[..., :3], [0.114, 0.587, 0.299])
inp = np.random.randn(3, 3, 3, 3).astype(np.float32)
out = np.expand_dims(rgb2gray(inp), 3).astype(np.float32)
x = tensor(inp)
y = F.vision.cvt_color(x, mode="RGB2GRAY")
np.testing.assert_allclose(y.numpy(), out, atol=1e-5)
out1 = np.expand_dims(bgr2gray(inp), 3).astype(np.float32)
y1 = F.vision.cvt_color(x, mode="BGR2GRAY")
np.testing.assert_allclose(y1.numpy(), out1, atol=1e-5)
@pytest.mark.parametrize("val", [2, [2,], [2, 3]])
def test_ones(val):
shp = tensor(val)
np_shp = np.array(val)
np.testing.assert_equal(F.ones(shp), np.ones(np_shp))
def test_assert_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.00001
z = F.utils._assert_equal(x, y)
def test_assert_not_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.1
with pytest.raises(RuntimeError):
z = F.utils._assert_equal(x, y)
def test_neg_axis():
x = tensor(np.random.normal(0, 1, (32, 5)))
y = F.argmax(x, axis=-1)
yy = F.argmax(x, axis=1)
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmax(x, axis=(-1, -2))
yy = F.argmax(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmin(x, axis=(-1, -2))
yy = F.argmin(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
def test_sliding_window():
N, C, H, W = 2, 3, 7, 8
inp = np.random.normal(size=(N, C, H, W))
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
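# standard convolution output-size arithmetic:
# out = floor((i + 2*p - d*(w - 1) - 1) / s) + 1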
inp_pad = np.zeros((N, C, H + ph * 2, W + pw * 2))
inp_pad[:, :, ph : H + ph, pw : W + pw] = inp
gt_out = np.empty(
(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww), dtype=np.float32
)
for n, c, oh, ow in itertools.product(*map(range, gt_out.shape[:4])):
ih, iw = oh * sh, ow * sw
gt_out[n, c, oh, ow, :] = inp_pad[
n, c, ih : ih + (wh - 1) * dh + 1 : dh, iw : iw + (ww - 1) * dw + 1 : dw
]
out = F.sliding_window(
tensor(inp), (wh, ww), padding=(ph, pw), stride=(sh, sw), dilation=(dh, dw)
)
np.testing.assert_equal(gt_out, out.numpy())
def test_sliding_window_transpose():
N, C, H, W = 2, 3, 7, 8
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
inp = np.random.normal(
size=(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww)
).astype(np.float32)
gt_out = np.zeros((N, C, H, W), dtype=np.float32)
for n, c in itertools.product(*map(range, inp.shape[:2])):
oh = 0
for ih in range(-ph, H + ph - dh * (wh - 1), sh):
ow = 0
for iw in range(-pw, W + pw - dw * (ww - 1), sw):
for kh, kw in itertools.product(*map(range, inp.shape[-2:])):
ih2 = ih + dh * kh
iw2 = iw + dw * kw
if ih2 >= 0 and ih2 < H and iw2 >= 0 and iw2 < W:
gt_out[n, c, ih2, iw2] += inp[n, c, oh, ow, kh, kw]
ow += 1
oh += 1
out = F.sliding_window_transpose(
tensor(inp),
(H, W),
(wh, ww),
padding=(ph, pw),
stride=(sh, sw),
dilation=(dh, dw),
)
np.testing.assert_equal(gt_out, out.numpy())
def test_pad():
src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
dst = np.pad(src, ((2, 2), (2, 2)), "constant")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT")
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "constant", constant_values=3)
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT", constant_value=3)
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "edge")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "EDGE")
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "reflect")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "REFLECT")
np.testing.assert_allclose(res, dst, atol=1e-5)
def pixel_shuffle(data, r):
high_dim = data.shape[:-3]
data = data.reshape(-1, data.shape[-3], data.shape[-2], data.shape[-1])
inn, ic, ih, iw = data.shape
res = np.zeros((inn, int(ic / (r * r)), ih * r, iw * r))
for n in range(inn):
for c in range(ic):
for h in range(ih):
for w in range(iw):
res[
n,
int(c / r / r),
h * r + int((c % (r * r)) / r),
w * r + c % r,
] = data[n, c, h, w]
if len(high_dim) > 0:
res = res.reshape((*high_dim, int(ic / r / r), ih * r, iw * r))
else:
res = res[0]
return res
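# An equivalent reshape/transpose formulation of the reference above (a sketch
# for clarity; the tests below keep using the loop version): view the channel
# axis as (C, r, r), then interleave the two r factors into H and W.
def pixel_shuffle_vectorized(data, r):
*high_dim, ic, ih, iw = data.shape
out = data.reshape(-1, ic // (r * r), r, r, ih, iw)
out = out.transpose(0, 1, 4, 2, 5, 3)  # (n, c, h, r, w, r)
return out.reshape(*high_dim, ic // (r * r), ih * r, iw * r)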
def test_pixel_shuffle():
# ndim = 3
inp = np.arange(16 * 3 * 3).reshape(16, 3, 3)
out = F.pixel_shuffle(tensor(inp), upscale_factor=4)
golden = pixel_shuffle(inp, 4)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 4
inp = np.arange(3 * 18 * 3 * 3).reshape(3, 18, 3, 3)
out = F.pixel_shuffle(tensor(inp), upscale_factor=3)
golden = pixel_shuffle(inp, 3)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 5
inp = np.arange(5 * 3 * 20 * 3 * 4).reshape(5, 3, 20, 3, 4)
out = F.pixel_shuffle(tensor(inp), upscale_factor=2)
golden = pixel_shuffle(inp, 2)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 6
inp = np.arange(6 * 5 * 3 * 25 * 3 * 4).reshape(6, 5, 3, 25, 3, 4)
out = F.pixel_shuffle(
|
tensor(inp)
|
megengine.tensor
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@
|
dist.launcher(n_gpus=2)
|
megengine.distributed.launcher
|
from typing import List
from fastapi import APIRouter, Depends
from sqlmodel import select, Session
from app.models import *
from utils import get_session
router = APIRouter()
@router.get("/users", response_model=List[UserRead])
async def get_users(*, session: Session=Depends(get_session)):
statement = select(User)
results = session.exec(statement).all()
return results
@router.post("/tasks", response_model=List[TaskRead])
async def get_tasks(user: UserQuery, session: Session=Depends(get_session)):
statement = select(Task).where(Task.owner_id == user.id)
results = session.exec(statement).all()
return results
@router.post("/task", response_model=TaskRead)
async def get_task(task: TaskQuery, session: Session=Depends(get_session)):
statement =
|
select(Task)
|
sqlmodel.select
|
"""add events
Revision ID: 02338256c6aa
Revises: 108677b68119
Create Date: 2022-06-01 03:17:51.063172+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
from common.database.tables.types import TimeStamp
# revision identifiers, used by Alembic.
revision = "02338256c6aa"
down_revision = "108677b68119"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"events",
sa.Column(
"valid_from",
TimeStamp(timezone=True),
nullable=False,
),
sa.Column(
"valid_until",
TimeStamp(timezone=True),
nullable=False,
),
sa.Column("name",
|
sqlmodel.sql.sqltypes.AutoString()
|
sqlmodel.sql.sqltypes.AutoString
|
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
"""Try to read all listed meshes."""
from sfepy.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate( filename_meshes ):
self.report( '%d. mesh: %s' % (ii + 1, filename) )
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.ngroups.shape[0]))
assert_(mesh.n_el == sum(mesh.n_els))
for ig, conn in enumerate( mesh.conns ):
assert_(conn.shape[0] == len(mesh.mat_ids[ig]))
assert_(conn.shape[0] == mesh.n_els[ig])
assert_(conn.shape[1] == mesh.n_e_ps[ig])
self.report( 'read ok' )
meshes[filename] = mesh
self.meshes = meshes
return True
##
# c: 05.02.2008, r: 05.02.2008
def test_compare_same_meshes( self ):
"""Compare same meshes in various formats."""
import numpy as nm
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report( 'comparing meshes from "%s" and "%s"' % (name0, name1) )
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report( 'dimension failed!' )
oks.append( ok0 )
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report( 'number of nodes failed!' )
oks.append( ok0 )
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report( 'number of elements failed!' )
oks.append( ok0 )
ok0 = mesh0.n_e_ps == mesh1.n_e_ps
if not ok0:
self.report( 'number of element points failed!' )
oks.append( ok0 )
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report( 'element types failed!' )
oks.append( ok0 )
ok0 = nm.allclose( mesh0.coors, mesh1.coors )
if not ok0:
self.report( 'nodes failed!' )
oks.append( ok0 )
ok0 = nm.all( mesh0.ngroups == mesh1.ngroups )
if not ok0:
self.report( 'node groups failed!' )
oks.append( ok0 )
for ii in range( len( mesh0.mat_ids ) ):
ok0 = nm.all( mesh0.mat_ids[ii] == mesh1.mat_ids[ii] )
if not ok0:
self.report( 'material ids failed!' )
oks.append( ok0 )
for ii in range( len( mesh0.mat_ids ) ):
ok0 = nm.all( mesh0.conns[ii] == mesh1.conns[ii] )
if not ok0:
self.report( 'connectivities failed!' )
oks.append( ok0 )
return sum( oks ) == len( oks )
##
# c: 03.07.2008, r: 03.07.2008
def test_read_dimension( self ):
from sfepy.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in meshes.items():
self.report( 'mesh: %s, dimension %d' % (filename, adim) )
io =
|
MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
|
sfepy.fem.MeshIO.any_from_filename
|
import io
import numpy as np
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.module as M
import megengine.utils.network_node as N
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net
from megengine.utils.network import as_oprnode, set_symbolic_shape
from megengine.utils.network_node import Host2DeviceCopy, VarNode
def test_metadata():
x = Tensor(0)
@trace(symbolic=True, capture_as_const=True)
def fwd(x):
return x * 2
fwd(x)
orig_model = io.BytesIO()
fwd.dump(orig_model, user_info="test", optimize_for_inference=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": "test",
"graph_modified": False, # False: tracing.dump
"optimized_for_inference": False,
}
orig_model.seek(0)
graph.dump(
orig_model,
user_info={"str": "x", "tensor": x, "module": M.Module, "none": None},
optimize_for_inference=True,
enable_nchw4=True,
enable_ioc16=True,
)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None},
"graph_modified": True, # True: Network.dump
"optimized_for_inference": True,
"enable_nchw4": True,
"enable_ioc16": True,
}
orig_model.seek(0)
fwd.dump(orig_model, enable_metadata=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata is None
def test_replace_var():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out = F.mul(vara, varb)
out = F.relu(out)
opnode = list(graph.opr_filter.has_input(vara))
repl_dict = {opnode[0].outputs[0]: out}
graph.replace_vars(repl_dict)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [6, 16])
def test_replace_opr():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out1 = F.sub(vara, varb)
out1 = F.relu(out1)
out1 = graph.add_dep_oprs(out1)
orig_opr = graph.opr_filter.has_input(vara).as_unique()
repl_dict = {orig_opr: out1[0].owner}
graph.replace_oprs(repl_dict)
modified_model1 = io.BytesIO()
graph.dump(modified_model1)
modified_model1.seek(0)
load_graph = GraphInference(modified_model1)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [0, 0])
def test_splice_network():
x = F.ones((2,))
y = F.ones((2,))
@trace(symbolic=True, capture_as_const=True)
def fun1(a, b):
return (a + b) * 2
@trace(symbolic=True, capture_as_const=True)
def fun2(a):
return a * 2 - 1
model = io.BytesIO()
fun1(x, y)
fun2(x)
fun1.dump(
model,
arg_names=["net1_i0", "net1_i1"],
output_names=["net1_o0"],
optimize_for_inference=False,
)
model.seek(0)
net1 = Net.load(model)
model.seek(0)
fun2.dump(
model,
arg_names=["net2_i0"],
output_names=["net2_o0"],
optimize_for_inference=False,
)
model.seek(0)
net2 =
|
Net.load(model)
|
megengine.utils.network.Network.load
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
all_anchors = all_anchors.reshape(-1, c).detach()
return all_anchors
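# Shape bookkeeping for generate_anchors_opr: anchors is (A, 4) and shifts is
# (H*W, 4); the two expand_dims broadcast them to (H*W, A, 4), so the reshape
# yields one row per (location, anchor) pair.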
def forward(self, fpn_fms):
all_anchors_list = []
fm_stride = [8, 16, 32, 64, 128]
fm_stride.reverse()
for i, fm_3x3 in enumerate(fpn_fms):
anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
config.anchor_aspect_ratios, base_size = 4)
all_anchors_list.append(all_anchors)
return all_anchors_list
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
# (note: `p = p.detach()` below only rebinds the loop variable; it does not
# freeze the parameters in place)
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
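    # Note: pre_process() normalizes per channel as (x - mean) / std and then
    # pads; get_padded_tensor(x, 64) presumably pads H and W up to multiples
    # of 64 so that all FPN strides divide the input evenly (an assumption
    # about det_opr.utils, not verified here).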
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
return pred_boxes
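    # Each row returned by _recover_dtboxes() is the concatenation
    # [x1, y1, x2, y2, cls_score, iou_prob]: 4 decoded box coordinates,
    # 1 classification score and 1 IoU probability.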
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
        result = F.concat(res, 0)  # api: megengine.functional.concat
        return result
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
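# Why param * 0.999 - grads * 0.01 above: SGD with weight decay updates
#   param <- param - lr * (grad + weight_decay * param)
#         =  param * (1 - lr * weight_decay) - lr * grad,
# and lr=0.01 with weight_decay=0.1 gives the factor 1 - 0.001 = 0.999.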
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
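# The reference update verified above is classic momentum SGD with
# momentum=0.9 and lr=0.01:
#   slot <- momentum * slot - lr * grad
#   param <- param + slot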
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
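# The factor 0.03 follows from bumping every param group's lr from 0.01 to
# 0.01 + 0.02 = 0.03, so each subsequent opt.step() uses lr=0.03.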
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
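    # check_value() below verifies the bias-corrected Adam update with
    # lr=0.01 (t is step_size):
    #   m_t = beta0 * m_{t-1} + (1 - beta0) * g_t
    #   v_t = beta1 * v_{t-1} + (1 - beta1) * g_t**2
    #   param <- param - lr * (m_t / (1 - beta0**t))
    #                       / (sqrt(v_t / (1 - beta1**t)) + eps)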
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step_size += 1
check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slot = slots[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
        state_dict = load(fout)  # api: megengine.load
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine, func, select
class Item(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
created: datetime
deleted: bool = False
category: str
version: float = 1
data: str
# Create and save records to show that the query itself is working.
item_1 = Item(created=datetime.now(), category="category_1", data="❤️ I love SQLModel.")
item_2 = Item(
created=datetime.now(),
category="category_1",
data="❤️ I love FastAPI.",
deleted=True,
)
item_3 = Item(
created=datetime.now(),
category="category_2",
data="🥰 I appreciate your work on all of it!",
)
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(item_1)
session.add(item_2)
session.add(item_3)
session.commit()
# This "statement" is where the issue presents itself in PyCharm
statement = (
select(
Item.category,
func.count(Item.id).label("my_count"),
            func.total(Item.deleted)  # api: sqlmodel.func.total
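    # A hedged sketch of how the statement is presumably meant to be closed
    # and executed (the grouping and label are guesses, not part of the
    # original snippet):
    #
    #     statement = (
    #         select(
    #             Item.category,
    #             func.count(Item.id).label("my_count"),
    #             func.total(Item.deleted).label("my_total"),
    #         ).group_by(Item.category)
    #     )
    #     results = session.exec(statement).all()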
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Any, Mapping
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .modules import SE, activation, conv2d, gap2d, linear, norm2d
__all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"]
def build_head(
w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu"
) -> M.Module:
"""The factory function to build head.
Note:
if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do
nothing and return ``None``.
Args:
w_in: input width.
head_args: head args. Default: ``None``
norm_name: default normalization function, will be overridden by the same key in
``head_args``. Default: ``"BN"``
act_name: default activation function, will be overridden by the same key in ``head_args``.
Default: ``"relu"``
Returns:
A head.
"""
if head_args is None:
return None
head_args = copy.deepcopy(head_args)
head_name = head_args.pop("name", None)
if head_name is None:
return None
head_args["w_in"] = w_in
head_args.setdefault("norm_name", norm_name)
head_args.setdefault("act_name", act_name)
if callable(head_name):
return head_name(**head_args)
if isinstance(head_name, str):
head_funcs = {
"ClsHead": ClsHead,
"MBV3Head": MBV3Head,
"VGGHead": VGGHead,
}
if head_name in head_funcs:
return head_funcs[head_name](**head_args)
raise ValueError(f"Head '{head_name}' not supported")
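# Usage sketch (hypothetical widths; assumes the norm/act registries in
# .modules accept these names):
#
#     head = build_head(
#         w_in=2048,
#         head_args={"name": "ClsHead", "w_out": 1000, "width": 0},
#         norm_name="BN",
#         act_name="relu",
#     )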
class ClsHead(M.Module):
"""Cls head: Conv, BN, Act, AvgPool, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes. Default: ``1000``
width: width for first conv in head, conv will be omitted if set to 0. Default: ``0``
dropout_prob: dropout probability. Default: ``0.0``
norm_name: normalization function. Default: ``"BN"``
act_name: activation function. Default: ``"relu"``
bias: whether fc has bias. Default: ``True``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 0,
dropout_prob: float = 0.0,
norm_name: str = "BN",
act_name: str = "relu",
bias: bool = True,
):
super().__init__()
self.width = width
if self.width > 0:
self.conv = conv2d(w_in, self.width, 1)
self.bn = norm2d(norm_name, self.width)
self.act = activation(act_name)
w_in = self.width
self.avg_pool = gap2d()
if dropout_prob > 0.0:
self.dropout = M.Dropout(dropout_prob)
self.fc = linear(w_in, w_out, bias=bias)
def forward(self, x: mge.Tensor) -> mge.Tensor:
if self.width > 0:
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.avg_pool(x)
x = F.flatten(x, 1)
if getattr(self, "dropout", None) is not None:
x = self.dropout(x)
x = self.fc(x)
return x
class MBV3Head(M.Module):
"""MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes.
width: width for first conv in head.
w_h: width for first linear in head.
dropout_prob: dropout probability. Default: ``0.0``
se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0``
norm_name: normalization function. Default: ``"BN"``
act_name: activation function. Default: ``"hswish"``
bias: whether fc has bias. Default: ``True``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 960,
w_h: int = 1280,
dropout_prob: float = 0.0,
se_r: float = 0.0,
norm_name: str = "BN",
act_name: str = "hswish",
bias: bool = True,
):
super().__init__()
self.conv = conv2d(w_in, width, 1)
self.bn = norm2d(norm_name, width)
self.act = activation(act_name)
self.avg_pool = gap2d()
if se_r > 0.0:
self.se = SE(width, int(se_r * width), act_name)
self.h_fc = linear(width, w_h, bias=bias)
self.h_act = activation(act_name)
if dropout_prob > 0.0:
self.dropout = M.Dropout(dropout_prob)
self.fc = linear(w_h, w_out, bias=bias)
def forward(self, x: mge.Tensor) -> mge.Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.avg_pool(x)
if getattr(self, "se", None) is not None:
x = self.se(x)
x = F.flatten(x, 1)
x = self.h_fc(x)
x = self.h_act(x)
if getattr(self, "dropout", None) is not None:
x = self.dropout(x)
x = self.fc(x)
return x
class VGGHead(M.Module):
"""VGG head: AvgPool, [FC, Act, Dropout] x2, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes. Default: ``1000``
width: width for linear in head. Default: ``4096``
dropout_prob: dropout probability. Default: ``0.5``
act_name: activation function. Default: ``"relu"``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 4096,
dropout_prob: float = 0.5,
act_name: str = "relu",
**kwargs,
):
super().__init__()
self.avg_pool = gap2d(7)
self.classifier = M.Sequential(
linear(w_in * 7 * 7, width, bias=True),
activation(act_name),
M.Dropout(dropout_prob),
linear(width, width, bias=True),
activation(act_name),
            M.Dropout(dropout_prob)  # api: megengine.module.Dropout
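            # per the class docstring, the stack presumably ends with a final
            # linear(width, w_out, bias=True) classifier layer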
"""
Nonlinear solvers.
"""
import time
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, debug, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.solvers.solvers import SolverMeta, NonlinearSolver
def check_tangent_matrix(conf, vec_x0, fun, fun_grad):
"""
Verify the correctness of the tangent matrix as computed by `fun_grad()` by
comparing it with its finite difference approximation evaluated by
repeatedly calling `fun()` with `vec_x0` items perturbed by a small delta.
"""
vec_x = vec_x0.copy()
delta = conf.delta
vec_r = fun(vec_x) # Update state.
mtx_a0 = fun_grad(vec_x)
mtx_a = mtx_a0.tocsc()
mtx_d = mtx_a.copy()
mtx_d.data[:] = 0.0
vec_dx = nm.zeros_like(vec_r)
for ic in range(vec_dx.shape[0]):
vec_dx[ic] = delta
xx = vec_x.copy() - vec_dx
vec_r1 = fun(xx)
vec_dx[ic] = -delta
xx = vec_x.copy() - vec_dx
vec_r2 = fun(xx)
        vec_dx[ic] = 0.0
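        # Central difference: column ic of the FD Jacobian is
        # (f(x + delta*e_ic) - f(x - delta*e_ic)) / (2 * delta);
        # here vec_r1 = f(x - delta*e_ic) and vec_r2 = f(x + delta*e_ic).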
vec = 0.5 * (vec_r2 - vec_r1) / delta
ir = mtx_a.indices[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]]
mtx_d.data[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]] = vec[ir]
vec_r = fun(vec_x) # Restore.
tt = time.clock()
output(mtx_a, '.. analytical')
output(mtx_d, '.. difference')
import sfepy.base.plotutils as plu
plu.plot_matrix_diff(mtx_d, mtx_a, delta, ['difference', 'analytical'],
conf.check)
return time.clock() - tt
def conv_test(conf, it, err, err0):
"""
Nonlinear solver convergence test.
Parameters
----------
conf : Struct instance
The nonlinear solver configuration.
it : int
The current iteration.
err : float
The current iteration error.
err0 : float
The initial error.
Returns
-------
status : int
The convergence status: -1 = no convergence (yet), 0 = solver converged
- tolerances were met, 1 = max. number of iterations reached.
"""
status = -1
if (abs(err0) < conf.macheps):
err_r = 0.0
else:
err_r = err / err0
output('nls: iter: %d, residual: %e (rel: %e)' % (it, err, err_r))
conv_a = err < conf.eps_a
if it > 0:
conv_r = err_r < conf.eps_r
if conv_a and conv_r:
status = 0
elif (conf.get('eps_mode', '') == 'or') and (conv_a or conv_r):
status = 0
else:
if conv_a:
status = 0
if (status == -1) and (it >= conf.i_max):
status = 1
return status
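# Worked example (comments only): with eps_a=1e-10, eps_r=1.0 and the default
# 'and' mode, err0=1e-2 and err=1e-12 at it=1 give err_r=1e-10; both conv_a
# and conv_r hold, so conv_test() returns 0.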
class Newton(NonlinearSolver):
r"""
Solves a nonlinear system :math:`f(x) = 0` using the Newton method with
backtracking line-search, starting with an initial guess :math:`x^0`.
"""
name = 'nls.newton'
__metaclass__ = SolverMeta
_parameters = [
('i_max', 'int', 1, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-10, False,
'The absolute tolerance for the residual, i.e. :math:`||f(x^i)||`.'),
('eps_r', 'float', 1.0, False,
"""The relative tolerance for the residual, i.e. :math:`||f(x^i)|| /
||f(x^0)||`."""),
('eps_mode', "'and' or 'or'", 'and', False,
"""The logical operator to use for combining the absolute and relative
tolerances."""),
('macheps', 'float', nm.finfo(nm.float64).eps, False,
'The float considered to be machine "zero".'),
('lin_red', 'float', 1.0, False,
"""The linear system solution error should be smaller than (`eps_a` *
`lin_red`), otherwise a warning is printed."""),
('lin_precision', 'float or None', None, False,
"""If not None, the linear system solution tolerances are set in each
nonlinear iteration relative to the current residual norm by the
`lin_precision` factor. Ignored for direct linear solvers."""),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls_red', '0.0 < float < 1.0', 0.1, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.001, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('give_up_warp', 'bool', False, False,
'If True, abort on the "warp violation" error.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
('is_linear', 'bool', False, False,
'If True, the problem is considered to be linear.'),
]
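    # A typical problem-description configuration sketch (hypothetical
    # values, in the usual sfepy dict form):
    #
    #     solver_1 = {
    #         'name': 'newton',
    #         'kind': 'nls.newton',
    #         'i_max': 10,
    #         'eps_a': 1e-10,
    #         'eps_r': 1.0,
    #     }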
def __init__(self, conf, **kwargs):
NonlinearSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||r||$'], ['iteration']],
xlabels=['', 'all iterations'],
ylabels=[r'$||r||$', 'iteration'],
yscales=['log', 'linear'],
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%d']])
else:
self.log = None
def __call__(self, vec_x0, conf=None, fun=None, fun_grad=None,
lin_solver=None, iter_hook=None, status=None):
"""
Nonlinear system solver call.
Solves a nonlinear system :math:`f(x) = 0` using the Newton method with
backtracking line-search, starting with an initial guess :math:`x^0`.
Parameters
----------
vec_x0 : array
The initial guess vector :math:`x_0`.
conf : Struct instance, optional
            The solver configuration parameters.
fun : function, optional
The function :math:`f(x)` whose zero is sought - the residual.
fun_grad : function, optional
The gradient of :math:`f(x)` - the tangent matrix.
lin_solver : LinearSolver instance, optional
The linear solver for each nonlinear iteration.
iter_hook : function, optional
User-supplied function to call before each iteration.
status : dict-like, optional
The user-supplied object to hold convergence statistics.
Notes
-----
* The optional parameters except `iter_hook` and `status` need
to be given either here or upon `Newton` construction.
* Setting `conf.is_linear == True` means a pre-assembled and possibly
pre-solved matrix. This is mostly useful for linear time-dependent
problems.
"""
conf = get_default(conf, self.conf)
fun = get_default(fun, self.fun)
fun_grad = get_default(fun_grad, self.fun_grad)
lin_solver = get_default(lin_solver, self.lin_solver)
iter_hook = get_default(iter_hook, self.iter_hook)
status = get_default(status, self.status)
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = get_default(ls_eps_r, 1.0)
lin_red = conf.eps_a * conf.lin_red
time_stats = {}
vec_x = vec_x0.copy()
vec_x_last = vec_x0.copy()
vec_dx = None
if self.log is not None:
self.log.plot_vlines(color='r', linewidth=1.0)
err = err0 = -1.0
err_last = -1.0
it = 0
while 1:
if iter_hook is not None:
iter_hook(self, vec_x, it, err, err0)
ls = 1.0
            vec_dx0 = vec_dx
while 1:
tt = time.clock()
try:
vec_r = fun(vec_x)
except ValueError:
if (it == 0) or (ls < conf.ls_min):
output('giving up!')
raise
else:
ok = False
else:
ok = True
                time_stats['residual'] = time.clock() - tt
if ok:
try:
err = nla.norm(vec_r)
except:
output('infs or nans in the residual:', vec_r)
output(nm.isfinite(vec_r).all())
debug()
if self.log is not None:
self.log(err, it)
if it == 0:
                        err0 = err
break
if err < (err_last * conf.ls_on): break
                    red = conf.ls_red
output('linesearch: iter %d, (%.5e < %.5e) (new ls: %e)'
% (it, err, err_last * conf.ls_on, red * ls))
else: # Failure.
if conf.give_up_warp:
output('giving up!')
break
                    red = conf.ls_red_warp
                    output('residual computation failed for iter %d'
                           ' (new ls: %e)!' % (it, red * ls))
if ls < conf.ls_min:
output('linesearch failed, continuing anyway')
break
                ls *= red
                vec_dx = ls * vec_dx0
vec_x = vec_x_last.copy() - vec_dx
# End residual loop.
if self.log is not None:
self.log.plot_vlines([1], color='g', linewidth=0.5)
            err_last = err
vec_x_last = vec_x.copy()
condition = conv_test(conf, it, err, err0)
if condition >= 0:
break
if (not ok) and conf.give_up_warp:
condition = 2
break
tt = time.clock()
if not conf.is_linear:
mtx_a = fun_grad(vec_x)
else:
mtx_a = fun_grad('linear')
time_stats['matrix'] = time.clock() - tt
if conf.check:
tt = time.clock()
wt = check_tangent_matrix(conf, vec_x, fun, fun_grad)
time_stats['check'] = time.clock() - tt - wt
if conf.lin_precision is not None:
if ls_eps_a is not None:
eps_a = max(err * conf.lin_precision, ls_eps_a)
elif ls_eps_r is not None:
eps_r = max(conf.lin_precision, ls_eps_r)
lin_red = max(eps_a, err * eps_r)
if conf.verbose:
output('solving linear system...')
tt = time.clock()
vec_dx = lin_solver(vec_r, x0=vec_x,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a)
time_stats['solve'] = time.clock() - tt
if conf.verbose:
                output('...done')  # api: sfepy.base.base.output