section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
security | auth | import datetime
import os
from CTFd.cache import clear_user_session
from CTFd.exceptions import UserNotFoundException, UserTokenExpiredException
from CTFd.models import UserTokens, db
from CTFd.utils.encoding import hexencode
from CTFd.utils.security.csrf import generate_nonce
from CTFd.utils.security.signing import hmac
from flask import session
def login_user(user):
    """Log *user* in by populating the Flask session.

    Stores the user's id, a fresh CSRF nonce and an HMAC of the password
    hash (so every session is invalidated when the password changes),
    marks the session permanent, and drops any cached user attributes.
    """
    session.permanent = True
    session["id"] = user.id
    session["nonce"] = generate_nonce()
    session["hash"] = hmac(user.password)
    # Clear out any currently cached user attributes
    clear_user_session(user_id=user.id)
def update_user(user):
    """Refresh the session after a user's account details changed.

    Re-writes the session id and password HMAC (the existing CSRF nonce
    is kept) and clears any cached attributes for the user.
    """
    session.permanent = True
    session["id"] = user.id
    session["hash"] = hmac(user.password)
    # Clear out any currently cached user attributes
    clear_user_session(user_id=user.id)
def logout_user():
    """Log the current user out by discarding the whole Flask session."""
    # Drops id, nonce and hash in one go
    session.clear()
def generate_user_token(user, expiration=None, description=None):
    """Create, persist and return a new API token for *user*.

    The token value is 32 random bytes hex-encoded with a ``ctfd_``
    prefix; random values are drawn until one that is not already in the
    database is found.
    """
    while True:
        value = "ctfd_" + hexencode(os.urandom(32))
        # Extremely unlikely to collide, but guarantee uniqueness anyway
        if UserTokens.query.filter_by(value=value).first() is None:
            break
    token = UserTokens(
        user_id=user.id, expiration=expiration, description=description, value=value
    )
    db.session.add(token)
    db.session.commit()
    return token
def lookup_user_token(token):
    """Resolve an API token string to the user that owns it.

    Raises:
        UserNotFoundException: no token with this value exists.
        UserTokenExpiredException: the token exists but is past its
            expiration time.
    """
    user_token = UserTokens.query.filter_by(value=token).first()
    if user_token is None:
        raise UserNotFoundException
    # NOTE(review): assumes expiration is always a datetime — confirm the
    # model always sets a default (a None expiration would raise TypeError
    # on this comparison).
    if datetime.datetime.utcnow() >= user_token.expiration:
        raise UserTokenExpiredException
    return user_token.user
    # Removed the unreachable trailing `return None`: every path above
    # either returns or raises.
|
femtaskpanels | task_element_fluid1D | # ***************************************************************************
# * Copyright (c) 2016 Ofentse Kgoa <kgoaot@eskom.co.za> *
# * Copyright (c) 2018 Bernd Hahnebach <bernd@bimstatik.org> *
# * Based on the FemElementGeometry1D by Bernd Hahnebach *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM element fluid 1D task panel for the document object"
__author__ = "Ofentse Kgoa, Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## @package task_element_fluid1D
# \ingroup FEM
# \brief task panel for element fluid 1D object
import FreeCAD
import FreeCADGui
from femguiutils import selection_widgets
from femobjects import element_fluid1D
from FreeCAD import Units
from PySide import QtCore, QtGui
class _TaskPanel:
    """
    The TaskPanel for editing References property of ElementFluid1D objects
    """

    def __init__(self, obj):
        """Build the task panel UI for *obj* and wire up its controls.

        obj: the ElementFluid1D document object being edited.
        """
        self.obj = obj
        # parameter widget
        self.parameterWidget = FreeCADGui.PySideUic.loadUi(
            FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/ElementFluid1D.ui"
        )
        # Old-style Qt signal/slot connections: each widget signal is routed
        # to the matching *_changed / *_active handler below.
        QtCore.QObject.connect(
            self.parameterWidget.cb_section_type,
            QtCore.SIGNAL("activated(int)"),
            self.sectiontype_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.cb_liquid_section_type,
            QtCore.SIGNAL("activated(int)"),
            self.liquidsectiontype_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_manning_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.manning_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_manning_radius,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.manning_radius_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_manning_coefficient,
            QtCore.SIGNAL("valueChanged(double)"),
            self.manning_coefficient_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_enlarge_area1,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.enlarge_area1_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_enlarge_area2,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.enlarge_area2_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_contract_area1,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.contract_area1_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_contract_area2,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.contract_area2_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_inletpressure,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.inlet_pressure_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_outletpressure,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.outlet_pressure_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_inletflowrate,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.inlet_flowrate_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_outletflowrate,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.outlet_flowrate_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.gb_inletpressure,
            QtCore.SIGNAL("clicked(bool)"),
            self.inlet_pressure_active,
        )
        QtCore.QObject.connect(
            self.parameterWidget.gb_outletpressure,
            QtCore.SIGNAL("clicked(bool)"),
            self.outlet_pressure_active,
        )
        QtCore.QObject.connect(
            self.parameterWidget.gb_inletflowrate,
            QtCore.SIGNAL("clicked(bool)"),
            self.inlet_flowrate_active,
        )
        QtCore.QObject.connect(
            self.parameterWidget.gb_outletflowrate,
            QtCore.SIGNAL("clicked(bool)"),
            self.outlet_flowrate_active,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_entrance_pipe_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.entrance_pipe_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_entrance_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.entrance_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_diaphragm_pipe_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.diaphragm_pipe_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_diaphragm_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.diaphragm_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_bend_pipe_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.bend_pipe_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_bradius_pdiameter,
            QtCore.SIGNAL("valueChanged(double)"),
            self.bradius_pdiameter_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_bend_angle,
            QtCore.SIGNAL("valueChanged(double)"),
            self.bend_angle_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_bend_loss_coefficient,
            QtCore.SIGNAL("valueChanged(double)"),
            self.bend_loss_coefficient_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_gatevalve_pipe_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.gatevalve_pipe_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_gatevalve_closing_coeff,
            QtCore.SIGNAL("valueChanged(double)"),
            self.gatevalve_closing_coeff_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_colebrooke_pipe_area,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.colebrooke_pipe_area_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_colebrooke_radius,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.colebrooke_radius_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.if_colebrooke_grain_diameter,
            QtCore.SIGNAL("valueChanged(Base::Quantity)"),
            self.colebrooke_grain_diameter_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.sb_colebrooke_form_factor,
            QtCore.SIGNAL("valueChanged(double)"),
            self.colebrooke_form_factor_changed,
        )
        QtCore.QObject.connect(
            self.parameterWidget.tw_pump_characteristics,
            QtCore.SIGNAL("cellChanged(int, int)"),
            self.pump_characteristics_changed,
        )
        # some fluid types deactivated since they are not implemented in ccx writer
        self.parameterWidget.cb_section_type.addItems(
            element_fluid1D.ElementFluid1D.known_fluid_types
        )
        self.parameterWidget.cb_liquid_section_type.addItems(
            element_fluid1D.ElementFluid1D.known_liquid_types
        )
        self.parameterWidget.cb_gas_section_type.addItems(
            element_fluid1D.ElementFluid1D.known_gas_types
        )
        self.parameterWidget.cb_channel_section_type.addItems(
            element_fluid1D.ElementFluid1D.known_channel_types
        )
        # Mirror the document object's properties onto the panel, then
        # push them into the widgets.
        self.get_fluidsection_props()
        self.updateParameterWidget()
        # geometry selection widget
        self.selectionWidget = selection_widgets.GeometryElementsSelection(
            obj.References, ["Edge"], False, True
        )
        # form made from param and selection widget
        self.form = [self.parameterWidget, self.selectionWidget]

    def accept(self):
        """Apply the edited properties to the object and close the panel (OK)."""
        self.set_fluidsection_props()
        self.obj.References = self.selectionWidget.references
        self.recompute_and_set_back_all()
        return True

    def reject(self):
        """Discard the edits and close the panel (Cancel)."""
        self.recompute_and_set_back_all()
        return True

    def recompute_and_set_back_all(self):
        """Recompute the document, restore visibility and leave edit mode."""
        doc = FreeCADGui.getDocument(self.obj.Document)
        doc.Document.recompute()
        self.selectionWidget.setback_listobj_visibility()
        if self.selectionWidget.sel_server:
            FreeCADGui.Selection.removeObserver(self.selectionWidget.sel_server)
        doc.resetEdit()

    def get_fluidsection_props(self):
        """Copy all fluid-section properties from the object to the panel."""
        self.SectionType = self.obj.SectionType
        self.LiquidSectionType = self.obj.LiquidSectionType
        self.ManningArea = self.obj.ManningArea
        self.ManningRadius = self.obj.ManningRadius
        self.ManningCoefficient = self.obj.ManningCoefficient
        self.EnlargeArea1 = self.obj.EnlargeArea1
        self.EnlargeArea2 = self.obj.EnlargeArea2
        self.ContractArea1 = self.obj.ContractArea1
        self.ContractArea2 = self.obj.ContractArea2
        self.OutletPressure = self.obj.OutletPressure
        self.InletPressure = self.obj.InletPressure
        self.OutletFlowRate = self.obj.OutletFlowRate
        self.InletFlowRate = self.obj.InletFlowRate
        self.OutletPressureActive = self.obj.OutletPressureActive
        self.InletPressureActive = self.obj.InletPressureActive
        self.OutletFlowRateActive = self.obj.OutletFlowRateActive
        self.InletFlowRateActive = self.obj.InletFlowRateActive
        self.EntrancePipeArea = self.obj.EntrancePipeArea
        self.EntranceArea = self.obj.EntranceArea
        self.DiaphragmPipeArea = self.obj.DiaphragmPipeArea
        self.DiaphragmArea = self.obj.DiaphragmArea
        self.BendPipeArea = self.obj.BendPipeArea
        self.BendRadiusDiameter = self.obj.BendRadiusDiameter
        self.BendAngle = self.obj.BendAngle
        self.BendLossCoefficient = self.obj.BendLossCoefficient
        self.GateValvePipeArea = self.obj.GateValvePipeArea
        self.GateValveClosingCoeff = self.obj.GateValveClosingCoeff
        self.ColebrookeArea = self.obj.ColebrookeArea
        self.ColebrookeRadius = self.obj.ColebrookeRadius
        self.ColebrookeGrainDiameter = self.obj.ColebrookeGrainDiameter
        self.ColebrookeFormFactor = self.obj.ColebrookeFormFactor
        self.PumpFlowRate = self.obj.PumpFlowRate
        self.PumpHeadLoss = self.obj.PumpHeadLoss

    def set_fluidsection_props(self):
        """Write the panel's cached values back to the document object."""
        self.obj.LiquidSectionType = self.LiquidSectionType
        self.obj.SectionType = self.SectionType
        self.obj.ManningArea = self.ManningArea
        self.obj.ManningRadius = self.ManningRadius
        self.obj.ManningCoefficient = self.ManningCoefficient
        self.obj.EnlargeArea1 = self.EnlargeArea1
        self.obj.EnlargeArea2 = self.EnlargeArea2
        self.obj.ContractArea1 = self.ContractArea1
        self.obj.ContractArea2 = self.ContractArea2
        self.obj.OutletPressure = self.OutletPressure
        self.obj.InletPressure = self.InletPressure
        self.obj.OutletFlowRate = self.OutletFlowRate
        self.obj.InletFlowRate = self.InletFlowRate
        self.obj.OutletPressureActive = self.OutletPressureActive
        self.obj.InletPressureActive = self.InletPressureActive
        self.obj.OutletFlowRateActive = self.OutletFlowRateActive
        self.obj.InletFlowRateActive = self.InletFlowRateActive
        self.obj.EntrancePipeArea = self.EntrancePipeArea
        self.obj.EntranceArea = self.EntranceArea
        self.obj.DiaphragmPipeArea = self.DiaphragmPipeArea
        self.obj.DiaphragmArea = self.DiaphragmArea
        self.obj.BendPipeArea = self.BendPipeArea
        self.obj.BendRadiusDiameter = self.BendRadiusDiameter
        self.obj.BendAngle = self.BendAngle
        self.obj.BendLossCoefficient = self.BendLossCoefficient
        self.obj.GateValvePipeArea = self.GateValvePipeArea
        self.obj.GateValveClosingCoeff = self.GateValveClosingCoeff
        self.obj.ColebrookeArea = self.ColebrookeArea
        self.obj.ColebrookeRadius = self.ColebrookeRadius
        self.obj.ColebrookeGrainDiameter = self.ColebrookeGrainDiameter
        self.obj.ColebrookeFormFactor = self.ColebrookeFormFactor
        self.obj.PumpFlowRate = self.PumpFlowRate
        self.obj.PumpHeadLoss = self.PumpHeadLoss

    def updateParameterWidget(self):
        """Fill the widgets from the cached property values."""
        index_sectiontype = self.parameterWidget.cb_section_type.findText(
            self.SectionType
        )
        self.parameterWidget.cb_section_type.setCurrentIndex(index_sectiontype)
        self.parameterWidget.sw_section_type.setCurrentIndex(index_sectiontype)
        index_liquidsectiontype = self.parameterWidget.cb_liquid_section_type.findText(
            self.LiquidSectionType
        )
        self.parameterWidget.cb_liquid_section_type.setCurrentIndex(
            index_liquidsectiontype
        )
        self.parameterWidget.sw_liquid_section_type.setCurrentIndex(
            index_liquidsectiontype
        )
        self.parameterWidget.if_manning_area.setText(self.ManningArea.UserString)
        self.parameterWidget.if_manning_radius.setText(self.ManningRadius.UserString)
        self.parameterWidget.sb_manning_coefficient.setValue(self.ManningCoefficient)
        self.parameterWidget.if_enlarge_area1.setText(self.EnlargeArea1.UserString)
        self.parameterWidget.if_enlarge_area2.setText(self.EnlargeArea2.UserString)
        self.parameterWidget.if_contract_area1.setText(self.ContractArea1.UserString)
        self.parameterWidget.if_contract_area2.setText(self.ContractArea2.UserString)
        # Pressures are cached in MPa; the factor 1000 presumably converts to
        # FreeCAD's internal pressure unit (kPa) — TODO confirm.
        self.parameterWidget.if_inletpressure.setText(
            FreeCAD.Units.Quantity(
                1000 * self.InletPressure, FreeCAD.Units.Pressure
            ).UserString
        )
        self.parameterWidget.if_outletpressure.setText(
            FreeCAD.Units.Quantity(
                1000 * self.OutletPressure, FreeCAD.Units.Pressure
            ).UserString
        )
        self.parameterWidget.if_inletflowrate.setText(str(self.InletFlowRate))
        self.parameterWidget.if_outletflowrate.setText(str(self.OutletFlowRate))
        self.parameterWidget.gb_inletpressure.setChecked(self.InletPressureActive)
        self.parameterWidget.gb_outletpressure.setChecked(self.OutletPressureActive)
        self.parameterWidget.gb_inletflowrate.setChecked(self.InletFlowRateActive)
        self.parameterWidget.gb_outletflowrate.setChecked(self.OutletFlowRateActive)
        self.parameterWidget.if_entrance_pipe_area.setText(
            self.EntrancePipeArea.UserString
        )
        self.parameterWidget.if_entrance_area.setText(self.EntranceArea.UserString)
        self.parameterWidget.if_diaphragm_pipe_area.setText(
            self.DiaphragmPipeArea.UserString
        )
        self.parameterWidget.if_diaphragm_area.setText(self.DiaphragmArea.UserString)
        self.parameterWidget.if_bend_pipe_area.setText(self.BendPipeArea.UserString)
        self.parameterWidget.sb_bradius_pdiameter.setValue(self.BendRadiusDiameter)
        self.parameterWidget.sb_bend_angle.setValue(self.BendAngle)
        self.parameterWidget.sb_bend_loss_coefficient.setValue(self.BendLossCoefficient)
        self.parameterWidget.if_gatevalve_pipe_area.setText(
            self.GateValvePipeArea.UserString
        )
        self.parameterWidget.sb_gatevalve_closing_coeff.setValue(
            self.GateValveClosingCoeff
        )
        self.parameterWidget.if_colebrooke_pipe_area.setText(
            self.ColebrookeArea.UserString
        )
        self.parameterWidget.if_colebrooke_radius.setText(
            self.ColebrookeRadius.UserString
        )
        self.parameterWidget.if_colebrooke_grain_diameter.setText(
            self.ColebrookeGrainDiameter.UserString
        )
        self.parameterWidget.sb_colebrooke_form_factor.setValue(
            self.ColebrookeFormFactor
        )
        # Pump characteristic table: column 0 = flow rate, column 1 = head loss
        for i in range(len(self.PumpFlowRate)):
            self.parameterWidget.tw_pump_characteristics.setItem(
                i, 0, QtGui.QTableWidgetItem(str(self.PumpFlowRate[i]))
            )
            self.parameterWidget.tw_pump_characteristics.setItem(
                i, 1, QtGui.QTableWidgetItem(str(self.PumpHeadLoss[i]))
            )

    def sectiontype_changed(self, index):
        """Switch the stacked widget page and cache the chosen section type."""
        if index < 0:
            return
        self.parameterWidget.cb_section_type.setCurrentIndex(index)
        self.parameterWidget.sw_section_type.setCurrentIndex(index)
        # parameterWidget returns unicode
        self.SectionType = str(self.parameterWidget.cb_section_type.itemText(index))

    def liquidsectiontype_changed(self, index):
        """Switch the liquid-section page and cache the chosen liquid type."""
        if index < 0:
            return
        self.parameterWidget.cb_liquid_section_type.setCurrentIndex(index)
        self.parameterWidget.sw_liquid_section_type.setCurrentIndex(index)
        # parameterWidget returns unicode
        self.LiquidSectionType = str(
            self.parameterWidget.cb_liquid_section_type.itemText(index)
        )

    # --- slot methods below: each one caches the widget's new value on the
    # panel; values are only written to the object in set_fluidsection_props ---

    def manning_area_changed(self, base_quantity_value):
        self.ManningArea = base_quantity_value

    def manning_radius_changed(self, base_quantity_value):
        self.ManningRadius = base_quantity_value

    def manning_coefficient_changed(self, base_quantity_value):
        # NOTE: signal delivers a plain double here despite the parameter name
        self.ManningCoefficient = base_quantity_value

    def enlarge_area1_changed(self, base_quantity_value):
        self.EnlargeArea1 = base_quantity_value

    def enlarge_area2_changed(self, base_quantity_value):
        self.EnlargeArea2 = base_quantity_value

    def contract_area1_changed(self, base_quantity_value):
        self.ContractArea1 = base_quantity_value

    def contract_area2_changed(self, base_quantity_value):
        self.ContractArea2 = base_quantity_value

    def inlet_pressure_changed(self, base_quantity_value):
        # cached as a plain float in MPa
        self.InletPressure = Units.Quantity(base_quantity_value).getValueAs("MPa").Value

    def outlet_pressure_changed(self, base_quantity_value):
        # cached as a plain float in MPa
        self.OutletPressure = (
            Units.Quantity(base_quantity_value).getValueAs("MPa").Value
        )

    def inlet_flowrate_changed(self, base_quantity_value):
        # cached as a plain float in kg/s
        self.InletFlowRate = (
            Units.Quantity(base_quantity_value).getValueAs("kg/s").Value
        )

    def outlet_flowrate_changed(self, base_quantity_value):
        # cached as a plain float in kg/s
        self.OutletFlowRate = (
            Units.Quantity(base_quantity_value).getValueAs("kg/s").Value
        )

    def inlet_pressure_active(self, active):
        self.InletPressureActive = active

    def outlet_pressure_active(self, active):
        self.OutletPressureActive = active

    def inlet_flowrate_active(self, active):
        self.InletFlowRateActive = active

    def outlet_flowrate_active(self, active):
        self.OutletFlowRateActive = active

    def entrance_pipe_area_changed(self, base_quantity_value):
        self.EntrancePipeArea = base_quantity_value

    def entrance_area_changed(self, base_quantity_value):
        self.EntranceArea = base_quantity_value

    def diaphragm_pipe_area_changed(self, base_quantity_value):
        self.DiaphragmPipeArea = base_quantity_value

    def diaphragm_area_changed(self, base_quantity_value):
        self.DiaphragmArea = base_quantity_value

    def bend_pipe_area_changed(self, base_quantity_value):
        self.BendPipeArea = base_quantity_value

    def bradius_pdiameter_changed(self, base_quantity_value):
        self.BendRadiusDiameter = base_quantity_value

    def bend_angle_changed(self, base_quantity_value):
        self.BendAngle = base_quantity_value

    def bend_loss_coefficient_changed(self, base_quantity_value):
        self.BendLossCoefficient = base_quantity_value

    def gatevalve_pipe_area_changed(self, base_quantity_value):
        self.GateValvePipeArea = base_quantity_value

    def gatevalve_closing_coeff_changed(self, base_quantity_value):
        self.GateValveClosingCoeff = base_quantity_value

    def colebrooke_pipe_area_changed(self, base_quantity_value):
        self.ColebrookeArea = base_quantity_value

    def colebrooke_radius_changed(self, base_quantity_value):
        self.ColebrookeRadius = base_quantity_value

    def colebrooke_grain_diameter_changed(self, base_quantity_value):
        self.ColebrookeGrainDiameter = base_quantity_value

    def colebrooke_form_factor_changed(self, base_quantity_value):
        self.ColebrookeFormFactor = base_quantity_value

    def pump_characteristics_changed(self, row, column):
        """Store an edited pump-table cell (col 0: flow rate, col 1: head loss).

        NOTE(review): float() raises ValueError on non-numeric cell text —
        confirm the table delegate restricts input.
        """
        if column == 0:
            self.PumpFlowRate[row] = float(
                self.parameterWidget.tw_pump_characteristics.item(row, column).text()
            )
        else:
            self.PumpHeadLoss[row] = float(
                self.parameterWidget.tw_pump_characteristics.item(row, column).text()
            )
|
builders | input_reader_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_reader_builder."""
import os
import numpy as np
import tensorflow as tf
from app.object_detection.builders import input_reader_builder
from app.object_detection.core import standard_fields as fields
from app.object_detection.protos import input_reader_pb2
from google.protobuf import text_format
from tensorflow.core.example import example_pb2, feature_pb2
class InputReaderBuilderTest(tf.test.TestCase):
    """Tests for input_reader_builder's TFRecord input pipeline."""

    def create_tf_record(self):
        """Write a one-example TFRecord (4x5 JPEG, one box, a flat mask).

        Returns the path of the written file.
        """
        path = os.path.join(self.get_temp_dir(), "tfrecord")
        writer = tf.python_io.TFRecordWriter(path)
        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        flat_mask = (4 * 5) * [1.0]
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
        example = example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "image/encoded": feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])
                    ),
                    "image/format": feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(value=["jpeg".encode("utf-8")])
                    ),
                    "image/height": feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[4])
                    ),
                    "image/width": feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[5])
                    ),
                    "image/object/bbox/xmin": feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[0.0])
                    ),
                    "image/object/bbox/xmax": feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[1.0])
                    ),
                    "image/object/bbox/ymin": feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[0.0])
                    ),
                    "image/object/bbox/ymax": feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[1.0])
                    ),
                    "image/object/class/label": feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[2])
                    ),
                    "image/object/mask": feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=flat_mask)
                    ),
                }
            )
        )
        writer.write(example.SerializeToString())
        writer.close()
        return path

    def test_build_tf_record_input_reader(self):
        """Without load_instance_masks, no masks appear in the output dict."""
        tf_record_path = self.create_tf_record()
        input_reader_text_proto = """
          shuffle: false
          num_readers: 1
          tf_record_input_reader {{
            input_path: '{0}'
          }}
        """.format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)
        sv = tf.train.Supervisor(logdir=self.get_temp_dir())
        with sv.prepare_or_wait_for_session() as sess:
            sv.start_queue_runners(sess)
            output_dict = sess.run(tensor_dict)
        self.assertTrue(
            fields.InputDataFields.groundtruth_instance_masks not in output_dict
        )
        # assertEqual: assertEquals is a deprecated alias
        self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
        self.assertEqual([2], output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual(
            (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape
        )
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            output_dict[fields.InputDataFields.groundtruth_boxes][0],
        )

    def test_build_tf_record_input_reader_and_load_instance_masks(self):
        """With load_instance_masks, a (1, 4, 5) mask tensor is produced."""
        tf_record_path = self.create_tf_record()
        input_reader_text_proto = """
          shuffle: false
          num_readers: 1
          load_instance_masks: true
          tf_record_input_reader {{
            input_path: '{0}'
          }}
        """.format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)
        sv = tf.train.Supervisor(logdir=self.get_temp_dir())
        with sv.prepare_or_wait_for_session() as sess:
            sv.start_queue_runners(sess)
            output_dict = sess.run(tensor_dict)
        # assertEqual: assertEquals is a deprecated alias
        self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
        self.assertEqual([2], output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual(
            (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape
        )
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            output_dict[fields.InputDataFields.groundtruth_boxes][0],
        )
        self.assertAllEqual(
            (1, 4, 5),
            output_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
        )

    def test_raises_error_with_no_input_paths(self):
        """build() must raise ValueError when no input path is configured."""
        input_reader_text_proto = """
          shuffle: false
          num_readers: 1
          load_instance_masks: true
        """
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        with self.assertRaises(ValueError):
            input_reader_builder.build(input_reader_proto)
if __name__ == "__main__":
    # Run the test cases above via TensorFlow's test runner
    tf.test.main()
|
utils | raw | import os
import re
import shutil
import subprocess
import tempfile
from pathlib import Path
from django.conf import settings
from photonix.photos.models import Photo, PhotoFile, Task
from photonix.web.utils import logger
from PIL import Image
from .metadata import get_dimensions, get_mimetype
RAW_PROCESS_VERSION = "20190305"
NON_RAW_MIMETYPES = [
"image/jpeg",
"image/png",
"image/tiff",
"image/bmp",
"image/gif",
"image/jp2",
"image/x-portable-pixmap",
"image/x-xbitmap",
"image/webp",
]
def ensure_raw_processing_tasks():
    """Dispatch every pending 'ensure_raw_processed' task, oldest first."""
    pending = Task.objects.filter(type="ensure_raw_processed", status="P")
    for task in pending.order_by("created_at"):
        ensure_raw_processed(task.subject_id, task)
def ensure_raw_processed(photo_id, task):
    """Queue a 'process_raw' child task for each raw file of the photo.

    When the photo has no raw files, the task completes straight away and
    the thumbnail-generation task is queued instead.
    """
    task.start()
    photo = Photo.objects.get(id=photo_id)
    raw_files = [
        photo_file
        for photo_file in photo.files.all()
        # TODO: Make raw photo detection better
        if photo_file.mimetype not in NON_RAW_MIMETYPES
    ]
    for photo_file in raw_files:
        Task(
            type="process_raw",
            subject_id=photo_file.id,
            parent=task,
            library=photo_file.photo.library,
        ).save()
    # Complete and add next task to generate thumbnails
    if not raw_files:
        task.complete(next_type="generate_thumbnails", next_subject_id=photo_id)
def process_raw_tasks():
    """Dispatch every pending 'process_raw' task, oldest first."""
    pending = Task.objects.filter(type="process_raw", status="P")
    for task in pending.order_by("created_at"):
        process_raw_task(task.subject_id, task)
def process_raw_task(photo_file_id, task):
    """Generate a JPEG from one raw PhotoFile and record the result.

    On success the JPEG is moved into PHOTO_RAW_PROCESSED_DIR, the
    PhotoFile row is updated and a thumbnail task is queued; otherwise
    the task is marked as failed.
    """
    task.start()
    photo_file = PhotoFile.objects.get(id=photo_file_id)
    output_path, version, process_params, external_version = generate_jpeg(
        photo_file.path
    )
    if not output_path:
        task.failed("Could not generate JPEG")
        return

    processed_dir = settings.PHOTO_RAW_PROCESSED_DIR
    if not os.path.isdir(processed_dir):
        os.mkdir(processed_dir)
    destination_path = Path(processed_dir) / "{}.jpg".format(photo_file.id)
    shutil.move(output_path, str(destination_path))

    photo_file.raw_processed = True
    photo_file.raw_version = version
    photo_file.raw_external_params = process_params
    photo_file.raw_external_version = external_version
    # Backfill dimensions when the importer did not record them
    if not photo_file.width or not photo_file.height:
        photo_file.width, photo_file.height = get_dimensions(
            photo_file.base_image_path
        )
    photo_file.save()
    task.complete(next_type="generate_thumbnails", next_subject_id=photo_file.photo.id)
def __get_generated_image(temp_dir, basename):
    """Return the first file in temp_dir other than basename, or None."""
    for name in os.listdir(temp_dir):
        if name == basename:
            continue
        return Path(temp_dir) / name
def __get_exiftool_image(temp_dir, basename):
    """
    Exiftool outputs two files when copying the tags over, we
    want the file that ends in .jpg and not .jpg_original, but
    to keep the filesystem tidy we need to get the path.

    Returns a dict with optional keys 'original' (the .jpg_original
    backup) and 'output' (the tagged .jpg).
    """
    exiftool_files = {}
    for fn in os.listdir(temp_dir):
        # BUGFIX: these were annotation statements (`key: value`), not
        # assignments, so the dict was always returned empty.
        if fn.endswith(".jpg_original"):
            exiftool_files["original"] = Path(temp_dir) / fn
        if fn.endswith(".jpg"):
            exiftool_files["output"] = Path(temp_dir) / fn
    return exiftool_files
def __has_acceptable_dimensions(
    original_image_path, new_image_path, accept_empty_original_dimensions=False
):
    """Decide whether the generated image is a usable stand-in for the raw.

    Accepts the new image when it is at least 512px in both dimensions
    and matches the original's dimensions exactly or to within 5%.
    """
    logger.debug("Checking image dimensions")
    original_dims = get_dimensions(original_image_path)
    logger.debug(f"Original image dimensions: {original_dims}")
    new_dims = get_dimensions(new_image_path)
    logger.debug(f"New image dimensions: {new_dims}")

    # We don't know the original dimensions so have nothing to compare to
    if original_dims == (None, None):
        if accept_empty_original_dimensions:
            logger.debug("No original dimensions, accepting new dimensions")
            return True
        logger.debug("No original dimensions, rejecting new dimensions")
        return False

    # An embedded preview under 512px can't be the full resolution
    new_w, new_h = new_dims
    if not new_w or not new_h or new_w < 512 or new_h < 512:
        logger.debug("Dimensions are too small")
        return False

    # Embedded image is exactly the same dimensions
    if original_dims == new_dims:
        logger.debug("Dimensions match exactly")
        return True

    # Embedded image within 95% of the raw width and height (both ways)
    orig_w, orig_h = original_dims
    close_enough = (
        orig_w / new_w > 0.95
        and orig_h / new_h > 0.95
        and new_w / orig_w > 0.95
        and new_h / orig_h > 0.95
    )
    if close_enough:
        logger.debug("Dimensions match closely enough")
        return True

    logger.debug("Dimensions are not good")
    return False
def identified_as_jpeg(path):
    """Return True if the `file` utility identifies *path* as JPEG data."""
    completed = subprocess.run(
        ["file", path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return "JPEG image data" in completed.stdout.decode("utf-8")
def bitmap_to_jpeg(input_path, output_path, quality=75):
    """Convert any PIL-readable bitmap to an RGB JPEG at the given quality."""
    image = Image.open(input_path).convert("RGB")
    image.save(output_path, format="JPEG", quality=quality)
def __dcraw_version():
    """Parse dcraw's banner output for its version number, or None."""
    stdout, _ = subprocess.Popen(
        ["dcraw"],
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()
    for line in stdout.decode("utf-8").split("\n"):
        if 'Raw photo decoder "dcraw"' in line:
            match = re.search(r"v([0-9]+.[0-9]+)", line)
            return match.group(1) if match else None
def __heif_convert_version():
    """Read the installed libheif-examples version from dpkg, or None."""
    stdout, _ = subprocess.Popen(
        ["dpkg", "-s", "libheif-examples"],
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()
    for line in stdout.decode("utf-8").split("\n"):
        if "Version: " in line:
            match = re.search(r"([0-9]+.[0-9]+.[0-9]+)", line)
            return match.group(1) if match else None
def __exiftool_version():
    """Read the installed libimage-exiftool-perl version from dpkg, or None."""
    stdout, _ = subprocess.Popen(
        ["dpkg", "-s", "libimage-exiftool-perl"],
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ).communicate()
    for line in stdout.decode("utf-8").split("\n"):
        if "Version: " in line:
            match = re.search(r"([0-9]+.[0-9]+.[0-9]+)", line)
            return match.group(1) if match else None
def __delete_file_silently(path):
    """Remove the file at *path*, ignoring it if it does not exist."""
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def generate_jpeg(path):
    """Produce a JPEG rendition of the raw image file at *path*.

    Strategies are tried in order: camera-specific extraction (exiftool for
    Canon CR3, heif-convert for HEIF/HEIC), dcraw's embedded-thumbnail
    extraction, then dcraw conversion using the embedded color profile, and
    finally dcraw conversion using the embedded white balance.  A candidate
    is accepted only when its dimensions are close enough to the raw file's
    (see __has_acceptable_dimensions) and it passes the JPEG identity check,
    converting via Pillow when needed.

    :param path: path to the raw file; it is copied to a private temp dir so
        the external tools never touch the original.
    :return: tuple ``(jpeg_path, RAW_PROCESS_VERSION, process_params,
        external_version)`` where ``jpeg_path`` is a temporary file the
        caller must delete, or None when every strategy failed.
    """
    logger.debug(f"Generating JPEG for raw file {path}")
    basename = os.path.basename(path)
    temp_dir = tempfile.mkdtemp()
    temp_input_path = Path(temp_dir) / basename
    shutil.copyfile(path, temp_input_path)
    valid_image = False
    process_params = None
    external_version = None
    # Handle Canon's CR3 format since their thumbnails are proprietary.
    mimetype = get_mimetype(temp_input_path)
    if mimetype == "image/x-canon-cr3":
        logger.debug("File type detected as Canon Raw v3")
        subprocess.Popen(
            [
                "exiftool",
                "-b",
                "-JpgFromRaw",
                "-w",
                "jpg",
                "-ext",
                "CR3",
                temp_input_path,
                "-execute",
                "-tagsfromfile",
                temp_input_path,
                "-ext",
                "jpg",
                Path(temp_dir),
            ],
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).communicate()
        exiftool_output = __get_exiftool_image(temp_dir, basename)
        # Clean up the original file without tags.
        if "original" in exiftool_output:
            os.remove(exiftool_output["original"])
        # Set the input file.
        if "output" in exiftool_output:
            temp_output_path = exiftool_output["output"]
        else:
            temp_output_path = None
        process_params = "exiftool -b -JpgFromRaw"
        external_version = __exiftool_version()
    elif mimetype in ["image/heif", "image/heic"]:
        logger.debug("File type detected as HIEF/HEIC")
        temp_output_path = Path(temp_dir) / "out.jpg"
        subprocess.run(["heif-convert", "-q", "90", temp_input_path, temp_output_path])
        process_params = "heif-convert -q 90"
        external_version = __heif_convert_version()
    else:
        logger.debug("Attempting to extract JPEG using dcraw")
        # Try to extract the JPEG that might be inside the raw file
        subprocess.run(["dcraw", "-e", temp_input_path])
        temp_output_path = __get_generated_image(temp_dir, basename)
        process_params = "dcraw -e"
        external_version = __dcraw_version()
    # Check the JPEGs dimensions are close enough to the raw's dimensions
    if temp_output_path:
        if __has_acceptable_dimensions(temp_input_path, temp_output_path):
            logger.debug("JPEG file looks good so far")
            valid_image = True
        else:
            __delete_file_silently(temp_output_path)
    # Next try to use embedded profile to generate an image
    if not valid_image:
        logger.debug(
            "Attempting to generate JPEG with dcraw using embedded color profile"
        )
        # Bug fix: the option and its value must be separate argv elements.
        # Passing "-p embed" as a single token makes dcraw misread its
        # arguments (compare the correctly split "-e"/"-w" invocations).
        subprocess.run(["dcraw", "-p", "embed", temp_input_path])
        temp_output_path = __get_generated_image(temp_dir, basename)
        if temp_output_path:
            if __has_acceptable_dimensions(temp_input_path, temp_output_path):
                logger.debug("JPEG file looks good so far")
                valid_image = True
                process_params = "dcraw -p embed"
            else:
                __delete_file_silently(temp_output_path)
    # Finally try to use the embedded whitebalance to generate an image
    if not valid_image:
        logger.debug(
            "Attempting to generate JPEG with dcraw using embedded white balance"
        )
        subprocess.run(["dcraw", "-w", temp_input_path])
        temp_output_path = __get_generated_image(temp_dir, basename)
        if temp_output_path:
            if __has_acceptable_dimensions(temp_input_path, temp_output_path, True):
                logger.debug("JPEG file looks good so far")
                valid_image = True
                process_params = "dcraw -w"
            else:
                __delete_file_silently(temp_output_path)
    # If extracted image isn't a JPEG then we need to convert it
    if valid_image:
        valid_image = identified_as_jpeg(temp_output_path)
        if not valid_image:
            logger.debug("JPEG didn't pass test, attempting bitmap conversion")
            # NOTE(review): tempfile.mktemp is race-prone; kept because the
            # caller expects a bare path with no open handle — TODO mkstemp.
            jpeg_path = tempfile.mktemp()
            bitmap_to_jpeg(temp_output_path, jpeg_path)
            if identified_as_jpeg(jpeg_path):
                logger.debug("JPEG file now passes test")
                temp_output_path = jpeg_path
                valid_image = True
    # Move the outputted file to a new temporary location
    if valid_image:
        logger.debug("I'm happy with the JPEG so moving it to a new location")
        final_path = tempfile.mktemp()
        os.rename(temp_output_path, final_path)
    # Delete the temporary working directory
    logger.debug("Deleting temporary files")
    shutil.rmtree(temp_dir)
    if valid_image:
        logger.debug(
            f"Returning info about JPEG which is temporarily located here: {final_path}"
        )
        return (final_path, RAW_PROCESS_VERSION, process_params, external_version)
    logger.error("Couldn't make JPEG from raw file")
    return (None, RAW_PROCESS_VERSION, None, None)
|
vidcutter | changelog | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# VidCutter - media cutter & joiner
#
# copyright © 2018 Pete Alexandrou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from PyQt5.QtCore import QFile, QSize, Qt, QTextStream
from PyQt5.QtWidgets import (
QDialog,
QDialogButtonBox,
QLabel,
QScrollArea,
QStyleFactory,
QVBoxLayout,
qApp,
)
class Changelog(QDialog):
    """Dialog showing the application changelog loaded from Qt resources."""

    def __init__(self, parent=None):
        super(Changelog, self).__init__(parent, Qt.Dialog | Qt.WindowCloseButtonHint)
        self.parent = parent
        self.setWindowTitle("{} changelog".format(qApp.applicationName()))
        # Read the changelog text bundled in the Qt resource file.
        changelog = QFile(":/CHANGELOG")
        changelog.open(QFile.ReadOnly | QFile.Text)
        content = QTextStream(changelog).readAll()
        label = QLabel(content, self)
        label.setWordWrap(True)
        label.setTextFormat(Qt.PlainText)
        buttons = QDialogButtonBox(QDialogButtonBox.Close, self)
        buttons.rejected.connect(self.close)
        scrollarea = QScrollArea(self)
        scrollarea.setStyleSheet("QScrollArea { background:transparent; }")
        scrollarea.setWidgetResizable(True)
        scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scrollarea.setFrameShape(QScrollArea.NoFrame)
        scrollarea.setWidget(label)
        if sys.platform in {"win32", "darwin"}:
            scrollarea.setStyle(QStyleFactory.create("Fusion"))
        # noinspection PyUnresolvedReferences
        if parent.parent.stylename == "fusion" or sys.platform in {"win32", "darwin"}:
            self.setStyleSheet(
                """
            QScrollArea {{
                background-color: transparent;
                margin-bottom: 10px;
                border: none;
                border-right: 1px solid {};
            }}""".format("#4D5355" if parent.theme == "dark" else "#C0C2C3")
            )
        else:
            # Bug fix: this branch has no .format() call, so the braces must
            # be single ones — doubled "{{ }}" reached Qt verbatim and made
            # the stylesheet invalid CSS.
            self.setStyleSheet(
                """
            QScrollArea {
                background-color: transparent;
                margin-bottom: 10px;
                border: none;
            }"""
            )
        layout = QVBoxLayout()
        layout.addWidget(scrollarea)
        layout.addWidget(buttons)
        self.setLayout(layout)
        self.setMinimumSize(self.sizeHint())

    def sizeHint(self) -> QSize:
        """Pick a dialog size matching the parent window's UI scale setting."""
        modes = {
            "LOW": QSize(450, 300),
            "NORMAL": QSize(565, 560),
            "HIGH": QSize(1080, 920),
        }
        return modes[self.parent.parentWidget().scale]
|
PyObjCTest | test_nsurlerror | from Foundation import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
try:
import CFNetwork
except ImportError:
CFNetwork = None
class TestNSURLError(TestCase):
    """Checks that the NSURL error constants bridged by PyObjC exist and
    carry the expected numeric values / string types."""

    def testConstants(self):
        # The error domain and userInfo key are NSString constants.
        self.assertIsInstance(NSURLErrorDomain, unicode)
        self.assertIsInstance(NSErrorFailingURLStringKey, unicode)
        # Generic URL-loading error codes (-1 .. -1017).
        self.assertEqual(NSURLErrorUnknown, -1)
        self.assertEqual(NSURLErrorCancelled, -999)
        self.assertEqual(NSURLErrorBadURL, -1000)
        self.assertEqual(NSURLErrorTimedOut, -1001)
        self.assertEqual(NSURLErrorUnsupportedURL, -1002)
        self.assertEqual(NSURLErrorCannotFindHost, -1003)
        self.assertEqual(NSURLErrorCannotConnectToHost, -1004)
        self.assertEqual(NSURLErrorNetworkConnectionLost, -1005)
        self.assertEqual(NSURLErrorDNSLookupFailed, -1006)
        self.assertEqual(NSURLErrorHTTPTooManyRedirects, -1007)
        self.assertEqual(NSURLErrorResourceUnavailable, -1008)
        self.assertEqual(NSURLErrorNotConnectedToInternet, -1009)
        self.assertEqual(NSURLErrorRedirectToNonExistentLocation, -1010)
        self.assertEqual(NSURLErrorBadServerResponse, -1011)
        self.assertEqual(NSURLErrorUserCancelledAuthentication, -1012)
        self.assertEqual(NSURLErrorUserAuthenticationRequired, -1013)
        self.assertEqual(NSURLErrorZeroByteResource, -1014)
        self.assertEqual(NSURLErrorCannotDecodeRawData, -1015)
        self.assertEqual(NSURLErrorCannotDecodeContentData, -1016)
        self.assertEqual(NSURLErrorCannotParseResponse, -1017)
        # File-access error codes (-1100 ..).
        self.assertEqual(NSURLErrorFileDoesNotExist, -1100)
        self.assertEqual(NSURLErrorFileIsDirectory, -1101)
        self.assertEqual(NSURLErrorNoPermissionsToReadFile, -1102)
        # SSL / certificate error codes (-1200 ..).
        self.assertEqual(NSURLErrorSecureConnectionFailed, -1200)
        self.assertEqual(NSURLErrorServerCertificateHasBadDate, -1201)
        self.assertEqual(NSURLErrorServerCertificateUntrusted, -1202)
        self.assertEqual(NSURLErrorServerCertificateHasUnknownRoot, -1203)
        self.assertEqual(NSURLErrorServerCertificateNotYetValid, -1204)
        self.assertEqual(NSURLErrorClientCertificateRejected, -1205)
        # Download / file I/O error codes (-2000, -3000 ..).
        self.assertEqual(NSURLErrorCannotLoadFromNetwork, -2000)
        self.assertEqual(NSURLErrorCannotCreateFile, -3000)
        self.assertEqual(NSURLErrorCannotOpenFile, -3001)
        self.assertEqual(NSURLErrorCannotCloseFile, -3002)
        self.assertEqual(NSURLErrorCannotWriteToFile, -3003)
        self.assertEqual(NSURLErrorCannotRemoveFile, -3004)
        self.assertEqual(NSURLErrorCannotMoveFile, -3005)
        self.assertEqual(NSURLErrorDownloadDecodingFailedMidStream, -3006)
        self.assertEqual(NSURLErrorDownloadDecodingFailedToComplete, -3007)

    @min_os_level("10.5")
    def testConstants10_5(self):
        # Constant introduced in OS X 10.5.
        self.assertEqual(NSURLErrorDataLengthExceedsMaximum, -1103)

    @min_os_level("10.6")
    def testConstants10_6(self):
        # Constants introduced in OS X 10.6.
        self.assertIsInstance(NSURLErrorFailingURLPeerTrustErrorKey, unicode)
        self.assertIsInstance(NSURLErrorFailingURLErrorKey, unicode)
        self.assertIsInstance(NSURLErrorFailingURLStringErrorKey, unicode)
        self.assertEqual(NSURLErrorClientCertificateRequired, -1206)

    @min_os_level("10.7")
    @onlyIf(CFNetwork is not None)
    def testConstants10_7(self):
        # Constants introduced in OS X 10.7; values mirror CFNetwork's.
        self.assertEqual(
            NSURLErrorInternationalRoamingOff,
            CFNetwork.kCFURLErrorInternationalRoamingOff,
        )
        self.assertEqual(NSURLErrorCallIsActive, CFNetwork.kCFURLErrorCallIsActive)
        self.assertEqual(NSURLErrorDataNotAllowed, CFNetwork.kCFURLErrorDataNotAllowed)
        self.assertEqual(
            NSURLErrorRequestBodyStreamExhausted,
            CFNetwork.kCFURLErrorRequestBodyStreamExhausted,
        )
# Run this test module directly via the PyObjC test runner.
if __name__ == "__main__":
    main()
|
downloaders | VeehdCom | # -*- coding: utf-8 -*-
import re
from ..base.downloader import BaseDownloader
class VeehdCom(BaseDownloader):
    __name__ = "VeehdCom"
    __type__ = "downloader"
    __version__ = "0.29"
    __status__ = "testing"

    __pattern__ = r"http://veehd\.com/video/\d+_\S+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("filename_spaces", "bool", "Allow spaces in filename", False),
        ("replacement_char", "str", "Filename replacement character", "_"),
    ]

    __description__ = """Veehd.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("cat", "cat@pyload")]

    def setup(self):
        """Enable parallel downloads and resume support for this plugin."""
        self.multi_dl = True
        self.req.can_continue = True

    def process(self, pyfile):
        """Fetch the video page, resolve name and URL, then download."""
        self.download_html()
        if not self.file_exists():
            self.offline()
        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        """Load the video page HTML into ``self.data``."""
        page_url = self.pyfile.url
        self.log_debug(f"Requesting page: {page_url}")
        self.data = self.load(page_url)

    def file_exists(self):
        """Return False when the page is Veehd's generic placeholder title."""
        if not self.data:
            self.download_html()
        return "<title>Veehd</title>" not in self.data

    def get_file_name(self):
        """Derive a sanitized ``.avi`` filename from the page title."""
        if not self.data:
            self.download_html()

        m = re.search(r"<title.*?>(.+?) on Veehd</title>", self.data)
        if m is None:
            self.error(self._("Video title not found"))
        title = m.group(1)

        #: Replace unwanted characters in filename
        unwanted = r"[^\w ]+" if self.config.get("filename_spaces") else r"[^\w.]+"
        return re.sub(unwanted, self.config.get("replacement_char"), title) + ".avi"

    def get_file_url(self):
        """
        Returns the absolute downloadable filepath.
        """
        if not self.data:
            self.download_html()

        m = re.search(
            r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/.+?)"',
            self.data,
        )
        if m is None:
            self.error(self._("Embedded video url not found"))
        return m.group(1)
|
canvas | drawable | """
Copyright 2007, 2008, 2009, 2016 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from ..Constants import LINE_SELECT_SENSITIVITY
class Drawable(object):
    """
    GraphicalElement is the base class for all graphical elements.
    It contains an X,Y coordinate, a list of rectangular areas that the element occupies,
    and methods to detect selection of those areas.
    """

    @classmethod
    def make_cls_with_base(cls, super_cls):
        """Create a type named after *super_cls* that combines it with this
        class's extra bases and namespace (mix-in grafting helper)."""
        name = super_cls.__name__
        bases = (super_cls,) + cls.__bases__[1:]
        namespace = cls.__dict__.copy()
        return type(name, bases, namespace)

    def __init__(self):
        """
        Make a new list of rectangular areas and lines, and set the coordinate and the rotation.
        """
        self.coordinate = (0, 0)
        self.rotation = 0
        self.highlighted = False

        self._bounding_rects = []
        self._bounding_points = []

    def is_horizontal(self, rotation=None):
        """
        Is this element horizontal?
        If rotation is None, use this element's rotation.

        Args:
            rotation: the optional rotation

        Returns:
            true if rotation is horizontal
        """
        # Bug fix: 0 is a valid explicit rotation. The previous
        # `rotation or self.rotation` treated 0 as falsy and wrongly fell
        # back to the element's own rotation; only None may do that.
        if rotation is None:
            rotation = self.rotation
        return rotation in (0, 180)

    def is_vertical(self, rotation=None):
        """
        Is this element vertical?
        If rotation is None, use this element's rotation.

        Args:
            rotation: the optional rotation

        Returns:
            true if rotation is vertical
        """
        # Same fix as is_horizontal: only None means "use own rotation".
        if rotation is None:
            rotation = self.rotation
        return rotation in (90, 270)

    def rotate(self, rotation):
        """
        Rotate all of the areas by 90 degrees.

        Args:
            rotation: multiple of 90 degrees
        """
        self.rotation = (self.rotation + rotation) % 360

    def move(self, delta_coor):
        """
        Move the element by adding the delta_coor to the current coordinate.

        Args:
            delta_coor: (delta_x,delta_y) tuple
        """
        x, y = self.coordinate
        dx, dy = delta_coor
        self.coordinate = (x + dx, y + dy)

    def create_labels(self, cr=None):
        """
        Create labels (if applicable) and call on all children.
        Call this base method before creating labels in the element.
        """

    def create_shapes(self):
        """
        Create shapes (if applicable) and call on all children.
        Call this base method before creating shapes in the element.
        """

    def draw(self, cr):
        """Render this element; must be implemented by subclasses."""
        raise NotImplementedError()

    def bounds_from_area(self, area):
        """Set the selectable bounds from one (x, y, w, h) rectangle."""
        x1, y1, w, h = area
        x2 = x1 + w
        y2 = y1 + h
        self._bounding_rects = [(x1, y1, x2, y2)]
        self._bounding_points = [(x1, y1), (x2, y1), (x1, y2), (x2, y2)]

    def bounds_from_line(self, line):
        """Set the selectable bounds from a polyline of axis-aligned
        segments, padding each segment by LINE_SELECT_SENSITIVITY."""
        self._bounding_rects = rects = []
        self._bounding_points = list(line)
        last_point = line[0]
        for x2, y2 in line[1:]:
            (x1, y1), last_point = last_point, (x2, y2)
            if x1 == x2:
                # Vertical segment: widen horizontally, normalize y order.
                x1, x2 = x1 - LINE_SELECT_SENSITIVITY, x2 + LINE_SELECT_SENSITIVITY
                if y2 < y1:
                    y1, y2 = y2, y1
            elif y1 == y2:
                # Horizontal segment: widen vertically, normalize x order.
                y1, y2 = y1 - LINE_SELECT_SENSITIVITY, y2 + LINE_SELECT_SENSITIVITY
                if x2 < x1:
                    x1, x2 = x2, x1

            rects.append((x1, y1, x2, y2))

    def what_is_selected(self, coor, coor_m=None):
        """
        One coordinate specified:
            Is this element selected at given coordinate?
            ie: is the coordinate encompassed by one of the areas or lines?
        Both coordinates specified:
            Is this element within the rectangular region defined by both coordinates?
            ie: do any area corners or line endpoints fall within the region?

        Args:
            coor: the selection coordinate, tuple x, y
            coor_m: an additional selection coordinate.

        Returns:
            self if one of the areas/lines encompasses coor, else None.
        """
        # Work in element-local coordinates.
        x, y = [a - b for a, b in zip(coor, self.coordinate)]

        if not coor_m:
            for x1, y1, x2, y2 in self._bounding_rects:
                if x1 <= x <= x2 and y1 <= y <= y2:
                    return self
        else:
            x_m, y_m = [a - b for a, b in zip(coor_m, self.coordinate)]
            # Normalize the region so (x, y) is the top-left corner.
            if y_m < y:
                y, y_m = y_m, y
            if x_m < x:
                x, x_m = x_m, x

            for x1, y1 in self._bounding_points:
                if x <= x1 <= x_m and y <= y1 <= y_m:
                    return self

    def get_extents(self):
        """Return (x_min, y_min, x_max, y_max) in canvas coordinates."""
        x_min, y_min = x_max, y_max = self.coordinate
        x_min += min(x for x, y in self._bounding_points)
        y_min += min(y for x, y in self._bounding_points)
        x_max += max(x for x, y in self._bounding_points)
        y_max += max(y for x, y in self._bounding_points)
        return x_min, y_min, x_max, y_max

    def mouse_over(self):
        """Hook called when the pointer enters the element; no-op here."""
        pass

    def mouse_out(self):
        """Hook called when the pointer leaves the element; no-op here."""
        pass
|
model | store | # Copyright (C) 2011 Chris Dekter
# Copyright (C) 2019-2020 Thomas Hess <thomas.hess@udo.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Store(dict):
    """
    Allows persistent storage of values between invocations of the script.
    """

    # Shared across all Store instances: holds the script-global values.
    GLOBALS = {}

    def set_value(self, key, value):
        """
        Store a value
        Usage: C{store.set_value(key, value)}
        """
        self[key] = value

    def get_value(self, key):
        """
        Get a value
        Usage: C{store.get_value(key)}
        """
        # dict.get already defaults to None for a missing key.
        return self.get(key)

    def remove_value(self, key):
        """
        Remove a value
        Usage: C{store.remove_value(key)}
        """
        # pop without a default raises KeyError for a missing key,
        # matching the old `del self[key]` behaviour.
        self.pop(key)

    def set_global_value(self, key, value):
        """
        Store a global value
        Usage: C{store.set_global_value(key, value)}

        The value stored with this method will be available to all scripts.
        """
        Store.GLOBALS[key] = value

    def get_global_value(self, key):
        """
        Get a global value
        Usage: C{store.get_global_value(key)}
        """
        return self.GLOBALS.get(key)

    def remove_global_value(self, key):
        """
        Remove a global value
        Usage: C{store.remove_global_value(key)}
        """
        self.GLOBALS.pop(key)

    def has_key(self, key):
        """
        python 2 compatibility
        """
        return key in self
|
mmkeys | winhook | # Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import ctypes
import sys
from quodlibet.util import winapi
from ._base import MMKeysAction, MMKeysBackend, MMKeysImportError
# This backend relies on Windows keyboard hooks; importing it on any other
# platform raises so the mmkeys loader falls through to another backend.
if sys.platform != "win32":
    raise MMKeysImportError
class WinHookBackend(MMKeysBackend):
    """Multimedia-key backend based on a Windows low-level keyboard hook."""

    def __init__(self, name, callback):
        self._callback = callback
        self._hhook = None
        self._kb_proc_ptr = None
        # Best-effort: a failure to install the hook leaves the backend inert.
        try:
            self._start()
        except WindowsError:
            pass

    def cancel(self):
        """Tear down the hook, ignoring Windows API failures."""
        try:
            self._stop()
        except WindowsError:
            pass

    def _kb_proc(self, nCode, wParam, lParam):
        """A LowLevelKeyboardProc"""
        if nCode == winapi.HC_ACTION and wParam == winapi.WM_KEYDOWN:
            hstruct_ptr = ctypes.cast(lParam, winapi.LPKBDLLHOOKSTRUCT)
            assert hstruct_ptr
            vk_code = hstruct_ptr.contents.vkCode
            # Media keys we intercept, mapped to backend actions.
            media_actions = {
                winapi.VK_MEDIA_PLAY_PAUSE: MMKeysAction.PLAYPAUSE,
                winapi.VK_MEDIA_STOP: MMKeysAction.STOP,
                winapi.VK_MEDIA_NEXT_TRACK: MMKeysAction.NEXT,
                winapi.VK_MEDIA_PREV_TRACK: MMKeysAction.PREV,
            }
            if vk_code in media_actions:
                self._callback(media_actions[vk_code])
                # Non-zero return stops further processing of the key event.
                return 1
        return winapi.CallNextHookEx(self._hhook, nCode, wParam, lParam)

    def _start(self):
        """Start mmkey monitoring.

        Might raise WindowsError.
        """
        proc_ptr = winapi.LowLevelKeyboardProc(self._kb_proc)
        hook_handle = winapi.SetWindowsHookExW(
            winapi.WH_KEYBOARD_LL, proc_ptr, None, 0
        )
        if not hook_handle:
            raise winapi.WinError()
        # Keep the callback pointer alive for as long as the hook is set.
        self._kb_proc_ptr = proc_ptr
        self._hhook = hook_handle

    def _stop(self):
        """Stop mmkey monitoring. Can be called multiple times.

        Might raise WindowsError.
        """
        if self._hhook is not None:
            if winapi.UnhookWindowsHookEx(self._hhook) == 0:
                raise winapi.WinError()
            self._hhook = None
        self._kb_proc_ptr = None
|
network | networkthread | """
A thread to handle network concerns
"""
import network.asyncore_pollchoose as asyncore
import state
from network.connectionpool import BMConnectionPool
from queues import excQueue
from threads import StoppableThread
class BMNetworkThread(StoppableThread):
    """Main network thread"""

    name = "Asyncore"

    def run(self):
        """Drive the connection pool's asyncore loop until stopped.

        Unexpected exceptions are reported on excQueue (tagged with the
        thread name) before being re-raised for the supervisor.
        """
        try:
            while not self._stopped and state.shutdown == 0:
                BMConnectionPool().loop()
        except Exception as e:
            excQueue.put((self.name, e))
            raise

    def stopThread(self):
        """Stop the thread and close every pooled socket, best-effort."""
        super(BMNetworkThread, self).stopThread()
        pool = BMConnectionPool()
        for connections in (
            pool.listeningSockets,
            pool.outboundConnections,
            pool.inboundConnections,
        ):
            # Snapshot the values: closing a connection may remove it from
            # the pool's dict while we iterate.
            for connection in list(connections.values()):
                try:
                    connection.close()
                except Exception:
                    # Was a bare `except:`; narrowed so shutdown signals
                    # (SystemExit/KeyboardInterrupt) are no longer swallowed.
                    pass
        # just in case
        asyncore.close_all()
|
saveddata | implant | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import eos.db
from eos.effectHandlerHelpers import HandledItem
from eos.modifiedAttributeDict import ItemAttrShortcut, ModifiedAttributeDict
from logbook import Logger
from sqlalchemy.orm import reconstructor, validates
pyfalog = Logger(__name__)
class Implant(HandledItem, ItemAttrShortcut):
    """A fitted implant wrapping an eos item of category "Implant"."""

    def __init__(self, item):
        self.__item = item

        if self.isInvalid:
            raise ValueError("Passed item is not an Implant")

        self.itemID = item.ID if item is not None else None
        self.active = True
        self.build()

    @reconstructor
    def init(self):
        """Re-create runtime state after SQLAlchemy loads us from the DB."""
        self.__item = None

        if self.itemID:
            self.__item = eos.db.getItem(self.itemID)
            if self.__item is None:
                pyfalog.error("Item (id: {0}) does not exist", self.itemID)
                return

        if self.isInvalid:
            pyfalog.error("Item (id: {0}) is not an Implant", self.itemID)
            return

        self.build()

    def build(self):
        """Build object. Assumes proper and valid item already set"""
        self.__itemModifiedAttributes = ModifiedAttributeDict()
        self.__itemModifiedAttributes.original = self.__item.attributes
        self.__itemModifiedAttributes.overrides = self.__item.overrides
        self.__slot = self.__calculateSlot(self.__item)

    @property
    def itemModifiedAttributes(self):
        return self.__itemModifiedAttributes

    @property
    def isInvalid(self):
        # Invalid when no item is set or the item is not an implant.
        return self.__item is None or self.__item.category.name != "Implant"

    @property
    def slot(self):
        return self.__slot

    @property
    def item(self):
        return self.__item

    @staticmethod
    def __calculateSlot(item):
        # The "implantness" attribute holds the implant's slot number.
        if "implantness" not in item.attributes:
            raise ValueError("Passed item is not an implant")

        return int(item.attributes["implantness"].value)

    def clear(self):
        self.itemModifiedAttributes.clear()

    def calculateModifiedAttributes(self, fit, runTime, forceProjected=False):
        """Apply this implant's passive effects to *fit* for *runTime*."""
        if forceProjected:
            return

        if not self.active:
            return

        for effect in self.item.effects.values():
            if (
                effect.runTime == runTime
                and effect.isType("passive")
                and effect.activeByDefault
            ):
                effect.handler(fit, self, ("implant",), None, effect=effect)

    @validates("fitID", "itemID", "active")
    def validator(self, key, val):
        """Per-column type validation run by SQLAlchemy on assignment."""
        # Renamed from `map` to avoid shadowing the builtin.
        checks = {
            "fitID": lambda _val: isinstance(_val, int),
            "itemID": lambda _val: isinstance(_val, int),
            "active": lambda _val: isinstance(_val, bool),
        }

        if not checks[key](val):
            raise ValueError(str(val) + " is not a valid value for " + key)
        else:
            return val

    def __deepcopy__(self, memo):
        copy = Implant(self.item)
        copy.active = self.active
        return copy

    def rebase(self, item):
        # Re-initialize around a new item while preserving the active flag.
        active = self.active
        Implant.__init__(self, item)
        self.active = active

    def __repr__(self):
        return "Implant(ID={}, name={}) at {}".format(
            self.item.ID, self.item.name, hex(id(self))
        )
|
blocks | stream_to_vector_decimator | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from . import blocks_python as blocks
class stream_to_vector_decimator(gr.hier_block2):
    """
    Convert the stream to a vector, decimate the vector stream to achieve the vector rate.
    """

    def __init__(self, item_size, sample_rate, vec_rate, vec_len):
        """
        Create the block chain.

        Args:
            item_size: the number of bytes per sample
            sample_rate: the rate of incoming samples
            vec_rate: the rate of outgoing vectors (same units as sample_rate)
            vec_len: the length of the outgoing vectors in items
        """
        self._vec_rate = vec_rate
        self._vec_len = vec_len
        self._sample_rate = sample_rate

        gr.hier_block2.__init__(
            self,
            "stream_to_vector_decimator",
            gr.io_signature(1, 1, item_size),  # input: stream of items
            gr.io_signature(1, 1, item_size * vec_len),  # output: vectors
        )

        to_vector = blocks.stream_to_vector(item_size, vec_len)
        self.one_in_n = blocks.keep_one_in_n(item_size * vec_len, 1)
        self._update_decimator()
        self.connect(self, to_vector, self.one_in_n, self)

    def set_sample_rate(self, sample_rate):
        """
        Set the new sampling rate and update the decimator.

        Args:
            sample_rate: the new rate
        """
        self._sample_rate = sample_rate
        self._update_decimator()

    def set_vec_rate(self, vec_rate):
        """
        Set the new vector rate and update the decimator.

        Args:
            vec_rate: the new rate
        """
        self._vec_rate = vec_rate
        self._update_decimator()

    def set_decimation(self, decim):
        """
        Set the decimation parameter directly.

        Args:
            decim: the new decimation
        """
        # Clamp to a positive integer before applying to keep_one_in_n.
        self._decim = max(1, int(round(decim)))
        self.one_in_n.set_n(self._decim)

    def _update_decimator(self):
        # Recompute decimation from the current rate/length settings.
        self.set_decimation(self._sample_rate / self._vec_len / self._vec_rate)

    def decimation(self):
        """
        Returns the actual decimation.
        """
        return self._decim

    def sample_rate(self):
        """
        Returns configured sample rate.
        """
        return self._sample_rate

    def frame_rate(self):
        """
        Returns actual frame rate
        """
        return self._sample_rate / self._vec_len / self._decim
|
migrations | 0014_populate_licenses | from django.db import migrations
def create_standard_licenses(apps, schema_editor):
    """
    Creates a set of standard licenses
    """
    License = apps.get_model("deposit", "License")
    license_data = [
        {
            "name": "Creative Commons 1.0 Universal (CC0 1.0) Public Domain Dedication",
            "uri": "https://creativecommons.org/publicdomain/zero/1.0/",
        },
        {
            "name": "Creative Commons Attribution 4.0 International (CC BY 4.0)",
            "uri": "https://creativecommons.org/licenses/by/4.0/",
        },
        {
            "name": "Creative Commons Attribution-ShareAlike 4.0, International (CC BY-SA 4.0)",
            "uri": "https://creativecommons.org/licenses/by-sa/4.0/",
        },
        {
            "name": "Creative Commons Attribution-NonCommerical 4.0 International (CC BY-NC 4.0)",
            "uri": "https://creativecommons.org/licenses/by-nc/4.0/",
        },
        {
            "name": "Creative Commons Attribution-NoDerivatives 4.0 International (CC BY-ND 4.0)",
            "uri": "http://creativecommons.org/licenses/by-nd/4.0/",
        },
        {
            "name": "Free for private use; right holder retains other rights, including distribution",
            "uri": "https://dissem.in/deposit/license/zenodo-freetoread-1.0/",
        },
        {
            "name": "Other open license",
            "uri": "https://dissem.in/deposit/license/other-open/",
        },
        {"name": "No license", "uri": "https://dissem.in/deposit/license/no-license/"},
    ]
    # Insert all licenses in a single query.
    License.objects.bulk_create([License(**fields) for fields in license_data])
def remove_standard_licenses(apps, schema_editor):
    """
    Removes the licenses
    """
    # Delete the licenses first, then any chooser rows referencing them.
    for model_name in ("License", "LicenseChooser"):
        model = apps.get_model("deposit", model_name)
        model.objects.all().delete()
class Migration(migrations.Migration):
    # Data migration: forward populates the standard licenses, reverse
    # deletes them (and all LicenseChooser rows).

    dependencies = [
        ("deposit", "0013_licensechooser_position"),
    ]

    operations = [
        migrations.RunPython(create_standard_licenses, remove_standard_licenses)
    ]
|
sitemaps | store | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
"""Store sitemaps in s3.
This module is uploads all subreddit sitemaps as well as the sitemap index
to s3. The basic idea is that amazon will be serving the static sitemaps for
us.
The binary data we send to s3 is a gzipped xml file. In addition we also
send the appropriate type and encoding headers so this is understood
correctly by the browser.
The only file expected to be used outside this module is:
store_sitemaps_in_s3(subreddits)
Even though the subreddits are expected to be generated and passed into this
function, the sitemap index is created here. The reasoning is that in order
to create the sitemap index we need to know how many sitemaps we have.
If we simply queried the subreddit iterator for it's length then we would
have to load all of the subreddits into memory, which would be ... bad.
"""
import gzip
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from pylons import app_globals as g
from r2.lib.sitemaps.generate import sitemap_index, subreddit_sitemaps
from StringIO import StringIO
HEADERS = {
"Content-Type": "text/xml",
"Content-Encoding": "gzip",
}
def zip_string(string):
    """Gzip-compress *string* in memory and return the compressed payload."""
    out = StringIO()
    with gzip.GzipFile(mode="w", fileobj=out) as archive:
        archive.write(string)
    return out.getvalue()
def upload_sitemap(key, sitemap):
    """Gzip *sitemap* and upload it under *key* with the XML+gzip headers."""
    payload = zip_string(sitemap)
    key.set_contents_from_string(payload, headers=HEADERS)
def store_subreddit_sitemap(bucket, index, sitemap):
    """Upload one subreddit sitemap to s3 under a numbered key."""
    sitemap_key = Key(bucket)
    sitemap_key.key = "subreddit_sitemap/{0}.xml".format(index)
    g.log.debug("Uploading %r", sitemap_key)
    upload_sitemap(sitemap_key, sitemap)
def store_sitemap_index(bucket, count):
    """Upload the sitemap index that references *count* sitemaps."""
    index_key = Key(bucket)
    index_key.key = g.sitemap_subreddit_keyname
    g.log.debug("Uploading %r", index_key)
    upload_sitemap(index_key, sitemap_index(count))
def store_sitemaps_in_s3(subreddits):
    """Upload all subreddit sitemaps, then the index referencing them.

    The count is tracked while streaming the sitemap generator so the
    whole set never has to be held in memory at once.
    """
    bucket = S3Connection().get_bucket(g.sitemap_upload_s3_bucket, validate=False)

    count = 0
    for index, sitemap in enumerate(subreddit_sitemaps(subreddits)):
        store_subreddit_sitemap(bucket, index, sitemap)
        count = index + 1

    store_sitemap_index(bucket, count)
|
viewers | toolbar | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Abstract base class for a viewer's toolbar.
Provides a consistent but configurable infrastructure for toolbars.
"""
from PyQt5.QtWidgets import QHBoxLayout, QToolBar, QWidget
class ToolBar(QToolBar):
    """A toolbar suited for use outside a QMainWindow.

    Currently identical to QToolBar; kept as an extension point."""
class AbstractViewerToolbar(QWidget):
    """Base class for viewers' toolbars.

    The widget hosts a main toolbar (left-aligned) and a help toolbar
    (right-aligned). The main toolbar's contents are configurable:
    subclasses may override individual add...() methods (most easily with
    `pass` to suppress an action), or a completely custom ordering can be
    passed to the constructor as a list of bound methods.
    """

    def __init__(self, parent, methods=None):
        super().__init__(parent)
        self.actionCollection = parent.actionCollection
        self.createComponents()
        self.createLayout()
        self.populate(methods)
        # Honour the configured visibility state upon creation.
        self.setVisible(self.actionCollection.viewer_show_toolbar.isChecked())

    def createComponents(self):
        # TODO: if ToolBar ever needs subclassing, replace these direct
        # instantiations with factory methods.
        self.main_toolbar = ToolBar(self)
        self.help_toolbar = ToolBar(self)

    def createLayout(self):
        box = QHBoxLayout()
        box.setContentsMargins(0, 0, 0, 0)
        box.addWidget(self.main_toolbar)
        box.addStretch()
        box.addWidget(self.help_toolbar)
        self.layout = box
        self.setLayout(box)

    def populate(self, methods=None):
        """Fill the toolbars, using *methods* or the default action order."""
        # The help button always sits on the right-hand toolbar; this is
        # not intended to be configurable.
        self.help_toolbar.addAction(self.actionCollection.viewer_help)
        if not methods:
            methods = [
                self.addOpenAction,
                self.addCloseAction,
                self.addSeparator,
                self.addViewdocChooserAction,
                self.addSeparator,
                self.addPrintAction,
                self.addSeparator,
                self.addZoomActions,
                self.addSeparator,
                self.addPagerActions,
                self.addSeparator,
                self.addRotationActions,
            ]
        for method in methods:
            method()

    def addSeparator(self):
        """Append a separator to the main toolbar."""
        self.main_toolbar.addSeparator()

    def addOpenAction(self):
        """Append the action that opens viewer documents."""
        self.main_toolbar.addAction(self.actionCollection.viewer_open)

    def addCloseAction(self):
        """Append the action that closes the current viewer document."""
        self.main_toolbar.addAction(self.actionCollection.viewer_close)

    def addViewdocChooserAction(self):
        """Append the document chooser."""
        self.main_toolbar.addAction(self.actionCollection.viewer_document_select)

    def addPrintAction(self):
        """Append the print action."""
        self.main_toolbar.addAction(self.actionCollection.viewer_print)

    def addZoomActions(self):
        """Append the zoom-related actions."""
        ac = self.actionCollection
        for action in (
            ac.viewer_zoom_in,
            ac.viewer_zoom_combo,
            ac.viewer_zoom_out,
            ac.viewer_magnifier,
        ):
            self.main_toolbar.addAction(action)

    def addPagerActions(self):
        """Append the page-navigation actions."""
        ac = self.actionCollection
        for action in (ac.viewer_prev_page, ac.viewer_pager, ac.viewer_next_page):
            self.main_toolbar.addAction(action)

    def addRotationActions(self):
        """Append the rotation actions."""
        ac = self.actionCollection
        for action in (ac.viewer_rotate_left, ac.viewer_rotate_right):
            self.main_toolbar.addAction(action)
|
QT | DatosNueva | from Code.QT import (
Colocacion,
Controles,
FormLayout,
Iconos,
Info,
QTUtil,
QTUtil2,
QTVarios,
)
from PyQt4 import QtCore, QtGui
def datos(wParent, configuracion, procesador):
    """Run the category chooser, then the settings dialog.

    Returns (categoria, nivel, siBlancas, puntos) or None when cancelled.
    """
    # First, determine the category.
    categoria = dameCategoria(wParent, configuracion, procesador)
    if not categoria:
        return None
    w = wDatos(wParent, categoria, configuracion)
    if not w.exec_():
        return None
    return categoria, w.nivel, w.siBlancas, w.puntos
def dameCategoria(wParent, configuracion, procesador):
    """Show the category/opponent menu and return the chosen category.

    Returns None when the user cancels, opens help, or switches the
    opponent engine (in the latter case the competition window is
    relaunched with the new engine).
    """
    rival = configuracion.rival
    menu = QTVarios.LCMenu(wParent)
    menu.opcion(
        None,
        "%s: %d %s" % (_("Total score"), configuracion.puntuacion(), _("pts")),
        Iconos.NuevaPartida(),
    )
    menu.separador()
    menu.opcion(
        None,
        "%s: %s" % (_("Opponent"), rival.rotuloPuntos()),
        Iconos.Motor(),
        siDeshabilitado=False,
    )
    menu.separador()
    # ---------- Categories
    ant = 1
    for x in range(6):
        cat = rival.categorias.numero(x)
        txt = cat.nombre()
        nm = cat.nivelHecho  # highest level completed in this category
        nh = cat.hecho  # colors ("B"/"N") already won at the next level
        if nm > 0:
            txt += " %s %d" % (_("Level"), nm)
        if nh:
            if "B" in nh:
                txt += " +%s:%d" % (_("White"), nm + 1)
            if "N" in nh:
                txt += " +%s:%d" % (_("Black"), nm + 1)
        # if "B" not in nh:
        #     txt += " ... %s:%d"%( _( "White" )[0],nm+1)
        # elif "N" not in nh:
        #     txt += " ... %s:%d"%( _( "Black" )[0],nm+1)
        # else:
        #     txt += " ... %s:%d"%( _( "White" )[0],nm+1)
        # A category is selectable only when the previous one has at
        # least one completed level.
        siDesHabilitado = ant == 0
        ant = nm
        menu.opcion(str(x), txt, cat.icono(), siDeshabilitado=siDesHabilitado)
    # ----------- Opponent submenu
    menu.separador()
    menuRival = menu.submenu(_("Change opponent"))
    puntuacion = configuracion.puntuacion()
    icoNo = Iconos.Motor_No()
    icoSi = Iconos.Motor_Si()
    icoActual = Iconos.Motor_Actual()
    grpNo = Iconos.Grupo_No()
    grpSi = Iconos.Grupo_Si()
    for grupo in configuracion.grupos.liGrupos:
        nombre = _X(_("%1 group"), grupo.nombre)
        if grupo.minPuntos > 0:
            nombre += " (+%d %s)" % (grupo.minPuntos, _("pts"))
        # Groups requiring more points than the player has are disabled.
        siDes = grupo.minPuntos > puntuacion
        if siDes:
            icoG = grpNo
            icoM = icoNo
        else:
            icoG = grpSi
            icoM = icoSi
        submenu = menuRival.submenu(nombre, icoG)
        for rv in grupo.liRivales:
            siActual = rv.clave == rival.clave
            ico = icoActual if siActual else icoM
            submenu.opcion("MT_" + rv.clave, rv.rotuloPuntos(), ico, siDes or siActual)
        menuRival.separador()
    # ----------- Help entry
    menu.separador()
    menu.opcion("ayuda", _("Help"), Iconos.Ayuda())
    cursor = QtGui.QCursor.pos()
    resp = menu.lanza()
    if resp is None:
        return None
    elif resp == "ayuda":
        titulo = _("Competition")
        ancho, alto = QTUtil.tamEscritorio()
        ancho = min(ancho, 700)
        txt = _(
            "<br><b>The aim is to obtain the highest possible score</b> :<ul><li>The current point score is displayed in the title bar.</li><li>To obtain points it is necessary to win on different levels in different categories.</li><li>To overcome a level it is necessary to win against the engine with white and with black.</li><li>The categories are ranked in the order of the following table:</li><ul><li><b>Beginner</b> : 5</li><li><b>Amateur</b> : 10</li><li><b>Candidate Master</b> : 20</li><li><b>Master</b> : 40</li><li><b>International Master</b> : 80</li><li><b>Grandmaster</b> : 160</li></ul><li>The score for each game is calculated by multiplying the playing level with the score of the category.</li><li>The engines are divided into groups.</li><li>To be able to play with an opponent of a particular group a minimum point score is required. The required score is shown next to the group label.</li></ul>"
        )
        Info.info(wParent, _("Lucas Chess"), titulo, txt, ancho, Iconos.pmAyudaGR())
        return None
    elif resp.startswith("MT_"):
        # Switch engine, restore the cursor position and relaunch the
        # competition window with the new opponent.
        procesador.cambiaRival(resp[3:])
        QtGui.QCursor.setPos(cursor)
        procesador.competicion()
        return None
    else:
        categoria = rival.categorias.numero(int(resp))
        return categoria
class wDatos(QtGui.QDialog):
    """Dialog asking for the settings of a new competition game.

    Collects the level to play and the human player's color, and shows
    read-only information about the current opponent and tutor engines.
    After accept(), exposes .nivel, .siBlancas and .puntos.
    """

    def __init__(self, wParent, categoria, configuracion):
        super(wDatos, self).__init__(wParent)
        self.setWindowTitle(_("New game"))
        self.setWindowIcon(Iconos.Datos())
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.Dialog
            | QtCore.Qt.WindowTitleHint
        )
        tb = QTUtil2.tbAcceptCancel(self)
        f = Controles.TipoLetra(puntos=12, peso=75)
        flb = Controles.TipoLetra(puntos=10)
        # Highest playable level for this category, the colors already won
        # at that level, and the points awarded for completing it.
        (
            self.maxNivel,
            self.maxNivelHecho,
            self.maxPuntos,
        ) = configuracion.maxNivelCategoria(categoria)
        # self.maxNivel = maxNivel = categoria.nivelHecho+1
        # self.maxNivelHecho = categoria.hecho
        # self.maxPuntos = categoria.maxPuntos()
        # Level spinbox, starting at the highest reachable level.
        self.ed = Controles.SB(self, self.maxNivel, 1, self.maxNivel).tamMaximo(40)
        lb = Controles.LB(self, categoria.nombre() + " " + _("Level"))
        lb.ponFuente(f)
        self.lbPuntos = Controles.LB(self).alinDerecha()
        self.connect(self.ed, QtCore.SIGNAL("valueChanged(int)"), self.nivelCambiado)
        # Default color: the one not yet completed at the current level.
        siBlancas = not categoria.siHechoBlancas()
        self.rbBlancas = QtGui.QRadioButton(_("White"))
        self.rbBlancas.setChecked(siBlancas)
        self.rbNegras = QtGui.QRadioButton(_("Black"))
        self.rbNegras.setChecked(not siBlancas)
        self.connect(self.rbBlancas, QtCore.SIGNAL("clicked()"), self.ponMaxPuntos)
        self.connect(self.rbNegras, QtCore.SIGNAL("clicked()"), self.ponMaxPuntos)
        # Opponent information (read-only).
        rival = configuracion.rival
        lbRMotor = (
            Controles.LB(self, "<b>%s</b> : %s" % (_("Engine"), rival.nombre))
            .ponFuente(flb)
            .ponWrap()
            .anchoFijo(400)
        )
        lbRAutor = (
            Controles.LB(self, "<b>%s</b> : %s" % (_("Author"), rival.autor))
            .ponFuente(flb)
            .ponWrap()
            .anchoFijo(400)
        )
        lbRWeb = (
            Controles.LB(
                self,
                '<b>%s</b> : <a href="%s">%s</a>' % (_("Web"), rival.url, rival.url),
            )
            .ponWrap()
            .anchoFijo(400)
            .ponFuente(flb)
        )
        ly = (
            Colocacion.V()
            .control(lbRMotor)
            .control(lbRAutor)
            .control(lbRWeb)
            .margen(10)
        )
        gbR = Controles.GB(self, _("Opponent"), ly).ponFuente(f)
        # Tutor information (read-only); its url attribute is optional.
        tutor = configuracion.tutor
        lbTMotor = (
            Controles.LB(self, "<b>%s</b> : %s" % (_("Engine"), tutor.nombre))
            .ponFuente(flb)
            .ponWrap()
            .anchoFijo(400)
        )
        lbTAutor = (
            Controles.LB(self, "<b>%s</b> : %s" % (_("Author"), tutor.autor))
            .ponFuente(flb)
            .ponWrap()
            .anchoFijo(400)
        )
        siURL = hasattr(tutor, "url")
        if siURL:
            lbTWeb = (
                Controles.LB(
                    self,
                    '<b>%s</b> : <a href="%s">%s</a>' % ("Web", tutor.url, tutor.url),
                )
                .ponWrap()
                .anchoFijo(400)
                .ponFuente(flb)
            )
        ly = Colocacion.V().control(lbTMotor).control(lbTAutor)
        if siURL:
            ly.control(lbTWeb)
        ly.margen(10)
        gbT = Controles.GB(self, _("Tutor"), ly).ponFuente(f)
        hbox = (
            Colocacion.H()
            .relleno()
            .control(self.rbBlancas)
            .espacio(10)
            .control(self.rbNegras)
            .relleno()
        )
        gbColor = Controles.GB(self, _("Play with"), hbox).ponFuente(f)
        lyNivel = (
            Colocacion.H()
            .control(lb)
            .control(self.ed)
            .espacio(10)
            .control(self.lbPuntos)
            .relleno()
        )
        vlayout = (
            Colocacion.V()
            .otro(lyNivel)
            .espacio(10)
            .control(gbColor)
            .espacio(10)
            .control(gbR)
            .espacio(10)
            .control(gbT)
            .margen(30)
        )
        layout = Colocacion.V().control(tb).otro(vlayout).margen(3)
        self.setLayout(layout)
        self.ponMaxPuntos()

    def aceptar(self):
        """Store the chosen level and color, then accept the dialog."""
        self.nivel = self.ed.value()
        self.siBlancas = self.rbBlancas.isChecked()
        self.accept()

    def nivelCambiado(self, nuevo):
        """Slot: recompute the points label whenever the level changes."""
        self.ponMaxPuntos()

    def ponMaxPuntos(self):
        """Show the points at stake: non-zero only at the top level when the
        selected color has not been completed yet."""
        p = 0
        if self.ed.value() >= self.maxNivel:
            color = "B" if self.rbBlancas.isChecked() else "N"
            if color not in self.maxNivelHecho:
                p = self.maxPuntos
        self.lbPuntos.setText("%d %s" % (p, _("points")))
        self.puntos = p
def numEntrenamiento(
    wParent, titulo, hasta, etiqueta=None, pos=None, mensAdicional=None
):
    """Ask for a training unit number; return it, or None if cancelled."""
    dialog = WNumEntrenamiento(wParent, titulo, hasta, etiqueta, pos, mensAdicional)
    if not dialog.exec_():
        return None
    return dialog.numero
class WNumEntrenamiento(QtGui.QDialog):
    """Dialog asking for a training unit number in the range 1..hasta."""

    def __init__(
        self, wParent, titulo, hasta, etiqueta=None, pos=None, mensAdicional=None
    ):
        super(WNumEntrenamiento, self).__init__(wParent)
        self.setWindowTitle(titulo)
        self.setWindowIcon(Iconos.Datos())
        tb = QTUtil2.tbAcceptCancel(self)
        # Defaults: first unit, generic label.
        pos = 1 if pos is None else pos
        etiqueta = _("Training unit") if etiqueta is None else etiqueta
        self.ed, lbUnidad = QTUtil2.spinBoxLB(
            self, pos, 1, hasta, etiqueta=etiqueta, maxTam=60
        )
        lbTotal = Controles.LB(self, "/ %d" % hasta)
        if mensAdicional:
            lbExtra = Controles.LB(self, mensAdicional)
            lbExtra.ponWrap().anchoMinimo(250)
        rowLayout = (
            Colocacion.H()
            .relleno()
            .control(lbUnidad)
            .control(self.ed)
            .control(lbTotal)
            .relleno()
            .margen(15)
        )
        mainLayout = Colocacion.V().control(tb).otro(rowLayout)
        if mensAdicional:
            mainLayout.control(lbExtra)
        mainLayout.margen(3)
        self.setLayout(mainLayout)
        self.resultado = None

    def aceptar(self):
        """Keep the chosen unit number in self.numero and accept."""
        self.numero = self.ed.value()
        self.accept()
def numPosicion(wParent, titulo, nFEN, pos, salta, tipo):
    """Dialog asking for a position number, an ordering type and a jump flag.

    Returns (posicion, tipo, jump) or None when cancelled.
    """
    tipos = [
        (_("Sequential"), "s"),
        (_("Random"), "r"),
        (_("Random with same sequence based on position"), "rk"),
    ]
    campos = [
        FormLayout.separador,
        (
            FormLayout.Spinbox("%s (1..%d)" % (_("Select position"), nFEN), 1, nFEN, 50),
            pos,
        ),
        FormLayout.separador,
        (FormLayout.Combobox(_("Type"), tipos), tipo),
        FormLayout.separador,
        (_("Jump to the next after solve") + ":", salta),
    ]
    resultado = FormLayout.fedit(
        campos,
        title=titulo,
        parent=wParent,
        anchoMinimo=200,
        icon=Iconos.Entrenamiento(),
    )
    if not resultado:
        return None
    posicion, tipo, jump = resultado[1]
    return posicion, tipo, jump
"""Parse the date from various formats.
"""
__copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import contextlib
import datetime
import os
import time
def iter_dates(start_date, end_date):
    """Yield all the dates between 'start_date' (inclusive) and 'end_date' (exclusive).

    Args:
      start_date: An instance of datetime.date.
      end_date: An instance of datetime.date.
    Yields:
      Instances of datetime.date.
    """
    num_days = (end_date - start_date).days
    for offset in range(num_days):
        yield start_date + datetime.timedelta(days=offset)
def render_ofx_date(dtime):
    """Render a datetime to the OFX format.

    Args:
      dtime: A datetime.datetime instance.
    Returns:
      A string, rendered to milliseconds, e.g. '20210305123045.123'.
    """
    millis = dtime.microsecond // 1000
    return "%s.%03d" % (dtime.strftime("%Y%m%d%H%M%S"), millis)
def next_month(date):
    """Compute the date at the beginning of the following month from the given date.

    Args:
      date: A datetime.date instance.
    Returns:
      A datetime.date instance, the first day of the month following 'date'.
    """
    # divmod rolls December (month 12) over into January of the next year.
    year_carry, month_index = divmod(date.month, 12)
    return datetime.date(date.year + year_carry, month_index + 1, 1)
@contextlib.contextmanager
def intimezone(tz_value: str):
    """Temporarily reset the value of TZ.

    This is used for testing.
    Args:
      tz_value: The value of TZ to set for the duration of this context.
    Returns:
      A contextmanager in the given timezone locale.
    """
    saved = os.environ.get("TZ", None)

    def _apply(value):
        # A value of None means "TZ was unset"; restore that exact state.
        if value is None:
            os.environ.pop("TZ")
        else:
            os.environ["TZ"] = value
        time.tzset()

    _apply(tz_value)
    try:
        yield
    finally:
        _apply(saved)
import re
from typing import List
from django.conf import settings
from django.middleware.gzip import GZipMiddleware
class InvalidGZipAllowList(Exception):
    """Raised when a configured gzip allow-list entry is not a valid regex."""
def allowed_path(path: str, allowed_paths: List) -> bool:
    """Return True when *path* matches at least one compiled pattern."""
    for pattern in allowed_paths:
        if pattern.search(path):
            return True
    return False
class ScopedGZipMiddleware(GZipMiddleware):
    """GZip responses only for an explicit allow-list of paths.

    Django's GZipMiddleware comes with security warnings
    (see https://docs.djangoproject.com/en/4.0/ref/middleware/#module-django.middleware.gzip
    and http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf):
    the BREACH attack can recover a secret from compressed responses when an
    endpoint reflects both user input and a secret (such as a CSRF token) in
    the response body. Rather than auditing the whole app, compression is
    opted into per path: once a path is known not to reflect secrets
    together with user input, add a matching pattern to
    GZIP_RESPONSE_ALLOW_LIST (GET) or GZIP_POST_RESPONSE_ALLOW_LIST (POST).
    """

    def __init__(self, get_response=None) -> None:
        super().__init__(get_response)

        def _compile(patterns):
            return [re.compile(pattern) for pattern in patterns]

        try:
            self.allowed_paths = _compile(settings.GZIP_RESPONSE_ALLOW_LIST)
            self.allowed_post_paths = _compile(settings.GZIP_POST_RESPONSE_ALLOW_LIST)
        except re.error as ex:
            raise InvalidGZipAllowList(str(ex)) from ex

    def process_response(self, request, response):
        # Pick the allow-list matching the request method; anything else
        # is never compressed.
        if request.method == "GET":
            patterns = self.allowed_paths
        elif request.method == "POST":
            patterns = self.allowed_post_paths
        else:
            return response
        if allowed_path(request.path, patterns):
            return super().process_response(request, response)
        return response
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_inference_graph."""
import os
import numpy as np
import six
import tensorflow as tf
from app.object_detection import exporter
from app.object_detection.builders import model_builder
from app.object_detection.core import model
from app.object_detection.protos import pipeline_pb2
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
slim = tf.contrib.slim
class FakeModel(model.DetectionModel):
    """Minimal DetectionModel stub that emits a fixed set of detections.

    `predict` adds a single conv layer so the exported graph contains at
    least one checkpointable variable.
    """

    def __init__(self, add_detection_masks=False):
        # Whether postprocess() should also emit a `detection_masks` tensor.
        self._add_detection_masks = add_detection_masks

    def preprocess(self, inputs):
        """Identity preprocessing; returns the input tensor unchanged."""
        return tf.identity(inputs)

    def predict(self, preprocessed_inputs):
        """Apply one conv layer so the model owns trainable variables."""
        return {"image": tf.layers.conv2d(preprocessed_inputs, 3, 1)}

    def postprocess(self, prediction_dict):
        """Return constant detections, gated on the prediction ops running.

        tf.control_dependencies expects a list of ops/tensors; on Python 3
        dict.values() is a view, so it is materialized explicitly.
        """
        with tf.control_dependencies(list(prediction_dict.values())):
            postprocessed_tensors = {
                "detection_boxes": tf.constant(
                    [
                        [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                        [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                    ],
                    tf.float32,
                ),
                "detection_scores": tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32),
                "detection_classes": tf.constant([[0, 1], [1, 0]], tf.float32),
                "num_detections": tf.constant([2, 1], tf.float32),
            }
            if self._add_detection_masks:
                postprocessed_tensors["detection_masks"] = tf.constant(
                    np.arange(64).reshape([2, 2, 4, 4]), tf.float32
                )
        return postprocessed_tensors

    def restore_map(self, checkpoint_path, from_detection_checkpoint):
        """Not needed for these tests."""
        pass

    def loss(self, prediction_dict):
        """Not needed for these tests."""
        pass
class ExportInferenceGraphTest(tf.test.TestCase):
    def _save_checkpoint_from_mock_model(self, checkpoint_path, use_moving_averages):
        """Build a FakeModel graph, initialize it and save a checkpoint.

        Args:
          checkpoint_path: Path prefix where the checkpoint is written.
          use_moving_averages: If True, also create ExponentialMovingAverage
            variables so export paths that fold/strip them can be tested.
        """
        g = tf.Graph()
        with g.as_default():
            mock_model = FakeModel()
            preprocessed_inputs = mock_model.preprocess(
                tf.placeholder(tf.float32, shape=[None, None, None, 3])
            )
            predictions = mock_model.predict(preprocessed_inputs)
            mock_model.postprocess(predictions)
            if use_moving_averages:
                tf.train.ExponentialMovingAverage(0.0).apply()
            slim.get_or_create_global_step()
            saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            with self.test_session() as sess:
                sess.run(init)
                saver.save(sess, checkpoint_path)
def _load_inference_graph(self, inference_graph_path):
od_graph = tf.Graph()
with od_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path) as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name="")
return od_graph
def _create_tf_example(self, image_array):
with self.test_session():
encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
example = tf.train.Example(
features=tf.train.Features(
feature={
"image/encoded": _bytes_feature(encoded_image),
"image/format": _bytes_feature("jpg"),
"image/source_id": _bytes_feature("image_id"),
}
)
).SerializeToString()
return example
    def test_export_graph_with_image_tensor_input(self):
        """Exporting with `image_tensor` input writes a SavedModel to disk."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            output_directory = os.path.join(tmp_dir, "output")
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(output_directory, "saved_model", "saved_model.pb")
                )
            )
    def test_export_graph_with_fixed_size_image_tensor_input(self):
        """Passing input_shape fixes the SavedModel input tensor's shape."""
        input_shape = [1, 320, 320, 3]
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            output_directory = os.path.join(tmp_dir, "output")
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
                input_shape=input_shape,
            )
            saved_model_path = os.path.join(output_directory, "saved_model")
            self.assertTrue(
                os.path.exists(os.path.join(saved_model_path, "saved_model.pb"))
            )
            # Reload the SavedModel and check the serving input tensor's
            # static shape matches the requested input_shape.
            with tf.Graph().as_default() as od_graph:
                with self.test_session(graph=od_graph) as sess:
                    meta_graph = tf.saved_model.loader.load(
                        sess, [tf.saved_model.tag_constants.SERVING], saved_model_path
                    )
                    signature = meta_graph.signature_def["serving_default"]
                    input_tensor_name = signature.inputs["inputs"].name
                    image_tensor = od_graph.get_tensor_by_name(input_tensor_name)
                    self.assertSequenceEqual(
                        image_tensor.get_shape().as_list(), input_shape
                    )
    def test_export_graph_with_tf_example_input(self):
        """Exporting with `tf_example` input writes a SavedModel to disk."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            output_directory = os.path.join(tmp_dir, "output")
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="tf_example",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(output_directory, "saved_model", "saved_model.pb")
                )
            )
    def test_export_graph_with_encoded_image_string_input(self):
        """Exporting with `encoded_image_string_tensor` writes a SavedModel."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            output_directory = os.path.join(tmp_dir, "output")
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="encoded_image_string_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(output_directory, "saved_model", "saved_model.pb")
                )
            )
def _get_variables_in_checkpoint(self, checkpoint_file):
return set(
[var_name for var_name, _ in tf.train.list_variables(checkpoint_file)]
)
    def test_replace_variable_values_with_moving_averages(self):
        """EMA values are folded into the base variables in the new checkpoint,
        and the ExponentialMovingAverage variables themselves are dropped."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        new_checkpoint_prefix = os.path.join(tmp_dir, "new.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        graph = tf.Graph()
        with graph.as_default():
            fake_model = FakeModel()
            preprocessed_inputs = fake_model.preprocess(
                tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])
            )
            predictions = fake_model.predict(preprocessed_inputs)
            fake_model.postprocess(predictions)
            exporter.replace_variable_values_with_moving_averages(
                graph, trained_checkpoint_prefix, new_checkpoint_prefix
            )
        expected_variables = set(["conv2d/bias", "conv2d/kernel"])
        variables_in_old_ckpt = self._get_variables_in_checkpoint(
            trained_checkpoint_prefix
        )
        self.assertIn("conv2d/bias/ExponentialMovingAverage", variables_in_old_ckpt)
        self.assertIn("conv2d/kernel/ExponentialMovingAverage", variables_in_old_ckpt)
        variables_in_new_ckpt = self._get_variables_in_checkpoint(new_checkpoint_prefix)
        self.assertTrue(expected_variables.issubset(variables_in_new_ckpt))
        self.assertNotIn("conv2d/bias/ExponentialMovingAverage", variables_in_new_ckpt)
        self.assertNotIn(
            "conv2d/kernel/ExponentialMovingAverage", variables_in_new_ckpt
        )
    def test_export_graph_with_moving_averages(self):
        """Export with EMA enabled keeps only the base (averaged) variables."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = True
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(output_directory, "saved_model", "saved_model.pb")
                )
            )
            expected_variables = set(["conv2d/bias", "conv2d/kernel", "global_step"])
            actual_variables = set(
                [var_name for var_name, _ in tf.train.list_variables(output_directory)]
            )
            self.assertTrue(expected_variables.issubset(actual_variables))
    def test_export_model_with_all_output_nodes(self):
        """Frozen graph exposes every output tensor, including masks."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        with self.test_session(graph=inference_graph):
            # get_tensor_by_name raises if any expected tensor is missing.
            inference_graph.get_tensor_by_name("image_tensor:0")
            inference_graph.get_tensor_by_name("detection_boxes:0")
            inference_graph.get_tensor_by_name("detection_scores:0")
            inference_graph.get_tensor_by_name("detection_classes:0")
            inference_graph.get_tensor_by_name("detection_masks:0")
            inference_graph.get_tensor_by_name("num_detections:0")
    def test_export_model_with_detection_only_nodes(self):
        """Without masks enabled, the mask tensor is absent from the graph."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=False)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        with self.test_session(graph=inference_graph):
            inference_graph.get_tensor_by_name("image_tensor:0")
            inference_graph.get_tensor_by_name("detection_boxes:0")
            inference_graph.get_tensor_by_name("detection_scores:0")
            inference_graph.get_tensor_by_name("detection_classes:0")
            inference_graph.get_tensor_by_name("num_detections:0")
            # Masks were not requested, so looking them up must fail.
            with self.assertRaises(KeyError):
                inference_graph.get_tensor_by_name("detection_masks:0")
    def test_export_and_run_inference_with_image_tensor(self):
        """Run the frozen graph on a batch of images and check every output."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="image_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        with self.test_session(graph=inference_graph) as sess:
            image_tensor = inference_graph.get_tensor_by_name("image_tensor:0")
            boxes = inference_graph.get_tensor_by_name("detection_boxes:0")
            scores = inference_graph.get_tensor_by_name("detection_scores:0")
            classes = inference_graph.get_tensor_by_name("detection_classes:0")
            masks = inference_graph.get_tensor_by_name("detection_masks:0")
            num_detections = inference_graph.get_tensor_by_name("num_detections:0")
            (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
                [boxes, scores, classes, masks, num_detections],
                feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)},
            )
            self.assertAllClose(
                boxes_np,
                [
                    [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                    [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                ],
            )
            self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]])
            # Exported classes are shifted by the label offset, hence 1-based.
            self.assertAllClose(classes_np, [[1, 2], [2, 1]])
            self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
            self.assertAllClose(num_detections_np, [2, 1])
def _create_encoded_image_string(self, image_array_np, encoding_format):
od_graph = tf.Graph()
with od_graph.as_default():
if encoding_format == "jpg":
encoded_string = tf.image.encode_jpeg(image_array_np)
elif encoding_format == "png":
encoded_string = tf.image.encode_png(image_array_np)
else:
raise ValueError("Supports only the following formats: `jpg`, `png`")
with self.test_session(graph=od_graph):
return encoded_string.eval()
    def test_export_and_run_inference_with_encoded_image_string_tensor(self):
        """Run the frozen graph on batches of jpg and png encoded strings."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="encoded_image_string_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        jpg_image_str = self._create_encoded_image_string(
            np.ones((4, 4, 3)).astype(np.uint8), "jpg"
        )
        png_image_str = self._create_encoded_image_string(
            np.ones((4, 4, 3)).astype(np.uint8), "png"
        )
        with self.test_session(graph=inference_graph) as sess:
            image_str_tensor = inference_graph.get_tensor_by_name(
                "encoded_image_string_tensor:0"
            )
            boxes = inference_graph.get_tensor_by_name("detection_boxes:0")
            scores = inference_graph.get_tensor_by_name("detection_scores:0")
            classes = inference_graph.get_tensor_by_name("detection_classes:0")
            masks = inference_graph.get_tensor_by_name("detection_masks:0")
            num_detections = inference_graph.get_tensor_by_name("num_detections:0")
            # Both encodings must decode to the same detections.
            for image_str in [jpg_image_str, png_image_str]:
                image_str_batch_np = np.hstack([image_str] * 2)
                (
                    boxes_np,
                    scores_np,
                    classes_np,
                    masks_np,
                    num_detections_np,
                ) = sess.run(
                    [boxes, scores, classes, masks, num_detections],
                    feed_dict={image_str_tensor: image_str_batch_np},
                )
                self.assertAllClose(
                    boxes_np,
                    [
                        [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                        [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                    ],
                )
                self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]])
                self.assertAllClose(classes_np, [[1, 2], [2, 1]])
                self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
                self.assertAllClose(num_detections_np, [2, 1])
    def test_raise_runtime_error_on_images_with_different_sizes(self):
        """Export an encoded-image-string graph and verify that feeding a batch
        of images with mismatched spatial sizes fails.

        The error message pins the implementation detail that decoded images
        are stacked through a TensorArray, which requires consistent element
        shapes.
        """
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        # Patch the model builder so only the export/inference plumbing is
        # exercised, not a real detection model.
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="encoded_image_string_tensor",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        # Two JPEG images of different spatial sizes in the same batch.
        large_image = self._create_encoded_image_string(
            np.ones((4, 4, 3)).astype(np.uint8), "jpg"
        )
        small_image = self._create_encoded_image_string(
            np.ones((2, 2, 3)).astype(np.uint8), "jpg"
        )
        image_str_batch_np = np.hstack([large_image, small_image])
        with self.test_session(graph=inference_graph) as sess:
            image_str_tensor = inference_graph.get_tensor_by_name(
                "encoded_image_string_tensor:0"
            )
            boxes = inference_graph.get_tensor_by_name("detection_boxes:0")
            scores = inference_graph.get_tensor_by_name("detection_scores:0")
            classes = inference_graph.get_tensor_by_name("detection_classes:0")
            masks = inference_graph.get_tensor_by_name("detection_masks:0")
            num_detections = inference_graph.get_tensor_by_name("num_detections:0")
            # Running the graph on the mixed-size batch must fail.
            with self.assertRaisesRegexp(
                tf.errors.InvalidArgumentError, "^TensorArray has inconsistent shapes."
            ):
                sess.run(
                    [boxes, scores, classes, masks, num_detections],
                    feed_dict={image_str_tensor: image_str_batch_np},
                )
    def test_export_and_run_inference_with_tf_example(self):
        """Export a frozen graph with tf_example input and verify the detection
        outputs (boxes, scores, classes, masks, counts) against the mock
        model's fixed values."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=True
        )
        output_directory = os.path.join(tmp_dir, "output")
        inference_graph_path = os.path.join(
            output_directory, "frozen_inference_graph.pb"
        )
        # Patch the model builder so the test exercises only the exporter.
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="tf_example",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        inference_graph = self._load_inference_graph(inference_graph_path)
        # A batch of one serialized tf.Example containing a 4x4 image.
        tf_example_np = np.expand_dims(
            self._create_tf_example(np.ones((4, 4, 3)).astype(np.uint8)), axis=0
        )
        with self.test_session(graph=inference_graph) as sess:
            tf_example = inference_graph.get_tensor_by_name("tf_example:0")
            boxes = inference_graph.get_tensor_by_name("detection_boxes:0")
            scores = inference_graph.get_tensor_by_name("detection_scores:0")
            classes = inference_graph.get_tensor_by_name("detection_classes:0")
            masks = inference_graph.get_tensor_by_name("detection_masks:0")
            num_detections = inference_graph.get_tensor_by_name("num_detections:0")
            (boxes_np, scores_np, classes_np, masks_np, num_detections_np) = sess.run(
                [boxes, scores, classes, masks, num_detections],
                feed_dict={tf_example: tf_example_np},
            )
            # Expected values correspond to the mock model's fixed outputs.
            self.assertAllClose(
                boxes_np,
                [
                    [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                    [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                ],
            )
            self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]])
            self.assertAllClose(classes_np, [[1, 2], [2, 1]])
            self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
            self.assertAllClose(num_detections_np, [2, 1])
    def test_export_saved_model_and_run_inference(self):
        """Export to the SavedModel format, reload it via the SavedModel loader,
        and verify the detection outputs resolved through the serving
        signature match the mock model's fixed values."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        output_directory = os.path.join(tmp_dir, "output")
        saved_model_path = os.path.join(output_directory, "saved_model")
        # Patch the model builder so the test exercises only the exporter.
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="tf_example",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        # A batch of two identical serialized tf.Examples.
        tf_example_np = np.hstack(
            [self._create_tf_example(np.ones((4, 4, 3)).astype(np.uint8))] * 2
        )
        with tf.Graph().as_default() as od_graph:
            with self.test_session(graph=od_graph) as sess:
                # Load the exported SavedModel and resolve all tensors through
                # the serving_default signature rather than hard-coded names.
                meta_graph = tf.saved_model.loader.load(
                    sess, [tf.saved_model.tag_constants.SERVING], saved_model_path
                )
                signature = meta_graph.signature_def["serving_default"]
                input_tensor_name = signature.inputs["inputs"].name
                tf_example = od_graph.get_tensor_by_name(input_tensor_name)
                boxes = od_graph.get_tensor_by_name(
                    signature.outputs["detection_boxes"].name
                )
                scores = od_graph.get_tensor_by_name(
                    signature.outputs["detection_scores"].name
                )
                classes = od_graph.get_tensor_by_name(
                    signature.outputs["detection_classes"].name
                )
                masks = od_graph.get_tensor_by_name(
                    signature.outputs["detection_masks"].name
                )
                num_detections = od_graph.get_tensor_by_name(
                    signature.outputs["num_detections"].name
                )
                (
                    boxes_np,
                    scores_np,
                    classes_np,
                    masks_np,
                    num_detections_np,
                ) = sess.run(
                    [boxes, scores, classes, masks, num_detections],
                    feed_dict={tf_example: tf_example_np},
                )
                # Expected values correspond to the mock model's fixed outputs.
                self.assertAllClose(
                    boxes_np,
                    [
                        [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                        [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                    ],
                )
                self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]])
                self.assertAllClose(classes_np, [[1, 2], [2, 1]])
                self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
                self.assertAllClose(num_detections_np, [2, 1])
    def test_export_checkpoint_and_run_inference(self):
        """Export as a checkpoint + metagraph, restore it with a fresh Saver,
        and verify the detection outputs match the mock model's fixed
        values."""
        tmp_dir = self.get_temp_dir()
        trained_checkpoint_prefix = os.path.join(tmp_dir, "model.ckpt")
        self._save_checkpoint_from_mock_model(
            trained_checkpoint_prefix, use_moving_averages=False
        )
        output_directory = os.path.join(tmp_dir, "output")
        model_path = os.path.join(output_directory, "model.ckpt")
        meta_graph_path = model_path + ".meta"
        # Patch the model builder so the test exercises only the exporter.
        with mock.patch.object(model_builder, "build", autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel(add_detection_masks=True)
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            pipeline_config.eval_config.use_moving_averages = False
            exporter.export_inference_graph(
                input_type="tf_example",
                pipeline_config=pipeline_config,
                trained_checkpoint_prefix=trained_checkpoint_prefix,
                output_directory=output_directory,
            )
        # A batch of two identical serialized tf.Examples.
        tf_example_np = np.hstack(
            [self._create_tf_example(np.ones((4, 4, 3)).astype(np.uint8))] * 2
        )
        with tf.Graph().as_default() as od_graph:
            with self.test_session(graph=od_graph) as sess:
                # Rebuild the graph from the exported metagraph and restore
                # the exported variables into it.
                new_saver = tf.train.import_meta_graph(meta_graph_path)
                new_saver.restore(sess, model_path)
                tf_example = od_graph.get_tensor_by_name("tf_example:0")
                boxes = od_graph.get_tensor_by_name("detection_boxes:0")
                scores = od_graph.get_tensor_by_name("detection_scores:0")
                classes = od_graph.get_tensor_by_name("detection_classes:0")
                masks = od_graph.get_tensor_by_name("detection_masks:0")
                num_detections = od_graph.get_tensor_by_name("num_detections:0")
                (
                    boxes_np,
                    scores_np,
                    classes_np,
                    masks_np,
                    num_detections_np,
                ) = sess.run(
                    [boxes, scores, classes, masks, num_detections],
                    feed_dict={tf_example: tf_example_np},
                )
                # Expected values correspond to the mock model's fixed outputs.
                self.assertAllClose(
                    boxes_np,
                    [
                        [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                        [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
                    ],
                )
                self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]])
                self.assertAllClose(classes_np, [[1, 2], [2, 1]])
                self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4]))
                self.assertAllClose(num_detections_np, [2, 1])
# Run all test cases through the TensorFlow test runner when executed directly.
if __name__ == "__main__":
    tf.test.main()
import asyncio
import gc
import logging
import platform
import sys
from datetime import datetime
from typing import Optional
import human_readable
import pytest
from _pytest.config import Config
from _pytest.python import Function
from aiohttp.web_app import Application
from tribler.core.components.restapi.rest.rest_endpoint import RESTEndpoint
from tribler.core.components.restapi.rest.rest_manager import error_middleware
from tribler.core.utilities.network_utils import default_network_utils
# Enable origin tracking for coroutine objects in the current thread, so when a test does not handle
# some coroutine properly, we can see a traceback with the name of the test which created the coroutine.
# Note that the error can happen in an unrelated test where the unhandled task from the previous test
# was garbage collected. Without the origin tracking, it may be hard to see the test that created the task.
sys.set_coroutine_origin_tracking_depth(10)
# Verbosity flag; set from the pytest command line in pytest_cmdline_main().
enable_extended_logging = False
pytest_start_time: Optional[datetime] = None # a time when the test suite started
# pylint: disable=unused-argument, redefined-outer-name
def pytest_configure(config):
    """Pytest hook: one-time session configuration.

    Silences the ``faker.factory`` logger so its records do not propagate to
    the root logger during any test.
    """
    faker_logger = logging.getLogger("faker.factory")
    faker_logger.propagate = False
@pytest.hookimpl
def pytest_cmdline_main(config: Config):
    """Enable extended logging if the verbose option is used"""
    # Called for performing the main command line action.
    # Any -v flag (verbose > 0) turns on the per-test timing output produced
    # by pytest_runtest_protocol.
    global enable_extended_logging # pylint: disable=global-statement
    enable_extended_logging = config.option.verbose > 0
@pytest.hookimpl
def pytest_collection_finish(session):
    """Save the start time of the test suite execution"""
    # Called after collection has been performed and modified.
    # The timestamp is used by pytest_runtest_protocol to report the total
    # elapsed suite time after each test.
    global pytest_start_time # pylint: disable=global-statement
    pytest_start_time = datetime.now()
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item: Function, log=True, nextitem=None):
    """Modify the pytest output to include the execution duration for all tests"""
    # Perform the runtest protocol for a single test item.
    # Only active in verbose mode (see pytest_cmdline_main); otherwise the
    # wrapped protocol runs unchanged.
    if enable_extended_logging and pytest_start_time:
        start_time = datetime.now()
        print(
            f'\n{start_time.strftime("%H:%M:%S.%f")[:-3]} Starting "{item.name}"...',
            end="",
            flush=True,
        )
        # The actual test protocol executes while this hookwrapper is suspended.
        yield
        now = datetime.now()
        duration = (now - start_time).total_seconds()
        total = now - pytest_start_time
        print(
            f" in {duration:.3f}s ({human_readable.time_delta(total)} in total)", end=""
        )
    else:
        yield
@pytest.fixture(autouse=True)
def ensure_gc():
    """Ensure that the garbage collector runs after each test.
    This is critical for test stability as we use Libtorrent and need to ensure all its destructors are called."""
    # For this fixture, it is necessary for it to be called as late as possible within the current test's scope.
    # Therefore it should be placed at the first place in the "function" scope.
    # If there are two or more autouse fixtures within this scope, the order should be explicitly set through using
    # this fixture as a dependency.
    # See the discussion in https://github.com/Tribler/tribler/pull/7542 for more information.
    yield
    # Without "yield" the fixture triggers the garbage collection at the beginning of the (next) test.
    # For that reason, the errors triggered during the garbage collection phase will take place not in the erroneous
    # test but in the randomly scheduled next test. Usually, these errors are silently suppressed, as any exception in
    # __del__ methods is silently suppressed, but they still can somehow affect the test.
    #
    # By adding the yield we move the garbage collection phase to the end of the current test, to not affect the next
    # test.
    # Force a full collection now, within the scope of the current test.
    gc.collect()
@pytest.fixture
def free_port():
    """Provide a random free network port in the range [1024, 50000) for a test."""
    return default_network_utils.get_random_free_port(start=1024, stop=50000)
@pytest.fixture
def event_loop():
    """Provide a fresh asyncio event loop for each test.

    On Windows the selector-based loop policy is installed first, to prevent
    the "Loop is closed" error seen with the default policy.

    The loop is closed in a ``finally`` so it is released even when the test
    body raises (pytest throws the exception into this generator, which would
    otherwise skip the close and leak the loop).
    """
    if platform.system() == "Windows":
        # to prevent the "Loop is closed" error
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    policy = asyncio.get_event_loop_policy()
    loop = policy.new_event_loop()
    try:
        yield loop
    finally:
        loop.close()
@pytest.fixture
async def rest_api(event_loop, aiohttp_client, endpoint: RESTEndpoint):
    # In each test file that requires the use of this fixture, the endpoint fixture needs to be specified.
    # Build a minimal aiohttp application that hosts only the endpoint under
    # test, mirroring the endpoint's own client_max_size limit.
    client_max_size: int = endpoint.app._client_max_size # pylint:disable=protected-access
    app = Application(middlewares=[error_middleware], client_max_size=client_max_size)
    app.add_subapp(endpoint.path, endpoint.app)
    yield await aiohttp_client(app)
    # Teardown: shut down the endpoint before the wrapping application.
    await endpoint.shutdown()
    await app.shutdown()
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from .facebook import FacebookIE
class BuzzFeedIE(InfoExtractor):
    """Extractor for BuzzFeed article pages.

    A single article may embed several videos; each embed is emitted as a
    playlist entry and delegated to the extractor matching its URL
    (e.g. YouTube or Facebook).
    """

    _VALID_URL = r"https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)"
    _TESTS = [
        {
            "url": "http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia",
            "info_dict": {
                "id": "this-angry-ram-destroys-a-punching-bag-like-a-boss",
                "title": "This Angry Ram Destroys A Punching Bag Like A Boss",
                "description": "Rambro!",
            },
            "playlist": [
                {
                    "info_dict": {
                        "id": "aVCR29aE_OQ",
                        "ext": "mp4",
                        "title": "Angry Ram destroys a punching bag..",
                        "description": "md5:c59533190ef23fd4458a5e8c8c872345",
                        "upload_date": "20141024",
                        "uploader_id": "Buddhanz1",
                        "uploader": "Angry Ram",
                    }
                }
            ],
        },
        {
            "url": "http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia",
            "params": {
                "skip_download": True,  # Got enough YouTube download tests
            },
            "info_dict": {
                "id": "look-at-this-cute-dog-omg",
                "description": "re:Munchkin the Teddy Bear is back ?!",
                "title": "You Need To Stop What You're Doing And Watching This Dog Walk On A Treadmill",
            },
            "playlist": [
                {
                    "info_dict": {
                        "id": "mVmBL8B-In0",
                        "ext": "mp4",
                        "title": "re:Munchkin the Teddy Bear gets her exercise",
                        "description": "md5:28faab95cda6e361bcff06ec12fc21d8",
                        "upload_date": "20141124",
                        "uploader_id": "CindysMunchkin",
                        "uploader": "re:^Munchkin the",
                    },
                }
            ],
        },
        {
            "url": "http://www.buzzfeed.com/craigsilverman/the-most-adorable-crash-landing-ever#.eq7pX0BAmK",
            "info_dict": {
                "id": "the-most-adorable-crash-landing-ever",
                "title": "Watch This Baby Goose Make The Most Adorable Crash Landing",
                "description": "This gosling knows how to stick a landing.",
            },
            "playlist": [
                {
                    "md5": "763ca415512f91ca62e4621086900a23",
                    "info_dict": {
                        "id": "971793786185728",
                        "ext": "mp4",
                        "title": "We set up crash pads so that the goslings on our roof would have a safe landi...",
                        "uploader": "Calgary Outdoor Centre-University of Calgary",
                    },
                }
            ],
            "add_ie": ["Facebook"],
        },
    ]

    def _real_extract(self, url):
        """Return a playlist of all videos embedded in the article page."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # Each embedded player carries its metadata JSON in the
        # rel:bf_bucket_data attribute of a <div class="video-embed..."> tag.
        all_buckets = re.findall(
            r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
            webpage,
        )
        entries = []
        for bd_json in all_buckets:
            bd = json.loads(bd_json)
            # Buckets without a video payload (e.g. non-video embeds) are skipped.
            video = bd.get("video") or bd.get("progload_video")
            if not video:
                continue
            entries.append(self.url_result(video["url"]))
        # Facebook embeds are not in the buckets; collect them separately.
        facebook_urls = FacebookIE._extract_urls(webpage)
        entries.extend(
            [self.url_result(facebook_url) for facebook_url in facebook_urls]
        )
        return {
            "_type": "playlist",
            "id": playlist_id,
            "title": self._og_search_title(webpage),
            "description": self._og_search_description(webpage),
            "entries": entries,
        }
from apps.grafana_plugin.helpers import GrafanaAPIClient
from apps.user_management.models.organization import Organization
from apps.user_management.sync import sync_organization
from common.api_helpers.mixins import GrafanaHeadersMixin
from django.conf import settings
from rest_framework import status
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
class SelfHostedInstallView(GrafanaHeadersMixin, APIView):
    def post(self, _request: Request) -> Response:
        """
        We've already validated that settings.GRAFANA_API_URL is set (in apps.grafana_plugin.GrafanaPluginConfig)
        The user is now trying to finish plugin installation. We'll take the Grafana API url that they specified +
        the token that we are provided and first verify them. If all is good, upsert the organization in the database,
        and provision the plugin.

        Returns 201 with provisioning info on success, 403 for non-open-source
        licenses, and 400 when the Grafana API is unreachable or rejects the
        token. Every error response carries the reason in the "error" key.
        """
        stack_id = settings.SELF_HOSTED_SETTINGS["STACK_ID"]
        org_id = settings.SELF_HOSTED_SETTINGS["ORG_ID"]
        grafana_url = settings.SELF_HOSTED_SETTINGS["GRAFANA_API_URL"]
        grafana_api_token = self.instance_context["grafana_token"]
        provisioning_info = {"error": None}
        # Self-hosted installation is only supported under the open-source license.
        if settings.LICENSE != settings.OPEN_SOURCE_LICENSE_NAME:
            provisioning_info["error"] = "License type not authorized"
            # Fix: include the error payload with the 403, consistent with the
            # other error branches below (previously the body was dropped).
            return Response(data=provisioning_info, status=status.HTTP_403_FORBIDDEN)
        # Verify the URL/token pair before touching the database.
        grafana_api_client = GrafanaAPIClient(
            api_url=grafana_url, api_token=grafana_api_token
        )
        _, client_status = grafana_api_client.check_token()
        status_code = client_status["status_code"]
        if status_code == status.HTTP_404_NOT_FOUND:
            provisioning_info[
                "error"
            ] = f"Unable to connect to the specified Grafana API - {grafana_url}"
            return Response(data=provisioning_info, status=status.HTTP_400_BAD_REQUEST)
        elif status_code in [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]:
            provisioning_info[
                "error"
            ] = f"You are not authorized to communicate with the specified Grafana API - {grafana_url}"
            return Response(data=provisioning_info, status=status.HTTP_400_BAD_REQUEST)
        organization = Organization.objects.filter(
            stack_id=stack_id, org_id=org_id
        ).first()
        rbac_is_enabled = grafana_api_client.is_rbac_enabled_for_organization()
        if organization:
            # Existing organization: revoke the previous plugin registration
            # and update the connection details in place.
            organization.revoke_plugin()
            organization.grafana_url = grafana_url
            organization.api_token = grafana_api_token
            organization.is_rbac_permissions_enabled = rbac_is_enabled
            organization.save(
                update_fields=[
                    "grafana_url",
                    "api_token",
                    "is_rbac_permissions_enabled",
                ]
            )
        else:
            organization = Organization.objects.create(
                stack_id=stack_id,
                stack_slug=settings.SELF_HOSTED_SETTINGS["STACK_SLUG"],
                org_id=org_id,
                org_slug=settings.SELF_HOSTED_SETTINGS["ORG_SLUG"],
                org_title=settings.SELF_HOSTED_SETTINGS["ORG_TITLE"],
                region_slug=settings.SELF_HOSTED_SETTINGS["REGION_SLUG"],
                cluster_slug=settings.SELF_HOSTED_SETTINGS["CLUSTER_SLUG"],
                grafana_url=grafana_url,
                api_token=grafana_api_token,
                is_rbac_permissions_enabled=rbac_is_enabled,
            )
        # Pull users/teams from Grafana, then issue fresh plugin credentials.
        sync_organization(organization)
        provisioning_info.update(organization.provision_plugin())
        return Response(data=provisioning_info, status=status.HTTP_201_CREATED)
# Copyright (C) 2011 Chris Dekter
# Copyright (C) 2018 Thomas Hess <thomas.hess@udo.edu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import typing
import autokey.qtui.common as ui_common
from autokey.qtui.dialogs import GlobalHotkeyDialog
from PyQt5.QtWidgets import QApplication, QDialog, QLabel, QPushButton, QWidget
if typing.TYPE_CHECKING:
from autokey.qtapp import Application
logger = __import__("autokey.logger").logger.get_logger(__name__)
class SpecialHotkeySettings(
    *ui_common.inherits_from_ui_file_with_name("specialhotkeysettings")
):
    """
    The SpecialHotkeySettings class is used inside the AutoKey configuration dialog.
    It allows the user to select or clear global hotkeys.
    Currently has two hotkeys:
    - use_service enables/disables the autokey background service
    - use_config shows the autokey config/main window, if hidden.

    NOTE(review): the on_*_pressed methods appear to be auto-connected by
    widget name (Qt connectSlotsByName via setupUi) — do not rename them;
    confirm against the .ui file.
    """

    KEY_MAP = GlobalHotkeyDialog.KEY_MAP
    REVERSE_KEY_MAP = GlobalHotkeyDialog.REVERSE_KEY_MAP

    def __init__(self, parent: QWidget = None):
        """Build the widget and load both hotkeys from the configuration."""
        super(SpecialHotkeySettings, self).__init__(parent)
        self.setupUi(self)
        self.show_config_dlg = GlobalHotkeyDialog(parent)
        self.toggle_monitor_dlg = GlobalHotkeyDialog(parent)
        self.use_config_hotkey = False
        self.use_service_hotkey = False
        app = QApplication.instance() # type: Application
        self.config_manager = app.configManager
        # _load_hotkey() reflects each hotkey in the UI and reports whether
        # it is currently configured.
        self.use_config_hotkey = self._load_hotkey(
            self.config_manager.configHotkey,
            self.config_key_label,
            self.show_config_dlg,
            self.clear_config_button,
        )
        self.use_service_hotkey = self._load_hotkey(
            self.config_manager.toggleServiceHotkey,
            self.monitor_key_label,
            self.toggle_monitor_dlg,
            self.clear_monitor_button,
        )

    @staticmethod
    def _load_hotkey(
        item, label: QLabel, dialog: GlobalHotkeyDialog, clear_button: QPushButton
    ):
        """Load *item* into *dialog* and mirror its state in *label* and *clear_button*.

        Returns True when the hotkey is enabled, False otherwise.
        """
        dialog.load(item)
        if item.enabled:
            key = item.hotKey
            label.setText(item.get_hotkey_string(key, item.modifiers))
            clear_button.setEnabled(True)
            return True
        else:
            label.setText("(None configured)")
            clear_button.setEnabled(False)
            return False

    def save(self):
        """Write both hotkeys back to the configuration and re-register them with the app."""
        config_hotkey = self.config_manager.configHotkey
        toggle_hotkey = self.config_manager.toggleServiceHotkey
        app = QApplication.instance() # type: Application
        # Unregister first so a changed or cleared hotkey does not linger.
        if config_hotkey.enabled:
            app.hotkey_removed(config_hotkey)
        config_hotkey.enabled = self.use_config_hotkey
        if self.use_config_hotkey:
            self.show_config_dlg.save(config_hotkey)
            app.hotkey_created(config_hotkey)
        if toggle_hotkey.enabled:
            app.hotkey_removed(toggle_hotkey)
        toggle_hotkey.enabled = self.use_service_hotkey
        if self.use_service_hotkey:
            self.toggle_monitor_dlg.save(toggle_hotkey)
            app.hotkey_created(toggle_hotkey)

    # ---- Signal handlers

    def on_set_config_button_pressed(self):
        """Open the hotkey picker for the 'show config window' hotkey."""
        self.show_config_dlg.exec_()
        if self.show_config_dlg.result() == QDialog.Accepted:
            self.use_config_hotkey = True
            key = self.show_config_dlg.key
            modifiers = self.show_config_dlg.build_modifiers()
            self.config_key_label.setText(
                self.show_config_dlg.target_item.get_hotkey_string(key, modifiers)
            )
            self.clear_config_button.setEnabled(True)

    def on_clear_config_button_pressed(self):
        """Clear the 'show config window' hotkey."""
        self.use_config_hotkey = False
        self.clear_config_button.setEnabled(False)
        self.config_key_label.setText("(None configured)")
        self.show_config_dlg.reset()

    def on_set_monitor_button_pressed(self):
        """Open the hotkey picker for the 'toggle service' hotkey."""
        self.toggle_monitor_dlg.exec_()
        if self.toggle_monitor_dlg.result() == QDialog.Accepted:
            self.use_service_hotkey = True
            key = self.toggle_monitor_dlg.key
            modifiers = self.toggle_monitor_dlg.build_modifiers()
            self.monitor_key_label.setText(
                self.toggle_monitor_dlg.target_item.get_hotkey_string(key, modifiers)
            )
            self.clear_monitor_button.setEnabled(True)

    def on_clear_monitor_button_pressed(self):
        """Clear the 'toggle service' hotkey."""
        self.use_service_hotkey = False
        self.clear_monitor_button.setEnabled(False)
        self.monitor_key_label.setText("(None configured)")
        self.toggle_monitor_dlg.reset()
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2011 Lukáš Lalinský
# Copyright (C) 2008-2009 Nikolai Prokoschenko
# Copyright (C) 2009-2010, 2014-2015, 2018-2022 Philipp Wolfer
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2011-2013 Wieland Hoffmann
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013 Ionuț Ciocîrlan
# Copyright (C) 2013-2014 Sophist-UK
# Copyright (C) 2013-2015, 2018-2022 Laurent Monin
# Copyright (C) 2015 Alex Berman
# Copyright (C) 2015 Ohm Patel
# Copyright (C) 2016 Suhas
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2021 Bob Swift
# Copyright (C) 2021 Gabriel Ferreira
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from picard.config import BoolOption, TextOption, get_config
from picard.script import ScriptParser
from picard.ui.options import OptionsCheckError, OptionsPage, register_options_page
from picard.ui.options.scripting import ScriptCheckError, ScriptingDocumentationDialog
from picard.ui.scripteditor import (
ScriptEditorDialog,
ScriptEditorExamples,
populate_script_selection_combo_box,
synchronize_vertical_scrollbars,
)
from picard.ui.ui_options_renaming import Ui_RenamingOptionsPage
from PyQt5 import QtWidgets
from PyQt5.QtCore import QStandardPaths
from PyQt5.QtGui import QPalette
# Default destination for moved files: the platform's standard music location.
_default_music_dir = QStandardPaths.writableLocation(
    QStandardPaths.StandardLocation.MusicLocation
)
class RenamingOptionsPage(OptionsPage):
NAME = "filerenaming"
TITLE = N_("File Naming")
PARENT = None
SORT_ORDER = 40
ACTIVE = True
HELP_URL = "/config/options_filerenaming.html"
options = [
BoolOption("setting", "rename_files", False),
BoolOption("setting", "move_files", False),
TextOption("setting", "move_files_to", _default_music_dir),
BoolOption("setting", "move_additional_files", False),
TextOption("setting", "move_additional_files_pattern", "*.jpg *.png"),
BoolOption("setting", "delete_empty_dirs", True),
]
    def __init__(self, parent=None):
        """Build the page UI and connect all widget signals."""
        super().__init__(parent)
        self.script_text = ""
        self.compat_options = {}
        self.ui = Ui_RenamingOptionsPage()
        self.ui.setupUi(self)
        # Any change to the rename/move controls refreshes the examples.
        self.ui.rename_files.clicked.connect(self.update_examples_from_local)
        self.ui.move_files.clicked.connect(self.update_examples_from_local)
        self.ui.move_files_to.editingFinished.connect(self.update_examples_from_local)
        self.ui.move_files.toggled.connect(self.toggle_file_naming_format)
        self.ui.rename_files.toggled.connect(self.toggle_file_naming_format)
        self.toggle_file_naming_format(None)
        self.ui.open_script_editor.clicked.connect(self.show_script_editing_page)
        self.ui.move_files_to_browse.clicked.connect(self.move_files_to_browse)
        self.ui.naming_script_selector.currentIndexChanged.connect(
            self.update_selector_in_editor
        )
        # Keep selections of the before/after example lists in sync.
        self.ui.example_filename_after.itemSelectionChanged.connect(
            self.match_before_to_after
        )
        self.ui.example_filename_before.itemSelectionChanged.connect(
            self.match_after_to_before
        )
        # Prepare a read-only palette variant for the pattern edit field.
        script_edit = self.ui.move_additional_files_pattern
        self.script_palette_normal = script_edit.palette()
        self.script_palette_readonly = QPalette(self.script_palette_normal)
        disabled_color = self.script_palette_normal.color(
            QPalette.ColorGroup.Inactive, QPalette.ColorRole.Window
        )
        self.script_palette_readonly.setColor(
            QPalette.ColorGroup.Disabled, QPalette.ColorRole.Base, disabled_color
        )
        self.ui.example_filename_sample_files_button.clicked.connect(
            self.update_example_files
        )
        self.examples = ScriptEditorExamples(tagger=self.tagger)
        # Script editor dialog object will not be created until it is specifically requested, in order to ensure proper window modality.
        self.script_editor_dialog = None
        self.ui.example_selection_note.setText(self.examples.get_notes_text())
        self.ui.example_filename_sample_files_button.setToolTip(
            self.examples.get_tooltip_text()
        )
        # Sync example lists vertical scrolling and selection colors
        synchronize_vertical_scrollbars(
            (self.ui.example_filename_before, self.ui.example_filename_after)
        )
        # -1 means "no example row selected yet".
        self.current_row = -1
    def update_selector_from_editor(self):
        """Update the script selector combo box from the script editor page."""
        # The editor dialog owns the authoritative scripts and selection here.
        self.naming_scripts = self.script_editor_dialog.naming_scripts
        self.selected_naming_script_id = self.script_editor_dialog.selected_script_id
        populate_script_selection_combo_box(
            self.naming_scripts,
            self.selected_naming_script_id,
            self.ui.naming_script_selector,
        )
        self.display_examples()
def update_selector_from_settings(self):
"""Update the script selector combo box from the settings."""
populate_script_selection_combo_box(
self.naming_scripts,
self.selected_naming_script_id,
self.ui.naming_script_selector,
)
self.update_selector_in_editor()
    def update_selector_in_editor(self):
        """Update the selection in the script editor page to match local selection."""
        idx = self.ui.naming_script_selector.currentIndex()
        if self.script_editor_dialog:
            self.script_editor_dialog.set_selected_script_index(idx)
        else:
            # No editor open: apply the combo box selection locally and
            # refresh the examples with the newly selected script.
            script_item = self.ui.naming_script_selector.itemData(idx)
            self.script_text = script_item["script"]
            self.selected_naming_script_id = script_item["id"]
            self.examples.update_examples(script_text=self.script_text)
            self.update_examples_from_local()
def match_after_to_before(self):
"""Sets the selected item in the 'after' list to the corresponding item in the 'before' list."""
self.examples.synchronize_selected_example_lines(
self.current_row,
self.ui.example_filename_before,
self.ui.example_filename_after,
)
def match_before_to_after(self):
"""Sets the selected item in the 'before' list to the corresponding item in the 'after' list."""
self.examples.synchronize_selected_example_lines(
self.current_row,
self.ui.example_filename_after,
self.ui.example_filename_before,
)
    def show_script_editing_page(self):
        """Open (or re-show) the script editor dialog and wire up its signals."""
        self.script_editor_dialog = ScriptEditorDialog.show_instance(
            parent=self, examples=self.examples
        )
        self.script_editor_dialog.signal_save.connect(self.save_from_editor)
        self.script_editor_dialog.signal_update.connect(self.display_examples)
        self.script_editor_dialog.signal_selection_changed.connect(
            self.update_selector_from_editor
        )
        self.script_editor_dialog.finished.connect(self.script_editor_dialog_close)
        if self.tagger.window.script_editor_dialog is not None:
            # An editor was already open: just resync our selector from it.
            self.update_selector_from_editor()
        else:
            # First open: push the local scripts/selection into the editor
            # while its 'loading' flag suppresses change handling.
            self.script_editor_dialog.loading = True
            self.script_editor_dialog.naming_scripts = self.naming_scripts
            self.script_editor_dialog.populate_script_selector()
            self.update_selector_in_editor()
            self.script_editor_dialog.loading = False
            self.update_examples_from_local()
        # NOTE(review): stored as a bare True flag here, while it is reset to
        # None in script_editor_dialog_close; only "is not None" checks appear
        # to depend on it — confirm against the main window's usage.
        self.tagger.window.script_editor_dialog = True
    def script_editor_dialog_close(self):
        """Clear the main window's record of the (now closed) script editor dialog."""
        self.tagger.window.script_editor_dialog = None
    def show_scripting_documentation(self):
        """Show the scripting documentation dialog (singleton instance)."""
        ScriptingDocumentationDialog.show_instance(parent=self)
def toggle_file_naming_format(self, state):
active = self.ui.move_files.isChecked() or self.ui.rename_files.isChecked()
self.ui.open_script_editor.setEnabled(active)
    def save_from_editor(self):
        """Pull the saved script text back from the editor dialog and resync the selector."""
        self.script_text = self.script_editor_dialog.get_script()
        self.update_selector_from_editor()
    def check_formats(self):
        """Re-validate the naming script (reporting errors inline) and refresh the examples."""
        self.test()
        self.update_examples_from_local()
    def update_example_files(self):
        """Refresh the sample files used for the filename examples, then redisplay."""
        self.examples.update_sample_example_files()
        self.update_displayed_examples()
def update_examples_from_local(self):
override = dict(self.compat_options)
override["move_files"] = self.ui.move_files.isChecked()
override["move_files_to"] = os.path.normpath(self.ui.move_files_to.text())
override["rename_files"] = self.ui.rename_files.isChecked()
self.examples.update_examples(override=override)
self.update_displayed_examples()
def update_displayed_examples(self):
if self.script_editor_dialog is not None:
# Update examples in script editor which will trigger update locally
self.script_editor_dialog.display_examples()
else:
self.display_examples()
    def display_examples(self):
        """Repopulate both example list boxes and reset the tracked row selection."""
        self.current_row = -1
        self.examples.update_example_listboxes(
            self.ui.example_filename_before, self.ui.example_filename_after
        )
    def load(self):
        """Populate the page widgets from the saved configuration."""
        # React to changes of compat options
        compat_page = self.dialog.get_page("filerenaming_compat")
        self.compat_options = compat_page.get_options()
        compat_page.options_changed.connect(self.on_compat_options_changed)
        config = get_config()
        self.ui.rename_files.setChecked(config.setting["rename_files"])
        self.ui.move_files.setChecked(config.setting["move_files"])
        self.ui.move_files_to.setText(config.setting["move_files_to"])
        self.ui.move_files_to.setCursorPosition(0)
        self.ui.move_additional_files.setChecked(
            config.setting["move_additional_files"]
        )
        self.ui.move_additional_files_pattern.setText(
            config.setting["move_additional_files_pattern"]
        )
        self.ui.delete_empty_dirs.setChecked(config.setting["delete_empty_dirs"])
        self.naming_scripts = config.setting["file_renaming_scripts"]
        self.selected_naming_script_id = config.setting[
            "selected_file_naming_script_id"
        ]
        # If the editor dialog is open it drives the selector; otherwise the
        # selector is rebuilt directly from the settings.
        if self.script_editor_dialog:
            self.script_editor_dialog.load()
        else:
            self.update_selector_from_settings()
        self.update_examples_from_local()
    def on_compat_options_changed(self, options):
        """React to changed compat-page options by refreshing the previews."""
        self.compat_options = options
        self.update_examples_from_local()
def check(self):
self.check_format()
if self.ui.move_files.isChecked() and not self.ui.move_files_to.text().strip():
raise OptionsCheckError(
_("Error"), _("The location to move files to must not be empty.")
)
def check_format(self):
parser = ScriptParser()
try:
parser.eval(self.script_text)
except Exception as e:
raise ScriptCheckError("", str(e))
if self.ui.rename_files.isChecked():
if not self.script_text.strip():
raise ScriptCheckError(
"", _("The file naming format must not be empty.")
)
    def save(self):
        """Persist the page's widget state to the configuration.

        Also mirrors the saved flags into the main window's toolbar toggle
        actions and rebuilds the script selector menu.
        """
        config = get_config()
        config.setting["rename_files"] = self.ui.rename_files.isChecked()
        config.setting["move_files"] = self.ui.move_files.isChecked()
        config.setting["move_files_to"] = os.path.normpath(self.ui.move_files_to.text())
        config.setting[
            "move_additional_files"
        ] = self.ui.move_additional_files.isChecked()
        config.setting[
            "move_additional_files_pattern"
        ] = self.ui.move_additional_files_pattern.text()
        config.setting["delete_empty_dirs"] = self.ui.delete_empty_dirs.isChecked()
        config.setting[
            "selected_file_naming_script_id"
        ] = self.selected_naming_script_id
        self.tagger.window.enable_renaming_action.setChecked(
            config.setting["rename_files"]
        )
        self.tagger.window.enable_moving_action.setChecked(config.setting["move_files"])
        self.tagger.window.make_script_selector_menu()
def display_error(self, error):
# Ignore scripting errors, those are handled inline
if not isinstance(error, ScriptCheckError):
super().display_error(error)
    def move_files_to_browse(self):
        """Let the user pick the destination directory for moved files."""
        path = QtWidgets.QFileDialog.getExistingDirectory(
            self, "", self.ui.move_files_to.text()
        )
        if path:
            # Normalize separators before showing the chosen path.
            path = os.path.normpath(path)
            self.ui.move_files_to.setText(path)
def test(self):
self.ui.renaming_error.setStyleSheet("")
self.ui.renaming_error.setText("")
try:
self.check_format()
except ScriptCheckError as e:
self.ui.renaming_error.setStyleSheet(self.STYLESHEET_ERROR)
self.ui.renaming_error.setText(e.info)
return
register_options_page(RenamingOptionsPage)
|
saveddata | price | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
# Copyright (C) 2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from enum import IntEnum, unique
from time import time
from logbook import Logger
# All delay constants below are expressed in seconds.
VALIDITY = 24 * 60 * 60 # Price validity period, 24 hours
REREQUEST = 4 * 60 * 60 # Re-request delay for failed fetches, 4 hours
TIMEOUT = 15 * 60 # Network timeout delay for connection issues, 15 minutes
pyfalog = Logger(__name__)
@unique
class PriceStatus(IntEnum):
    """Outcome of the most recent price fetch attempt for an item type."""
    initialized = 0
    notSupported = 1
    fetchSuccess = 2
    fetchFail = 3
    fetchTimeout = 4
class Price:
    """Cached price record for a single item type.

    Stores the last fetched price together with when and how the fetch
    ended, so callers can decide whether a re-fetch is due.
    """

    def __init__(self, typeID):
        self.typeID = typeID
        self.time = 0
        self.price = 0
        self.status = PriceStatus.initialized

    def isValid(self, validityOverride=None):
        """Return True while the stored fetch outcome is still fresh.

        Freshly initialized records and unsupported items always report
        False so they are re-attempted (support may appear later).
        """
        status = self.status
        if status == PriceStatus.fetchSuccess:
            delay = VALIDITY if validityOverride is None else validityOverride
        elif status == PriceStatus.fetchFail:
            delay = REREQUEST
        elif status == PriceStatus.fetchTimeout:
            delay = TIMEOUT
        else:
            # initialized / notSupported (or anything unexpected)
            return False
        return time() <= self.time + delay

    def update(self, status, price=0):
        """Record the outcome of a fetch attempt.

        On failure or timeout the previously known price is kept; on any
        other non-success status the price is reset to 0.
        """
        if status == PriceStatus.fetchSuccess:
            newPrice = price
        elif status in (PriceStatus.fetchFail, PriceStatus.fetchTimeout):
            newPrice = self.price
        else:
            newPrice = 0
        self.time = time()
        self.price = newPrice
        self.status = status
|
utils | formatter | from string import Formatter as StringFormatter
from typing import Any, Callable, Dict, Optional
# We only need string.Formatter for its parse() method (which wraps
# `_string.formatter_parser(string)`); one shared instance suffices.
_stringformatter = StringFormatter()
def _identity(obj):
return obj
class Formatter:
    """Substitute ``{field}`` placeholders with lazily computed values.

    *mapping* supplies a zero-argument callable per field name; each result
    is computed at most once per instance and cached. *formatting*
    optionally supplies per-field ``(value, format_spec)`` formatters.
    Unknown fields are echoed back verbatim unless *defaults* provides a
    replacement; ``None`` values fall back to *defaults* (or ``""``).
    """

    def __init__(
        self,
        mapping: Dict[str, Callable[[], Any]],
        formatting: Optional[Dict[str, Callable[[Any, str], Any]]] = None,
    ):
        super().__init__()
        self.mapping: Dict[str, Callable[[], Any]] = mapping
        self.formatting: Dict[str, Callable[[Any, str], Any]] = formatting or {}
        self.cache: Dict[str, Any] = {}

    def _get_value(self, field_name: str, format_spec: Optional[str], defaults: Dict[str, str]) -> Any:
        # Unknown field: use the default, else echo the original placeholder.
        if field_name not in self.mapping:
            if format_spec:
                placeholder = f"{{{field_name}:{format_spec}}}"
            else:
                placeholder = f"{{{field_name}}}"
            return defaults.get(field_name, placeholder)
        # Compute the value at most once per instance (EAFP cache lookup).
        try:
            value = self.cache[field_name]
        except KeyError:
            value = self.cache[field_name] = self.mapping[field_name]()
        if value is None:
            value = defaults.get(field_name, "")
        if format_spec and field_name in self.formatting:
            # A failing custom formatter degrades to the raw placeholder.
            # noinspection PyBroadException
            try:
                return self.formatting[field_name](value, format_spec)
            except Exception:
                return f"{{{field_name}:{format_spec}}}"
        return value

    def _format(self, string: str, mapper: Callable[[str], str], defaults: Dict[str, str]) -> str:
        # string.Formatter.parse() tokenizes into
        # (literal_text, field_name, format_spec, conversion) tuples.
        chunks = []
        for literal_text, field_name, format_spec, _conversion in StringFormatter().parse(string):
            if literal_text:
                chunks.append(literal_text)
            if field_name is not None:
                value = self._get_value(field_name, format_spec, defaults)
                chunks.append(mapper(str(value)))
        return "".join(chunks)

    def format(self, string: str, defaults: Optional[Dict[str, str]] = None) -> str:
        """Format *string*, leaving unresolvable fields as-is."""
        return self._format(string, lambda s: s, defaults if defaults is not None else {})
|
gui | mode | # This file is part of MyPaint.
# Copyright (C) 2014-2018 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
"""Canvas input modes API: stack, and base classes for modes."""
## Imports
from __future__ import division, print_function
import logging
import math
from gettext import gettext as _
import gui.cursor
import lib.command
from gui.sliderwidget import InputSlider
from lib.brushsettings import settings_dict
from lib.document import Document
from lib.gibindings import Gdk, GLib, Gtk
from lib.layer.data import SimplePaintingLayer
from lib.observable import event
from lib.pycompat import add_metaclass, unicode
logger = logging.getLogger(__name__)
## Module constants
# Actions it makes sense to bind to a button. (HACK).
# Notably, tablet pads tend to offer many more buttons than the usual 3...
# (Entries are action names — presumably the Gtk actions from
# ``resources.xml`` referenced by ModeRegistry's ACTION_NAME docs; confirm.)
BUTTON_BINDING_ACTIONS = [
    "ShowPopupMenu",
    "Undo",
    "Redo",
    "Bigger",
    "Smaller",
    "MoreOpaque",
    "LessOpaque",
    "PickContext",
    "Fullscreen",
    "ToggleSubwindows",
    "BrushChooserPopup",
    "ColorChooserPopup",
    "ColorDetailsDialog",
    "ColorHistoryPopup",
    "PalettePrev",
    "PaletteNext",
]
## Behaviour flags
class Behavior:
    """Broad classification of what a mode's handler methods do
    These flags are assigned to devices in `gui.device` to allow the
    user to limit what devices are allowed to do. Mode instances expose
    their behaviour by defining their pointer_behavior and
    scroll_behavior properties.
    """
    NONE = 0x00 #: the mode does not perform any action
    PAINT_FREEHAND = 0x01 #: paint freehand brushstrokes
    PAINT_LINE = 0x02 #: non-freehand brushstrokes: lines, perspective
    EDIT_OBJECTS = 0x04 #: move and adjust objects on screen
    CHANGE_VIEW = 0x08 #: move the viewport around
    PAINT_NOBRUSH = 0x10 #: painting independent of brush (eg. fill)
    # Useful masks
    PAINT_BRUSH = PAINT_FREEHAND | PAINT_LINE #: painting dependent of brush
    PAINT_CONSTRAINED = PAINT_LINE | PAINT_NOBRUSH #: non-freehand painting
    NON_PAINTING = EDIT_OBJECTS | CHANGE_VIEW #: all non-painting behaviours
    ALL_PAINTING = PAINT_FREEHAND | PAINT_CONSTRAINED #: every painting flag
    ALL = NON_PAINTING | ALL_PAINTING #: every behaviour flag combined
## Metaclass for all modes
class ModeRegistry(type):
    """Lookup table for interaction modes and their associated actions
    Operates as the metaclass for `InteractionMode`, so all you need to do to
    create the association for a mode subclass is to define an
    ``ACTION_NAME`` entry in the class's namespace containing the name of
    the associated `Gtk.Action` defined in ``resources.xml``.
    """
    action_name_to_mode_class = {}
    mode_classes = set()
    # (Special-cased @staticmethod)
    def __new__(cls, name, bases, namespace):
        """Creates and records a new (InteractionMode) class.
        :param cls: this metaclass
        :param name: name of the class under construction
        :param bases: immediate base classes of the class under construction
        :param namespace: class dict for the class under construction
        :rtype: the constructed class, a regular InteractionMode class object
        If it exists, the ``ACTION_NAME`` entry in `namespace` is recorded,
        and can be used as a key for lookup of the returned class via the
        ``@classmethod``s defined on `ModeRegistry`.
        """
        # This parameter used to be named "dict", shadowing the builtin;
        # it is passed positionally by the class machinery, so renaming it
        # does not affect callers.
        action_name = namespace.get("ACTION_NAME", None)
        mode_class = super(ModeRegistry, cls).__new__(cls, name, bases, namespace)
        if action_name is not None:
            action_name = str(action_name)
            cls.action_name_to_mode_class[action_name] = mode_class
        cls.mode_classes.add(mode_class)
        return mode_class
    @classmethod
    def get_mode_class(cls, action_name):
        """Looks up a registered mode class by its associated action's name.
        :param action_name: a string containing an action name (see this
        metaclass's docs regarding the ``ACTION_NAME`` class variable)
        :rtype: an InteractionMode class object, or `None`.
        """
        return cls.action_name_to_mode_class.get(action_name, None)
    @classmethod
    def get_action_names(cls):
        """Returns all action names associated with interaction.
        :rtype: an iterable of action name strings.
        """
        return cls.action_name_to_mode_class.keys()
## Mode base classes
@add_metaclass(ModeRegistry)
class InteractionMode(object):
    """Required base class for temporary interaction modes.
    Active interaction mode objects process input events, and can manipulate
    document views (TiledDrawWidget), the document model data (lib.document),
    and the mode stack they sit on. Interactions encapsulate state about their
    particular kind of interaction; for example a drag interaction typically
    contains the starting position for the drag.
    Event handler methods can create new sub-modes and push them to the stack.
    It is conventional to pass the current event to the equivalent method on
    the new object when this transfer of control happens.
    Subclasses may nominate a related `GtkAction` instance in the UI by setting
    the class-level variable ``ACTION_NAME``: this should be the name of an
    action defined in `gui.app.Application.builder`'s XML file.
    All InteractionMode subclasses register themselves in ModeRegistry
    when their class is defined.
    """
    ## Class configuration
    #: See the docs for `gui.mode.ModeRegistry`.
    ACTION_NAME = None
    #: True if the mode supports live update from the brush editor
    IS_LIVE_UPDATEABLE = False
    #: Timeout for Document.mode_flip_action_activated_cb(). How long, in
    #: milliseconds, it takes for the controller to change the key-up action
    #: when activated with a keyboard "Flip<ModeName>" action. Set to zero
    #: for modes where key-up should exit the mode at any time, and to a larger
    #: number for modes where the behaviour changes.
    keyup_timeout = 500
    ## Defaults for instances (sue me, I'm lazy)
    #: The `gui.document.Document` this mode affects: see enter()
    doc = None
    #: Broad description of what result clicking, moving the pointer,
    #: or dragging has in this mode. See `Behavior`.
    pointer_behavior = Behavior.NONE
    #: Broad description of what result scrolling a mouse scroll-wheel
    #: does in this mode. See `Behavior`.
    scroll_behavior = Behavior.NONE
    #: True if the mode supports switching to another mode based on
    #: combinations of pointer buttons and modifier keys.
    supports_button_switching = True
    #: Optional whitelist of the names of the modes which this mode can
    #: switch to. If the iterable is empty, all modes are possible.
    permitted_switch_actions = ()
    ## Status message info
    @classmethod
    def get_name(cls):
        """Returns a short human-readable description of the mode.
        :rtype: unicode
        This is used for status bar messages, and potentially elsewhere before
        the mode has been instantiated. All concrete subclasses should
        override this. By default the (non-localized) class name is returned.
        When capitalizing, use whatever style the GNOME HIG specifies for menu
        items. In English, this is currently "header", or title case (first
        word capitalized, all other words capitalized except closed-class
        words). Do not use trailing punctuation.
        """
        return unicode(cls.__name__)
    def get_usage(self):
        """Returns a medium-length usage message for the mode.
        :rtype: unicode
        This is used for status bar messages. All concrete subclasses should
        override this. The default return value is an empty string.
        The usage message should be a short, natural-sounding explanation to
        the user detailing what the current mode is for. Note that the usage
        message is typically displayed after the mode's name or explanatory
        icon, so there is no need to repeat that. Brevity is important because
        space is limited.
        When capitalizing, use whatever style the GNOME HIG specifies for
        tooltips. In English, this is currently sentence case. Use one
        complete sentence, and always omit the the trailing period.
        """
        return ""
    def __unicode__(self):
        return self.get_name()
    ## Associated action
    def get_action(self):
        """Returns any app action associated with the mode."""
        # Returns None implicitly when there is no doc/app or no ACTION_NAME.
        if self.doc and hasattr(self.doc, "app"):
            if self.ACTION_NAME:
                return self.doc.app.find_action(self.ACTION_NAME)
    ## Mode icon
    def get_icon_name(self):
        """Returns the icon to use when representing the mode.
        If there's an associated action, this method returns the icon
        associated with the action.
        """
        icon_name = None
        action = self.get_action()
        if action:
            icon_name = action.get_icon_name()
        if not icon_name:
            return "missing-icon"
        return icon_name
    ## Mode stacking interface
    def stackable_on(self, mode):
        """Tests whether the mode can usefully stack onto an active mode.
        :param mode: another mode object
        :rtype: bool
        This method should return True if this mode can usefully be stacked
        onto another mode when switching via toolbars buttons or other actions.
        """
        # By default, anything can be stacked on brush tools, except for brush
        # tools.
        # Why? So whenever the user picks a brush (from a shortcut or
        # whatever), the active tool becomes the last used brush-sensitive
        # tool.
        # See issue #530.
        if self.pointer_behavior & Behavior.PAINT_BRUSH:
            return False
        return mode.pointer_behavior & Behavior.PAINT_BRUSH
    def popped(self):
        """Called when the mode is removed from the stack, after leave"""
        assert self.doc is None
        # The hasattr asserts throughout this class verify that no superclass
        # defines the same hook: this class is the end of the cooperative
        # super() chain for these methods.
        assert not hasattr(super(InteractionMode, self), "popped")
    def enter(self, doc, **kwds):
        """Enters the mode: called by `ModeStack.push()` etc.
        :param doc: the `gui.document.Document` this mode should affect.
            A reference is kept in `self.doc`.
        This is called when the mode becomes active, i.e. when it becomes the
        top mode on a ModeStack, and before input is sent to it. Note that a
        mode may be entered only to be left immediately: mode stacks are
        cleared by repeated pop()ing.
        """
        self.doc = doc
        assert not hasattr(super(InteractionMode, self), "enter")
    def leave(self):
        """Leaves the mode: called by `ModeStack.pop()` etc.
        This is called when an active mode becomes inactive, i.e. when it is
        no longer the top mode on its ModeStack. It should commit any
        uncommitted work to the undo stack, just as `checkpoint()` does.
        """
        self.doc = None
        assert not hasattr(super(InteractionMode, self), "leave")
    def checkpoint(self, **kwargs):
        """Commits any of the mode's uncommitted work
        This is called on the active mode at various times to signal
        that pending work should be committed to the command stack now,
        so that the Undo command would be able to undo it if it were
        called next.
        The mode continues to be active.
        This method is not automatically invoked when changing modes:
        leave() should manage that transition.
        """
        assert not hasattr(super(InteractionMode, self), "checkpoint")
    ## Event handler defaults (no-ops)
    def button_press_cb(self, tdw, event):
        """Handler for ``button-press-event``s."""
        assert not hasattr(super(InteractionMode, self), "button_press_cb")
    def motion_notify_cb(self, tdw, event):
        """Handler for ``motion-notify-event``s."""
        # NOTE: unlike its sibling handlers, this one performs no
        # cooperative-super check.
    def button_release_cb(self, tdw, event):
        """Handler for ``button-release-event``s."""
        assert not hasattr(super(InteractionMode, self), "button_release_cb")
    def scroll_cb(self, tdw, event):
        """Handler for ``scroll-event``s."""
        assert not hasattr(super(InteractionMode, self), "scroll_cb")
    def key_press_cb(self, win, tdw, event):
        """Handler for ``key-press-event``s.
        The base class implementation does nothing.
        Keypresses are received by the main window only, but at this point it
        has applied some heuristics to determine the active doc and view.
        These are passed through to the active mode and are accessible to
        keypress handlers via `self.doc` and the `tdw` argument.
        """
        assert not hasattr(super(InteractionMode, self), "key_press_cb")
        return True
    def key_release_cb(self, win, tdw, event):
        """Handler for ``key-release-event``s.
        The base class implementation does nothing. See `key_press_cb` for
        details of the additional arguments.
        """
        assert not hasattr(super(InteractionMode, self), "key_release_cb")
        return True
    ## Drag sub-API (FIXME: this is in the wrong place)
    # Defined here to allow mixins to provide behaviour for both both drags and
    # regular events without having to derive from DragMode. Really these
    # buck-stops-here definitions belong in DragMode, so consider moving them
    # somewhere more sensible.
    def drag_start_cb(self, tdw, event):
        assert not hasattr(super(InteractionMode, self), "drag_start_cb")
    def drag_update_cb(self, tdw, event, ev_x, ev_y, dx, dy):
        assert not hasattr(super(InteractionMode, self), "drag_update_cb")
    def drag_stop_cb(self, tdw):
        assert not hasattr(super(InteractionMode, self), "drag_stop_cb")
    ## Internal utility functions
    def current_modifiers(self):
        """Returns the current set of modifier keys as a Gdk bitmask.
        See: gui.document.Document.get_current_modifiers()
        """
        doc = self.doc
        if doc is None:
            # No document yet: report no modifiers held.
            modifiers = Gdk.ModifierType(0)
        else:
            modifiers = doc.get_current_modifiers()
        return modifiers
    def current_position(self):
        """Returns the current client pointer position on the main TDW.
        For use in enter() methods: since the mode may be being entered
        by the user pressing a key, no position is available at this
        point. Normal event handlers should use their argument GdkEvents
        to determine position.
        """
        # Queries the Gdk device manager for the client pointer; coordinates
        # are presumably relative to the TDW's Gdk window — see Gdk docs.
        disp = self.doc.tdw.get_display()
        mgr = disp.get_device_manager()
        dev = mgr.get_client_pointer()
        win = self.doc.tdw.get_window()
        underwin, x, y, mods = win.get_device_position(dev)
        return x, y
class ScrollableModeMixin(InteractionMode):
    """Mixin for scrollable modes.
    Implements some immediate rotation and zoom commands for the scroll wheel.
    These should be useful in many modes, but perhaps not all.
    """
    # Hack conversion factor from smooth scroll units to screen pixels.
    _PIXELS_PER_SMOOTH_SCROLL_UNIT = 25.0
    # Could also use the viewport-page-sized approximation that
    # Gtk.ScrolledWindow uses internally:
    # https://git.gnome.org/browse/gtk+/tree/gtk/gtkscrolledwindow.c?h=gtk-3-14#n2416
    def __reset_delta_totals(self):
        # Zero the smooth-scroll accumulators. These are used to chunk
        # smooth deltas into discrete zoom/pan steps when Shift is held.
        self.__total_dx = 0.0
        self.__total_dy = 0.0
    def enter(self, doc, **kwds):
        # Reset accumulators at every gesture boundary.
        self.__reset_delta_totals()
        return super(ScrollableModeMixin, self).enter(doc, **kwds)
    def button_press_cb(self, tdw, event):
        self.__reset_delta_totals()
        return super(ScrollableModeMixin, self).button_press_cb(tdw, event)
    def button_release_cb(self, tdw, event):
        self.__reset_delta_totals()
        return super(ScrollableModeMixin, self).button_release_cb(tdw, event)
    def scroll_cb(self, tdw, event):
        """Handles scroll-wheel events.
        Normal scroll wheel events: whichever of {panning, scrolling}
        the device is configured to do. With Ctrl or Alt: invert
        scrolling and zooming.
        With shift, if smooth scroll events are being sent, constrain
        the zoom or scroll in appropriate chunks.
        """
        doc = self.doc
        direction = event.direction
        dev_mon = doc.app.device_monitor
        dev = event.get_source_device()
        dev_settings = dev_mon.get_device_settings(dev)
        scroll_action = dev_settings.scroll
        # Invert scrolling and zooming if Ctrl or Alt is held
        # Function-scope import — presumably to avoid a module import
        # cycle with gui.device; TODO confirm.
        import gui.device
        if event.state & (Gdk.ModifierType.MOD1_MASK | Gdk.ModifierType.CONTROL_MASK):
            if scroll_action == gui.device.ScrollAction.ZOOM:
                scroll_action = gui.device.ScrollAction.PAN
            elif scroll_action == gui.device.ScrollAction.PAN:
                scroll_action = gui.device.ScrollAction.ZOOM
        # Force incremental scrolling or zooming when shift is held.
        constrain_smooth = event.state & Gdk.ModifierType.SHIFT_MASK
        if direction == Gdk.ScrollDirection.SMOOTH:
            self.__total_dx += event.delta_x
            self.__total_dy += event.delta_y
        # Handle zooming (the old default)
        # We don't rotate any more though. Srsly, that was awful.
        if scroll_action == gui.device.ScrollAction.ZOOM:
            if direction == Gdk.ScrollDirection.SMOOTH:
                if constrain_smooth:
                    # Needs to work in an identical fashion to old-style
                    # zooming.
                    while self.__total_dy > 1:
                        self.__total_dy -= 1.0
                        doc.zoom(doc.ZOOM_OUTWARDS)
                    while self.__total_dy < -1:
                        self.__total_dy += 1.0
                        doc.zoom(doc.ZOOM_INWARDS)
                else:
                    # Smooth scroll zooming is intended to resemble what
                    # gui.viewmanip.ZoomViewMode does, minus the
                    # panning. In other words, simple zooming at the
                    # cursor.
                    dx = event.delta_x
                    dy = event.delta_y
                    dx *= self._PIXELS_PER_SMOOTH_SCROLL_UNIT
                    dy *= self._PIXELS_PER_SMOOTH_SCROLL_UNIT
                    # Don't pan: that's because the cursor generally does
                    # not move during scroll events.
                    # tdw.scroll(-dx, 0) # not for now
                    dy *= -1
                    tdw.zoom(
                        math.exp(dy / 100.0),
                        center=(event.x, event.y),
                        ongoing=True,
                    )
                    tdw.renderer.update_cursor()
                    self.__reset_delta_totals()
                    # Need to send the notifications here if not
                    # callling doc methods.
                    # https://github.com/mypaint/mypaint/issues/313
                    doc.notify_view_changed()
            # Old-style zooming
            elif direction == Gdk.ScrollDirection.UP:
                doc.zoom(doc.ZOOM_INWARDS)
                self.__reset_delta_totals()
            elif direction == Gdk.ScrollDirection.DOWN:
                doc.zoom(doc.ZOOM_OUTWARDS)
                self.__reset_delta_totals()
        # Handle scroll panning.
        elif scroll_action == gui.device.ScrollAction.PAN:
            if direction == Gdk.ScrollDirection.SMOOTH:
                if constrain_smooth:
                    # Holding shift to constrain the pan works like
                    # discrete panning below.
                    while self.__total_dy > 1:
                        self.__total_dy -= 1.0
                        doc.pan(doc.PAN_DOWN)
                    while self.__total_dy < -1:
                        self.__total_dy += 1.0
                        doc.pan(doc.PAN_UP)
                    while self.__total_dx > 1:
                        self.__total_dx -= 1.0
                        doc.pan(doc.PAN_RIGHT)
                    while self.__total_dx < -1:
                        self.__total_dx += 1.0
                        doc.pan(doc.PAN_LEFT)
                else:
                    # Smooth panning is *nice*. It should work identically to
                    # gui.viewmanip.PanViewMode.
                    # No inertia here. Too many touchpads already
                    # emulate that, some by default and others not.
                    dx = event.delta_x
                    dy = event.delta_y
                    dx *= self._PIXELS_PER_SMOOTH_SCROLL_UNIT
                    dy *= self._PIXELS_PER_SMOOTH_SCROLL_UNIT
                    tdw.scroll(dx, dy, ongoing=True)
                    doc.notify_view_changed()
                    self.__reset_delta_totals()
            # Discrete panning.
            elif direction == Gdk.ScrollDirection.UP:
                doc.pan(doc.PAN_UP)
                self.__reset_delta_totals()
            elif direction == Gdk.ScrollDirection.DOWN:
                doc.pan(doc.PAN_DOWN)
                self.__reset_delta_totals()
            elif direction == Gdk.ScrollDirection.LEFT:
                doc.pan(doc.PAN_LEFT)
                self.__reset_delta_totals()
            elif direction == Gdk.ScrollDirection.RIGHT:
                doc.pan(doc.PAN_RIGHT)
                self.__reset_delta_totals()
        return super(ScrollableModeMixin, self).scroll_cb(tdw, event)
class PaintingModeOptionsWidgetBase(Gtk.Grid):
    """Base class for the options widget of a generic painting mode"""
    _COMMON_SETTINGS = [
        # TRANSLATORS:"Brush radius" for the options panel. Short.
        ("radius_logarithmic", _("Size:")),
        # TRANSLATORS:"Brush opacity" for the options panel. Short.
        ("opaque", _("Opaque:")),
        # TRANSLATORS:"Brush hardness/sharpness" for the options panel. Short.
        ("hardness", _("Sharp:")),
        # TRANSLATORS:"Additional pressure gain" for the options panel. Short.
        ("pressure_gain_log", _("Gain:")),
        # TRANSLATORS:"Paint Pigment Mode" for the options panel. Short.
        ("paint_mode", _("Pigment:")),
    ]
    def __init__(self):
        super(PaintingModeOptionsWidgetBase, self).__init__()
        self.set_row_spacing(6)
        self.set_column_spacing(6)
        # Function-scope import — presumably avoids an import cycle with
        # gui.application; TODO confirm.
        from gui.application import get_app
        self.app = get_app()
        self.adjustable_settings = set() #: What the reset button resets
        # Subclasses extend the grid via init_specialized_widgets().
        row = self.init_common_widgets(0)
        row = self.init_specialized_widgets(row)
        row = self.init_reset_widgets(row)
    def init_common_widgets(self, row):
        """Adds one labelled slider row per common brush setting.
        Returns the next free grid row index.
        """
        for cname, text in self._COMMON_SETTINGS:
            label = Gtk.Label()
            label.set_text(text)
            label.set_alignment(1.0, 0.5)
            label.set_hexpand(False)
            label.set_tooltip_text(settings_dict[cname].tooltip)
            self.adjustable_settings.add(cname)
            adj = self.app.brush_adjustment[cname]
            scale = InputSlider(adj)
            scale.set_draw_value(False)
            scale.set_hexpand(True)
            scale.dynamic_tooltip = True
            self.attach(label, 0, row, 1, 1)
            self.attach(scale, 1, row, 1, 1)
            row += 1
        return row
    def init_specialized_widgets(self, row):
        """Hook for subclasses to add their own rows; returns next row."""
        return row
    def init_reset_widgets(self, row):
        """Adds the bottom-aligned Reset button row; returns next row."""
        align = Gtk.Alignment.new(0.5, 1.0, 1.0, 0.0)
        align.set_vexpand(True)
        self.attach(align, 0, row, 2, 1)
        button = Gtk.Button(label=_("Reset"))
        button.connect("clicked", self.reset_button_clicked_cb)
        align.add(button)
        row += 1
        return row
    def reset_button_clicked_cb(self, button):
        """Restores the adjustable settings to the parent brush's base values."""
        app = self.app
        bm = app.brushmanager
        parent_brush = bm.get_parent_brush(brushinfo=app.brush)
        if parent_brush:
            parent_binf = parent_brush.get_brushinfo()
            for cname in self.adjustable_settings:
                parent_value = parent_binf.get_base_value(cname)
                adj = self.app.brush_adjustment[cname]
                adj.set_value(parent_value)
class BrushworkModeMixin(InteractionMode):
    """Mixin for modes using brushes
    This mixin adds the ability to paint undoably to the current layer
    with proper atomicity and handling of checkpoints, and time-based
    automatic commits.
    Classes using this mixin should use `stroke_to()` to paint, and then
    may use the `brushwork_commit()` method to commit completed segments
    atomically to the command stack. If a subclass needs greater
    control over new segments, `brushwork_begin()` can be used to start
    them recording.
    The `leave()` and `checkpoint()` methods defined here cooperatively
    commit all outstanding brushwork.
    """
    def __init__(self, **kwds):
        """Cooperative init (this mixin initializes some private fields)
        :param bool \*\*kwds: Passed through to other __init__s.
        """
        super(BrushworkModeMixin, self).__init__(**kwds)
        # True until the first segment starts: forces an abrupt start for
        # the very first piece of brushwork (see brushwork_begin()).
        self.__first_begin = True
        self.__active_brushwork = {} # {model: Brushwork}
    def brushwork_begin(self, model, description=None, abrupt=False, layer=None):
        """Begins a new segment of active brushwork for a model
        :param Document model: The model to begin work on
        :param unicode description: Optional description of the work
        :param bool abrupt: Tail out/in abruptly with faked zero pressure.
        :param SimplePaintingLayer layer: explicit target layer.
        Any current segment of brushwork is committed, and a new segment
        is begun.
        Passing ``None`` for the description is suitable for freehand
        drawing modes. This method will be called automatically with
        the default options by `stroke_to()` if needed, so not all
        mode classes will need to use it.
        The first segment of brushwork begun by a newly created
        BrushworkMode object always starts abruptly.
        The second and subsequent segments are assumed to be
        continuations by default. Set abrupt=True to break off any
        existing segment cleanly, and start the new segment cleanly.
        If an explicit target layer is used, it must be one that's
        guaranteed to persist for the lifetime of the current document
        model to prevent leaks.
        """
        # Commit any previous work for this model
        cmd = self.__active_brushwork.get(model)
        if cmd is not None:
            self.brushwork_commit(model, abrupt=abrupt)
        # New segment of brushwork
        if layer is None:
            layer_path = model.layer_stack.current_path
        else:
            layer_path = None
        cmd = lib.command.Brushwork(
            model,
            layer_path=layer_path,
            description=description,
            abrupt_start=(abrupt or self.__first_begin),
            layer=layer,
        )
        self.__first_begin = False
        # Stashed on the command object under a name-mangled attribute
        # (_BrushworkModeMixin__last_pos); only this mixin reads it, in
        # brushwork_commit()'s abrupt tail-off path.
        cmd.__last_pos = None
        self.__active_brushwork[model] = cmd
    def brushwork_commit(self, model, abrupt=False):
        """Commits any active brushwork for a model to the command stack
        :param Document model: The model to commit work to
        :param bool abrupt: End with a faked zero pressure "stroke_to()"
        This only makes a new entry on the command stack if
        the currently active brushwork segment made
        any changes to the model.
        See also `brushwork_rollback()`.
        """
        cmd = self.__active_brushwork.pop(model, None)
        if cmd is None:
            return
        if abrupt and cmd.__last_pos is not None:
            # Fake a zero-pressure stroke at the last known position so
            # the segment tails off cleanly.
            x, y, xtilt, ytilt, viewzoom, viewrotation, barrel_rotation = cmd.__last_pos
            pressure = 0.0
            dtime = 0.0
            viewzoom = self.doc.tdw.scale
            viewrotation = self.doc.tdw.rotation
            barrel_rotation = 0.0
            cmd.stroke_to(
                dtime,
                x,
                y,
                pressure,
                xtilt,
                ytilt,
                viewzoom,
                viewrotation,
                barrel_rotation,
            )
        changed = cmd.stop_recording(revert=False)
        if changed:
            model.do(cmd)
    def brushwork_rollback(self, model):
        """Rolls back any active brushwork for a model
        :param Document model: The model to roll back
        This restores the model's appearance and state to how it was
        when the current segment of brushwork started.
        For input patterns where this makes sense,
        your calls to `stroke_to()` should have ``auto_split=False``.
        See also `brushwork_commit()`.
        """
        cmd = self.__active_brushwork.pop(model, None)
        if cmd is None:
            return
        cmd.stop_recording(revert=True)
    def brushwork_commit_all(self, abrupt=False):
        """Commits all active brushwork"""
        # list() avoids mutating the dict while iterating: brushwork_commit
        # pops entries.
        for model in list(self.__active_brushwork.keys()):
            self.brushwork_commit(model, abrupt=abrupt)
    def brushwork_rollback_all(self):
        """Rolls back all active brushwork"""
        for model in list(self.__active_brushwork.keys()):
            self.brushwork_rollback(model)
    def stroke_to(
        self,
        model,
        dtime,
        x,
        y,
        pressure,
        xtilt,
        ytilt,
        viewzoom,
        viewrotation,
        barrel_rotation,
        auto_split=True,
        layer=None,
    ):
        """Feeds an updated stroke position to the brush engine
        :param Document model: model on which to paint
        :param float dtime: Seconds since the last call to this method
        :param float x: Document X position update
        :param float y: Document Y position update
        :param float pressure: Pressure, ranging from 0.0 to 1.0
        :param float xtilt: X-axis tilt, ranging from -1.0 to 1.0
        :param float ytilt: Y-axis tilt, ranging from -1.0 to 1.0
        :param viewzoom: The view's current zoom level, [0, 64]
        :param viewrotation: The view's current rotation, [-180.0, 180.0]
        :param float barrel_rotation: Stylus barrel rotation, [0.0 to 1.0]
        :param bool auto_split: Split ongoing brushwork if due
        :param SimplePaintingLayer layer: explicit target layer
        During normal operation, successive calls to `stroke_to()` record
        an ongoing sequence of `lib.command.Brushwork` commands on the
        undo stack, stopping and committing the currently recording
        command when it becomes due.
        The explicit target layer is intended for simple painting modes
        operating on out-of-tree layers which rely on stroke_to()
        automatically calling brushwork_begin(). Normally the currently
        selected layer is used as the target layer for each new segment
        of brushwork.
        """
        cmd = self.__active_brushwork.get(model, None)
        desc0 = None
        if auto_split and cmd and cmd.split_due:
            desc0 = cmd.description # retain for the next cmd
            self.brushwork_commit(model, abrupt=False)
            assert model not in self.__active_brushwork
            cmd = None
        if not cmd:
            # No active segment (or it was just committed): start one.
            self.brushwork_begin(
                model,
                description=desc0,
                abrupt=False,
                layer=layer,
            )
            cmd = self.__active_brushwork[model]
        cmd.stroke_to(
            dtime, x, y, pressure, xtilt, ytilt, viewzoom, viewrotation, barrel_rotation
        )
        # Remember the position for brushwork_commit()'s abrupt tail-off.
        cmd.__last_pos = (x, y, xtilt, ytilt, viewzoom, viewrotation, barrel_rotation)
    def leave(self, **kwds):
        """Leave mode, committing outstanding brushwork as necessary
        The leave action defined here is careful to tail off strokes
        cleanly: certain subclasses are geared towards fast capture of
        data and queued delivery of stroke information, so we have to
        reset the brush engine's idea of pressure fast. If we don't, an
        interrupted queued stroke can result in a *huge* sequence of
        dabs from the last processed position to wherever the cursor is
        right now.
        This leave() knows about the mode stack, and only commits if it
        knows it isn't still stacked. That's to allow temporary view
        manipulation modes to work without disrupting `gui.inktool`'s
        mode, which normally has a lot pending.
        """
        logger.debug("BrushworkModeMixin: leave()")
        # FIXME: The mode stack should be telling enter() and leave()
        # FIXME: whether this is an initial/final call.
        # FIXME: Stack state tracking should be unnecessary inside mode objs.
        still_stacked = False
        for mode in self.doc.modes:
            if mode is self:
                still_stacked = True
                break
        if not still_stacked:
            self.brushwork_commit_all(abrupt=True)
        super(BrushworkModeMixin, self).leave(**kwds)
    def checkpoint(self, **kwargs):
        """Commit any outstanding brushwork
        Like `leave()`, this commits the currently recording Brushwork
        command for each known model; however we do not attempt to tail
        off brushstrokes cleanly because that would make Freehand mode
        discontinuous when the user changes the brush color.
        """
        logger.debug("BrushworkModeMixin: checkpoint()")
        super(BrushworkModeMixin, self).checkpoint(**kwargs)
        self.brushwork_commit_all(abrupt=False)
class SingleClickMode(InteractionMode):
    """Base class for non-drag (single click) modes

    Button-1 presses are captured and turned into a `clicked_cb()` call
    on release; everything else falls through to the other behavioral
    mixins.
    """

    #: The cursor to use when entering the mode
    cursor = None

    def __init__(self, ignore_modifiers=False, **kwds):
        """Construct.

        :param bool ignore_modifiers: accepted for signature parity with
            DragMode; not used by this class.
        """
        super(SingleClickMode, self).__init__(**kwds)
        self._button_pressed = None

    def enter(self, doc, **kwds):
        """Enters the mode, setting the override cursor."""
        super(SingleClickMode, self).enter(doc, **kwds)
        assert self.doc is not None
        # Fix: load the fallback action cursor *before* applying the
        # override, so it takes effect on first entry too. Previously
        # the override was set while self.cursor was still None (the
        # load-then-apply order matches DragMode.enter()).
        if self.cursor is None:
            self.cursor = self.doc.app.cursors.get_action_cursor(
                self.ACTION_NAME, gui.cursor.Name.ARROW
            )
        self.doc.tdw.set_override_cursor(self.cursor)

    def leave(self, **kwds):
        """Leaves the mode, restoring the normal cursor."""
        if self.doc is not None:
            self.doc.tdw.set_override_cursor(None)
        super(SingleClickMode, self).leave(**kwds)

    def button_press_cb(self, tdw, event):
        if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
            self._button_pressed = 1
            return False
        else:
            return super(SingleClickMode, self).button_press_cb(tdw, event)

    def button_release_cb(self, tdw, event):
        if event.button == self._button_pressed:
            self._button_pressed = None
            self.clicked_cb(tdw, event)
            return False
        else:
            # Fix: delegate to the superclass's *release* handler.
            # The old code called button_press_cb here (copy-paste bug),
            # so unrelated releases were handled as presses.
            return super(SingleClickMode, self).button_release_cb(tdw, event)

    def clicked_cb(self, tdw, event):
        # Subclasses must override; assert that there is no super
        # implementation being silently shadowed.
        assert not hasattr(super(SingleClickMode, self), "clicked_cb")
class DragMode(InteractionMode):
    """Base class for drag activities.

    Drag modes can be entered when the button is pressed, or not yet
    pressed. If the button is pressed when the mode is entered, the
    initial position will be determined from the first motion event.

    Drag modes are normally "spring-loaded", meaning that when a drag
    mode is first entered, it remembers which modifier keys were held
    down at that time. When these keys are released, the mode will exit.
    """

    #: Cursor shown while no drag is in progress (and fallback for the
    #: active state when active_cursor is None).
    inactive_cursor = None
    #: Cursor shown while a drag is in progress.
    active_cursor = None

    #: If true, exit mode when initial modifiers are released
    SPRING_LOADED = True

    def __init__(self, ignore_modifiers=False, **kwds):
        """Construct, possibly ignoring initial modifiers.

        :param ignore_modifiers: If True, ignore initial modifier keys.

        Drag modes can be instructed to ignore the initial set of
        modifiers when they're entered. This is appropriate when the
        mode is being entered in response to a keyboard shortcut.
        Modifiers don't mean the same thing for keyboard shortcuts.
        Conversely, toolbar buttons and mode-switching via pointer
        buttons should use the default behaviour.

        In practice, it's not quite so clear cut. Instead we have
        keyboard-friendly "Flip*" actions which allow the mode to be
        toggled off with a second press. These actions use the
        `ignore_modifiers` behaviour, and coexist with a secondary layer
        of radioactions which don't do this, but which reflect the state
        prettily.
        """
        super(DragMode, self).__init__(**kwds)
        self._tdw_grab_broken_conninfo = None
        self._in_drag = False
        self._reset_drag_state()
        self.initial_modifiers = None
        #: Ignore the initial modifiers (FIXME: bad name, maybe not public?)
        self.ignore_modifiers = ignore_modifiers

    def _reset_drag_state(self):
        # Clears all per-drag state, disconnecting the grab-broken
        # handler if one is still connected.
        self.last_x = None
        self.last_y = None
        self.start_x = None
        self.start_y = None
        self._start_keyval = None
        self._start_button = None
        self._grab_widget = None
        self._in_drag = False
        if self._tdw_grab_broken_conninfo is not None:
            tdw, connid = self._tdw_grab_broken_conninfo
            tdw.disconnect(connid)
            self._tdw_grab_broken_conninfo = None

    def _stop_drag(self, t=Gdk.CURRENT_TIME):
        # Stops any active drag, calls drag_stop_cb(), and cleans up.
        if not self.in_drag:
            return
        tdw = self._grab_widget
        tdw.grab_remove()
        Gdk.keyboard_ungrab(t)
        Gdk.pointer_ungrab(t)
        self._grab_widget = None
        self._in_drag = False
        self.drag_stop_cb(tdw)
        self._reset_drag_state()

    def _start_drag(self, tdw, event):
        # Attempt to start a new drag, calling drag_start_cb() if successful.
        if self.in_drag:
            return
        # Keyboard-initiated drags have no event coordinates; fall back
        # to the last recorded pointer position.
        if hasattr(event, "x"):
            self.start_x = event.x
            self.start_y = event.y
        else:
            last_t, last_x, last_y = self.doc.get_last_event_info(tdw)
            self.start_x = last_x
            self.start_y = last_y
        tdw_window = tdw.get_window()
        event_mask = (
            Gdk.EventMask.BUTTON_PRESS_MASK
            | Gdk.EventMask.BUTTON_RELEASE_MASK
            | Gdk.EventMask.POINTER_MOTION_MASK
        )
        cursor = self.active_cursor
        if cursor is None:
            cursor = self.inactive_cursor
        # Grab the pointer
        grab_status = Gdk.pointer_grab(
            tdw_window, False, event_mask, None, cursor, event.time
        )
        if grab_status != Gdk.GrabStatus.SUCCESS:
            logger.warning("pointer grab failed: %r", grab_status)
            logger.debug("gdk_pointer_is_grabbed(): %r", Gdk.pointer_is_grabbed())
            # There seems to be a race condition between this grab under
            # PyGTK/GTK2 and some other grab - possibly just the implicit grabs
            # on color selectors: https://gna.org/bugs/?20068 Only pointer
            # events are affected, and PyGI+GTK3 is unaffected.
            #
            # It's probably safest to exit the mode and not start the drag.
            # This condition should be rare enough for this to be a valid
            # approach: the irritation of having to click again to do something
            # should be far less than that of getting "stuck" in a drag.
            self._bailout()
            # Sometimes a pointer ungrab is needed even though the grab
            # apparently failed to avoid the UI partially "locking up" with the
            # stylus (and only the stylus). Happens when WMs like Xfwm
            # intercept an <Alt>Button combination for window management
            # purposes. Results in Gdk.GrabStatus.ALREADY_GRABBED, but this
            # line is necessary to avoid the rest of the UI becoming
            # unresponsive even though the canvas can be drawn on with
            # the stylus. Are we cancelling an implicit grab here, and why
            # is it device specific?
            Gdk.pointer_ungrab(event.time)
            return
        # We managed to establish a grab, so watch for it being broken.
        # This signal is disconnected when the mode leaves.
        connid = tdw.connect("grab-broken-event", self._tdw_grab_broken_cb)
        self._tdw_grab_broken_conninfo = (tdw, connid)
        # Grab the keyboard too, to be certain of getting the key release event
        # for a spacebar drag.
        grab_status = Gdk.keyboard_grab(tdw_window, False, event.time)
        if grab_status != Gdk.GrabStatus.SUCCESS:
            logger.warning("Keyboard grab failed: %r", grab_status)
            self._bailout()
            Gdk.pointer_ungrab(event.time)
            return
        # GTK too...
        tdw.grab_add()
        self._grab_widget = tdw
        self._in_drag = True
        self.last_x = self.start_x
        self.last_y = self.start_y
        # Drag has started, perform whatever action the mode needs.
        self.drag_start_cb(tdw, event)

    def _bailout(self):
        """Attempt to exit this mode safely, via an idle routine

        The actual task is handled by an idle callback to make this
        method safe to call during a mode's enter() or leave() methods.
        Modes on top of the one requesting bailout will also be ejected.
        """
        from gui.application import get_app

        app = get_app()
        if self not in app.doc.modes:
            logger.debug(
                "bailout: cannot bail out of %r: " "mode is not in the mode stack",
                self,
            )
            return
        logger.debug("bailout: starting idler to safely bail out of %r", self)
        GLib.idle_add(self._bailout_idle_cb, app.doc.modes)

    def _bailout_idle_cb(self, modestack):
        """Bail out of this mode if it's anywhere in the mode stack"""
        # Pops everything above this mode too, then this mode itself.
        while self in modestack:
            logger.debug("bailout idler: leaving %r", modestack.top)
            modestack.pop()
        logger.debug("bailout idler: done")
        # Returning False removes the idler from the GLib main loop.
        return False

    def _tdw_grab_broken_cb(self, tdw, event):
        # Cede control as cleanly as possible if something else grabs either
        # the keyboard or the pointer while a grab is active.
        # One possible cause for https://gna.org/bugs/?20333
        logger.debug("grab-broken-event on %r", tdw)
        logger.debug(" send_event : %r", event.send_event)
        logger.debug(" keyboard : %r", event.keyboard)
        logger.debug(" implicit : %r", event.implicit)
        logger.debug(" grab_window : %r", event.grab_window)
        self._bailout()
        return True

    @property
    def in_drag(self):
        """True while a drag (with its grabs) is in progress."""
        return self._in_drag

    def enter(self, doc, **kwds):
        """Enter the mode, recording the held modifier keys the 1st time

        The attribute `self.initial_modifiers` is set the first time the
        mode is entered.
        """
        super(DragMode, self).enter(doc, **kwds)
        assert self.doc is not None
        if self.inactive_cursor is None:
            # some children might override self.inactive_cursor as read-only
            try:
                self.inactive_cursor = self.doc.app.cursors.get_action_cursor(
                    self.ACTION_NAME, gui.cursor.Name.ARROW
                )
            except AttributeError:
                pass
        self.doc.tdw.set_override_cursor(self.inactive_cursor)
        if self.SPRING_LOADED:
            if self.ignore_modifiers:
                self.initial_modifiers = 0
                return
            old_modifiers = self.initial_modifiers
            if old_modifiers is not None:
                # Re-entering due to an overlying mode being popped
                if old_modifiers != 0:
                    # This mode started with modifiers held
                    modifiers = self.current_modifiers()
                    if (modifiers & old_modifiers) == 0:
                        # But none of them are held any more,
                        # so queue a further pop.
                        GLib.idle_add(self.__pop_modestack_idle_cb)
            else:
                # Record modifiers when the mode is entered for the first time
                self.initial_modifiers = self.current_modifiers()

    def __pop_modestack_idle_cb(self):
        # Pop the mode stack when this mode is re-entered but has to leave
        # straight away because its modifiers are no longer held. Doing it in
        # an idle function avoids confusing the derived class's enter() method:
        # a leave() during an enter() would be strange.
        if self.initial_modifiers is not None:
            if self.doc and (self is self.doc.modes.top):
                self.doc.modes.pop()
        return False

    def leave(self, **kwds):
        """Leave the mode, stopping any drag and restoring the cursor."""
        self._stop_drag()
        if self.doc is not None:
            self.doc.tdw.set_override_cursor(None)
        super(DragMode, self).leave(**kwds)

    def button_press_cb(self, tdw, event):
        if event.type == Gdk.EventType.BUTTON_PRESS:
            if self.in_drag:
                if self._start_button is None:
                    # Doing this allows single clicks to exit keyboard
                    # initiated drags, e.g. those forced when handling a
                    # keyboard event somewhere else.
                    self._start_button = event.button
            else:
                self._start_drag(tdw, event)
                if self.in_drag:
                    # Grab succeeded
                    self._start_button = event.button
        return super(DragMode, self).button_press_cb(tdw, event)

    def button_release_cb(self, tdw, event):
        if self.in_drag:
            if event.button == self._start_button:
                self._stop_drag()
        return super(DragMode, self).button_release_cb(tdw, event)

    def motion_notify_cb(self, tdw, event):
        # We might be here because an Action manipulated the modes stack
        # but if that's the case then we should wait for a button or
        # a keypress to initiate the drag.
        if self._in_drag:
            x, y = event.x, event.y
            self.drag_update_cb(tdw, event, x, y, x - self.last_x, y - self.last_y)
            self.last_x = x
            self.last_y = y
            return True
        # Fall through to other behavioral mixins, just in case
        return super(DragMode, self).motion_notify_cb(tdw, event)

    def key_press_cb(self, win, tdw, event):
        if self.in_drag:
            # Eat keypresses in the middle of a drag no matter how
            # it was started.
            return True
        elif event.keyval == Gdk.KEY_space:
            # Start drags on space
            if event.keyval != self._start_keyval:
                self._start_keyval = event.keyval
                self._start_drag(tdw, event)
            return True
        # Fall through to other behavioral mixins
        return super(DragMode, self).key_press_cb(win, tdw, event)

    def key_release_cb(self, win, tdw, event):
        if self.in_drag:
            # Releasing the key that started a keyboard drag ends it.
            if event.keyval == self._start_keyval:
                self._stop_drag()
                self._start_keyval = None
            return True
        if self.SPRING_LOADED:
            if event.is_modifier and self.in_drag:
                return False
            if self.initial_modifiers:
                modifiers = self.current_modifiers()
                if modifiers & self.initial_modifiers == 0:
                    # All initial modifiers released: spring back out.
                    if self is self.doc.modes.top:
                        self.doc.modes.pop()
                    return True
        # Fall through to other behavioral mixins
        return super(DragMode, self).key_release_cb(win, tdw, event)
class OneshotDragMode(DragMode):
    """Drag modes that can exit immediately when the drag stops

    These are utility modes which allow the user to do quick, simple
    tasks with the canvas like pick a color from it or pan the view.
    """

    def __init__(self, unmodified_persist=True, temporary_activation=True, **kwargs):
        r"""
        :param bool unmodified_persist: Stay active if entered without modkeys
        :param bool \*\*kwargs: Passed through to other __init__s.

        If unmodified_persist is true, and drag mode is spring-loaded, the
        tool will stay active if no modifiers were held initially. This means
        tools will not deselect themselves after one use if activated from,
        say, the toolbar.
        """
        # Fix: forward **kwargs as documented. The old code called
        # DragMode.__init__(self) with no arguments, silently dropping
        # ignore_modifiers and any other cooperative-init keywords.
        # (The docstring is now a raw string so the \*\* markup is not
        # treated as invalid escape sequences.)
        super(OneshotDragMode, self).__init__(**kwargs)
        self.unmodified_persist = unmodified_persist
        self.temporary_activation = temporary_activation

    def stackable_on(self, mode):
        """Oneshot modes return to the mode the user came from on exit"""
        return not isinstance(mode, OneshotDragMode)

    def drag_stop_cb(self, tdw):
        """Ends the drag, popping this mode off the stack if appropriate."""
        # Always exit at the end of a drag if not spring-loaded.
        pop = True
        if self.SPRING_LOADED:
            init_mods = self.initial_modifiers
            if init_mods:
                # If started with modifiers, keeping the modifiers held keeps
                # spring-loaded modes active. If not, exit the mode.
                pop = not (init_mods & self.current_modifiers() == init_mods)
            else:
                # No modifiers were held when this mode was entered.
                pop = self.temporary_activation or not self.unmodified_persist
        if pop and self is self.doc.modes.top:
            self.doc.modes.pop()
        return super(OneshotDragMode, self).drag_stop_cb(tdw)
## Mode stack
class _NullMode(InteractionMode):
    """Do-nothing placeholder mode.

    Serves as ModeStack's built-in ``default_mode_class`` so the stack
    is never truly empty.
    """
class ModeStack(object):
    """A stack of InteractionModes. The top mode is the active one.

    Mode stacks can never be empty. If the final element is popped, it
    will be replaced with a new instance of its ``default_mode_class``.
    """

    def __init__(self, doc):
        """Initialize for a particular controller

        :param doc: Controller instance
        :type doc: CanvasController

        The main MyPaint app uses an instance of `gui.document.Document`
        as `doc`. Simpler drawing canvases can use a basic
        CanvasController and a simpler `default_mode_class`.
        """
        object.__init__(self)
        self._stack = []
        self._doc = doc
        # Re-entrancy guard for _sync_pending_changes_cb().
        self._syncing_pending_changes = False
        if hasattr(doc, "model"):
            doc.model.sync_pending_changes += self._sync_pending_changes_cb
        #: Class to instantiate if stack is empty: callable with 0 args.
        self.default_mode_class = _NullMode

    def _sync_pending_changes_cb(self, model, **kwargs):
        r"""Syncs pending changes with the model

        :param Document model: the requesting model
        :param \*\*kwargs: passed through to checkpoint()

        This issues a `checkpoint()` on the current InteractionMode.
        (Raw docstring so the \*\* markup is not an invalid escape.)
        """
        if self._syncing_pending_changes:
            return
        self._syncing_pending_changes = True
        self.top.checkpoint(**kwargs)
        self._syncing_pending_changes = False

    @event
    def changed(self, old, new):
        """Event: emitted when the active mode changes

        :param old: The previous active mode
        :param new: The new `top` (current) mode

        This event is emitted after the ``enter()`` method of the new
        mode has been called, and therefore after the ``leave()`` of the
        old mode too. On occasion, the old mode may be null.

        Context-aware pushes call this only once, with the old active
        and newly active mode only regardless of how many modes were
        skipped.
        """

    @property
    def top(self):
        """The top node on the stack"""
        # Perhaps rename to "active()"?
        new_mode = self._check()
        if new_mode is not None:
            new_mode.enter(doc=self._doc)
            self.changed(None, new_mode)
        return self._stack[-1]

    def context_push(self, mode):
        """Context-aware push.

        :param mode: mode to be stacked and made active
        :type mode: `InteractionMode`

        Stacks a mode onto the topmost element in the stack it is compatible
        with, as determined by its ``stackable_on()`` method. Incompatible
        top modes are popped one by one until either a compatible mode is
        found, or the stack is emptied, then the new mode is pushed.
        """
        # Pop until the stack is empty, or the top mode is compatible
        old_mode = None
        if len(self._stack) > 0:
            old_mode = self._stack[-1]
        while len(self._stack) > 0:
            if mode.stackable_on(self._stack[-1]):
                break
            incompat = self._stack.pop(-1)
            incompat.leave()
            incompat.popped()
            if len(self._stack) > 0:
                self._stack[-1].enter(doc=self._doc)
        # Stack on top of any remaining compatible mode
        if len(self._stack) > 0:
            self._stack[-1].leave()
        self._stack.append(mode)
        mode.enter(doc=self._doc)
        self.changed(old=old_mode, new=mode)

    def pop(self):
        """Pops a mode, leaving the old top mode and entering the exposed top."""
        old_mode = None
        if len(self._stack) > 0:
            old_mode = self._stack.pop(-1)
            old_mode.leave()
            old_mode.popped()
        top_mode = self._check()
        if top_mode is None:
            top_mode = self._stack[-1]
        # No need to checkpoint user activity here: leave() was already called
        top_mode.enter(doc=self._doc)
        self.changed(old=old_mode, new=top_mode)

    def push(self, mode):
        """Pushes a mode, and enters it.

        :param mode: Mode to be stacked and made active
        :type mode: InteractionMode
        """
        old_mode = None
        if len(self._stack) > 0:
            old_mode = self._stack[-1]
            old_mode.leave()
        # No need to checkpoint user activity here: leave() was already called
        self._stack.append(mode)
        mode.enter(doc=self._doc)
        self.changed(old=old_mode, new=mode)

    def reset(self, replacement=None):
        """Clears the stack, popping the final element and replacing it.

        :param replacement: Optional mode to go on top of the cleared stack.
        :type replacement: `InteractionMode`.
        """
        old_top_mode = None
        if len(self._stack) > 0:
            old_top_mode = self._stack[-1]
        while len(self._stack) > 0:
            old_mode = self._stack.pop(-1)
            old_mode.leave()
            old_mode.popped()
            if len(self._stack) > 0:
                self._stack[-1].enter(doc=self._doc)
        top_mode = self._check(replacement)
        assert top_mode is not None
        self.changed(old=old_top_mode, new=top_mode)

    def pop_to_behaviour(self, flags):
        """Keeps popping the stack until a node that matches the flags is found.

        If the stack does not contain such a node, you will simply end up with
        an empty ModeStack.

        :param flags: Descriptors of the node you want.
        :type flags: `Behavior`.

        By "empty ModeStack" I mean a ModeStack with a single
        ``default_mode_class`` instance, as usual.
        """
        while self.top.pointer_behavior & flags == 0:
            if len(self._stack) == 1:
                self.pop()
                return
            self.pop()

    def _check(self, replacement=None):
        """Ensures that the stack is non-empty

        :param replacement: Optional replacement mode instance.
        :type replacement: `InteractionMode`.

        Returns the new top mode if one was pushed.
        """
        if len(self._stack) > 0:
            return None
        if replacement is not None:
            mode = replacement
        else:
            mode = self.default_mode_class()
        self._stack.append(mode)
        mode.enter(doc=self._doc)
        return mode

    def __repr__(self):
        """Plain-text representation."""
        s = "<ModeStack ["
        s += ", ".join([m.__class__.__name__ for m in self._stack])
        s += "]>"
        return s

    def __len__(self):
        """Returns the number of modes on the stack."""
        return len(self._stack)

    def __nonzero__(self):
        """Mode stacks never test false, regardless of length."""
        return True

    # Fix: Python 3 ignores __nonzero__, so truth testing previously fell
    # through to __len__() and a transiently-empty stack tested false,
    # contradicting the documented contract. Alias it to __bool__;
    # __nonzero__ is kept for backward compatibility.
    __bool__ = __nonzero__

    def __iter__(self):
        for mode in self._stack:
            yield mode

    def __getitem__(self, index):
        return self._stack[index]
from apps.telegram.client import TelegramClient
from apps.telegram.models import (
TelegramChannelVerificationCode,
TelegramToOrganizationConnector,
)
from apps.telegram.updates.update_handlers import UpdateHandler
from apps.telegram.utils import is_verification_message
# Sender user ID that ChannelVerificationCodeHandler.matches() treats as
# "forwarded by Telegram" (see is_forwarded_by_telegram below).
TELEGRAM_ID = 777000

# Reply templates for the various verification outcomes. These are sent
# verbatim to Telegram chats; do not reword casually.
VERIFICATION_FAILED_BOT_NOT_IN_CHANNEL = """Verification failed!
Please add the Grafana OnCall bot to the "{channel_name}" channel as admin and allow it to post messages."""
VERIFICATION_FAILED_SIGN_MESSAGES_NOT_ENABLED = """Verification failed!
Please enable "Sign messages" in channel settings, otherwise Grafana OnCall bot will not be able to operate properly."""
VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED = """Verification failed!
The associated discussion group has already been registered with a different channel."""
CHANNEL_CONNECTED_TEXT = (
    "Done! This channel is now linked to organization <b>{organization_title} 🎉</b>"
)
RELINK_CHANNEL_TEXT = """This Telegram channel is already connected to organization <b>{organization_title}</b>.
Please unlink Telegram channel in settings of organization <b>{organization_title}</b> or contact Grafana OnCall support"""
WRONG_VERIFICATION_CODE = "Verification failed: wrong verification code"
class ChannelVerificationCodeHandler(UpdateHandler):
    """Handles a verification-code message forwarded from a channel into
    its discussion group, linking the channel to an organization."""

    def matches(self) -> bool:
        """True for verification messages forwarded by Telegram from a
        channel into a supergroup discussion chat."""
        message = self.update.message
        if message is None or message.text is None:
            return False
        if message.chat.type != "supergroup":
            return False
        if self.update.effective_user.id != TELEGRAM_ID:
            return False
        return is_verification_message(message.text)

    def process_update(self) -> None:
        """Runs the verification checks in order, replying in the
        discussion group on failure and in the channel on completion."""
        telegram_client = TelegramClient()
        message = self.update.message

        channel_chat_id = message.forward_from_chat.id
        channel_name = message.forward_from_chat.title
        discussion_group_chat_id = message.chat.id
        discussion_group_name = message.chat.title
        verification_code = message.text

        def reply_in_group(text):
            # Failure replies quote the forwarded message in the group.
            telegram_client.send_raw_message(
                chat_id=message.chat.id,
                text=text,
                reply_to_message_id=message.message_id,
            )

        # check if bot is in channel
        if not telegram_client.is_chat_member(chat_id=channel_chat_id):
            reply_in_group(
                VERIFICATION_FAILED_BOT_NOT_IN_CHANNEL.format(
                    channel_name=channel_name
                )
            )
            return

        # check if "Sign messages" is enabled
        if message.forward_signature is None:
            reply_in_group(VERIFICATION_FAILED_SIGN_MESSAGES_NOT_ENABLED)
            return

        # check discussion group chat is not reused
        connector = TelegramToOrganizationConnector.objects.filter(
            discussion_group_chat_id=discussion_group_chat_id
        ).first()
        if connector is not None and connector.channel_chat_id != channel_chat_id:
            # discussion group is already connected to a different channel chat
            reply_in_group(VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED)
            return

        (
            connector,
            created,
        ) = TelegramChannelVerificationCode.verify_channel_and_discussion_group(
            verification_code=verification_code,
            channel_chat_id=channel_chat_id,
            channel_name=channel_name,
            discussion_group_chat_id=discussion_group_chat_id,
            discussion_group_name=discussion_group_name,
        )

        if created:
            reply_text = CHANNEL_CONNECTED_TEXT.format(
                organization_title=connector.organization.org_title
            )
        elif connector is not None:
            reply_text = RELINK_CHANNEL_TEXT.format(
                organization_title=connector.organization.org_title
            )
        else:
            reply_text = WRONG_VERIFICATION_CODE
        telegram_client.send_raw_message(chat_id=channel_chat_id, text=reply_text)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_builder."""
import tensorflow as tf
from app.object_detection.builders import losses_builder
from app.object_detection.core import losses
from app.object_detection.protos import losses_pb2
from google.protobuf import text_format
class LocalizationLossBuilderTest(tf.test.TestCase):
    """Checks that losses_builder.build() constructs the configured
    localization loss classes."""

    # Note: assertTrue(isinstance(...)) calls were replaced throughout
    # with assertIsInstance, which reports the actual type on failure.

    def test_build_weighted_l2_localization_loss(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss)

    def test_build_weighted_smooth_l1_localization_loss(self):
        losses_text_proto = """
            localization_loss {
              weighted_smooth_l1 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            localization_loss, losses.WeightedSmoothL1LocalizationLoss
        )

    def test_build_weighted_iou_localization_loss(self):
        losses_text_proto = """
            localization_loss {
              weighted_iou {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(localization_loss, losses.WeightedIOULocalizationLoss)

    def test_anchorwise_output(self):
        losses_text_proto = """
            localization_loss {
              weighted_smooth_l1 {
                anchorwise_output: true
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, localization_loss, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            localization_loss, losses.WeightedSmoothL1LocalizationLoss
        )
        # With anchorwise_output the loss keeps a per-anchor dimension.
        predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
        targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
        weights = tf.constant([[1.0, 1.0]])
        loss = localization_loss(predictions, targets, weights=weights)
        self.assertEqual(loss.shape, [1, 2])

    def test_raise_error_on_empty_localization_config(self):
        losses_text_proto = """
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        with self.assertRaises(ValueError):
            losses_builder._build_localization_loss(losses_proto)
class ClassificationLossBuilderTest(tf.test.TestCase):
    """Checks that losses_builder.build() constructs the configured
    classification loss classes with the right parameters."""

    # Note: assertTrue(isinstance(...)) calls were replaced throughout
    # with assertIsInstance for clearer failure messages, and the
    # None-comparison below uses assertIsNone (assertAlmostEqual against
    # None raises TypeError instead of producing a test failure).

    def test_build_weighted_sigmoid_classification_loss(self):
        losses_text_proto = """
            classification_loss {
              weighted_sigmoid {
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.WeightedSigmoidClassificationLoss
        )

    def test_build_weighted_sigmoid_focal_classification_loss(self):
        losses_text_proto = """
            classification_loss {
              weighted_sigmoid_focal {
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.SigmoidFocalClassificationLoss
        )
        # Defaults: no alpha weighting, gamma = 2.0.
        self.assertIsNone(classification_loss._alpha)
        self.assertAlmostEqual(classification_loss._gamma, 2.0)

    def test_build_weighted_sigmoid_focal_loss_non_default(self):
        losses_text_proto = """
            classification_loss {
              weighted_sigmoid_focal {
                alpha: 0.25
                gamma: 3.0
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.SigmoidFocalClassificationLoss
        )
        self.assertAlmostEqual(classification_loss._alpha, 0.25)
        self.assertAlmostEqual(classification_loss._gamma, 3.0)

    def test_build_weighted_softmax_classification_loss(self):
        losses_text_proto = """
            classification_loss {
              weighted_softmax {
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.WeightedSoftmaxClassificationLoss
        )

    def test_build_weighted_softmax_classification_loss_with_logit_scale(self):
        losses_text_proto = """
            classification_loss {
              weighted_softmax {
                logit_scale: 2.0
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.WeightedSoftmaxClassificationLoss
        )

    def test_build_bootstrapped_sigmoid_classification_loss(self):
        losses_text_proto = """
            classification_loss {
              bootstrapped_sigmoid {
                alpha: 0.5
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.BootstrappedSigmoidClassificationLoss
        )

    def test_anchorwise_output(self):
        losses_text_proto = """
            classification_loss {
              weighted_sigmoid {
                anchorwise_output: true
              }
            }
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
        self.assertIsInstance(
            classification_loss, losses.WeightedSigmoidClassificationLoss
        )
        # With anchorwise_output the loss keeps a per-anchor dimension.
        predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
        targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
        weights = tf.constant([[1.0, 1.0]])
        loss = classification_loss(predictions, targets, weights=weights)
        self.assertEqual(loss.shape, [1, 2])

    def test_raise_error_on_empty_config(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        with self.assertRaises(ValueError):
            losses_builder.build(losses_proto)
class HardExampleMinerBuilderTest(tf.test.TestCase):
    """Checks losses_builder's construction of HardExampleMiner."""

    # Note: assertTrue(isinstance(...)) / assertEqual(x, None) were
    # replaced with assertIsInstance / assertIsNone, the idiomatic
    # unittest assertions with clearer failure messages.

    def test_do_not_build_hard_example_miner_by_default(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
        self.assertIsNone(hard_example_miner)

    def test_build_hard_example_miner_for_classification_loss(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
            hard_example_miner {
              loss_type: CLASSIFICATION
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
        self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
        self.assertEqual(hard_example_miner._loss_type, "cls")

    def test_build_hard_example_miner_for_localization_loss(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
            hard_example_miner {
              loss_type: LOCALIZATION
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
        self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
        self.assertEqual(hard_example_miner._loss_type, "loc")

    def test_build_hard_example_miner_with_non_default_values(self):
        losses_text_proto = """
            localization_loss {
              weighted_l2 {
              }
            }
            classification_loss {
              weighted_softmax {
              }
            }
            hard_example_miner {
              num_hard_examples: 32
              iou_threshold: 0.5
              loss_type: LOCALIZATION
              max_negatives_per_positive: 10
              min_negatives_per_image: 3
            }
        """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)
        self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
        self.assertEqual(hard_example_miner._num_hard_examples, 32)
        self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
        self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
        self.assertEqual(hard_example_miner._min_negatives_per_image, 3)
class LossBuilderTest(tf.test.TestCase):
    """End-to-end tests over the full tuple returned by losses_builder.build()."""

    def test_build_all_loss_parameters(self):
        losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_softmax {
        }
      }
      hard_example_miner {
      }
      classification_weight: 0.8
      localization_weight: 0.2
    """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        (
            classification_loss,
            localization_loss,
            classification_weight,
            localization_weight,
            hard_example_miner,
        ) = losses_builder.build(losses_proto)
        # assertIsInstance shows the actual type on failure, unlike
        # assertTrue(isinstance(...)).
        self.assertIsInstance(hard_example_miner, losses.HardExampleMiner)
        self.assertIsInstance(
            classification_loss, losses.WeightedSoftmaxClassificationLoss
        )
        self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss)
        self.assertAlmostEqual(classification_weight, 0.8)
        self.assertAlmostEqual(localization_weight, 0.2)

    def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):
        # The builder rejects a sigmoid focal loss combined with a
        # hard_example_miner section.
        losses_text_proto = """
      localization_loss {
        weighted_l2 {
        }
      }
      classification_loss {
        weighted_sigmoid_focal {
        }
      }
      hard_example_miner {
      }
      classification_weight: 0.8
      localization_weight: 0.2
    """
        losses_proto = losses_pb2.Loss()
        text_format.Merge(losses_text_proto, losses_proto)
        with self.assertRaises(ValueError):
            losses_builder.build(losses_proto)
class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase):
    """Tests for losses_builder.build_faster_rcnn_classification_loss()."""

    def test_build_sigmoid_loss(self):
        losses_text_proto = """
      weighted_sigmoid {
      }
    """
        losses_proto = losses_pb2.ClassificationLoss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss = losses_builder.build_faster_rcnn_classification_loss(
            losses_proto
        )
        # assertIsInstance shows the actual type on failure, unlike
        # assertTrue(isinstance(...)).
        self.assertIsInstance(
            classification_loss, losses.WeightedSigmoidClassificationLoss
        )

    def test_build_softmax_loss(self):
        losses_text_proto = """
      weighted_softmax {
      }
    """
        losses_proto = losses_pb2.ClassificationLoss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss = losses_builder.build_faster_rcnn_classification_loss(
            losses_proto
        )
        self.assertIsInstance(
            classification_loss, losses.WeightedSoftmaxClassificationLoss
        )

    def test_build_softmax_loss_by_default(self):
        # An empty ClassificationLoss message falls back to softmax.
        losses_text_proto = """
    """
        losses_proto = losses_pb2.ClassificationLoss()
        text_format.Merge(losses_text_proto, losses_proto)
        classification_loss = losses_builder.build_faster_rcnn_classification_loss(
            losses_proto
        )
        self.assertIsInstance(
            classification_loss, losses.WeightedSoftmaxClassificationLoss
        )
# Allow running this test module directly; test runners also discover it.
if __name__ == "__main__":
    tf.test.main()
|
group | sql | from posthog.clickhouse.base_sql import COPY_ROWS_BETWEEN_TEAMS_BASE_SQL
from posthog.clickhouse.kafka_engine import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine
from posthog.clickhouse.table_engines import ReplacingMergeTree
from posthog.kafka_client.topics import KAFKA_GROUPS
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
GROUPS_TABLE = "groups"

DROP_GROUPS_TABLE_SQL = f"DROP TABLE {GROUPS_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"

# NOTE: this constant was previously assigned twice in this module; the later
# IF EXISTS form always won, so only that (safe-before-creation) form is kept.
TRUNCATE_GROUPS_TABLE_SQL = (
    f"TRUNCATE TABLE IF EXISTS {GROUPS_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)

# Shared column layout for both the storage table and the Kafka ingestion table.
GROUPS_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
    group_type_index UInt8,
    group_key VARCHAR,
    created_at DateTime64,
    team_id Int64,
    group_properties VARCHAR
    {extra_fields}
) ENGINE = {engine}
"""

# ReplacingMergeTree deduplicates rows sharing the ORDER BY key, keeping the
# one with the highest _timestamp version.
GROUPS_TABLE_ENGINE = lambda: ReplacingMergeTree(GROUPS_TABLE, ver="_timestamp")

GROUPS_TABLE_SQL = lambda: (
    GROUPS_TABLE_BASE_SQL
    + """Order By (team_id, group_type_index, group_key)
{storage_policy}
"""
).format(
    table_name=GROUPS_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=GROUPS_TABLE_ENGINE(),
    extra_fields=KAFKA_COLUMNS,
    storage_policy=STORAGE_POLICY(),
)

# Kafka-engine table; rows flow from it into the storage table via the MV below.
KAFKA_GROUPS_TABLE_SQL = lambda: GROUPS_TABLE_BASE_SQL.format(
    table_name="kafka_" + GROUPS_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=kafka_engine(KAFKA_GROUPS),
    extra_fields="",
)

# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
GROUPS_TABLE_MV_SQL = f"""
CREATE MATERIALIZED VIEW IF NOT EXISTS {GROUPS_TABLE}_mv ON CLUSTER '{CLICKHOUSE_CLUSTER}'
TO {CLICKHOUSE_DATABASE}.{GROUPS_TABLE}
AS SELECT
group_type_index,
group_key,
created_at,
team_id,
group_properties,
_timestamp,
_offset
FROM {CLICKHOUSE_DATABASE}.kafka_{GROUPS_TABLE}
"""

# { ..., "group_0": 1325 }
# To join with events join using $group_{group_type_index} column

INSERT_GROUP_SQL = """
INSERT INTO groups (group_type_index, group_key, team_id, group_properties, created_at, _timestamp, _offset) SELECT %(group_type_index)s, %(group_key)s, %(team_id)s, %(group_properties)s, %(created_at)s, %(_timestamp)s, 0
"""

GET_GROUP_IDS_BY_PROPERTY_SQL = """
SELECT DISTINCT group_key
FROM groups
WHERE team_id = %(team_id)s AND group_type_index = %({group_type_index_var})s {filters}
"""

#
# Demo data
#

COPY_GROUPS_BETWEEN_TEAMS = COPY_ROWS_BETWEEN_TEAMS_BASE_SQL.format(
    table_name=GROUPS_TABLE,
    columns_except_team_id="""group_type_index, group_key, group_properties, created_at, _timestamp, _offset""",
)

SELECT_GROUPS_OF_TEAM = (
    """SELECT * FROM {table_name} WHERE team_id = %(source_team_id)s""".format(
        table_name=GROUPS_TABLE
    )
)
|
chardet | eucjpprober | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .chardistribution import EUCJPDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .jpcntx import EUCJPContextAnalysis
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for the EUC-JP encoding.

    Combines a coding state machine (checks byte-sequence legality) with a
    character distribution analyser and a context analyser; the reported
    confidence is the maximum of the two analysers.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validating EUC-JP byte sequences.
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        # Frequency-based analyser over decoded characters.
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        # Analyser of character-pair context specific to EUC-JP.
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        """Reset base-class state and the EUC-JP context analyser."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes and return the current detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(
                        self.get_charset_name()
                        + " prober hit error at byte "
                        + str(i)
                        + "\n"
                    )
                # Illegal byte sequence for EUC-JP: rule this charset out.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character ended at byte i; feed it (together with
                # the byte carried over in _mLastChar for i == 0) to both
                # analysers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
        # Remember the last byte so a character split across chunks is handled.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: commit early once confidence is high enough.
            if self._mContextAnalyzer.got_enough_data() and (
                self.get_confidence() > constants.SHORTCUT_THRESHOLD
            ):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
QT | Gui | import os
from Code import Configuracion, Usuarios, Util, VarGen
from Code.QT import Colocacion, Controles, Iconos, QTUtil, QTVarios
from PyQt4 import QtCore, QtGui
def lanzaGUI(procesador):
    """Launch the application's graphical interface.

    :param procesador: main controller; initialised here with the selected
        user and then asked to start the GUI.
    :return: the Qt event-loop exit code, or None if user selection was
        cancelled.
    """
    app = QtGui.QApplication([])

    # User selection (with password check when any user has one).
    liUsuarios = Usuarios.Usuarios().lista
    usuario = None
    if liUsuarios:
        usuario = pideUsuario(liUsuarios)
        if usuario is None:
            return
        user = str(usuario.numero) if usuario.numero else ""
    else:
        user = ""

    # Ask for a language only when the active folder is missing or empty,
    # i.e. this looks like a first run.
    activeFolder = Configuracion.activeFolder()
    siPedirLenguaje = not os.path.isdir(activeFolder) or not os.listdir(activeFolder)

    procesador.iniciaConUsuario(user)
    configuracion = procesador.configuracion
    if usuario:
        if not configuracion.jugador:
            configuracion.jugador = usuario.nombre
            configuracion.graba()
        elif configuracion.jugador != usuario.nombre:
            # Keep the stored user list in sync with the configured player name.
            for usu in liUsuarios:
                if usu.numero == usuario.numero:
                    usu.nombre = configuracion.jugador
            Usuarios.Usuarios().guardaLista(liUsuarios)

    # Language check.
    if siPedirLenguaje and not configuracion.traductor:
        if user:
            confMain = Configuracion.Configuracion("")
            ori = confMain.ficheroMExternos
            confMain.lee()
            confMain.limpia(usuario.nombre)
            confMain.ponCarpetas(user)
            confMain.graba()
            procesador.configuracion = confMain
            Util.copiaFichero(ori, confMain.carpeta)
        else:
            li = configuracion.listaTraducciones()
            menu = QTVarios.LCMenu(None)
            nico = QTVarios.rondoPuntos()
            for k, nombre, porc, author in li:
                rotulo = nombre
                if porc != "100":
                    rotulo += " (%s%%)" % porc
                # BUGFIX: show "rotulo" (name plus completion percentage);
                # previously the bare "nombre" was passed and the computed
                # label was never used.
                menu.opcion(k, rotulo, nico.otro())
            resp = menu.lanza()
            if resp:
                configuracion.traductor = resp
                configuracion.graba()

    # Style and palette.
    app.setStyle(QtGui.QStyleFactory.create(configuracion.estilo))
    if configuracion.palette:
        qpalette = QtGui.QPalette(QtGui.QPalette.Dark)
        palette = configuracion.palette

        def cl(tipo):
            # Configured colour for one palette role.
            return QtGui.QColor(palette[tipo])

        qpalette.setColor(QtGui.QPalette.Window, cl("Window"))
        qpalette.setColor(QtGui.QPalette.WindowText, cl("WindowText"))
        qpalette.setColor(QtGui.QPalette.Base, cl("Base"))
        qpalette.setColor(QtGui.QPalette.Text, cl("Text"))
        qpalette.setColor(QtGui.QPalette.AlternateBase, cl("AlternateBase"))
        qpalette.setColor(QtGui.QPalette.ToolTipBase, cl("ToolTipBase"))
        qpalette.setColor(QtGui.QPalette.ToolTipText, cl("ToolTipText"))
        qpalette.setColor(QtGui.QPalette.Button, cl("Button"))
        qpalette.setColor(QtGui.QPalette.ButtonText, cl("ButtonText"))
        qpalette.setColor(QtGui.QPalette.BrightText, cl("BrightText"))
        qpalette.setColor(QtGui.QPalette.Link, cl("Link"))
    else:
        qpalette = QtGui.QApplication.style().standardPalette()
    app.setPalette(qpalette)
    app.setEffectEnabled(QtCore.Qt.UI_AnimateMenu)

    QtGui.QFontDatabase.addApplicationFont("IntFiles/PIRATDIA.TTF")
    if configuracion.familia:
        font = Controles.TipoLetra(configuracion.familia)
        app.setFont(font)

    VarGen.gc = QTUtil.GarbageCollector()

    # Launch the main window and run the Qt event loop.
    procesador.iniciarGUI()
    resp = app.exec_()
    return resp
class WPassword(QtGui.QDialog):
    """Modal login dialog: pick a user from a combo box and type a password."""

    def __init__(self, liUsuarios):
        """Build the dialog.

        :param liUsuarios: non-empty list of user objects; the first one also
            supplies the translated UI labels (trlucas, trusuario, ...).
        """
        QtGui.QDialog.__init__(self, None)
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.Dialog
            | QtCore.Qt.WindowTitleHint
        )
        # The first user carries the translated strings used for the texts.
        main = liUsuarios[0]
        self.setWindowTitle(main.trlucas)
        self.setWindowIcon(Iconos.Usuarios())
        liOpciones = [(usuario.nombre, usuario) for usuario in liUsuarios]
        lbU = Controles.LB(self, main.trusuario + ":")
        self.cbU = Controles.CB(self, liOpciones, main)
        lbP = Controles.LB(self, main.trpassword + ":")
        # Line edit in password (masked) mode.
        self.edP = Controles.ED(self).password()
        btaceptar = Controles.PB(self, main.traceptar, rutina=self.accept, plano=False)
        btcancelar = Controles.PB(
            self, main.trcancelar, rutina=self.reject, plano=False
        )
        # Grid: row 0 = user selector, row 1 = password field.
        ly = Colocacion.G()
        ly.controld(lbU, 0, 0).control(self.cbU, 0, 1)
        ly.controld(lbP, 1, 0).control(self.edP, 1, 1)
        lybt = (
            Colocacion.H().relleno().control(btaceptar).espacio(10).control(btcancelar)
        )
        layout = Colocacion.V().otro(ly).espacio(10).otro(lybt).margen(10)
        self.setLayout(layout)
        self.edP.setFocus()

    def resultado(self):
        """Return the selected user if the typed password matches, else None."""
        usuario = self.cbU.valor()
        return usuario if self.edP.texto().strip() == usuario.password else None
def pideUsuario(liUsuarios):
    """Select (and, when passwords exist, authenticate) one of the users.

    Returns the chosen user object, or None when the user cancels or fails
    the password check three times.
    """
    # If any stored user has a password, the login dialog must be shown.
    necesita_password = any(usuario.password for usuario in liUsuarios)

    if necesita_password:
        # Up to three attempts; a cancelled dialog aborts immediately.
        for _intento in range(3):
            w = WPassword(liUsuarios)
            if not w.exec_():
                return None
            usuario = w.resultado()
            if usuario is not None:
                return usuario
        return None

    # No passwords: a single user is returned directly.
    if len(liUsuarios) == 1:
        return liUsuarios[0]

    # Several users: let the person pick one from a plain menu.
    # (LCMenu cannot be used yet: the configuration does not exist here.)
    menu = Controles.Menu(None)
    menu.separador()
    for usuario in liUsuarios:
        menu.opcion(usuario, usuario.nombre, Iconos.PuntoNaranja())
    menu.separador()
    # menu.lanza() yields None when dismissed, matching our contract.
    return menu.lanza()
|
calculix | write_constraint_temperature | # ***************************************************************************
# * Copyright (c) 2021 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM calculix constraint temperature"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecad.org"
import FreeCAD
def get_analysis_types():
    """Return the analysis types this constraint writer applies to."""
    supported = ["thermomech"]
    return supported
# name must substitute underscores for whitespace (#7360)
def get_sets_name():
    """Return the identifier used for the temperature node set section."""
    return "constraints_temperature_node_sets"
def get_constraint_title():
    """Return the human-readable title for this constraint type."""
    title = "Fixed temperature constraint applied"
    return title
def write_meshdata_constraint(f, femobj, temp_obj, ccxwriter):
    """Write the *NSET block listing the constrained nodes.

    :param f: writable text stream receiving the CalculiX input lines.
    :param femobj: dict with a "Nodes" iterable of node numbers.
    :param temp_obj: constraint object; its Name labels the node set.
    :param ccxwriter: writer context (unused here, kept for the interface).
    """
    lines = ["*NSET,NSET={}\n".format(temp_obj.Name)]
    lines.extend("{},\n".format(node) for node in femobj["Nodes"])
    f.write("".join(lines))
def get_before_write_meshdata_constraint():
    """Text emitted before the node set data; none is needed here."""
    return ""


def get_after_write_meshdata_constraint():
    """Text emitted after the node set data; none is needed here."""
    return ""


def get_before_write_constraint():
    """Text emitted before the constraint card; none is needed here."""
    return ""


def get_after_write_constraint():
    """Text emitted after the constraint card; none is needed here."""
    return ""
def write_constraint(f, femobj, temp_obj, ccxwriter):
    """Write the *BOUNDARY or *CFLUX card for one temperature constraint.

    :param f: writable text stream receiving the CalculiX input lines.
    :param femobj: dict with a "Nodes" iterable (used to split the flux).
    :param temp_obj: constraint object with ConstraintType and the values.
    :param ccxwriter: writer context (unused here, kept for the interface).
    """
    # floats read from ccx should use {:.13G}, see comment in writer module
    node_count = len(femobj["Nodes"])
    constraint_type = temp_obj.ConstraintType
    if constraint_type == "Temperature":
        kelvin = FreeCAD.Units.Quantity(temp_obj.Temperature.getValueAs("K"))
        f.write("*BOUNDARY\n")
        f.write("{},11,11,{}\n".format(temp_obj.Name, kelvin))
        f.write("\n")
    elif constraint_type == "CFlux":
        # CFLUX has to be specified in mW; split evenly over all nodes.
        per_node = FreeCAD.Units.Quantity(temp_obj.CFlux.getValueAs("mW")) / node_count
        f.write("*CFLUX\n")
        f.write("{},11,{}\n".format(temp_obj.Name, per_node))
        f.write("\n")
|
clientScripts | archivematicaCreateMETSMetadataXML | #!/usr/bin/env python
#
# This file is part of Archivematica.
#
# Copyright 2010-2021 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
"""Management of XML metadata files."""
import csv
from pathlib import Path
from urllib.parse import urlparse
from urllib.request import urlopen
import create_mets_v2 as createmets2
import namespaces as ns
import requests
from databaseFunctions import insertIntoEvents
from importlib_metadata import version
from lxml import etree
from main import models
def process_xml_metadata(mets, sip_dir, sip_uuid, sip_type, xml_validation):
    """Attach source XML metadata files to the METS as dmdSecs, validating them.

    :param mets: METS document object (provides all_files/get_file).
    :param sip_dir: absolute path to the SIP directory.
    :param sip_uuid: UUID of the SIP (used to look up File rows).
    :param sip_type: SIP type string; containing "REIN" marks a reingest.
    :param xml_validation: mapping of schema key -> schema URI; a falsy value
        disables XML metadata processing entirely.
    :return: the (possibly updated) mets and a list of accumulated errors.
    """
    if not xml_validation:
        return mets, []
    xml_metadata_mapping, xml_metadata_errors = _get_xml_metadata_mapping(
        sip_dir, reingest="REIN" in sip_type
    )
    if not xml_metadata_mapping:
        return mets, xml_metadata_errors
    for fsentry in mets.all_files():
        # Only original files and directories can carry source metadata.
        if fsentry.use != "original" and fsentry.type != "Directory":
            continue
        path = fsentry.get_path()
        if path not in xml_metadata_mapping:
            continue
        for xml_type, xml_path in xml_metadata_mapping[path].items():
            # An empty metadata path in the CSV means "delete this dmdSec".
            if not xml_path:
                fsentry.delete_dmdsec("OTHER", xml_type)
                continue
            tree = etree.parse(str(xml_path))
            try:
                schema_uri = _get_schema_uri(tree, xml_validation)
            except ValueError as err:
                xml_metadata_errors.append(err)
                continue
            if schema_uri:
                xml_rel_path = xml_path.relative_to(sip_dir)
                try:
                    metadata_file = models.File.objects.get(
                        sip_id=sip_uuid,
                        currentlocation=f"%SIPDirectory%{xml_rel_path}",
                    )
                except models.File.DoesNotExist:
                    xml_metadata_errors.append(f"No uuid for file: {xml_rel_path}")
                    continue
                valid, errors = _validate_xml(tree, schema_uri)
                # Record the validation outcome as a PREMIS event on the file.
                _add_validation_event(
                    mets, metadata_file.uuid, schema_uri, valid, errors
                )
                if not valid:
                    xml_metadata_errors += errors
                    continue
            # Valid (or no schema configured for this key): add the dmdSec.
            fsentry.add_dmdsec(
                tree.getroot(),
                "OTHER",
                othermdtype=xml_type,
                status="update" if "REIN" in sip_type else "original",
            )
    return mets, xml_metadata_errors
def _get_xml_metadata_mapping(sip_path, reingest=False):
    """Get a mapping of files/dirs in the SIP and their related XML files.

    On initial ingests, it looks for such mapping in source-metadata.csv
    files located on each transfer metadata folder. On reingest it only
    considers the source-metadata.csv file in the main metadata folder.

    Example source-metadata.csv:

    filename,metadata,type
    objects,objects_metadata.xml,metadata_type
    objects/dir,dir_metadata.xml,metadata_type
    objects/dir/file.pdf,file_metadata_a.xml,metadata_type_a
    objects/dir/file.pdf,file_metadata_b.xml,metadata_type_b

    Example dict returned:

    {
        "objects": {"metadata_type": Path("/path/to/objects_metadata.xml")},
        "objects/dir": {"metadata_type": Path("/path/to/dir_metadata.xml")},
        "objects/dir/file.pdf": {
            "metadata_type_a": Path("/path/to/file_metadata_a.xml"),
            "metadata_type_b": Path("/path/to/file_metadata_b.xml"),
        },
    }

    :param str sip_path: Absolute path to the SIP.
    :param bool reingest: Boolean to indicate if it's a reingest.
    :return dict, list: Dictionary with File/dir path -> dict of type -> metadata
    file pathlib Path, and list with errors (if a CSV row is missing the filename
    or type, or if there is more than one entry for the same filename and type).
    """
    mapping = {}
    errors = []
    source_metadata_paths = []
    metadata_path = Path(sip_path) / "objects" / "metadata"
    transfers_metadata_path = metadata_path / "transfers"
    if reingest:
        source_metadata_paths.append(metadata_path / "source-metadata.csv")
    elif transfers_metadata_path.is_dir():
        # Initial ingest: one CSV per transfer directory.
        for dir_ in transfers_metadata_path.iterdir():
            source_metadata_paths.append(dir_ / "source-metadata.csv")
    for source_metadata_path in source_metadata_paths:
        if not source_metadata_path.is_file():
            continue
        with source_metadata_path.open() as f:
            reader = csv.DictReader(f)
            for row in reader:
                # filename and type are mandatory, non-empty columns.
                if not all(k in row and row[k] for k in ["filename", "type"]):
                    errors.append(
                        "A row in {} is missing the filename and/or type".format(
                            source_metadata_path
                        )
                    )
                    continue
                if row["type"] == "CUSTOM":
                    errors.append(
                        "A row in {} is using CUSTOM, a reserved type".format(
                            source_metadata_path
                        )
                    )
                    continue
                if row["filename"] not in mapping:
                    mapping[row["filename"]] = {}
                elif row["type"] in mapping[row["filename"]]:
                    # Duplicate (filename, type) pairs are rejected.
                    errors.append(
                        "More than one entry in {} for path {} and type {}".format(
                            source_metadata_path, row["filename"], row["type"]
                        )
                    )
                    continue
                if row["metadata"]:
                    # Resolve the metadata file relative to the CSV location.
                    row["metadata"] = source_metadata_path.parent / row["metadata"]
                mapping[row["filename"]][row["type"]] = row["metadata"]
    return mapping, errors
def _get_schema_uri(tree, xml_validation):
    """Find the validation schema URI configured for an XML document.

    Candidate keys are tried in order: xsi:noNamespaceSchemaLocation, the
    last token of xsi:schemaLocation, the root element namespace URI, and
    the root element local name; the first key present in ``xml_validation``
    wins.

    :raises ValueError: if none of the candidate keys is configured.
    """
    key = None
    checked_keys = []  # every candidate tried, reported in the error message
    schema_location = tree.xpath(
        "/*/@xsi:noNamespaceSchemaLocation", namespaces={"xsi": ns.xsiNS}
    )
    if schema_location:
        key = schema_location[0].strip()
        checked_keys.append(key)
    if not key or key not in xml_validation:
        schema_location = tree.xpath(
            "/*/@xsi:schemaLocation", namespaces={"xsi": ns.xsiNS}
        )
        if schema_location:
            # schemaLocation pairs namespace and URI; the URI comes last.
            key = schema_location[0].strip().split()[-1]
            checked_keys.append(key)
    if not key or key not in xml_validation:
        key = tree.xpath("namespace-uri(.)")
        checked_keys.append(key)
    if not key or key not in xml_validation:
        key = tree.xpath("local-name(.)")
        checked_keys.append(key)
    if not key or key not in xml_validation:
        raise ValueError(f"XML validation schema not found for keys: {checked_keys}")
    return xml_validation[key]
class Resolver(etree.Resolver):
    """lxml resolver that fetches http(s) resources through requests.

    Used as a fallback when plain lxml parsing of an XSD fails (see
    _validate_xml); any non-HTTP URL is delegated to lxml's default.
    """

    def resolve(self, url, id, context):
        url_scheme = urlparse(url).scheme
        if url_scheme in ("http", "https"):
            try:
                response = requests.get(url)
            except requests.RequestException:
                # Network failure: fall back to lxml's default resolution.
                return super().resolve(url, id, context)
            else:
                return self.resolve_string(response.text, context)
        else:
            return super().resolve(url, id, context)
def _validate_xml(tree, schema_uri):
    """Validate an XML tree against a DTD, XSD or RelaxNG schema.

    :param tree: parsed lxml element tree to validate.
    :param schema_uri: local absolute path or URL of the schema; its file
        extension (dtd/xsd/rng) selects the validator class.
    :return: tuple (valid, errors); errors is a list of messages/log entries.
    """
    schema_type = schema_uri.split(".")[-1]
    parse_result = urlparse(schema_uri)
    if not parse_result.scheme and schema_uri == parse_result.path:
        # URI is a local file
        try:
            schema_uri = Path(schema_uri).as_uri()
        except ValueError:
            return False, [f"XML schema local path {schema_uri} must be absolute"]
    try:
        with urlopen(schema_uri) as f:
            if schema_type == "dtd":
                schema = etree.DTD(f)
            elif schema_type == "xsd":
                schema_contents = etree.parse(f)
                try:
                    schema = etree.XMLSchema(schema_contents)
                except etree.XMLSchemaParseError:
                    # Try parsing the schema again with a custom resolver
                    # (fetches imported schemas over HTTP via requests).
                    parser = etree.XMLParser()
                    resolver = Resolver()
                    parser.resolvers.add(resolver)
                    with urlopen(schema_uri) as f2:
                        schema_contents = etree.parse(f2, parser)
                    schema = etree.XMLSchema(schema_contents)
            elif schema_type == "rng":
                schema_contents = etree.parse(f)
                schema = etree.RelaxNG(schema_contents)
            else:
                return False, [f"Unknown XML validation schema type: {schema_type}"]
    except etree.LxmlError as err:
        return False, [f"Could not parse schema file: {schema_uri}", err]
    if not schema.validate(tree):
        return False, schema.error_log
    return True, []
def _add_validation_event(mets, file_uuid, schema_uri, valid, errors):
    """Record an XML-validation PREMIS event for a metadata file.

    :param mets: METS document (provides get_file).
    :param file_uuid: UUID of the validated metadata file.
    :param schema_uri: schema used, stored in the event detail.
    :param valid: whether validation passed.
    :param errors: entries joined into the outcome detail note.
    """
    # NOTE: insertion order of these dicts determines the serialized
    # eventDetail string, so do not reorder the keys.
    event_detail = {
        "type": "metadata",
        "validation-source-type": schema_uri.split(".")[-1],
        "validation-source": schema_uri,
        "program": "lxml",
        "version": version("lxml"),
    }
    event_data = {
        "eventType": "validation",
        "eventDetail": "; ".join([f'{k}="{v}"' for k, v in event_detail.items()]),
        "eventOutcome": "pass" if valid else "fail",
        "eventOutcomeDetailNote": "\n".join([str(err) for err in errors]),
    }
    # Persist the event in the database, then mirror it into the METS entry.
    event_object = insertIntoEvents(file_uuid, **event_data)
    metadata_fsentry = mets.get_file(file_uuid=file_uuid)
    metadata_fsentry.add_premis_event(createmets2.createEvent(event_object))
|
extractor | cctv | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import float_or_none, try_get, unified_timestamp
from .common import InfoExtractor
class CCTVIE(InfoExtractor):
    """Extractor for CCTV/CNTV/NCPA Classic video pages (央视网)."""

    IE_DESC = "央视网"
    _VALID_URL = r"https?://(?:(?:[^/]+)\.(?:cntv|cctv)\.(?:com|cn)|(?:www\.)?ncpa-classic\.com)/(?:[^/]+/)*?(?P<id>[^/?#&]+?)(?:/index)?(?:\.s?html|[?#&]|$)"
    # Each test mirrors one of the embedding patterns matched in
    # _real_extract (see the regex alternatives there).
    _TESTS = [
        {
            # fo.addVariable("videoCenterId","id")
            "url": "http://sports.cntv.cn/2016/02/12/ARTIaBRxv4rTT1yWf1frW2wi160212.shtml",
            "md5": "d61ec00a493e09da810bf406a078f691",
            "info_dict": {
                "id": "5ecdbeab623f4973b40ff25f18b174e8",
                "ext": "mp4",
                "title": "[NBA]二少联手砍下46分 雷霆主场击败鹈鹕(快讯)",
                "description": "md5:7e14a5328dc5eb3d1cd6afbbe0574e95",
                "duration": 98,
                "uploader": "songjunjie",
                "timestamp": 1455279956,
                "upload_date": "20160212",
            },
        },
        {
            # var guid = "id"
            "url": "http://tv.cctv.com/2016/02/05/VIDEUS7apq3lKrHG9Dncm03B160205.shtml",
            "info_dict": {
                "id": "efc5d49e5b3b4ab2b34f3a502b73d3ae",
                "ext": "mp4",
                "title": "[赛车]“车王”舒马赫恢复情况成谜(快讯)",
                "description": "2月4日,蒙特泽莫罗透露了关于“车王”舒马赫恢复情况,但情况是否属实遭到了质疑。",
                "duration": 37,
                "uploader": "shujun",
                "timestamp": 1454677291,
                "upload_date": "20160205",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # changePlayer('id')
            "url": "http://english.cntv.cn/special/four_comprehensives/index.shtml",
            "info_dict": {
                "id": "4bb9bb4db7a6471ba85fdeda5af0381e",
                "ext": "mp4",
                "title": "NHnews008 ANNUAL POLITICAL SEASON",
                "description": "Four Comprehensives",
                "duration": 60,
                "uploader": "zhangyunlei",
                "timestamp": 1425385521,
                "upload_date": "20150303",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # loadvideo('id')
            "url": "http://cctv.cntv.cn/lm/tvseries_russian/yilugesanghua/index.shtml",
            "info_dict": {
                "id": "b15f009ff45c43968b9af583fc2e04b2",
                "ext": "mp4",
                "title": "Путь,усыпанный космеями Серия 1",
                "description": "Путь, усыпанный космеями",
                "duration": 2645,
                "uploader": "renxue",
                "timestamp": 1477479241,
                "upload_date": "20161026",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # var initMyAray = 'id'
            "url": "http://www.ncpa-classic.com/2013/05/22/VIDE1369219508996867.shtml",
            "info_dict": {
                "id": "a194cfa7f18c426b823d876668325946",
                "ext": "mp4",
                "title": "小泽征尔音乐塾 音乐梦想无国界",
                "duration": 2173,
                "timestamp": 1369248264,
                "upload_date": "20130522",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            # var ids = ["id"]
            "url": "http://www.ncpa-classic.com/clt/more/416/index.shtml",
            "info_dict": {
                "id": "a8606119a4884588a79d81c02abecc16",
                "ext": "mp3",
                "title": "来自维也纳的新年贺礼",
                "description": "md5:f13764ae8dd484e84dd4b39d5bcba2a7",
                "duration": 1578,
                "uploader": "djy",
                "timestamp": 1482942419,
                "upload_date": "20161228",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://ent.cntv.cn/2016/01/18/ARTIjprSSJH8DryTVr5Bx8Wb160118.shtml",
            "only_matching": True,
        },
        {
            "url": "http://tv.cntv.cn/video/C39296/e0210d949f113ddfb38d31f00a4e5c44",
            "only_matching": True,
        },
        {
            "url": "http://english.cntv.cn/2016/09/03/VIDEhnkB5y9AgHyIEVphCEz1160903.shtml",
            "only_matching": True,
        },
        {
            "url": "http://tv.cctv.com/2016/09/07/VIDE5C1FnlX5bUywlrjhxXOV160907.shtml",
            "only_matching": True,
        },
        {
            "url": "http://tv.cntv.cn/video/C39296/95cfac44cabd3ddc4a9438780a4e5c44",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds the real (hex) video id in one of several JS
        # patterns; the URL id is only used for logging/download naming.
        video_id = self._search_regex(
            [
                r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
                r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)',
                r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
                r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
                r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
                r'var\s+ids\s*=\s*\[["\']([\da-fA-F]+)',
            ],
            webpage,
            "video id",
        )
        # Ask the VDN API for playback URLs and metadata.
        data = self._download_json(
            "http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do",
            video_id,
            query={
                "pid": video_id,
                "url": url,
                "idl": 32,
                "idlr": 32,
                "modifyed": "false",
            },
        )
        title = data["title"]
        formats = []
        video = data.get("video")
        if isinstance(video, dict):
            # Direct HTTP formats: low quality first so "quality" ranks them.
            for quality, chapters_key in enumerate(("lowChapters", "chapters")):
                video_url = try_get(
                    video, lambda x: x[chapters_key][0]["url"], compat_str
                )
                if video_url:
                    formats.append(
                        {
                            "url": video_url,
                            "format_id": "http",
                            "quality": quality,
                            "preference": -1,
                        }
                    )
        hls_url = try_get(data, lambda x: x["hls_url"], compat_str)
        if hls_url:
            # Drop the maxbr cap so the master playlist lists all bitrates.
            hls_url = re.sub(r"maxbr=\d+&?", "", hls_url)
            formats.extend(
                self._extract_m3u8_formats(
                    hls_url,
                    video_id,
                    "mp4",
                    entry_protocol="m3u8_native",
                    m3u8_id="hls",
                    fatal=False,
                )
            )
        self._sort_formats(formats)
        uploader = data.get("editer_name")
        description = self._html_search_meta("description", webpage, default=None)
        timestamp = unified_timestamp(data.get("f_pgmtime"))
        duration = float_or_none(try_get(video, lambda x: x["totalLength"]))
        return {
            "id": video_id,
            "title": title,
            "description": description,
            "uploader": uploader,
            "timestamp": timestamp,
            "duration": duration,
            "formats": formats,
        }
|
lib | idletask | # This file is part of MyPaint.
# Copyright (C) 2015 by Andrew Chadwick <a.t.chadwick@gmail.com>
# Copyright (C) 2009 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Non-threaded, prioritizable background processing."""
from __future__ import division, print_function
import collections
from lib.gibindings import GLib
class Processor(object):
    """Queue of low priority tasks for background processing

    Queued tasks are automatically processed in the main thread.
    They run when GTK is idle, or on demand.

    The default priority is much lower than gui event processing.
    """

    def __init__(self, priority=GLib.PRIORITY_LOW):
        """Initialize, specifying a priority"""
        object.__init__(self)
        self._queue = collections.deque()  # pending (func, args, kwargs) tuples
        self._priority = priority  # GLib priority for the idle callback
        self._idle_id = None  # GLib source id while processing is scheduled

    def has_work(self):
        """Return True if any tasks are still queued."""
        return len(self._queue) > 0

    def add_work(self, func, *args, **kwargs):
        """Adds work

        :param func: a task callable.
        :param *args: passed to func
        :param **kwargs: passed to func

        This starts the queue running if it isn't already.
        Each callable will be called with the given parameters
        until it returns false, at which point it's discarded.
        """
        # Schedule the idle handler only if it is not already installed.
        if not self._idle_id:
            self._idle_id = GLib.idle_add(
                self._process,
                priority=self._priority,
            )
        self._queue.append((func, args, kwargs))

    def finish_all(self):
        """Complete processing: finishes all queued tasks."""
        # Drive the idle handler synchronously until it reports no more work.
        while self._process():
            pass
        assert self._idle_id is None
        assert len(self._queue) == 0

    def iter_work(self):
        """Iterate across the queued tasks."""
        return iter(self._queue)

    def stop(self):
        """Immediately stop processing and clear the queue."""
        if self._idle_id:
            GLib.source_remove(self._idle_id)
            self._idle_id = None
        self._queue.clear()
        assert self._idle_id is None
        assert len(self._queue) == 0

    def _process(self):
        # Run one step of the frontmost task. The boolean return value also
        # tells GLib whether to keep the idle source installed.
        if not self._idle_id:
            # Nothing scheduled (e.g. stop() was called): report no work.
            return False
        if len(self._queue) > 0:
            func, args, kwargs = self._queue[0]
            # A task stays queued while it keeps returning a truthy value.
            func_done = bool(func(*args, **kwargs))
            if not func_done:
                self._queue.popleft()
        if len(self._queue) == 0:
            # Queue drained: forget the source id so add_work reschedules.
            self._idle_id = None
        return bool(self._queue)
|
language | generate_translations | #!/usr/bin/python3
"""
@file
@brief This file updates the OpenShot.POT (language translation template) by scanning all source files.
@author Jonathan Thomas <jonathan@openshot.org>
This file helps you generate the POT file that contains all of the translatable
strings / text in OpenShot. Because some of our text is in custom XML files,
the xgettext command can't correctly generate the POT file. Thus... the
existence of this file. =)
Command to create the individual language PO files (Ascii files)
$ msginit --input=OpenShot.pot --locale=fr_FR
$ msginit --input=OpenShot.pot --locale=es
Command to update the PO files (if text is added or changed)
$ msgmerge en_US.po OpenShot.pot -U
$ msgmerge es.po OpenShot.pot -U
Command to compile the Ascii PO files into binary MO files
$ msgfmt en_US.po --output-file=en_US/LC_MESSAGES/OpenShot.mo
$ msgfmt es.po --output-file=es/LC_MESSAGES/OpenShot.mo
Command to compile all PO files in a folder
$ find -iname "*.po" -exec msgfmt {} -o {}.mo \;
Command to combine the 2 pot files into 1 file
$ msgcat ~/openshot/locale/OpenShot/OpenShot_source.pot ~/openshot/openshot/locale/OpenShot/OpenShot_glade.pot -o ~/openshot/main/locale/OpenShot/OpenShot.pot
@section LICENSE
Copyright (c) 2008-2018 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import json
import os
import re
import shutil
import subprocess
import sys
# Try to get the security-patched XML functions from defusedxml
try:
    from defusedxml import minidom as xml
except ImportError:
    from xml.dom import minidom as xml
import openshot
# Get the absolute path of this project
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if path not in sys.path:
    sys.path.append(path)
import classes.info as info
from classes.effect_init import effect_options
from classes.logger import log
# get the path of the main OpenShot folder
# (this script lives in the language subfolder; its parent is the app root)
language_folder_path = os.path.dirname(os.path.abspath(__file__))
openshot_path = os.path.dirname(language_folder_path)
# Folders scanned below for translatable XML/file-name strings
effects_path = os.path.join(openshot_path, "effects")
blender_path = os.path.join(openshot_path, "blender")
transitions_path = os.path.join(openshot_path, "transitions")
titles_path = os.path.join(openshot_path, "titles")
export_path = os.path.join(openshot_path, "presets")
windows_ui_path = os.path.join(openshot_path, "windows", "ui")
log.info("-----------------------------------------------------")
log.info(" Creating temp POT files")
log.info("-----------------------------------------------------")
# create empty temp POT files
# (xgettext -j appends, so stale content must be wiped first)
temp_files = [
    "OpenShot_source.pot",
    "OpenShot_glade.pot",
    "OpenShot_effects.pot",
    "OpenShot_export.pot",
    "OpenShot_transitions.pot",
    "OpenShot_QtUi.pot",
]
for temp_file_name in temp_files:
    temp_file_path = os.path.join(language_folder_path, temp_file_name)
    if os.path.exists(temp_file_path):
        os.remove(temp_file_path)
    # create an empty file (open for writing, then immediately close)
    f = open(temp_file_path, "w")
    f.close()
log.info("-----------------------------------------------------")
log.info(" Using xgettext to generate .py POT files")
log.info("-----------------------------------------------------")
# Generate POT for Source Code strings (i.e. strings marked with a _("translate me"))
# NOTE: find(1) requires a literal "\;" to terminate -exec; the backslash must
# be doubled here because "\;" is an invalid Python escape sequence (it only
# worked by accident and raises a SyntaxWarning on modern interpreters).
subprocess.call(
    'find %s -iname "*.py" -exec xgettext -j -o %s --keyword=_ {} \\;'
    % (openshot_path, os.path.join(language_folder_path, "OpenShot_source.pot")),
    shell=True,
)
log.info("-----------------------------------------------------")
log.info(" Using Qt's lupdate to generate .ui POT files")
log.info("-----------------------------------------------------")
# Generate POT for Qt *.ui files (which require the lupdate command, and ts2po command)
# lupdate resolves *.ui relative to the CWD, so chdir into the ui folder first.
os.chdir(windows_ui_path)
subprocess.call(
    "lupdate *.ui -ts %s" % (os.path.join(language_folder_path, "OpenShot_QtUi.ts")),
    shell=True,
)
subprocess.call(
    "lupdate *.ui -ts %s" % (os.path.join(language_folder_path, "OpenShot_QtUi.pot")),
    shell=True,
)
os.chdir(language_folder_path)
# Rewrite the UI POT, removing msgctxt
# (msgctxt lines would otherwise split identical strings into separate entries)
output = open(os.path.join(language_folder_path, "clean.po"), "w")
for line in open(os.path.join(language_folder_path, "OpenShot_QtUi.pot"), "r"):
    if not line.startswith("msgctxt"):
        output.write(line)
# Overwrite original PO file
output.close()
shutil.copy(
    os.path.join(language_folder_path, "clean.po"),
    os.path.join(language_folder_path, "OpenShot_QtUi.pot"),
)
os.remove(os.path.join(language_folder_path, "clean.po"))
# Remove duplicates (if any found)
# --use-first keeps the first occurrence of each duplicated msgid
subprocess.call(
    "msguniq %s --use-first -o %s"
    % (
        os.path.join(language_folder_path, "OpenShot_QtUi.pot"),
        os.path.join(language_folder_path, "clean.po"),
    ),
    shell=True,
)
shutil.copy(
    os.path.join(language_folder_path, "clean.po"),
    os.path.join(language_folder_path, "OpenShot_QtUi.pot"),
)
os.remove(os.path.join(language_folder_path, "clean.po"))
log.info("-----------------------------------------------------")
log.info(" Updating auto created POT files to set CharSet")
log.info("-----------------------------------------------------")
# xgettext emits a placeholder "charset=CHARSET"; rewrite it to UTF-8
temp_files = ["OpenShot_source.pot", "OpenShot_glade.pot"]
for temp_file in temp_files:
    # get the entire text
    f = open(os.path.join(language_folder_path, temp_file), "r")
    # read entire text of file
    entire_source = f.read()
    f.close()
    # replace charset
    entire_source = entire_source.replace("charset=CHARSET", "charset=UTF-8")
    # Create Updated POT Output File
    if os.path.exists(os.path.join(language_folder_path, temp_file)):
        os.remove(os.path.join(language_folder_path, temp_file))
    f = open(os.path.join(language_folder_path, temp_file), "w")
    f.write(entire_source)
    f.close()
log.info("-----------------------------------------------------")
log.info(" Scanning custom XML files and finding text")
log.info("-----------------------------------------------------")
# Loop through the Effects XML
# effects_text maps: translatable string -> source location (used as the
# "#: location" reference comment when the POT file is written later)
effects_text = {}
for file in os.listdir(effects_path):
    if os.path.isfile(os.path.join(effects_path, file)):
        # load xml effect file
        full_file_path = os.path.join(effects_path, file)
        xmldoc = xml.parse(os.path.join(effects_path, file))
        # add text to list (first <title> and <description> of each effect)
        effects_text[
            xmldoc.getElementsByTagName("title")[0].childNodes[0].data
        ] = full_file_path
        effects_text[
            xmldoc.getElementsByTagName("description")[0].childNodes[0].data
        ] = full_file_path
        # get params
        params = xmldoc.getElementsByTagName("param")
        # Loop through params
        for param in params:
            # NOTE(review): attributes["title"] raises KeyError if a <param>
            # lacks a title attribute — presumably all effect params have
            # one; confirm against the XML schema.
            if param.attributes["title"]:
                effects_text[param.attributes["title"].value] = full_file_path
# Append on properties from libopenshot
objects = [
    openshot.Clip(),
    openshot.Bars(),
    openshot.Blur(),
    openshot.Brightness(),
    openshot.ChromaKey(),
    openshot.ColorShift(),
    openshot.Crop(),
    openshot.Deinterlace(),
    openshot.Hue(),
    openshot.Mask(),
    openshot.Negate(),
    openshot.Pixelate(),
    openshot.Saturation(),
    openshot.Shift(),
    openshot.Wave(),
]
# Loop through each libopenshot object, harvesting translatable property
# names and dropdown-choice labels from its JSON property description.
# (The original code named both loop variables "object", shadowing the
# builtin and clobbering the outer loop variable inside its own body.)
for clip_object in objects:
    props = json.loads(clip_object.PropertiesJSON(1))
    # Loop through props
    for prop in props.values():
        if "name" in prop:
            effects_text[prop["name"]] = "libopenshot (Clip Properties)"
        if "choices" in prop:
            for choice in prop["choices"]:
                effects_text[choice["name"]] = "libopenshot (Clip Properties)"
# Append Effect Init Data
# Loop through props declared in classes.effect_init (parameter titles and
# the display names of their enumerated values)
for effect in effect_options:
    for param in effect_options[effect]:
        if "title" in param:
            effects_text[param["title"]] = (
                "effect_init (Effect parameter for %s)" % effect
            )
        if "values" in param:
            for value in param["values"]:
                effects_text[value["name"]] = (
                    "effect_init (Effect parameter for %s)" % effect
                )
# Append Effect Meta Data
e = openshot.EffectInfo()
props = json.loads(e.Json())
# Loop through props (one dict per effect exposed by libopenshot)
for effect in props:
    if "name" in effect:
        effects_text[effect["name"]] = "libopenshot (Effect Metadata)"
    if "description" in effect:
        effects_text[effect["description"]] = "libopenshot (Effect Metadata)"
# Append Emoji Data
# emoji_text maps: translatable string -> human-readable origin note
emoji_text = {"translator-credits": "Translator credits to be translated by LaunchPad"}
emoji_metadata_path = os.path.join(
    info.PATH, "emojis", "data", "openmoji-optimized.json"
)
# Generic words that collide with other UI strings and must not be
# harvested from the emoji metadata
emoji_ignore_keys = (
    "Keyboard",
    "Sunset",
    "Key",
    "Right arrow",
    "Left arrow",
    "Bubbles",
    "Twitter",
    "Instagram",
    "Scale",
    "Simple",
    "Close",
    "Forward",
    "Copy",
    "Filter",
    "Details",
)
with open(emoji_metadata_path, "r", encoding="utf-8") as f:
    emoji_metadata = json.load(f)
# Loop through emoji metadata entries. The membership checks must happen
# BEFORE subscripting: the original computed emoji["annotation"] and
# emoji["group"] unconditionally, so its '"annotation" in emoji' guards
# were dead and a missing key would have raised KeyError.
for filename, emoji in emoji_metadata.items():
    if "annotation" in emoji:
        emoji_name = emoji["annotation"].capitalize()
        if emoji_name not in emoji_ignore_keys:
            emoji_text[emoji_name] = "Emoji Metadata (Displayed Name)"
    if "group" in emoji:
        # Top-level group name, e.g. "smileys-emotion" -> "Smileys"
        emoji_group = emoji["group"].split("-")[0].capitalize()
        # NOTE(review): the original checked membership in effects_text
        # (not emoji_text) — kept as-is; presumably this avoids duplicating
        # group names that already appear as effect strings. Confirm intent.
        if emoji_group not in effects_text and emoji_group not in emoji_ignore_keys:
            emoji_text[emoji_group] = "Emoji Metadata (Group Filter name)"
# Loop through the Blender XML (animated title templates)
blender_text = {
    "translator-credits": "Translator credits to be translated by LaunchPad"
}
# Words that clash with strings harvested elsewhere; skip them here
blender_ignore_keys = ("Title", "Alpha", "Blur")
for file in os.listdir(blender_path):
    if os.path.isfile(os.path.join(blender_path, file)):
        # load xml effect file
        full_file_path = os.path.join(blender_path, file)
        xmldoc = xml.parse(os.path.join(blender_path, file))
        # add text to list (template <title>)
        translation_key = xmldoc.getElementsByTagName("title")[0].childNodes[0].data
        if translation_key not in blender_ignore_keys:
            blender_text[translation_key] = full_file_path
        # get params
        params = xmldoc.getElementsByTagName("param")
        # Loop through params
        for param in params:
            # NOTE(review): attributes["title"] raises KeyError if the
            # attribute is absent — presumably all params carry a title.
            if param.attributes["title"]:
                translation_key = param.attributes["title"].value
                if translation_key not in blender_ignore_keys:
                    blender_text[param.attributes["title"].value] = full_file_path
# Loop through the Export Settings XML (export preset files)
export_text = {}
for file in os.listdir(export_path):
    if os.path.isfile(os.path.join(export_path, file)):
        # load xml export file
        full_file_path = os.path.join(export_path, file)
        xmldoc = xml.parse(os.path.join(export_path, file))
        # add text to list (preset <type> and <title>)
        export_text[
            xmldoc.getElementsByTagName("type")[0].childNodes[0].data
        ] = full_file_path
        export_text[
            xmldoc.getElementsByTagName("title")[0].childNodes[0].data
        ] = full_file_path
# Loop through application settings, harvesting visible titles, category
# names, and translatable dropdown values.
# (The original leaked the settings file handle and evaluated the same
# visibility condition twice; both branches are merged here.)
with open(
    os.path.join(info.PATH, "settings", "_default.settings"), "r"
) as settings_f:
    settings = json.load(settings_f)
category_names = []
for setting in settings:
    if "type" in setting and setting["type"] != "hidden":
        # Add visible settings
        export_text[setting["title"]] = "Settings for %s" % setting["setting"]
        # Add visible category names (only once per category)
        if setting["category"] not in category_names:
            export_text[setting["category"]] = (
                "Settings Category for %s" % setting["category"]
            )
            category_names.append(setting["category"])
    if setting.get("translate_values"):
        # Add translatable dropdown keys
        for value in setting.get("values", []):
            export_text[value["name"]] = "Settings for %s" % setting["setting"]
# Loop through transitions and add to POT file
# Transition display names are derived from file names ("fade_2.svg" ->
# "Fade %s"), not from XML content.
transitions_text = {
    "translator-credits": "Translator credits to be translated by LaunchPad"
}
transitions_ignore_keys = ("Common", "Fade")
for file in os.listdir(transitions_path):
    # load xml export file
    full_file_path = os.path.join(transitions_path, file)
    (fileBaseName, fileExtension) = os.path.splitext(file)
    # get transition name
    name = fileBaseName.replace("_", " ").capitalize()
    # add text to list
    if name not in transitions_ignore_keys:
        transitions_text[name] = full_file_path
    # Look in sub-folders
    # NOTE(review): os.listdir() is called on every entry — this assumes
    # the transitions folder contains only sub-directories; a plain file
    # here would raise NotADirectoryError. Confirm folder layout.
    for sub_file in os.listdir(full_file_path):
        # load xml export file
        full_subfile_path = os.path.join(full_file_path, sub_file)
        fileBaseName = os.path.splitext(sub_file)[0]
        # split the name into parts (looking for a number)
        suffix_number = None
        name_parts = fileBaseName.split("_")
        if name_parts[-1].isdigit():
            suffix_number = name_parts[-1]
        # get transition name
        name = fileBaseName.replace("_", " ").capitalize()
        # replace suffix number with placeholder (if any), so "Fade 2"
        # and "Fade 3" share the single translatable entry "Fade %s"
        if suffix_number:
            name = name.replace(suffix_number, "%s")
        # add text to list
        if name not in transitions_ignore_keys:
            transitions_text[name] = full_subfile_path
# Loop through titles and add to POT file
# NOTE: title names are appended into transitions_text, so they end up in
# OpenShot_transitions.pot alongside the transition names.
for sub_file in os.listdir(titles_path):
    # load xml export file
    full_subfile_path = os.path.join(titles_path, sub_file)
    fileBaseName = os.path.splitext(sub_file)[0]
    # split the name into parts (looking for a number)
    suffix_number = None
    name_parts = fileBaseName.split("_")
    if name_parts[-1].isdigit():
        suffix_number = name_parts[-1]
    # get transition name
    name = fileBaseName.replace("_", " ").capitalize()
    # replace suffix number with placeholder (if any)
    if suffix_number:
        name = name.replace(suffix_number, "%s")
    # add text to list
    transitions_text[name] = full_subfile_path
log.info("-----------------------------------------------------")
log.info(" Creating the custom XML POT files")
log.info("-----------------------------------------------------")
# Header shared by every generated POT file (standard gettext preamble:
# comment banner, empty msgid, then the Project-Id metadata msgstr).
header_lines = [
    "# OpenShot Video Editor POT Template File.\n",
    "# Copyright (C) 2008-2018 OpenShot Studios, LLC\n",
    "# This file is distributed under the same license as OpenShot.\n",
    "# Jonathan Thomas <Jonathan.Oomph@gmail.com>, 2018.\n",
    "#\n",
    "#, fuzzy\n",
    'msgid ""\n',
    'msgstr ""\n',
    '"Project-Id-Version: OpenShot Video Editor (version: %s)\\n"\n' % info.VERSION,
    '"Report-Msgid-Bugs-To: Jonathan Thomas <Jonathan.Oomph@gmail.com>\\n"\n',
    '"POT-Creation-Date: %s\\n"\n' % datetime.datetime.now(),
    '"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n',
    '"Last-Translator: Jonathan Thomas <Jonathan.Oomph@gmail.com>\\n"\n',
    '"Language-Team: https://translations.launchpad.net/+groups/launchpad-translators\\n"\n',
    '"MIME-Version: 1.0\\n"\n',
    '"Content-Type: text/plain; charset=UTF-8\\n"\n',
    '"Content-Transfer-Encoding: 8bit\\n"\n',
]
header_text = "".join(header_lines)
# Create POT files for the custom text (from our XML files).
# Each entry becomes a standard gettext stanza:
#   #: <source location>
#   msgid "<string>"
#   msgstr ""
temp_files = [
    ["OpenShot_effects.pot", effects_text],
    ["OpenShot_export.pot", export_text],
    ["OpenShot_transitions.pot", transitions_text],
    ["OpenShot_emojis.pot", emoji_text],
    ["OpenShot_blender.pot", blender_text],
]
for temp_file, text_dict in temp_files:
    with open(temp_file, "w") as pot_file:
        # write header
        pot_file.write(header_text)
        # one stanza per non-empty translatable string
        for msg, origin in text_dict.items():
            if msg:
                pot_file.write("\n")
                pot_file.write("#: %s\n" % origin)
                pot_file.write('msgid "%s"\n' % msg)
                pot_file.write('msgstr ""\n')
log.info("-----------------------------------------------------")
log.info(" Combine all temp POT files using msgcat command ")
log.info(" (this removes dupes) ")
log.info("-----------------------------------------------------")
# POT files to be concatenated into the single OpenShot.pot
temp_files = [
    "OpenShot_source.pot",
    "OpenShot_glade.pot",
    "OpenShot_effects.pot",
    "OpenShot_export.pot",
    "OpenShot_QtUi.pot",
]
# Build the msgcat command line: one input path per temp file
command = "msgcat"
for temp_file in temp_files:
    # append files
    command = command + " " + os.path.join(language_folder_path, temp_file)
command = (
    command + " -o " + os.path.join(language_folder_path, "OpenShot", "OpenShot.pot")
)
log.info(command)
# merge all 4 temp POT files
subprocess.call(command, shell=True)
log.info("-----------------------------------------------------")
log.info(" Create FINAL POT File from all temp POT files ")
log.info("-----------------------------------------------------")
# get the entire text of OpenShot.POT (the msgcat output) BEFORE it is
# deleted and rewritten below — the read must happen first
f = open(os.path.join(language_folder_path, "OpenShot", "OpenShot.pot"), "r")
# read entire text of file
entire_source = f.read()
f.close()
# Create Final POT Output File (delete and recreate with our own header)
if os.path.exists(os.path.join(language_folder_path, "OpenShot", "OpenShot.pot")):
    os.remove(os.path.join(language_folder_path, "OpenShot", "OpenShot.pot"))
final = open(os.path.join(language_folder_path, "OpenShot", "OpenShot.pot"), "w")
final.write(header_text)
final.write("\n")
# Move transitions POT file to final location
# (transitions/emojis/blender templates stay as separate POT files)
if os.path.exists(os.path.join(language_folder_path, "OpenShot_transitions.pot")):
    os.rename(
        os.path.join(language_folder_path, "OpenShot_transitions.pot"),
        os.path.join(language_folder_path, "OpenShot", "OpenShot_transitions.pot"),
    )
# Move emoji POT file to final location
if os.path.exists(os.path.join(language_folder_path, "OpenShot_emojis.pot")):
    os.rename(
        os.path.join(language_folder_path, "OpenShot_emojis.pot"),
        os.path.join(language_folder_path, "OpenShot", "OpenShot_emojis.pot"),
    )
# Move blender POT file to final location
if os.path.exists(os.path.join(language_folder_path, "OpenShot_blender.pot")):
    os.rename(
        os.path.join(language_folder_path, "OpenShot_blender.pot"),
        os.path.join(language_folder_path, "OpenShot", "OpenShot_blender.pot"),
    )
# Trim the beginning off of each POT file: everything before the first
# "#: " reference comment is the msgcat header, replaced by header_text
start_pos = entire_source.find("#: ")
trimmed_source = entire_source[start_pos:]
# Add to Final POT File
final.write(trimmed_source)
final.write("\n")
# Close final POT file
final.close()
log.info("-----------------------------------------------------")
log.info(" Remove all temp POT files ")
log.info("-----------------------------------------------------")
# Delete all 4 temp files (plus the intermediate Qt .ts file)
temp_files = [
    "OpenShot_source.pot",
    "OpenShot_glade.pot",
    "OpenShot_effects.pot",
    "OpenShot_export.pot",
    "OpenShot_transitions.pot",
    "OpenShot_QtUi.pot",
    "OpenShot_QtUi.ts",
]
for temp_file_name in temp_files:
    temp_file_path = os.path.join(language_folder_path, temp_file_name)
    if os.path.exists(temp_file_path):
        os.remove(temp_file_path)
# output success
log.info("-----------------------------------------------------")
log.info(" The OpenShot.pot file has been successfully created ")
log.info(" with all text in OpenShot.")
log.info("")
log.info(" Checking for duplicate keys...")
log.info("-----------------------------------------------------")
# Find any duplicate translations between our 4 template files
# If these duplicates are translated differently, we will end up
# with conflicts, and both translations will be combined incorrectly
all_strings = {}
for pot_file in [
    "OpenShot.pot",
    "OpenShot_transitions.pot",
    "OpenShot_blender.pot",
    "OpenShot_emojis.pot",
]:
    with open(os.path.join(language_folder_path, "OpenShot", pot_file)) as f:
        data = f.read()
    # Each POT entry starts with a line of the form: msgid "..."
    for key in re.findall('^msgid "(.*)"', data, re.MULTILINE):
        if key not in all_strings:
            # First sighting: remember which file it came from
            all_strings[key] = "%s | %s" % (key, pot_file)
        elif key and key not in ("translator-credits",):
            # Same msgid appears in more than one POT file: report it
            log.info(
                " ERROR: Duplicate key found: %s::%s" % (pot_file, all_strings[key])
            )
|
femobjects | constraint_currentdensity | # ***************************************************************************
# * Copyright (c) 2023 Uwe Stöhr <uwestoehr@lyx.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM constraint current density document object"
__author__ = "Uwe Stöhr"
__url__ = "https://www.freecad.org"
## @package constraint_currentdensity
# \ingroup FEM
# \brief constraint current density object
from . import base_fempythonobject
class ConstraintCurrentDensity(base_fempythonobject.BaseFemPythonObject):
    """FreeCAD FEM document object for a current density constraint.

    Holds the real and imaginary parts of the current density vector
    (x, y and z components) plus one bool per component that marks it
    as disabled. Property creation is data-driven instead of the twelve
    near-identical hand-written stanzas it replaces; the property names,
    creation order, descriptions and defaults are unchanged.
    """

    Type = "Fem::ConstraintCurrentDensity"

    # (property-name infix, human-readable label) for the two parts
    _PARTS = (("re", "Real"), ("im", "Imaginary"))
    # (component index used in the property name, axis letter used in
    # the property description)
    _COMPONENTS = ((1, "x"), (2, "y"), (3, "z"))

    def __init__(self, obj):
        super(ConstraintCurrentDensity, self).__init__(obj)
        self.add_properties(obj)

    def onDocumentRestored(self, obj):
        # Re-add any properties missing from documents saved by older
        # versions (add_properties is a no-op for existing properties).
        self.add_properties(obj)

    def add_properties(self, obj):
        """Add all constraint properties to obj if not already present."""
        # value properties: CurrentDensity_{re,im}_{1,2,3}
        for prefix, label in self._PARTS:
            for index, axis in self._COMPONENTS:
                name = "CurrentDensity_%s_%s" % (prefix, index)
                if not hasattr(obj, name):
                    obj.addProperty(
                        "App::PropertyCurrentDensity",
                        name,
                        "Vector Potential",
                        "%s part of current density %s-component" % (label, axis),
                    )
                    setattr(obj, name, "0 A/m^2")
        # now the enable bools: CurrentDensity_{re,im}_{1,2,3}_Disabled
        # (all disabled by default, matching the previous behavior)
        for prefix, _label in self._PARTS:
            for index, _axis in self._COMPONENTS:
                name = "CurrentDensity_%s_%s_Disabled" % (prefix, index)
                if not hasattr(obj, name):
                    obj.addProperty(
                        "App::PropertyBool",
                        name,
                        "Vector Potential",
                        "",
                    )
                    setattr(obj, name, True)
|
Arch | ArchReference | # ***************************************************************************
# * Copyright (c) 2018 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD Arch External Reference"
__author__ = "Yorik van Havre"
__url__ = "https://www.freecad.org"
import os
import re
import zipfile
import FreeCAD
# GUI-only imports; in console mode provide no-op stand-ins so the
# translation markers below still work without Qt
if FreeCAD.GuiUp:
    import FreeCADGui
    from draftutils.translate import translate
    from PySide import QtCore, QtGui
    from PySide.QtCore import QT_TRANSLATE_NOOP
else:
    # \cond
    def translate(ctxt, txt):
        # Console fallback: return the text untranslated
        return txt
    def QT_TRANSLATE_NOOP(ctxt, txt):
        # Console fallback: marker only, returns the text unchanged
        return txt
    # \endcond
## @package ArchReference
# \ingroup ARCH
# \brief The Reference object and tools
#
# This module provides tools to build Reference objects.
# References can take a shape from a Part-based object in
# another file.
def makeReference(filepath=None, partname=None, name=None):
    """makeReference([filepath],[partname],[name]): Creates an Arch Reference object.

    filepath: path of the external FCStd file to reference (optional)
    partname: internal name of the Part-based object inside that file (optional)
    name: label for the new object; defaults to a translated "External Reference"

    Returns the new document object, or None if no document is active.
    """
    if not FreeCAD.ActiveDocument:
        FreeCAD.Console.PrintError("No active document. Aborting\n")
        return
    obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "ArchReference")
    obj.Label = name if name else translate("Arch", "External Reference")
    ArchReference(obj)
    if FreeCAD.GuiUp:
        ViewProviderArchReference(obj.ViewObject)
    if filepath:
        obj.File = filepath
    if partname:
        obj.Part = partname
    import Draft
    Draft.select(obj)
    return obj
class ArchReference:
"The Arch Reference object"
def __init__(self, obj):
obj.Proxy = self
ArchReference.setProperties(self, obj)
self.Type = "Reference"
self.reload = True
def setProperties(self, obj):
pl = obj.PropertiesList
if not "File" in pl:
obj.addProperty(
"App::PropertyFile",
"File",
"Reference",
QT_TRANSLATE_NOOP(
"App::Property", "The base file this component is built upon"
),
)
if not "Part" in pl:
obj.addProperty(
"App::PropertyString",
"Part",
"Reference",
QT_TRANSLATE_NOOP(
"App::Property", "The part to use from the base file"
),
)
if not "ReferenceMode" in pl:
obj.addProperty(
"App::PropertyEnumeration",
"ReferenceMode",
"Reference",
QT_TRANSLATE_NOOP(
"App::Property",
"The way the referenced objects are included in the current document. 'Normal' includes the shape, 'Transient' discards the shape when the object is switched off (smaller filesize), 'Lightweight' does not import the shape but only the OpenInventor representation",
),
)
obj.ReferenceMode = ["Normal", "Transient", "Lightweight"]
if "TransientReference" in pl:
if obj.TransientReference:
obj.ReferenceMode = "Transient"
obj.removeProperty("TransientReference")
FreeCAD.Console.PrintMessage(
"Upgrading "
+ obj.Label
+ " TransientReference property to ReferenceMode\n"
)
if not "FuseArch" in pl:
obj.addProperty(
"App::PropertyBool",
"FuseArch",
"Reference",
QT_TRANSLATE_NOOP("App::Property", "Fuse objects of same material"),
)
self.Type = "Reference"
def onDocumentRestored(self, obj):
ArchReference.setProperties(self, obj)
self.reload = False
if obj.ReferenceMode == "Lightweight":
if obj.ViewObject and obj.ViewObject.Proxy:
obj.ViewObject.Proxy.loadInventor(obj)
def dumps(self):
return None
def loads(self, state):
return None
def onChanged(self, obj, prop):
if prop in ["File", "Part"]:
self.reload = True
elif prop == "ReferenceMode":
if obj.ReferenceMode == "Normal":
if obj.ViewObject and obj.ViewObject.Proxy:
obj.ViewObject.Proxy.unloadInventor(obj)
if (not obj.Shape) or obj.Shape.isNull():
self.reload = True
obj.touch()
elif obj.ReferenceMode == "Transient":
if obj.ViewObject and obj.ViewObject.Proxy:
obj.ViewObject.Proxy.unloadInventor(obj)
self.reload = False
elif obj.ReferenceMode == "Lightweight":
self.reload = False
import Part
pl = obj.Placement
obj.Shape = Part.Shape()
obj.Placement = pl
if obj.ViewObject and obj.ViewObject.Proxy:
obj.ViewObject.Proxy.loadInventor(obj)
def execute(self, obj):
pl = obj.Placement
filename = self.getFile(obj)
if (
filename
and obj.Part
and self.reload
and obj.ReferenceMode in ["Normal", "Transient"]
):
self.parts = self.getPartsList(obj)
if self.parts:
zdoc = zipfile.ZipFile(filename)
if zdoc:
if obj.Part in self.parts:
if self.parts[obj.Part][1] in zdoc.namelist():
f = zdoc.open(self.parts[obj.Part][1])
shapedata = f.read()
f.close()
shapedata = shapedata.decode("utf8")
shape = self.cleanShape(
shapedata, obj, self.parts[obj.Part][2]
)
obj.Shape = shape
if not pl.isIdentity():
obj.Placement = pl
else:
print("Part not found in file")
self.reload = False
def cleanShape(self, shapedata, obj, materials):
"cleans the imported shape"
import Part
shape = Part.Shape()
shape.importBrepFromString(shapedata)
if obj.FuseArch and materials:
# separate lone edges
shapes = []
for edge in shape.Edges:
found = False
for solid in shape.Solids:
for soledge in solid.Edges:
if edge.hashCode() == soledge.hashCode():
found = True
break
if found:
break
if found:
break
else:
shapes.append(edge)
print("solids:", len(shape.Solids), "mattable:", materials)
for key, solindexes in materials.items():
if key == "Undefined":
# do not join objects with no defined material
for solindex in [int(i) for i in solindexes.split(",")]:
shapes.append(shape.Solids[solindex])
else:
fusion = None
for solindex in [int(i) for i in solindexes.split(",")]:
if not fusion:
fusion = shape.Solids[solindex]
else:
fusion = fusion.fuse(shape.Solids[solindex])
if fusion:
shapes.append(fusion)
shape = Part.makeCompound(shapes)
try:
shape = shape.removeSplitter()
except Exception:
print(obj.Label, ": error removing splitter")
return shape
def getFile(self, obj, filename=None):
"gets a valid file, if possible"
if not filename:
filename = obj.File
if not filename:
return None
if not filename.lower().endswith(".fcstd"):
return None
if not os.path.exists(filename):
# search for the file in the current directory if not found
basename = os.path.basename(filename)
currentdir = os.path.dirname(obj.Document.FileName)
altfile = os.path.join(currentdir, basename)
if altfile == obj.Document.FileName:
return None
elif os.path.exists(altfile):
return altfile
else:
# search for subpaths in current folder
altfile = None
subdirs = self.splitall(os.path.dirname(filename))
for i in range(len(subdirs)):
subpath = [currentdir] + subdirs[-i:] + [basename]
altfile = os.path.join(*subpath)
if os.path.exists(altfile):
return altfile
return None
return filename
def getPartsList(self, obj, filename=None):
"returns a list of Part-based objects in a FCStd file"
parts = {}
materials = {}
filename = self.getFile(obj, filename)
if not filename:
return parts
zdoc = zipfile.ZipFile(filename)
with zdoc.open("Document.xml") as docf:
name = None
label = None
part = None
materials = {}
writemode = False
for line in docf:
line = line.decode("utf8")
if "<Object name=" in line:
n = re.findall('name="(.*?)"', line)
if n:
name = n[0]
elif '<Property name="Label"' in line:
writemode = True
elif writemode and "<String value=" in line:
n = re.findall('value="(.*?)"', line)
if n:
label = n[0]
writemode = False
elif '<Property name="Shape" type="Part::PropertyPartShape"' in line:
writemode = True
elif writemode and "<Part file=" in line:
n = re.findall('file="(.*?)"', line)
if n:
part = n[0]
writemode = False
elif '<Property name="MaterialsTable" type="App::PropertyMap"' in line:
writemode = True
elif writemode and "<Item key=" in line:
n = re.findall('key="(.*?)"', line)
v = re.findall('value="(.*?)"', line)
if n and v:
materials[n[0]] = v[0]
elif writemode and "</Map>" in line:
writemode = False
elif "</Object>" in line:
if name and label and part:
parts[name] = [label, part, materials]
name = None
label = None
part = None
materials = {}
writemode = False
return parts
    def getColors(self, obj):
        """Return the DiffuseColor list of the referenced object, or None.

        Scans GuiDocument.xml inside the linked FCStd zip to find the
        ColorList binary file of the referenced part, then decodes it into
        a list of (r, g, b, a) float tuples in the 0..1 range.
        """
        filename = self.getFile(obj)
        if not filename:
            return None
        part = obj.Part
        if not obj.Part:
            return None
        zdoc = zipfile.ZipFile(filename)
        if not "GuiDocument.xml" in zdoc.namelist():
            return None
        colorfile = None
        with zdoc.open("GuiDocument.xml") as docf:
            writemode1 = False  # inside the view provider of our part
            writemode2 = False  # inside its DiffuseColor property
            for line in docf:
                line = line.decode("utf8")
                if ("<ViewProvider name=" in line) and (part in line):
                    writemode1 = True
                elif writemode1 and ('<Property name="DiffuseColor"' in line):
                    writemode1 = False
                    writemode2 = True
                elif writemode2 and ("<ColorList file=" in line):
                    n = re.findall('file="(.*?)"', line)
                    if n:
                        colorfile = n[0]
                    break
        if not colorfile:
            return None
        if not colorfile in zdoc.namelist():
            return None
        colors = []
        cf = zdoc.open(colorfile)
        buf = cf.read()
        cf.close()
        # Each color is one packed 4-byte word. Word 0 is skipped
        # (presumably a count/header word — TODO confirm the file format),
        # and the channel bytes are stored in reverse (a, b, g, r) order.
        for i in range(1, int(len(buf) / 4)):
            colors.append(
                (
                    buf[i * 4 + 3] / 255.0,
                    buf[i * 4 + 2] / 255.0,
                    buf[i * 4 + 1] / 255.0,
                    buf[i * 4] / 255.0,
                )
            )
        if colors:
            return colors
        return None
def splitall(self, path):
"splits a path between its components"
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
class ViewProviderArchReference:
    """A View Provider for the Arch Reference object.

    Besides standard display duties, it polls the linked file for changes
    (via a QTimer started in attach()) and keeps the per-face colors in
    sync with the ones stored in the linked document.
    """

    def __init__(self, vobj):
        vobj.Proxy = self
        self.setProperties(vobj)

    def setProperties(self, vobj):
        """Create the view properties of this object if they are missing."""
        pl = vobj.PropertiesList
        if not "TimeStamp" in pl:
            vobj.addProperty(
                "App::PropertyFloat",
                "TimeStamp",
                "Reference",
                QT_TRANSLATE_NOOP(
                    "App::Property", "The latest time stamp of the linked file"
                ),
            )
            # editor mode 2 hides the property from the property editor
            vobj.setEditorMode("TimeStamp", 2)
        if not "UpdateColors" in pl:
            vobj.addProperty(
                "App::PropertyBool",
                "UpdateColors",
                "Reference",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "If true, the colors from the linked file will be kept updated",
                ),
            )
            vobj.UpdateColors = True

    def getIcon(self):
        """Return the icon resource path of this view provider."""
        import Arch_rc

        return ":/icons/Arch_Reference.svg"

    def attach(self, vobj):
        """Start the periodic check of the linked file."""
        self.Object = vobj.Object
        # Check for file change every minute
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.checkChanges)
        s = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetInt(
            "ReferenceCheckInterval", 60
        )
        self.timer.start(1000 * s)

    def dumps(self):
        # nothing to serialize with the document
        return None

    def loads(self, state):
        # nothing to restore
        return None

    def updateData(self, obj, prop):
        """On shape changes, refresh the colors from the linked file."""
        if (
            (prop == "Shape")
            and hasattr(obj.ViewObject, "UpdateColors")
            and obj.ViewObject.UpdateColors
        ):
            if obj.Shape and not obj.Shape.isNull():
                colors = obj.Proxy.getColors(obj)
                if colors:
                    obj.ViewObject.DiffuseColor = colors
                    # delay the refresh until the GUI is idle
                    from DraftGui import todo

                    todo.delay(self.recolorize, obj.ViewObject)

    def recolorize(self, vobj):
        # re-assigning DiffuseColor to itself forces a visual refresh
        if (
            hasattr(vobj, "DiffuseColor")
            and hasattr(vobj, "UpdateColors")
            and vobj.UpdateColors
        ):
            vobj.DiffuseColor = vobj.DiffuseColor

    def checkChanges(self):
        """Check if the linked file has changed; if so flag a reload."""
        if hasattr(self, "Object") and self.Object:
            try:
                f = self.Object.File
            except ReferenceError:
                # the underlying object is gone: stop and discard the timer
                f = None
                if hasattr(self, "timer"):
                    self.timer.stop()
                    del self.timer
            if f:
                filename = self.Object.Proxy.getFile(self.Object)
                if filename:
                    st_mtime = os.stat(filename).st_mtime
                    if hasattr(self.Object.ViewObject, "TimeStamp"):
                        if self.Object.ViewObject.TimeStamp:
                            if self.Object.ViewObject.TimeStamp != st_mtime:
                                # mtime changed since last check: mark dirty
                                self.Object.Proxy.reload = True
                                self.Object.touch()
                        self.Object.ViewObject.TimeStamp = st_mtime

    def onChanged(self, vobj, prop):
        if prop == "ShapeColor":
            # prevent ShapeColor to override DiffuseColor
            if hasattr(vobj, "DiffuseColor") and hasattr(vobj, "UpdateColors"):
                if vobj.DiffuseColor and vobj.UpdateColors:
                    vobj.DiffuseColor = vobj.DiffuseColor
        elif prop == "Visibility":
            if vobj.Visibility:
                # becoming visible: make sure the shape is loaded
                if (not vobj.Object.Shape) or vobj.Object.Shape.isNull():
                    vobj.Object.Proxy.reload = True
                    vobj.Object.Proxy.execute(vobj.Object)
            else:
                # hidden transient references drop their shape (the
                # placement is preserved across the reset)
                if (
                    hasattr(vobj.Object, "ReferenceMode")
                    and vobj.Object.ReferenceMode == "Transient"
                ):
                    vobj.Object.Proxy.reload = False
                    import Part

                    pl = vobj.Object.Placement
                    vobj.Object.Shape = Part.Shape()
                    vobj.Object.Placement = pl

    def onDelete(self, obj, doc):
        """Stop the polling timer before this object is deleted."""
        if hasattr(self, "timer"):
            self.timer.stop()
            del self.timer
        return True

    def setEdit(self, vobj, mode):
        """Open the task panel when entering default edit mode."""
        if mode != 0:
            return None
        taskd = ArchReferenceTaskPanel(vobj.Object)
        FreeCADGui.Control.showDialog(taskd)
        return True

    def unsetEdit(self, vobj, mode):
        """Close the task panel and schedule a color refresh."""
        if mode != 0:
            return None
        FreeCADGui.Control.closeDialog()
        from DraftGui import todo

        todo.delay(vobj.Proxy.recolorize, vobj)
        return True

    def setupContextMenu(self, vobj, menu):
        """Add Edit / Reload / Open entries to the context menu."""
        actionEdit = QtGui.QAction(translate("Arch", "Edit"), menu)
        QtCore.QObject.connect(actionEdit, QtCore.SIGNAL("triggered()"), self.edit)
        menu.addAction(actionEdit)
        actionOnReload = QtGui.QAction(
            QtGui.QIcon(":/icons/view-refresh.svg"),
            translate("Arch", "Reload reference"),
            menu,
        )
        QtCore.QObject.connect(
            actionOnReload, QtCore.SIGNAL("triggered()"), self.onReload
        )
        menu.addAction(actionOnReload)
        actionOnOpen = QtGui.QAction(
            QtGui.QIcon(":/icons/document-open.svg"),
            translate("Arch", "Open reference"),
            menu,
        )
        QtCore.QObject.connect(actionOnOpen, QtCore.SIGNAL("triggered()"), self.onOpen)
        menu.addAction(actionOnOpen)

    def edit(self):
        FreeCADGui.ActiveDocument.setEdit(self.Object, 0)

    def onReload(self):
        """Reload the reference object."""
        if hasattr(self, "Object") and self.Object:
            self.Object.Proxy.reload = True
            self.Object.touch()
            FreeCAD.ActiveDocument.recompute()

    def onOpen(self):
        """Open the referenced file in FreeCAD."""
        if hasattr(self, "Object") and self.Object:
            if self.Object.File:
                FreeCAD.openDocument(self.Object.File)

    def loadInventor(self, obj):
        """Load an OpenInventor file and replace the root node of this object.

        The original display nodes are saved so unloadInventor() can
        restore them later.
        """
        # check inventor contents
        ivstring = self.getInventorString(obj)
        if not ivstring:
            FreeCAD.Console.PrintWarning(
                "Unable to get lightWeight node for object referenced in "
                + obj.Label
                + "\n"
            )
            return
        from pivy import coin

        inputnode = coin.SoInput()
        inputnode.setBuffer(ivstring)
        lwnode = coin.SoDB.readAll(inputnode)
        if not isinstance(lwnode, coin.SoSeparator):
            FreeCAD.Console.PrintError(
                "Invalid lightWeight node for object referenced in " + obj.Label + "\n"
            )
            return
        if lwnode.getNumChildren() < 2:
            FreeCAD.Console.PrintError(
                "Invalid lightWeight node for object referenced in " + obj.Label + "\n"
            )
            return
        flatlines = lwnode
        shaded = lwnode.getChild(0)
        wireframe = lwnode.getChild(1)
        # check node contents
        rootnode = obj.ViewObject.RootNode
        if rootnode.getNumChildren() < 3:
            FreeCAD.Console.PrintError("Invalid root node in " + obj.Label + "\n")
            return
        switch = rootnode.getChild(2)
        if switch.getNumChildren() != 4:
            FreeCAD.Console.PrintError("Invalid root node in " + obj.Label + "\n")
            return
        # keep a copy of the original nodes
        self.orig_flatlines = switch.getChild(0).copy()
        self.orig_shaded = switch.getChild(1).copy()
        self.orig_wireframe = switch.getChild(2).copy()
        # replace root node of object
        switch.replaceChild(0, flatlines)
        switch.replaceChild(1, shaded)
        switch.replaceChild(2, wireframe)

    def unloadInventor(self, obj):
        """Restore the original display nodes saved by loadInventor()."""
        if (not hasattr(self, "orig_flatlines")) or (not self.orig_flatlines):
            return
        if (not hasattr(self, "orig_shaded")) or (not self.orig_shaded):
            return
        if (not hasattr(self, "orig_wireframe")) or (not self.orig_wireframe):
            return
        # check node contents
        rootnode = obj.ViewObject.RootNode
        if rootnode.getNumChildren() < 3:
            FreeCAD.Console.PrintError("Invalid root node in " + obj.Label + "\n")
            return
        switch = rootnode.getChild(2)
        if switch.getNumChildren() != 4:
            FreeCAD.Console.PrintError("Invalid root node in " + obj.Label + "\n")
            return
        # replace root node of object
        switch.replaceChild(0, self.orig_flatlines)
        switch.replaceChild(1, self.orig_shaded)
        switch.replaceChild(2, self.orig_wireframe)
        # discard old content
        self.orig_flatlines = None
        self.orig_shaded = None
        self.orig_wireframe = None

    def getInventorString(self, obj):
        """Locate and load an iv file saved together with an object, if existing.

        Returns the Inventor buffer as a string, with the line width
        adjusted to the current view settings, or None.
        """
        filename = obj.Proxy.getFile(obj)
        if not filename:
            return None
        part = obj.Part
        if not obj.Part:
            return None
        zdoc = zipfile.ZipFile(filename)
        if not "Document.xml" in zdoc.namelist():
            return None
        ivfile = None
        with zdoc.open("Document.xml") as docf:
            writemode1 = False  # inside our object's element
            writemode2 = False  # inside its SavedInventor property
            for line in docf:
                line = line.decode("utf8")
                if ("<Object name=" in line) and (part in line):
                    writemode1 = True
                elif writemode1 and ('<Property name="SavedInventor"' in line):
                    writemode1 = False
                    writemode2 = True
                elif writemode2 and ("<FileIncluded file=" in line):
                    n = re.findall('file="(.*?)"', line)
                    if n:
                        ivfile = n[0]
                    break
        if not ivfile:
            return None
        if not ivfile in zdoc.namelist():
            return None
        f = zdoc.open(ivfile)
        buf = f.read()
        buf = buf.decode("utf8")
        f.close()
        # match the line width currently set on the view object
        buf = buf.replace(
            "lineWidth 2", "lineWidth " + str(int(obj.ViewObject.LineWidth))
        )
        return buf
class ArchReferenceTaskPanel:
    """The editmode TaskPanel for Reference objects.

    Lets the user pick the external FCStd file and the part inside it.
    """

    def __init__(self, obj):
        # obj: the Arch Reference document object being edited
        self.obj = obj
        self.filename = None
        self.form = QtGui.QWidget()
        self.form.setWindowTitle("External reference")
        layout = QtGui.QVBoxLayout(self.form)
        label1 = QtGui.QLabel("External file:")
        layout.addWidget(label1)
        self.fileButton = QtGui.QPushButton(self.form)
        self.openButton = QtGui.QPushButton(self.form)
        self.openButton.setText("Open")
        if not self.obj.File:
            self.openButton.setEnabled(False)
        l2 = QtGui.QHBoxLayout()
        layout.addLayout(l2)
        l2.addWidget(self.fileButton)
        l2.addWidget(self.openButton)
        label2 = QtGui.QLabel("Part to use:")
        layout.addWidget(label2)
        if self.obj.File:
            self.fileButton.setText(os.path.basename(self.obj.File))
        else:
            self.fileButton.setText("Choose file...")
        self.partCombo = QtGui.QComboBox(self.form)
        layout.addWidget(self.partCombo)
        # reuse the parts list if the proxy has already cached one
        if hasattr(self.obj.Proxy, "parts"):
            parts = self.obj.Proxy.parts
        else:
            parts = self.obj.Proxy.getPartsList(self.obj)
        sortedkeys = sorted(parts)
        for k in sortedkeys:
            # display label, store the internal name as item data
            self.partCombo.addItem(parts[k][0], k)
        if self.obj.Part:
            if self.obj.Part in sortedkeys:
                self.partCombo.setCurrentIndex(sortedkeys.index(self.obj.Part))
        QtCore.QObject.connect(
            self.fileButton, QtCore.SIGNAL("clicked()"), self.chooseFile
        )
        QtCore.QObject.connect(
            self.openButton, QtCore.SIGNAL("clicked()"), self.openFile
        )

    def accept(self):
        """Apply the chosen file/part to the object and close the dialog."""
        if self.filename:
            if self.filename != self.obj.File:
                self.obj.File = self.filename
                FreeCAD.ActiveDocument.recompute()
        if self.partCombo.currentText():
            i = self.partCombo.currentIndex()
            if self.partCombo.itemData(i) != self.obj.Part:
                self.obj.Part = self.partCombo.itemData(i)
                # only rename the object if it still has the default label
                if self.obj.Label == "External Reference":
                    self.obj.Label = self.partCombo.itemText(i)
                FreeCAD.ActiveDocument.recompute()
        FreeCADGui.ActiveDocument.resetEdit()
        return True

    def reject(self):
        """Discard pending changes and close the dialog."""
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.ActiveDocument.resetEdit()
        return True

    def chooseFile(self):
        """Pop up a file dialog to select the referenced FCStd file."""
        loc = QtCore.QDir.homePath()
        if self.obj.File:
            loc = os.path.dirname(self.obj.File)
        f = QtGui.QFileDialog.getOpenFileName(
            self.form, "Choose reference file", loc, "FreeCAD standard files (*.FCStd)"
        )
        # NOTE(review): getOpenFileName returns a (path, filter) tuple that
        # is truthy even when the dialog is cancelled, so self.filename can
        # become "" here — presumably harmless because accept() ignores an
        # empty filename, but worth confirming.
        if f:
            self.filename = f[0]
            self.fileButton.setText(os.path.basename(self.filename))
            parts = self.obj.Proxy.getPartsList(self.obj, self.filename)
            if parts:
                self.partCombo.clear()
                sortedkeys = sorted(parts)
                for k in sortedkeys:
                    self.partCombo.addItem(parts[k][0], k)
                if self.obj.Part:
                    if self.obj.Part in sortedkeys:
                        self.partCombo.setCurrentIndex(sortedkeys.index(self.obj.Part))

    def openFile(self):
        """Open the referenced file in FreeCAD and leave edit mode."""
        if self.obj.File:
            FreeCAD.openDocument(self.obj.File)
            FreeCADGui.Control.closeDialog()
            FreeCADGui.ActiveDocument.resetEdit()
class ArchReferenceCommand:
    """The Arch Reference command definition."""

    def GetResources(self):
        """Return the GUI resources (icon, texts, shortcut) of the command."""
        menutext = QtCore.QT_TRANSLATE_NOOP("Arch_Reference", "External reference")
        tooltip = QtCore.QT_TRANSLATE_NOOP(
            "Arch_Reference", "Creates an external reference object"
        )
        return {
            "Pixmap": "Arch_Reference",
            "MenuText": menutext,
            "Accel": "E, X",
            "ToolTip": tooltip,
        }

    def IsActive(self):
        """The command is available whenever a document is open."""
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        """Create a reference object in a transaction and open its editor."""
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.openTransaction(
            translate("Arch", "Create external reference")
        )
        FreeCADGui.addModule("Arch")
        FreeCADGui.addModule("Draft")
        FreeCADGui.doCommand("obj = Arch.makeReference()")
        FreeCADGui.doCommand("Draft.autogroup(obj)")
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCADGui.doCommand("obj.ViewObject.Document.setEdit(obj.ViewObject, 0)")
# Register the GUI command (only possible when the GUI is available)
if FreeCAD.GuiUp:
    FreeCADGui.addCommand("Arch_Reference", ArchReferenceCommand())
|
draftmake | make_sketch | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * Copyright (c) 2020 FreeCAD Developers *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides functions to create Sketch objects from Draft objects."""
## @package make_sketch
# \ingroup draftmake
# \brief Provides functions to create Sketch objects from Draft objects.
## \addtogroup draftmake
# @{
import math
import DraftGeomUtils
import draftutils.gui_utils as gui_utils
import draftutils.utils as utils
import DraftVecUtils
import FreeCAD as App
from draftutils.translate import translate
def make_sketch(
    objects_list,
    autoconstraints=False,
    addTo=None,
    delete=False,
    name="Sketch",
    radiusPrecision=-1,
    tol=1e-3,
):
    """make_sketch(objects_list, [autoconstraints], [addTo], [delete],
    [name], [radiusPrecision], [tol])

    Makes a Sketch objects_list with the given Draft objects.

    Parameters
    ----------
    objects_list: can be single or list of objects of Draft type objects,
        Part::Feature, Part.Shape, or mix of them.

    autoconstraints(False): if True, constraints will be automatically added to
        wire nodes, rectangles and circles.

    addTo(None) : if set to an existing sketch, geometry will be added to it
        instead of creating a new one.

    delete(False): if True, the original object will be deleted.
        If set to a string 'all' the object and all its linked object will be
        deleted.

    name('Sketch'): the name for the new sketch object.

    radiusPrecision(-1): If <0, disable radius constraint. If =0, add individual
        radius constraint. If >0, the radius will be rounded according to this
        precision, and 'Equal' constraint will be added to curve with equal
        radius within precision.

    tol(1e-3): Tolerance used to check if the shapes are planar and coplanar.
        Consider change to tol=-1 for a more accurate analysis.
    """
    if not App.ActiveDocument:
        App.Console.PrintError("No active document. Aborting\n")
        return

    import Part
    import Sketcher
    from Sketcher import Constraint

    # vertex position codes used by Sketcher's Coincident constraints
    start_point = 1
    end_point = 2
    middle_point = 3

    # fallback direction used when the geometry does not define a normal
    if App.GuiUp:
        v_dir = gui_utils.get_3d_view().getViewDirection()
    else:
        v_dir = App.Base.Vector(0, 0, -1)

    # lists to accumulate shapes with defined normal and undefined normal
    shape_norm_yes = list()
    shape_norm_no = list()

    if not isinstance(objects_list, (list, tuple)):
        objects_list = [objects_list]

    for obj in objects_list:
        if isinstance(obj, Part.Shape):
            shape = obj
        elif not hasattr(obj, "Shape"):
            App.Console.PrintError(translate("draft", "No shape found") + "\n")
            return None
        else:
            shape = obj.Shape

        if not DraftGeomUtils.is_planar(shape, tol):
            App.Console.PrintError(
                translate("draft", "All Shapes must be planar") + "\n"
            )
            return None

        if DraftGeomUtils.get_normal(shape, tol):
            shape_norm_yes.append(shape)
        else:
            shape_norm_no.append(shape)

    # shapes with a defined normal come first so shapes_list[0] can be
    # used as the coplanarity/normal reference below
    shapes_list = shape_norm_yes + shape_norm_no

    # test if all shapes are coplanar
    if len(shape_norm_yes) >= 1:
        for shape in shapes_list[1:]:
            if not DraftGeomUtils.are_coplanar(shapes_list[0], shape, tol):
                App.Console.PrintError(
                    translate("draft", "All Shapes must be coplanar") + "\n"
                )
                return None
        # define sketch normal
        normal = DraftGeomUtils.get_normal(shapes_list[0], tol)
    else:
        # suppose all geometries are straight lines or points
        points = [vertex.Point for shape in shapes_list for vertex in shape.Vertexes]
        if len(points) >= 2:
            poly = Part.makePolygon(points)
            if not DraftGeomUtils.is_planar(poly, tol):
                App.Console.PrintError(
                    translate("draft", "All Shapes must be coplanar") + "\n"
                )
                return None
            normal = DraftGeomUtils.get_normal(poly, tol)
            if not normal:
                # all points aligned: build a normal orthogonal to the line
                # inside the plane spanned by the line and the view direction
                poly_dir = poly.Edges[0].Curve.Direction
                normal = (v_dir - v_dir.dot(poly_dir) * poly_dir).normalize()
                normal = normal.negative()
        else:
            # only one point
            normal = v_dir.negative()

    if addTo:
        nobj = addTo
    else:
        nobj = App.ActiveDocument.addObject("Sketcher::SketchObject", name)

    # Collect constraints and add in one go to improve performance
    constraints = []
    radiuses = {}

    def addRadiusConstraint(edge):
        # Add a Radius or Equal constraint on the last added geometry,
        # depending on radiusPrecision (see the docstring). The KeyError
        # path registers the first curve seen with a given rounded radius.
        try:
            if radiusPrecision < 0:
                return
            if radiusPrecision == 0:
                constraints.append(
                    Constraint("Radius", nobj.GeometryCount - 1, edge.Curve.Radius)
                )
                return
            r = round(edge.Curve.Radius, radiusPrecision)
            constraints.append(Constraint("Equal", radiuses[r], nobj.GeometryCount - 1))
        except KeyError:
            radiuses[r] = nobj.GeometryCount - 1
            constraints.append(Constraint("Radius", nobj.GeometryCount - 1, r))
        except AttributeError:
            # edge has no Curve.Radius (straight line): nothing to do
            pass

    def convertBezier(edge):
        # Sketcher has no Bezier geometry: convert to an equivalent B-spline
        if DraftGeomUtils.geomType(edge) == "BezierCurve":
            return edge.Curve.toBSpline(
                edge.FirstParameter, edge.LastParameter
            ).toShape()
        else:
            return edge

    # place the sketch on the common plane of the input shapes
    axis = App.Vector(0, 0, 1).cross(normal)
    angle = DraftVecUtils.angle(normal, App.Vector(0, 0, 1)) * App.Units.Radian
    rotation = App.Rotation(axis, angle)
    point = shapes_list[0].Vertexes[0].Point
    base = App.Vector(normal)
    base.Length = point.dot(
        base.normalize()
    )  # See https://forum.freecad.org/viewtopic.php?f=22&t=69304#p601149
    nobj.Placement = App.Placement(base, rotation)

    for obj in objects_list:
        ok = False
        tp = utils.get_type(obj)
        if tp in ["Circle", "Ellipse"]:
            if obj.Shape.Edges:
                edge = obj.Shape.Edges[0]
                if len(edge.Vertexes) == 1:
                    # full circle/ellipse
                    newedge = DraftGeomUtils.orientEdge(edge, normal)
                    nobj.addGeometry(newedge)
                else:
                    # make new ArcOfCircle
                    circle = DraftGeomUtils.orientEdge(edge, normal)
                    first = math.radians(obj.FirstAngle)
                    last = math.radians(obj.LastAngle)
                    arc = Part.ArcOfCircle(circle, first, last)
                    nobj.addGeometry(arc)
                addRadiusConstraint(edge)
                ok = True
        elif tp in ["Wire", "Rectangle", "Polygon"] and obj.FilletRadius.Value == 0:
            if obj.Shape.Edges:
                for edge in obj.Shape.Edges:
                    nobj.addGeometry(DraftGeomUtils.orientEdge(edge, normal))
                if autoconstraints:
                    closed = tp in ["Rectangle", "Polygon"] or obj.Closed
                    last = nobj.GeometryCount
                    segs = list(range(last - len(obj.Shape.Edges), last))
                    # pair each segment with its successor (wrap if closed)
                    nexts = segs[1:] + ([segs[0]] if closed else [None])
                    for seg, next in zip(segs, nexts):
                        if next is not None:
                            constraints.append(
                                Constraint(
                                    "Coincident", seg, end_point, next, start_point
                                )
                            )
                        if DraftGeomUtils.isAligned(nobj.Geometry[seg], "x"):
                            constraints.append(Constraint("Vertical", seg))
                        elif DraftGeomUtils.isAligned(nobj.Geometry[seg], "y"):
                            constraints.append(Constraint("Horizontal", seg))
                ok = True
        elif tp == "BSpline":
            if obj.Shape.Edges:
                edge = DraftGeomUtils.orientEdge(obj.Shape.Edges[0], normal)
                nobj.addGeometry(edge)
                nobj.exposeInternalGeometry(nobj.GeometryCount - 1)
                ok = True
        elif tp == "BezCurve":
            if obj.Shape.Edges:
                for piece in obj.Shape.Edges:
                    bez = piece.Curve
                    bsp = bez.toBSpline(bez.FirstParameter, bez.LastParameter).toShape()
                    edge = DraftGeomUtils.orientEdge(bsp.Edges[0], normal)
                    nobj.addGeometry(edge)
                    nobj.exposeInternalGeometry(nobj.GeometryCount - 1)
                ok = True
            # TODO: set coincident constraint for vertexes in multi-edge bezier curve
        elif tp == "Point":
            shape = obj.Shape.copy()
            if angle:
                # undo the sketch rotation so the point lands correctly
                shape.rotate(App.Base.Vector(0, 0, 0), axis, -1 * angle)
            point = Part.Point(shape.Point)
            nobj.addGeometry(point)
            ok = True
        elif tp == "Shape" or hasattr(obj, "Shape"):
            shape = obj if tp == "Shape" else obj.Shape
            if not shape.Wires:
                for e in shape.Edges:
                    # unconnected edges
                    newedge = convertBezier(e)
                    nobj.addGeometry(
                        DraftGeomUtils.orientEdge(newedge, normal, make_arc=True)
                    )
                    addRadiusConstraint(newedge)
            if autoconstraints:
                for wire in shape.Wires:
                    last_count = nobj.GeometryCount
                    edges = wire.OrderedEdges
                    for edge in edges:
                        newedge = convertBezier(edge)
                        nobj.addGeometry(
                            DraftGeomUtils.orientEdge(newedge, normal, make_arc=True)
                        )
                        addRadiusConstraint(newedge)
                    # constrain consecutive geometries of the wire where
                    # their endpoints actually touch
                    for i, g in enumerate(nobj.Geometry[last_count:]):
                        if edges[i].Closed:
                            continue
                        seg = last_count + i
                        if DraftGeomUtils.isAligned(g, "x"):
                            constraints.append(Constraint("Vertical", seg))
                        elif DraftGeomUtils.isAligned(g, "y"):
                            constraints.append(Constraint("Horizontal", seg))
                        if seg == nobj.GeometryCount - 1:
                            # last segment: close the loop back to the first
                            if not wire.isClosed():
                                break
                            g2 = nobj.Geometry[last_count]
                            seg2 = last_count
                        else:
                            seg2 = seg + 1
                            g2 = nobj.Geometry[seg2]
                        end1 = g.value(g.LastParameter)
                        start2 = g2.value(g2.FirstParameter)
                        if DraftVecUtils.equals(end1, start2):
                            constraints.append(
                                Constraint(
                                    "Coincident", seg, end_point, seg2, start_point
                                )
                            )
                            continue
                        end2 = g2.value(g2.LastParameter)
                        start1 = g.value(g.FirstParameter)
                        if DraftVecUtils.equals(end2, start1):
                            constraints.append(
                                Constraint(
                                    "Coincident", seg, start_point, seg2, end_point
                                )
                            )
                        elif DraftVecUtils.equals(start1, start2):
                            constraints.append(
                                Constraint(
                                    "Coincident", seg, start_point, seg2, start_point
                                )
                            )
                        elif DraftVecUtils.equals(end1, end2):
                            constraints.append(
                                Constraint(
                                    "Coincident", seg, end_point, seg2, end_point
                                )
                            )
            else:
                for wire in shape.Wires:
                    for edge in wire.OrderedEdges:
                        newedge = convertBezier(edge)
                        nobj.addGeometry(
                            DraftGeomUtils.orientEdge(newedge, normal, make_arc=True)
                        )
            ok = True
        gui_utils.format_object(nobj, obj)
        if ok and delete and hasattr(obj, "Shape"):
            doc = obj.Document

            def delObj(obj):
                # refuse to delete objects that something else depends on
                if obj.InList:
                    App.Console.PrintWarning(
                        translate(
                            "draft",
                            "Cannot delete object {} with dependency".format(obj.Label),
                        )
                        + "\n"
                    )
                else:
                    doc.removeObject(obj.Name)

            try:
                if delete == "all":
                    # breadth-first deletion of the object and its children
                    objs = [obj]
                    while objs:
                        obj = objs[0]
                        objs = objs[1:] + obj.OutList
                        delObj(obj)
                else:
                    delObj(obj)
            except Exception as ex:
                App.Console.PrintWarning(
                    translate(
                        "draft", "Failed to delete object {}: {}".format(obj.Label, ex)
                    )
                    + "\n"
                )
    nobj.addConstraint(constraints)

    return nobj


# backwards-compatible alias
makeSketch = make_sketch
## @}
|
neubot | web100 | # neubot/web100.py
#
# Copyright (c) 2012
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <bassosimone@gmail.com>
#
# I wrote this python interface for web100 using libweb100 sources as
# documentation, so I inevitably translated portions of it from C to python.
# Below there's libweb100 copyright statement: this python adaptation is
# available under the same license.
#
# ======================================================================
# Copyright (c) 2001 Carnegie Mellon University,
# The Board of Trustees of the University of Illinois,
# and University Corporation for Atmospheric Research.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ======================================================================
#
""" Pure python interface to web100 """
#
# N.B. This implementation does not follow the model of the original libweb100
# C implementation, i.e. there is no web100 agent. I have just implemented
# the minimal set of features that I need to allow Neubot to snap at web100's
# variables.
#
import getopt
import logging
import os
import pprint
import struct
import sys
# Web100 variable type codes; translated from libweb100 sources (see the
# header comment), where these presumably mirror the WEB100_TYPE_* enum.
TYPES = (
    INTEGER,
    INTEGER32,
    INET_ADDRESS_IPV4,
    COUNTER32,
    GAUGE32,
    UNSIGNED32,
    TIME_TICKS,
    COUNTER64,
    INET_PORT_NUMBER,
    INET_ADDRESS,
    INET_ADDRESS_IPV6,
    STR32,
    OCTET,
) = range(13)

# Size in bytes of each type's binary encoding in the /proc snapshot files
SIZES = {
    COUNTER32: 4,
    COUNTER64: 8,
    GAUGE32: 4,
    INET_ADDRESS: 17,
    INET_ADDRESS_IPV4: 4,
    INET_ADDRESS_IPV6: 17,
    INET_PORT_NUMBER: 2,
    INTEGER32: 4,
    INTEGER: 4,
    OCTET: 1,
    STR32: 32,
    TIME_TICKS: 4,
    UNSIGNED32: 4,
}

# Decoder for each type, taking the raw bytes and returning a python value.
# Note: in struct '=' means native byte order, standard alignment, no padding
CONVERT = {
    COUNTER32: lambda raw: struct.unpack("=I", raw)[0],
    COUNTER64: lambda raw: struct.unpack("=Q", raw)[0],
    GAUGE32: lambda raw: struct.unpack("=I", raw)[0],
    INET_ADDRESS: lambda raw: struct.unpack("=17s", raw)[0],
    INET_ADDRESS_IPV4: lambda raw: struct.unpack("=I", raw)[0],
    INET_ADDRESS_IPV6: lambda raw: struct.unpack("=17s", raw)[0],
    INET_PORT_NUMBER: lambda raw: struct.unpack("=H", raw)[0],
    INTEGER32: lambda raw: struct.unpack("=I", raw)[0],
    INTEGER: lambda raw: struct.unpack("=I", raw)[0],
    OCTET: lambda raw: struct.unpack("=B", raw)[0],
    STR32: lambda raw: struct.unpack("=32s", raw)[0],
    TIME_TICKS: lambda raw: struct.unpack("=I", raw)[0],
    UNSIGNED32: lambda raw: struct.unpack("=I", raw)[0],
}

# Address type codes used by the *AddressType variables
ADDRTYPES = (ADDRTYPE_UNKNOWN, ADDRTYPE_IPV4, ADDRTYPE_IPV6, ADDRTYPE_DNS) = (
    0,
    1,
    2,
    16,
)
def _web100_init(path="/proc/web100/header"):
    """Parse the web100 header file (default: /proc/web100/header).

    Returns a dict mapping each group name (e.g. "/read") to a dict that
    maps variable names to (offset, type, size) tuples.

    Raises IOError when the file cannot be opened and RuntimeError when a
    variable's declared size does not match its declared type.
    """
    hdr, group = {}, ""
    # the with-statement guarantees the file is closed even when the
    # parsing below raises (the original leaked the handle in that case)
    with open(path, "r") as filep:
        for line in filep:
            line = line.strip()
            if not line:
                continue
            if line.startswith("/"):
                # a "/name" line opens a new variable group
                group = line
                hdr[group] = {}
                continue
            if not group:
                # ignore anything before the first group marker
                continue
            name, off, kind, size = line.split()
            if name.startswith("X_") or name.startswith("_"):  # XXX
                continue
            off, kind, size = int(off), int(kind), int(size)
            if kind not in TYPES or size != SIZES[kind]:
                # Bug fix: the message was previously built with a stray
                # comma, so RuntimeError carried a (format, name) tuple
                # instead of the formatted string.
                raise RuntimeError("web100: internal consistency error: %s" % name)
            hdr[group][name] = (off, kind, size)
    return hdr
def web100_init():
    """Load the web100 header table; returns {} when web100 is missing."""
    try:
        header = _web100_init()
    except IOError:
        logging.warning("web100: no information available", exc_info=1)
        header = {}
    return header
def web100_find_dirname(hdr, spec):
    """Find the /proc/web100/<dirname> entry with the given ascii spec.

    Returns the matching directory path, or "" when there is no header
    information or the match is not unique.
    """
    found = ""
    if not hdr:
        logging.warning("web100: no information available")
        return found
    candidates = []
    for entry in os.listdir("/proc/web100"):
        entry_path = os.sep.join(["/proc/web100", entry])
        if not os.path.isdir(entry_path):
            continue
        spec_path = os.sep.join([entry_path, "spec-ascii"])
        if not os.path.isfile(spec_path):
            continue
        content = _web100_readfile(spec_path)
        if not content:
            continue
        content = content.strip()
        # Work-around web100 kernel bug
        if ":::" in content:
            content = content.replace(":::", "::")
        if content == spec:
            candidates.append(entry_path)
    if len(candidates) == 1:
        found = candidates[0]
    elif len(candidates) > 1:
        logging.warning("web100: multiple matching entries")  # XXX
    return found
def web100_snap(hdr, dirname):
    """Take a snapshot of the standard web100 variables of *dirname*.

    Decodes the binary "read" file using the offsets/types from *hdr*
    and normalises the address fields; returns {} when no header
    information is available.
    """
    if not hdr:
        logging.warning("web100: no information available")
        return {}
    snapshot = {}
    raw = _web100_readfile(os.sep.join([dirname, "read"]))
    if raw:
        for varname, (off, kind, size) in hdr["/read"].items():
            snapshot[varname] = CONVERT[kind](raw[off : off + size])
        _web100_normalise_addr(snapshot, "LocalAddress", "LocalAddressType")
        _web100_normalise_addr(snapshot, "RemAddress", "LocalAddressType")
    return snapshot
def _web100_readfile(path):
"""Read the specified path in a robust way"""
# Web100 files may disappear at any time
try:
filep = open(path, "rb")
data = filep.read()
filep.close()
return data
except (KeyboardInterrupt, SystemExit):
raise
except:
return ""
# Hex encodings of the first 12 bytes of IPv4-mapped (::ffff:a.b.c.d)
# and IPv4-compatible (::a.b.c.d) IPv6 addresses (24 hex digits each)
IPV4_MAPPED = "00000000000000000000ffff"
IPV4_COMPAT = "000000000000000000000000"
def _web100_normalise_addr(result, value_name, addrtype_name):
    """Normalise IPv4 or IPv6 address.

    Rewrites result[value_name] as a lowercase hex string and updates
    result[addrtype_name]: IPv4-mapped and IPv4-compatible IPv6 addresses
    are downgraded to plain IPv4.

    Raises RuntimeError for unknown address types.
    """
    import binascii  # local import keeps this fix self-contained

    addrtype = result[addrtype_name]
    # Note: it seems the last byte of the address is unused
    if addrtype == ADDRTYPE_IPV4:
        # Bug fix: str.encode("hex") is Python 2 only; binascii.hexlify()
        # produces the same lowercase hex on both Python 2 and 3.
        value = binascii.hexlify(result[value_name][:4]).decode("ascii")
    elif addrtype == ADDRTYPE_IPV6:
        value = binascii.hexlify(result[value_name][:16]).decode("ascii")
        # Let IPv4-mapped and -compatible addresses look like IPv4.
        # Bug fix: these prefixes are 24 hex digits (12 bytes) long, but
        # the old code compared only value[:12], so this branch could never
        # fire; the embedded IPv4 address is the last 4 bytes (8 digits),
        # matching the 8-digit parsing done by __autocheck().
        if value[:24] in (IPV4_MAPPED, IPV4_COMPAT):
            value = value[24:32]
            addrtype = ADDRTYPE_IPV4
    else:
        raise RuntimeError("web100: invalid address type")
    result[addrtype_name] = addrtype
    result[value_name] = value
def __autocheck(hdr):
    """Autocheck this implementation.

    For every IPv4 connection under /proc/web100, rebuild the kernel's
    "a.b.c.d:port e.f.g.h:port" ascii spec from our own snapshot of the
    same connection and assert both strings are identical.
    """
    for dirname in os.listdir("/proc/web100"):
        dirpath = os.sep.join(["/proc/web100", dirname])
        if not os.path.isdir(dirpath):
            continue
        filepath = os.sep.join([dirpath, "spec-ascii"])
        ascii_spec = _web100_readfile(filepath)
        ascii_spec = ascii_spec.strip()
        if not ascii_spec:
            continue
        result = web100_snap(hdr, dirpath)
        if not result or result["LocalAddressType"] != ADDRTYPE_IPV4:
            continue
        # addresses come back hex-encoded (two digits per byte): convert
        # each byte to its decimal dotted-quad component
        local, remote = result["LocalAddress"], result["RemAddress"]
        xxx_spec = "%d.%d.%d.%d:%d %d.%d.%d.%d:%d" % (
            int(local[0:2], 16),
            int(local[2:4], 16),
            int(local[4:6], 16),
            int(local[6:8], 16),
            result["LocalPort"],
            int(remote[0:2], 16),
            int(remote[2:4], 16),
            int(remote[4:6], 16),
            int(remote[6:8], 16),
            result["RemPort"],
        )
        assert ascii_spec == xxx_spec
    return "web100: autocheck OK"
# Parsed once at import time; empty dict when /proc/web100 is unavailable
WEB100_HEADER = web100_init()
def main(args):
    """Command-line entry point.

    -a runs the autocheck, -s dumps a snapshot of one connection
    directory, -f resolves a spec to a directory; with no options the
    parsed header itself is printed.
    """
    usage = "usage: neubot web100 [-a] [-f spec] [-s dirname]"
    try:
        options, arguments = getopt.getopt(args[1:], "af:s:")
    except getopt.error:
        sys.exit(usage)
    if arguments:
        sys.exit(usage)

    autocheck, spec, dirname = 0, None, None
    for flag, flagvalue in options:
        if flag == "-a":
            autocheck = 1
        elif flag == "-f":
            spec = flagvalue
        elif flag == "-s":
            dirname = flagvalue

    hdr = WEB100_HEADER
    if autocheck:
        result = __autocheck(hdr)
    elif dirname:
        result = web100_snap(hdr, dirname)
    elif spec:
        result = web100_find_dirname(hdr, spec)
    else:
        result = hdr
    pprint.pprint(result)
# Allow running this module stand-alone for debugging
if __name__ == "__main__":
    main(sys.argv)
|
Code | Resistance | import collections
from Code import Util
class Resistance:
    """Persistent record keeper for 'resistance' training sessions.

    Results are stored in a Util.DicSQL database selected by *tipo*;
    inside it, results are grouped under a key derived from the current
    configuration (seconds per move, allowed points, optional max error
    per move), so each configuration keeps its own records.
    """

    def __init__(self, procesador, tipo):
        # Variables
        self.configuracion = procesador.configuracion
        self.tipo = tipo
        self.fichDB = self.configuracion.ficheroBoxing + tipo
        self.db = Util.DicSQL(self.fichDB)
        self.conf = self.db["CONFIG"]
        if self.conf is None:
            # defaults for a freshly created database
            self.conf = {"SEGUNDOS": 5, "PUNTOS": 100, "NIVELHECHO": 0, "MAXERROR": 0}
        self.liMotores = self.configuracion.comboMotoresCompleto()  # nombre, clave
        self.claveActual = self.calcClaveActual()
        self.dicActual = self.dameDicActual()

    def calcClaveActual(self):
        """Build the DB key matching the current configuration."""
        merr = self.maxerror()
        mas = "M%d" % merr if merr else ""
        return "S%dP%d%s" % (self.conf["SEGUNDOS"], self.conf["PUNTOS"], mas)

    def cambiaConfiguracion(self, segundos, puntos, maxerror):
        """Change the configuration, persist it, and switch record sets."""
        self.conf["SEGUNDOS"] = segundos
        self.conf["PUNTOS"] = puntos
        self.conf["MAXERROR"] = maxerror
        self.db["CONFIG"] = self.conf
        self.claveActual = self.calcClaveActual()
        self.dicActual = self.dameDicActual()

    def numEngines(self):
        """Number of available engines."""
        return len(self.liMotores)

    def dameEtiEngine(self, fila):
        """Display name of the engine at row *fila*."""
        return self.liMotores[fila][0]

    def dameClaveEngine(self, fila):
        """Internal key of the engine at row *fila*."""
        return self.liMotores[fila][1]

    def dameResultado(self, campo, numEngine):
        """Return (record date, record moves) for *campo*, or (None, None)."""
        engine = self.liMotores[numEngine][1]
        dicEngine = self.dicActual.get(engine, None)
        if dicEngine is None:
            return None, None
        recordFecha = dicEngine.get("RECORD_FECHA_%s" % campo, None)
        recordMovimientos = dicEngine.get("RECORD_MOVIMIENTOS_%s" % campo, None)
        return recordFecha, recordMovimientos

    def ponResultado(self, numEngine, clave, movimientos):
        """Register a result for an engine; returns True on a new record."""
        engine = self.liMotores[numEngine][1]
        dicEngine = self.dicActual.get(engine, collections.OrderedDict())
        historico = dicEngine.get("HISTORICO_%s" % clave, [])
        hoy = Util.hoy()
        historico.append((hoy, movimientos))
        # Bug fix: the history list was never written back, so on the first
        # result for a key the freshly created list was silently lost.
        dicEngine["HISTORICO_%s" % clave] = historico
        recordMovimientos = dicEngine.get("RECORD_MOVIMIENTOS_%s" % clave, 0)
        siRecord = movimientos > recordMovimientos
        if siRecord:
            dicEngine["RECORD_FECHA_%s" % clave] = hoy
            dicEngine["RECORD_MOVIMIENTOS_%s" % clave] = movimientos
        self.dicActual[engine] = dicEngine
        self.db[self.claveActual] = self.dicActual
        return siRecord

    def dameEti(self, fecha, moves):
        """Human-readable "date -> result" label.

        *moves* encodes the outcome: >2000 means won (+margin), >1000
        means draw (+margin), otherwise it is a plain move count.
        """
        if not fecha:
            return "-"
        if moves > 2000:
            mv = _("Won") + " %d" % (moves - 2000)
        elif moves > 1000:
            mv = _("Draw") + " %d" % (moves - 1000)
        else:
            mv = "%d %s" % (moves, _("Moves"))
        return "%s -> %s" % (Util.localDate(fecha), mv)

    def dameEtiRecord(self, campo, fila):
        """Label of the stored record for *campo* and engine row *fila*."""
        fecha, moves = self.dameResultado(campo, fila)
        return self.dameEti(fecha, moves)

    def dameDicActual(self):
        """Load the record set for the current configuration key."""
        dicActual = self.db[self.claveActual]
        if dicActual is None:
            dicActual = {}
        return dicActual

    def actual(self):
        """Current configuration as a (segundos, puntos, maxerror) tuple."""
        return self.conf["SEGUNDOS"], self.conf["PUNTOS"], self.conf.get("MAXERROR", 0)

    def rotuloActual(self):
        """Translated description of the current target configuration."""
        segundos, puntos, maxerror = self.actual()
        if maxerror:
            txt = _X(
                _(
                    "Target %1/%2/%3: withstand maximum moves against an engine,"
                    "<br> that thinks %1 second(s), without losing more than %2 points in total or %3 points in a single move."
                ),
                str(segundos),
                str(puntos),
                str(maxerror),
            )
        else:
            txt = _X(
                _(
                    "Target %1/%2: withstand maximum moves against an engine,<br> that thinks %1 second(s), without losing more than %2 points."
                ),
                str(segundos),
                str(puntos),
            )
        return txt

    def segundos(self):
        """Seconds the engine thinks per move."""
        return self.conf["SEGUNDOS"]

    def maxerror(self):
        """Maximum allowed points lost in a single move (0/None = disabled)."""
        return self.conf.get("MAXERROR")

    def borraRegistros(self, numEngine):
        """Delete all stored results of one engine under the current key."""
        engine = self.liMotores[numEngine][1]
        if engine in self.dicActual:
            del self.dicActual[engine]
            self.db[self.claveActual] = self.dicActual

    def cerrar(self):
        """Close the underlying database."""
        self.db.close()
|
digital | qa_binary_slicer_fb | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
from gnuradio import blocks, digital, gr, gr_unittest
class test_binary_slicer_fb(gr_unittest.TestCase):
    """QA test for digital.binary_slicer_fb (float symbols -> bits)."""

    def setUp(self):
        # Fixed seed so the injected noise is reproducible across runs.
        random.seed(0)
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_binary_slicer_fb(self):
        expected = (0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1)
        symbols = (-1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1)
        # add some noise
        noisy = [sym + (1 - random.random()) for sym in symbols]
        source = blocks.vector_source_f(noisy)
        slicer = digital.binary_slicer_fb()
        sink = blocks.vector_sink_b()
        self.tb.connect(source, slicer)
        self.tb.connect(slicer, sink)
        # run the graph and wait for it to finish
        self.tb.run()
        # fetch the contents of the sink and compare
        self.assertFloatTuplesAlmostEqual(expected, sink.data())
if __name__ == "__main__":
    # Run this QA case through GNU Radio's unittest wrapper.
    gr_unittest.run(test_binary_slicer_fb)
|
CloudSync | DiscrepanciesPresenter | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
from typing import Optional
from PyQt6.QtCore import QObject
from UM.Qt.QtApplication import QtApplication
from UM.Signal import Signal
from .SubscribedPackagesModel import SubscribedPackagesModel
class DiscrepanciesPresenter(QObject):
    """Shows a list of packages to be added or removed. The user can select which packages to (un)install. The user's
    choices are emitted on the `packageMutations` Signal.
    """
    def __init__(self, app: QtApplication) -> None:
        super().__init__()
        self.packageMutations = Signal()  # Emits SubscribedPackagesModel
        self._app = app
        self._package_manager = app.getPackageManager()
        # The QML dialog instance; created lazily in present().
        self._dialog: Optional[QObject] = None
        # QML file shown by present(), resolved relative to the plugin path.
        self._compatibility_dialog_path = "resources/qml/CompatibilityDialog.qml"
    def present(self, plugin_path: str, model: SubscribedPackagesModel) -> None:
        """Open the compatibility dialog for `model` and wire its accept action."""
        path = os.path.join(plugin_path, self._compatibility_dialog_path)
        self._dialog = self._app.createQmlComponent(
            path, {"subscribedPackagesModel": model, "handler": self}
        )
        # createQmlComponent can presumably return None on failure; the assert
        # narrows the Optional for type checkers and fails fast in that case.
        assert self._dialog
        self._dialog.accepted.connect(lambda: self._onConfirmClicked(model))
    def _onConfirmClicked(self, model: SubscribedPackagesModel) -> None:
        # If there are incompatible packages - automatically dismiss them
        if model.getIncompatiblePackages():
            self._package_manager.dismissAllIncompatiblePackages(
                model.getIncompatiblePackages()
            )
        # For now, all compatible packages presented to the user should be installed.
        # Later, we might remove items for which the user unselected the package
        if model.getCompatiblePackages():
            model.setItems(model.getCompatiblePackages())
            self.packageMutations.emit(model)
|
frescobaldi-app | viewhighlighter | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Manages highlighting of arbitrary things in a View, e.g.
the current line, marked lines, search results etc.
"""
import app
import bookmarks
import gadgets.arbitraryhighlighter
import plugin
import textformats
from PyQt5.QtCore import QEvent
from PyQt5.QtGui import QColor, QTextCharFormat, QTextFormat
def highlighter(view):
    """Return the (per-view, lazily created) ViewHighlighter for `view`."""
    return ViewHighlighter.instance(view)
# Attach a highlighter to every newly created View.
app.viewCreated.connect(highlighter)
class ViewHighlighter(plugin.Plugin, gadgets.arbitraryhighlighter.ArbitraryHighlighter):
    """Per-view highlighter for the current line, marked (bookmarked) lines, etc."""
    def __init__(self, view):
        # no need to call the plugin __init__ method
        gadgets.arbitraryhighlighter.ArbitraryHighlighter.__init__(self, view)
        # Char format for the current-line highlight; FullWidthSelection makes
        # the background span the whole line width.
        self._cursorFormat = QTextCharFormat()
        self._cursorFormat.setProperty(QTextFormat.FullWidthSelection, True)
        app.settingsChanged.connect(self.readSettings)
        self.readSettings()
        bookmarks.bookmarks(view.document()).marksChanged.connect(
            self.updateMarkedLines
        )
        self.updateMarkedLines()
        view.cursorPositionChanged.connect(self.updateCursor)
        # Event filter is used to track focus changes (see eventFilter()).
        view.installEventFilter(self)
    def updateMarkedLines(self):
        """Called when something changes in the bookmarks."""
        for type, marks in (
            bookmarks.bookmarks(self.parent().document()).marks().items()
        ):
            self.highlight(type, marks, -1)
    def eventFilter(self, view, ev):
        """Redraw the cursor line on focus changes (its alpha depends on focus)."""
        if ev.type() in (QEvent.FocusIn, QEvent.FocusOut):
            self.updateCursor(view)
        return False  # never consume the event
    def updateCursor(self, view=None):
        """Called when the textCursor has moved. Highlights the current line.
        If view is None (the default), our parent() is assumed to be the
        view. The eventFilter() method calls us with the view, this is
        done because the event filter is sometimes called very late in
        the destructor phase, when our parent is possibly not valid
        anymore.
        """
        if view is None:
            view = self.parent()
        # sometimes in the destruction phase, view is a generic QWidget...
        try:
            cursor = view.textCursor()
        except AttributeError:
            return
        # highlight current line
        cursor.clearSelection()
        color = QColor(self._baseColors["current"])
        # Stronger highlight when the view has keyboard focus.
        color.setAlpha(200 if view.hasFocus() else 100)
        self._cursorFormat.setBackground(color)
        self.highlight(self._cursorFormat, [cursor], 0)
    def readSettings(self):
        """Re-read color settings and refresh all existing highlights."""
        data = textformats.formatData("editor")
        self._baseColors = data.baseColors
        self.updateCursor()
        self.reload()
    def textFormat(self, name):
        """(Internal) Returns a QTextCharFormat setup according to the preferences.
        For bookmarks and the current line, FullWidthSelection is automatically enabled.
        """
        f = QTextCharFormat()
        f.setBackground(self._baseColors[name])
        if name in ("current", "mark", "error"):
            f.setProperty(QTextFormat.FullWidthSelection, True)
        return f
|
Scene | ConvexHullDecorator | # Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import TYPE_CHECKING, Any, Optional
import numpy
from cura.Scene import ConvexHullNode
from cura.Settings.ExtruderManager import ExtruderManager
from PyQt6.QtCore import QTimer
from UM.Application import Application
from UM.Math.Polygon import Polygon
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
from UM.Settings.ContainerRegistry import ContainerRegistry
if TYPE_CHECKING:
from cura.Settings.GlobalStack import GlobalStack
from UM.Math.Matrix import Matrix
from UM.Mesh.MeshData import MeshData
from UM.Scene.SceneNode import SceneNode
class ConvexHullDecorator(SceneNodeDecorator):
    """The convex hull decorator is a scene node decorator that adds the convex hull functionality to a scene node.
    If a scene node has a convex hull decorator, it will have a shadow in which other objects can not be printed.
    """
    def __init__(self) -> None:
        super().__init__()
        self._convex_hull_node = None  # type: Optional["SceneNode"]
        self._init2DConvexHullCache()
        self._global_stack = None  # type: Optional[GlobalStack]
        # Make sure the timer is created on the main thread
        self._recompute_convex_hull_timer = None  # type: Optional[QTimer]
        self._timer_scheduled_to_be_created = False
        from cura.CuraApplication import CuraApplication
        if CuraApplication.getInstance() is not None:
            self._timer_scheduled_to_be_created = True
            CuraApplication.getInstance().callLater(self.createRecomputeConvexHullTimer)
        self._raft_thickness = 0.0
        self._build_volume = CuraApplication.getInstance().getBuildVolume()
        self._build_volume.raftThicknessChanged.connect(self._onChanged)
        CuraApplication.getInstance().globalContainerStackChanged.connect(
            self._onGlobalStackChanged
        )
        controller = CuraApplication.getInstance().getController()
        controller.toolOperationStarted.connect(self._onChanged)
        controller.toolOperationStopped.connect(self._onChanged)
        # CuraApplication.getInstance().sceneBoundingBoxChanged.connect(self._onChanged)
        self._root = Application.getInstance().getController().getScene().getRoot()
        self._onGlobalStackChanged()
    def createRecomputeConvexHullTimer(self) -> None:
        # Single-shot 200 ms timer used to debounce hull recomputation.
        self._recompute_convex_hull_timer = QTimer()
        self._recompute_convex_hull_timer.setInterval(200)
        self._recompute_convex_hull_timer.setSingleShot(True)
        self._recompute_convex_hull_timer.timeout.connect(self.recomputeConvexHull)
    def setNode(self, node: "SceneNode") -> None:
        """Attach this decorator to `node`, rewiring change signals."""
        previous_node = self._node
        # Disconnect from previous node signals
        if previous_node is not None and node is not previous_node:
            previous_node.boundingBoxChanged.disconnect(self._onChanged)
        super().setNode(node)
        node.boundingBoxChanged.connect(self._onChanged)
        per_object_stack = node.callDecoration("getStack")
        if per_object_stack:
            per_object_stack.propertyChanged.connect(self._onSettingValueChanged)
        self._onChanged()
    def __deepcopy__(self, memo):
        """Force that a new (empty) object is created upon copy."""
        return ConvexHullDecorator()
    def getAdhesionArea(self) -> Optional[Polygon]:
        """The polygon representing the 2D adhesion area.
        If no adhesion is used, the regular convex hull is returned
        """
        if self._node is None:
            return None
        hull = self._compute2DConvexHull()
        if hull is None:
            return None
        return self._add2DAdhesionMargin(hull)
    def getConvexHull(self) -> Optional[Polygon]:
        """Get the unmodified 2D projected convex hull of the node (if any)
        In case of one-at-a-time, this includes adhesion and head+fans clearance
        """
        if self._node is None:
            return None
        if self._node.callDecoration("isNonPrintingMesh"):
            return None
        # Parent can be None if node is just loaded.
        if self._isSingularOneAtATimeNode():
            return self.getConvexHullHeadFull()
        return self._compute2DConvexHull()
    def getConvexHullHeadFull(self) -> Optional[Polygon]:
        """For one at the time this is the convex hull of the node with the full head size
        In case of printing all at once this is None.
        """
        if self._node is None:
            return None
        if self._isSingularOneAtATimeNode():
            return self._compute2DConvexHeadFull()
        return None
    @staticmethod
    def hasGroupAsParent(node: "SceneNode") -> bool:
        # True when the node's direct parent is a group node.
        parent = node.getParent()
        if parent is None:
            return False
        return bool(parent.callDecoration("isGroup"))
    def getConvexHullHead(self) -> Optional[Polygon]:
        """Get convex hull of the object + head size
        In case of printing all at once this is None.
        For one at the time this is area with intersection of mirrored head
        """
        if self._node is None:
            return None
        if self._node.callDecoration("isNonPrintingMesh"):
            return None
        if self._isSingularOneAtATimeNode():
            head_with_fans = self._compute2DConvexHeadMin()
            if head_with_fans is None:
                return None
            head_with_fans_with_adhesion_margin = self._add2DAdhesionMargin(
                head_with_fans
            )
            return head_with_fans_with_adhesion_margin
        return None
    def getConvexHullBoundary(self) -> Optional[Polygon]:
        """Get convex hull of the node
        In case of printing all at once this None??
        For one at the time this is the area without the head.
        """
        if self._node is None:
            return None
        if self._node.callDecoration("isNonPrintingMesh"):
            return None
        if self._isSingularOneAtATimeNode():
            # Printing one at a time and it's not an object in a group
            return self._compute2DConvexHull()
        return None
    def getPrintingArea(self) -> Optional[Polygon]:
        """Get the buildplate polygon where will be printed
        In case of printing all at once this is the same as convex hull (no individual adhesion)
        For one at the time this includes the adhesion area
        """
        if self._isSingularOneAtATimeNode():
            # In one-at-a-time mode, every printed object gets it's own adhesion
            printing_area = self.getAdhesionArea()
        else:
            printing_area = self.getConvexHull()
        return printing_area
    def recomputeConvexHullDelayed(self) -> None:
        """The same as recomputeConvexHull, but using a timer if it was set."""
        if self._recompute_convex_hull_timer is not None:
            self._recompute_convex_hull_timer.start()
        else:
            from cura.CuraApplication import CuraApplication
            if not self._timer_scheduled_to_be_created:
                # The timer is not created and we never scheduled it. Time to create it now!
                CuraApplication.getInstance().callLater(
                    self.createRecomputeConvexHullTimer
                )
            # Now we know for sure that the timer has been scheduled for creation, so we can try this again.
            CuraApplication.getInstance().callLater(self.recomputeConvexHullDelayed)
    def recomputeConvexHull(self) -> None:
        # Rebuilds the visual hull node; drops it when the node left the scene.
        if self._node is None or not self.__isDescendant(self._root, self._node):
            if self._convex_hull_node:
                # Convex hull node still exists, but the node is removed or no longer in the scene.
                self._convex_hull_node.setParent(None)
                self._convex_hull_node = None
            return
        if self._convex_hull_node:
            self._convex_hull_node.setParent(None)
        hull_node = ConvexHullNode.ConvexHullNode(
            self._node, self.getPrintingArea(), self._raft_thickness, self._root
        )
        self._convex_hull_node = hull_node
    def _onSettingValueChanged(self, key: str, property_name: str) -> None:
        if property_name != "value":  # Not the value that was changed.
            return
        if key in self._affected_settings:
            self._onChanged()
        if key in self._influencing_settings:
            self._init2DConvexHullCache()  # Invalidate the cache.
            self._onChanged()
    def _init2DConvexHullCache(self) -> None:
        # Cache for the group code path in _compute2DConvexHull()
        self._2d_convex_hull_group_child_polygon = None  # type: Optional[Polygon]
        self._2d_convex_hull_group_result = None  # type: Optional[Polygon]
        # Cache for the mesh code path in _compute2DConvexHull()
        self._2d_convex_hull_mesh = None  # type: Optional[MeshData]
        self._2d_convex_hull_mesh_world_transform = None  # type: Optional[Matrix]
        self._2d_convex_hull_mesh_result = None  # type: Optional[Polygon]
    def _compute2DConvexHull(self) -> Optional[Polygon]:
        """Project the node (or group) to 2D and return its offset convex hull.
        Results are cached; the mesh cache stores the un-offset hull so the
        offset (which depends on settings) is re-applied per lookup.
        """
        if self._node is None:
            return None
        if self._node.callDecoration("isGroup"):
            points = numpy.zeros((0, 2), dtype=numpy.int32)
            for child in self._node.getChildren():
                child_hull = child.callDecoration("_compute2DConvexHull")
                if child_hull:
                    try:
                        points = numpy.append(points, child_hull.getPoints(), axis=0)
                    except ValueError:
                        pass
                # size counts scalar entries; < 3 means at most one 2D point,
                # which cannot form a hull.
                if points.size < 3:
                    return None
            child_polygon = Polygon(points)
            # Check the cache
            if child_polygon == self._2d_convex_hull_group_child_polygon:
                return self._2d_convex_hull_group_result
            convex_hull = (
                child_polygon.getConvexHull()
            )  # First calculate the normal convex hull around the points.
            offset_hull = self._offsetHull(
                convex_hull
            )  # Then apply the offset from the settings.
            # Store the result in the cache
            self._2d_convex_hull_group_child_polygon = child_polygon
            self._2d_convex_hull_group_result = offset_hull
            return offset_hull
        else:
            convex_hull = Polygon([])
            offset_hull = Polygon([])
            mesh = self._node.getMeshData()
            if mesh is None:
                return Polygon(
                    []
                )  # Node has no mesh data, so just return an empty Polygon.
            world_transform = self._node.getWorldTransformation(copy=True)
            # Check the cache
            if (
                mesh is self._2d_convex_hull_mesh
                and world_transform == self._2d_convex_hull_mesh_world_transform
            ):
                return self._offsetHull(self._2d_convex_hull_mesh_result)
            vertex_data = mesh.getConvexHullTransformedVertices(world_transform)
            # Don't use data below 0.
            # TODO; We need a better check for this as this gives poor results for meshes with long edges.
            # Do not throw away vertices: the convex hull may be too small and objects can collide.
            # vertex_data = vertex_data[vertex_data[:,1] >= -0.01]
            if vertex_data is not None and len(vertex_data) >= 4:  # type: ignore # mypy and numpy don't play along well just yet.
                # Round the vertex data to 1/10th of a mm, then remove all duplicate vertices
                # This is done to greatly speed up further convex hull calculations as the convex hull
                # becomes much less complex when dealing with highly detailed models.
                vertex_data = numpy.round(vertex_data, 1)
                vertex_data = vertex_data[
                    :, [0, 2]
                ]  # Drop the Y components to project to 2D.
                # Grab the set of unique points.
                #
                # This basically finds the unique rows in the array by treating them as opaque groups of bytes
                # which are as long as the 2 float64s in each row, and giving this view to numpy.unique() to munch.
                # See http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
                vertex_byte_view = numpy.ascontiguousarray(vertex_data).view(
                    numpy.dtype(
                        (numpy.void, vertex_data.dtype.itemsize * vertex_data.shape[1])
                    )
                )
                _, idx = numpy.unique(vertex_byte_view, return_index=True)
                vertex_data = vertex_data[idx]  # Select the unique rows by index.
                hull = Polygon(vertex_data)
                if len(vertex_data) >= 3:
                    convex_hull = hull.getConvexHull()
                    offset_hull = self._offsetHull(convex_hull)
            # Store the result in the cache
            self._2d_convex_hull_mesh = mesh
            self._2d_convex_hull_mesh_world_transform = world_transform
            self._2d_convex_hull_mesh_result = convex_hull
            return offset_hull
    def _getHeadAndFans(self) -> Polygon:
        # Print-head (plus fans) outline, shifted by the nozzle offset.
        if not self._global_stack:
            return Polygon()
        polygon = Polygon(
            numpy.array(self._global_stack.getHeadAndFansCoordinates(), numpy.float32)
        )
        offset_x = self._getSettingProperty("machine_nozzle_offset_x", "value")
        offset_y = self._getSettingProperty("machine_nozzle_offset_y", "value")
        return polygon.translate(-offset_x, -offset_y)
    def _compute2DConvexHeadFull(self) -> Optional[Polygon]:
        # Hull + adhesion margin, expanded by the full head-and-fans outline.
        convex_hull = self._compute2DConvexHull()
        convex_hull = self._add2DAdhesionMargin(convex_hull)
        if convex_hull:
            return convex_hull.getMinkowskiHull(self._getHeadAndFans())
        return None
    def _compute2DConvexHeadMin(self) -> Optional[Polygon]:
        head_and_fans = self._getHeadAndFans()
        mirrored = head_and_fans.mirror([0, 0], [0, 1]).mirror(
            [0, 0], [1, 0]
        )  # Mirror horizontally & vertically.
        head_and_fans = self._getHeadAndFans().intersectionConvexHulls(mirrored)
        # Min head hull is used for the push free
        convex_hull = self._compute2DConvexHull()
        if convex_hull:
            return convex_hull.getMinkowskiHull(head_and_fans)
        return None
    def _add2DAdhesionMargin(self, poly: Polygon) -> Polygon:
        """Compensate given 2D polygon with adhesion margin
        :return: 2D polygon with added margin
        """
        if not self._global_stack:
            return Polygon()
        # Compensate for raft/skirt/brim
        # Add extra margin depending on adhesion type
        adhesion_type = self._global_stack.getProperty("adhesion_type", "value")
        # Margin is capped at half the smaller buildplate dimension.
        max_length_available = 0.5 * min(
            self._getSettingProperty("machine_width", "value"),
            self._getSettingProperty("machine_depth", "value"),
        )
        if adhesion_type == "raft":
            extra_margin = min(
                max_length_available,
                max(0, self._getSettingProperty("raft_margin", "value")),
            )
        elif adhesion_type == "brim":
            extra_margin = min(
                max_length_available,
                max(
                    0,
                    self._getSettingProperty("brim_line_count", "value")
                    * self._getSettingProperty("skirt_brim_line_width", "value"),
                ),
            )
        elif adhesion_type == "none":
            extra_margin = 0
        elif adhesion_type == "skirt":
            extra_margin = min(
                max_length_available,
                max(
                    0,
                    self._getSettingProperty("skirt_gap", "value")
                    + self._getSettingProperty("skirt_line_count", "value")
                    * self._getSettingProperty("skirt_brim_line_width", "value"),
                ),
            )
        else:
            raise Exception(
                "Unknown bed adhesion type. Did you forget to update the convex hull calculations for your new bed adhesion type?"
            )
        # Adjust head_and_fans with extra margin
        if extra_margin > 0:
            extra_margin_polygon = Polygon.approximatedCircle(extra_margin)
            poly = poly.getMinkowskiHull(extra_margin_polygon)
        return poly
    def _offsetHull(self, convex_hull: Polygon) -> Polygon:
        """Offset the convex hull with settings that influence the collision area.
        :param convex_hull: Polygon of the original convex hull.
        :return: New Polygon instance that is offset with everything that
        influences the collision area.
        """
        # Shrinkage compensation.
        if not self._global_stack:  # Should never happen.
            return convex_hull
        scale_factor = (
            self._global_stack.getProperty("material_shrinkage_percentage_xy", "value")
            / 100.0
        )
        result = convex_hull
        if (
            scale_factor != 1.0
            and scale_factor > 0
            and not self.getNode().callDecoration("isGroup")
        ):
            center = None
            if (
                self._global_stack.getProperty("print_sequence", "value")
                == "one_at_a_time"
            ):
                # Find the root node that's placed in the scene; the root of the mesh group.
                ancestor = self.getNode()
                while (
                    ancestor.getParent() != self._root
                    and ancestor.getParent() is not None
                ):
                    ancestor = ancestor.getParent()
                center = ancestor.getBoundingBox().center
            else:
                # Find the bounding box of the entire scene, which is all one mesh group then.
                aabb = None
                for printed_node in self._root.getChildren():
                    if not printed_node.callDecoration(
                        "isSliceable"
                    ) and not printed_node.callDecoration("isGroup"):
                        continue  # Not a printed node.
                    if aabb is None:
                        aabb = printed_node.getBoundingBox()
                    else:
                        aabb = aabb + printed_node.getBoundingBox()
                if aabb:
                    center = aabb.center
            if center:
                result = convex_hull.scale(
                    scale_factor, [center.x, center.z]
                )  # Yes, use Z instead of Y. Mixed conventions there with how the OpenGL coordinates are transmitted.
        # Horizontal expansion.
        horizontal_expansion = max(
            self._getSettingProperty("xy_offset", "value"),
            self._getSettingProperty("xy_offset_layer_0", "value"),
        )
        # Mold.
        mold_width = 0
        if self._getSettingProperty("mold_enabled", "value"):
            mold_width = self._getSettingProperty("mold_width", "value")
        hull_offset = horizontal_expansion + mold_width
        if (
            hull_offset > 0
        ):  # TODO: Implement Minkowski subtraction for if the offset < 0.
            expansion_polygon = Polygon(
                numpy.array(
                    [
                        [-hull_offset, -hull_offset],
                        [-hull_offset, hull_offset],
                        [hull_offset, hull_offset],
                        [hull_offset, -hull_offset],
                    ],
                    numpy.float32,
                )
            )
            return result.getMinkowskiHull(expansion_polygon)
        else:
            return result
    def _onChanged(self, *args) -> None:
        self._raft_thickness = self._build_volume.getRaftThickness()
        self.recomputeConvexHullDelayed()
    def _onGlobalStackChanged(self) -> None:
        # Re-wire setting-change signals from the old stack to the new one.
        if self._global_stack:
            self._global_stack.propertyChanged.disconnect(self._onSettingValueChanged)
            self._global_stack.containersChanged.disconnect(self._onChanged)
            extruders = ExtruderManager.getInstance().getActiveExtruderStacks()
            for extruder in extruders:
                extruder.propertyChanged.disconnect(self._onSettingValueChanged)
        self._global_stack = Application.getInstance().getGlobalContainerStack()
        if self._global_stack:
            self._global_stack.propertyChanged.connect(self._onSettingValueChanged)
            self._global_stack.containersChanged.connect(self._onChanged)
            extruders = ExtruderManager.getInstance().getActiveExtruderStacks()
            for extruder in extruders:
                extruder.propertyChanged.connect(self._onSettingValueChanged)
        self._onChanged()
    def _getSettingProperty(self, setting_key: str, prop: str = "value") -> Any:
        """Private convenience function to get a setting from the correct extruder (as defined by limit_to_extruder property)."""
        if self._global_stack is None or self._node is None:
            return None
        per_mesh_stack = self._node.callDecoration("getStack")
        if per_mesh_stack:
            return per_mesh_stack.getProperty(setting_key, prop)
        extruder_index = self._global_stack.getProperty(
            setting_key, "limit_to_extruder"
        )
        if extruder_index == "-1":
            # No limit_to_extruder
            extruder_stack_id = self._node.callDecoration("getActiveExtruder")
            if not extruder_stack_id:
                # Decoration doesn't exist
                extruder_stack_id = ExtruderManager.getInstance().extruderIds["0"]
            extruder_stack = ContainerRegistry.getInstance().findContainerStacks(
                id=extruder_stack_id
            )[0]
            return extruder_stack.getProperty(setting_key, prop)
        else:
            # Limit_to_extruder is set. The global stack handles this then
            return self._global_stack.getProperty(setting_key, prop)
    def __isDescendant(self, root: "SceneNode", node: Optional["SceneNode"]) -> bool:
        """Returns True if node is a descendant or the same as the root node."""
        if node is None:
            return False
        if root is node:
            return True
        return self.__isDescendant(root, node.getParent())
    def _isSingularOneAtATimeNode(self) -> bool:
        """True if print_sequence is one_at_a_time and _node is not part of a group"""
        if self._node is None:
            return False
        return (
            self._global_stack is not None
            and self._global_stack.getProperty("print_sequence", "value")
            == "one_at_a_time"
            and not self.hasGroupAsParent(self._node)
        )
    # Settings whose value change requires a hull recomputation.
    _affected_settings = [
        "adhesion_type",
        "raft_margin",
        "print_sequence",
        "skirt_gap",
        "skirt_line_count",
        "skirt_brim_line_width",
        "skirt_distance",
        "brim_line_count",
    ]
    # Settings that also invalidate the cached 2D hulls.
    _influencing_settings = {
        "xy_offset",
        "xy_offset_layer_0",
        "mold_enabled",
        "mold_width",
        "anti_overhang_mesh",
        "infill_mesh",
        "cutting_mesh",
        "material_shrinkage_percentage_xy",
    }
    """Settings that change the convex hull.
    If these settings change, the convex hull should be recalculated.
    """
|
blocks | variable_struct_block_yml | #!/usr/bin/env python
MAX_NUM_FIELDS = 20
HEADER = """\
id: variable_struct
label: Struct Variable
flags: [ show_id ]
parameters:
"""
TEMPLATES = """\
templates:
imports: "def struct(data): return type('Struct', (object,), data)()"
var_make: |-
self.${{id}} = ${{id}} = struct({{
% for i in range({0}):
<%
field = context.get('field' + str(i))
value = context.get('value' + str(i))
%>
% if len(str(field)) > 2:
${{field}}: ${{value}},
% endif
% endfor
}})
var_value: |-
struct({{
% for i in range({0}):
<%
field = context.get('field' + str(i))
%>
% if len(str(field)) > 2:
${{field}}: ${{field}},
% endif
% endfor
}})
"""
FIELD0 = """\
- id: field0
label: Field 0
category: Fields
dtype: string
default: field0
hide: part
"""
FIELDS = """\
- id: field{0}
label: Field {0}
category: Fields
dtype: string
hide: part
"""
VALUES = """\
- id: value{0}
label: ${{field{0}}}
dtype: raw
default: '0'
hide: ${{ 'none' if field{0} else 'all' }}
"""
ASSERTS = """\
- ${{ (str(field{0}) or "a")[0].isalpha() }}
- ${{ (str(field{0}) or "a").isalnum() }}
"""
FOOTER = """\
documentation: |-
This is a simple struct/record like variable.
Attribute/field names can be specified in the tab 'Fields'.
For each non-empty field a parameter with type raw is shown.
Value access via the dot operator, e.g. "variable_struct_0.field0"
file_format: 1
"""
def make_yml(num_fields):
    """Assemble the complete variable_struct YAML block definition.

    :param num_fields: number of field/value parameter pairs to generate.
    :return: the YAML document as a single string.
    """
    return "".join(
        (
            HEADER.format(num_fields),
            FIELD0,
            "".join(FIELDS.format(i) for i in range(1, num_fields)),
            "".join(VALUES.format(i) for i in range(num_fields)),
            "value: ${value}\n\nasserts:\n",
            "".join(ASSERTS.format(i) for i in range(num_fields)),
            # Plain .format(): the previous "".join(TEMPLATES.format(...))
            # pointlessly re-joined the characters of an already-built string.
            TEMPLATES.format(num_fields),
            FOOTER,
        )
    )
if __name__ == "__main__":
    import sys
    try:
        # Optional output filename as first CLI argument.
        filename = sys.argv[1]
    except IndexError:
        # Default: write next to this script, dropping the ".py" suffix.
        filename = __file__[:-3]
    data = make_yml(MAX_NUM_FIELDS)
    with open(filename, "wb") as fp:
        fp.write(data.encode())
|
views | slack_team_settings | from apps.api.permissions import RBACPermission
from apps.api.serializers.organization_slack_settings import (
OrganizationSlackSettingsSerializer,
)
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.models import Organization
from common.insight_log import EntityEvent, write_resource_insight_log
from rest_framework import views
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
class SlackTeamSettingsAPIView(views.APIView):
    """Read (GET) and update (PUT) the organization's Slack settings."""
    authentication_classes = (PluginAuthentication,)
    permission_classes = (IsAuthenticated, RBACPermission)
    # RBAC: reading requires CHATOPS_READ, writing CHATOPS_UPDATE_SETTINGS.
    rbac_permissions = {
        "get": [RBACPermission.Permissions.CHATOPS_READ],
        "put": [RBACPermission.Permissions.CHATOPS_UPDATE_SETTINGS],
    }
    serializer_class = OrganizationSlackSettingsSerializer
    def get(self, request):
        # The auth token identifies the organization this request acts on.
        organization = self.request.auth.organization
        serializer = self.serializer_class(organization)
        return Response(serializer.data)
    def put(self, request):
        organization = self.request.auth.organization
        # Snapshot the state before the update so the insight log has a diff.
        prev_state = organization.insight_logs_serialized
        serializer = self.serializer_class(organization, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        new_state = serializer.instance.insight_logs_serialized
        write_resource_insight_log(
            instance=serializer.instance,
            author=self.request.user,
            event=EntityEvent.UPDATED,
            prev_state=prev_state,
            new_state=new_state,
        )
        return Response(serializer.data)
class AcknowledgeReminderOptionsAPIView(views.APIView):
    """List the acknowledge-reminder interval choices for the frontend."""
    authentication_classes = (PluginAuthentication,)
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        # Each choice: internal value, delay in seconds, display name.
        choices = [
            {
                "value": item[0],
                "sec_value": Organization.ACKNOWLEDGE_REMIND_DELAY[item[0]],
                "display_name": item[1],
            }
            for item in Organization.ACKNOWLEDGE_REMIND_CHOICES
        ]
        return Response(choices)
class UnAcknowledgeTimeoutOptionsAPIView(views.APIView):
    """List the un-acknowledge timeout choices for the frontend."""
    authentication_classes = (PluginAuthentication,)
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        # Each choice: internal value, delay in seconds, display name.
        choices = [
            {
                "value": item[0],
                "sec_value": Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[item[0]],
                "display_name": item[1],
            }
            for item in Organization.UNACKNOWLEDGE_TIMEOUT_CHOICES
        ]
        return Response(choices)
|
rtorrent | common | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import urlparse
from rtorrent.compat import is_py3
def bool_to_int(value):
    """Translates python booleans to RPC-safe integers.

    Only genuine booleans are converted; any other value passes through
    unchanged.
    """
    if value is True:
        return "1"
    if value is False:
        return "0"
    return value
def cmd_exists(cmds_list, cmd):
    """Check if given command is in list of available commands

    @param cmds_list: see L{RTorrent._rpc_methods}
    @type cmds_list: list
    @param cmd: name of command to be checked
    @type cmd: str
    @return: bool
    """
    return cmds_list.count(cmd) > 0
def find_torrent(info_hash, torrent_list):
    """Find torrent file in given list of Torrent classes

    @param info_hash: info hash of torrent
    @type info_hash: str
    @param torrent_list: list of L{Torrent} instances (see L{RTorrent.get_torrents})
    @type torrent_list: list
    @return: L{Torrent} instance, or None if not found
    """
    # NOTE: the docstring previously promised -1 on failure, but the code has
    # always returned None; the documentation is corrected rather than the
    # behavior, since callers may already test for None.
    for t in torrent_list:
        if t.info_hash == info_hash:
            return t
    return None
def is_valid_port(port):
    """Check if given port is valid"""
    # Accepts anything int() can convert; raises ValueError otherwise,
    # matching the original behavior.
    return int(port) in range(0, 65536)
def convert_version_tuple_to_str(t):
    """Join a version tuple such as (0, 9, 2) into the string "0.9.2"."""
    return ".".join(map(str, t))
def safe_repr(fmt, *args, **kwargs):
    """Formatter that handles unicode arguments"""
    if is_py3():
        return fmt.format(*args, **kwargs)
    # Python 2: a unicode fmt accepts str args, but a str fmt cannot take
    # unicode args — so format in unicode and encode the result back.
    return fmt.decode("utf-8").format(*args, **kwargs).encode("utf-8")
def split_path(path):
    """Split *path* on "/", dropping a trailing empty fragment."""
    parts = path.split("/")
    # A trailing slash produces an empty last fragment; drop it unless the
    # split yielded only a single fragment.
    if len(parts) > 1 and not parts[-1]:
        return parts[:-1]
    return parts
def join_path(base, path):
    """Join *path* onto *base*; an absolute *path* replaces *base* outright."""
    if os.path.isabs(path):
        # A new absolute path wins — nothing to join.
        return path
    if base and not os.path.isabs(base):
        # Resolving against a relative, non-empty base is unsupported.
        raise NotImplementedError()
    fragments = split_path(base) + split_path(path)
    return "/".join(fragments)
def join_uri(base, uri, construct=True):
    """Join *uri* onto *base* (Python-2 urlparse based).

    Returns *base* unchanged when *uri* has no path component. Otherwise the
    result keeps scheme and netloc from *base*, joins the two paths, and takes
    params/query/fragment from *uri*. Returns the unparsed string unless
    *construct* is False, in which case the ParseResult is returned.
    """
    p_uri = urlparse.urlparse(uri)
    # Return if there is nothing to join
    if not p_uri.path:
        return base
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(base)
    # Switch to 'uri' parts
    # NOTE: this deliberately overwrites params/query/fragment unpacked from
    # *base* above; only scheme, netloc and path from *base* survive.
    _, _, _, params, query, fragment = p_uri
    path = join_path(path, p_uri.path)
    result = urlparse.ParseResult(scheme, netloc, path, params, query, fragment)
    if not construct:
        return result
    # Construct from parts
    return urlparse.urlunparse(result)
def update_uri(uri, construct=True, **kwargs):
    """Return *uri* with the given components replaced.

    *uri* may be a ParseResult or a dict with the six ParseResult keys
    (scheme, netloc, path, params, query, fragment). Keyword arguments
    override individual components. Returns the unparsed string unless
    *construct* is False, in which case the ParseResult is returned.

    @raise ValueError: if *uri* is neither a ParseResult nor a dict.
    """
    if isinstance(uri, urlparse.ParseResult):
        uri = dict(uri._asdict())
    # isinstance (rather than `type(uri) is not dict`) also accepts dict
    # subclasses such as OrderedDict.
    if not isinstance(uri, dict):
        raise ValueError("Unknown URI type")
    uri.update(kwargs)
    result = urlparse.ParseResult(**uri)
    if not construct:
        return result
    return urlparse.urlunparse(result)
|
commands | generate_demo_data | import datetime as dt
import logging
import secrets
from time import monotonic
from django.core import exceptions
from django.core.management.base import BaseCommand
from posthog.demo.matrix import Matrix, MatrixManager
from posthog.demo.products.hedgebox import HedgeboxMatrix
logging.getLogger("kafka").setLevel(logging.WARNING) # Hide kafka-python's logspam
class Command(BaseCommand):
    # Django management command: run the Hedgebox Matrix simulation and,
    # unless --dry-run is given, persist the generated demo data.
    help = "Generate demo data using the Matrix"
    def add_arguments(self, parser):
        """Register CLI options for simulation shape, persistence and demo user."""
        parser.add_argument(
            "--seed", type=str, help="Simulation seed for deterministic output"
        )
        parser.add_argument(
            "--now",
            type=dt.datetime.fromisoformat,
            help="Simulation 'now' datetime in ISO format (default: now)",
        )
        parser.add_argument(
            "--days-past",
            type=int,
            default=120,
            help="At how many days before 'now' should the simulation start (default: 120)",
        )
        parser.add_argument(
            "--days-future",
            type=int,
            default=30,
            help="At how many days after 'now' should the simulation end (default: 30)",
        )
        parser.add_argument(
            "--n-clusters",
            type=int,
            default=500,
            help="Number of clusters (default: 500)",
        )
        parser.add_argument(
            "--dry-run", action="store_true", help="Don't save simulation results"
        )
        parser.add_argument(
            "--reset-master",
            action="store_true",
            help="Reset master project instead of creating a demo project",
        )
        parser.add_argument(
            "--email",
            type=str,
            default="test@posthog.com",
            help="Email of the demo user (default: test@posthog.com)",
        )
        parser.add_argument(
            "--password",
            type=str,
            default="12345678",
            help="Password of the demo user (default: 12345678)",
        )
    def handle(self, *args, **options):
        """Run the simulation, print a summary, then persist unless --dry-run."""
        timer = monotonic()
        # A random hex seed keeps runs unique unless the caller pins one.
        seed = options.get("seed") or secrets.token_hex(16)
        now = options.get("now") or dt.datetime.now(dt.timezone.utc)
        print("Instantiating the Matrix...")
        matrix = HedgeboxMatrix(
            seed,
            now=now,
            days_past=options["days_past"],
            days_future=options["days_future"],
            n_clusters=options["n_clusters"],
        )
        print("Running simulation...")
        matrix.simulate()
        self.print_results(
            matrix,
            seed=seed,
            duration=monotonic() - timer,
            # "verbosity" is supplied by Django's BaseCommand, not by
            # add_arguments above.
            verbosity=options["verbosity"],
        )
        if not options["dry_run"]:
            email = options["email"]
            password = options["password"]
            matrix_manager = MatrixManager(matrix, print_steps=True)
            try:
                if options["reset_master"]:
                    matrix_manager.reset_master()
                else:
                    # disallow_collision: fail rather than reuse an existing
                    # account for the demo email.
                    matrix_manager.ensure_account_and_save(
                        email,
                        "Employee 427",
                        "Hedgebox Inc.",
                        password=password,
                        disallow_collision=True,
                    )
            except exceptions.ValidationError as e:
                print(f"Error: {e}")
            else:
                print(
                    "Master project reset!"
                    if options["reset_master"]
                    else f"\nDemo data ready for {email}!\n\n"
                    "Pre-fill the login form with this link:\n"
                    f"http://localhost:8000/login?email={email}\n"
                    f"The password is {password}.\n\n"
                    "If running demo mode (DEMO=1), log in instantly with this link:\n"
                    f"http://localhost:8000/signup?email={email}\n"
                )
        else:
            print("Dry run - not saving results.")
    @staticmethod
    def print_results(matrix: Matrix, *, seed: str, duration: float, verbosity: int):
        """Print a per-cluster summary of the simulation.

        verbosity >= 2 adds per-person summaries; verbosity >= 3 additionally
        lists every event grouped by session.
        """
        active_people_count = 0 # Active means they have at least one event
        total_event_count = 0
        future_event_count = 0
        summary_lines = [f"Matrix: {matrix.PRODUCT_NAME}. Seed: {seed}."]
        for cluster in matrix.clusters:
            summary_lines.append(
                f" Cluster {cluster.index}: {cluster}. Radius = {cluster.radius}. Population = {len(cluster.people_matrix) * len(cluster.people_matrix[0])}."
            )
            for y, person_row in enumerate(cluster.people_matrix):
                for x, person in enumerate(person_row):
                    if verbosity >= 2:
                        summary_lines.append(f" Person {x, y}: {person}")
                    total_event_count += len(person.past_events) + len(
                        person.future_events
                    )
                    future_event_count += len(person.future_events)
                    if person.all_events:
                        active_people_count += 1
                    if verbosity >= 3:
                        # Emit a session header whenever the session changes.
                        active_session_id = None
                        for event in person.all_events:
                            if session_id := event.properties.get("$session_id"):
                                if active_session_id != session_id:
                                    summary_lines.append(
                                        f" Session {session_id}:"
                                    )
                                    active_session_id = session_id
                            summary_lines.append(f" {event}")
                    elif verbosity >= 2:
                        event_count = len(person.past_events) + len(
                            person.future_events
                        )
                        if not event_count:
                            summary_lines.append(" No events")
                        else:
                            assert (
                                person.first_seen_at is not None
                                and person.last_seen_at is not None
                            )
                            session_count = len(
                                set(
                                    event.properties.get("$session_id")
                                    for event in person.all_events
                                )
                            )
                            summary_lines.append(
                                f" {event_count} event{'' if event_count == 1 else 's'} "
                                f"across {session_count} session{'' if session_count == 1 else 's'} "
                                f"between {person.first_seen_at.strftime('%Y-%m-%d %H:%M:%S')} "
                                f"and {person.last_seen_at.strftime('%Y-%m-%d %H:%M:%S')}"
                            )
        summary_lines.append(
            f"All in all, in {duration * 1000:.2f} ms "
            f"simulated {len(matrix.people)} {'person' if len(matrix.people) == 1 else 'people'} "
            f"({active_people_count} active) "
            f"within {len(matrix.clusters)} cluster{'' if len(matrix.clusters) == 1 else 's'} "
            f"for a total of {total_event_count} event{'' if total_event_count == 1 else 's'} (of which {future_event_count} {'is' if future_event_count == 1 else 'are'} in the future)."
        )
        print("\n".join(summary_lines))
|
config-integrations | kapacitor | # Main
enabled = True
title = "Kapacitor"
slug = "kapacitor"
short_description = "InfluxDB"
description = None
is_displayed_on_web = True
is_featured = False
is_able_to_autoresolve = True
is_demo_alert_enabled = True
# NOTE: a duplicate `description = None` assignment was removed here — it was
# set identically twice in the original module.
# Default templates
slack_title = """\
*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("id", "Title undefined (check Slack Title Template)") }}>* via {{ integration_name }}
{% if source_link %}
(*<{{ source_link }}|source>*)
{%- endif %}"""
slack_message = """\
```{{ payload|tojson_pretty }}```
"""
slack_image_url = None
web_title = '{{ payload.get("id", "Title undefined (check Web Title Template)") }}'
web_message = """\
```
{{ payload|tojson_pretty }}
```
"""
web_image_url = slack_image_url
sms_title = web_title
phone_call_title = web_title
telegram_title = sms_title
telegram_message = "<code>{{ payload|tojson_pretty }}</code>"
telegram_image_url = slack_image_url
source_link = None
grouping_id = '{{ payload.get("id", "") }}'
resolve_condition = '{{ payload.get("level", "").startswith("OK") }}'
acknowledge_condition = None
example_payload = {
    "id": "TestAlert",
    "message": "This alert was sent by user for demonstration purposes",
    "data": "{foo: bar}",
}
|
blocks | msg_meta_to_pair | #!/usr/bin/env python
#
# Copyright 2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import pmt
from gnuradio import gr
class meta_to_pair(gr.sync_block):
    """
    This block converts a metadata dictionary item to a pmt pair that is
    compatible with other blocks expecting a pair in. You can specify
    which item in the incoming metadata to output as a pair and what
    the pair name is.
    """

    def __init__(self, incomingKeyName, outgoingPairName):
        gr.sync_block.__init__(self, name="meta_to_pair", in_sig=None, out_sig=None)
        self.incomingKeyName = str(incomingKeyName)
        self.outgoingPairName = str(outgoingPairName)
        self.message_port_register_in(pmt.intern("inmeta"))
        self.set_msg_handler(pmt.intern("inmeta"), self.msg_handler)
        self.message_port_register_out(pmt.intern("outpair"))

    def msg_handler(self, msg):
        """Translate one incoming metadata message into an outgoing pair.

        Silently drops (with a warning) messages that are not pairs, whose
        car is not a dict, or that lack the configured key.
        """
        if not pmt.is_pair(msg):
            gr.log.warn(
                "Incoming message is not a pair. Only pairs are supported. "
                "No message generated."
            )
            return
        meta = pmt.to_python(pmt.car(msg))
        # isinstance (rather than `type(meta) is dict`) also accepts dict
        # subclasses that pmt.to_python may hand back.
        if not isinstance(meta, dict):
            gr.log.warn(
                "Incoming message does not contain a dictionary. "
                "No message generated."
            )
            return
        if self.incomingKeyName not in meta:
            gr.log.warn(
                "Incoming message dictionary does not contain key %s. "
                "No message generated." % self.incomingKeyName
            )
            return
        incomingVal = meta[self.incomingKeyName]
        try:
            new_pair = pmt.cons(
                pmt.intern(self.outgoingPairName), pmt.to_pmt(incomingVal)
            )
        except Exception as e:
            gr.log.error("Cannot construct new message: %s" % str(e))
            return
        try:
            self.message_port_pub(pmt.intern("outpair"), new_pair)
        except Exception as e:
            gr.log.error("Cannot send message: %s" % str(e))
            gr.log.error("Incoming dictionary (%s):" % str(type(meta)))
            gr.log.error(str(meta))

    def stop(self):
        # Nothing to clean up; required by the gr.sync_block interface.
        return True
|
engines | bing_news | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
Bing (News)
"""
from datetime import datetime
from urllib.parse import parse_qsl, urlencode, urlparse
from dateutil import parser
from lxml import etree
from lxml.etree import XPath
from searx.engines.bing import ( # NOQA # pylint: disable=unused-import
_fetch_supported_languages,
language_aliases,
supported_languages_url,
)
from searx.utils import eval_xpath_getindex, match_language
# about
about = {
"website": "https://www.bing.com/news",
"wikidata_id": "Q2878637",
"official_api_documentation": "https://www.microsoft.com/en-us/bing/apis/bing-news-search-api",
"use_official_api": False,
"require_api_key": False,
"results": "RSS",
}
# engine dependent config
categories = ["news"]
paging = True
time_range_support = True
# search-url
base_url = "https://www.bing.com/"
search_string = "news/search?{query}&first={offset}&format=RSS"
search_string_with_time = (
'news/search?{query}&first={offset}&qft=interval%3d"{interval}"&format=RSS'
)
time_range_dict = {"day": "7", "week": "8", "month": "9"}
# remove click
def url_cleanup(url_string):
    """Strip Bing's click-tracking redirect, returning the target URL.

    Non-tracking URLs pass through unchanged; a tracking URL without a
    ``url`` parameter yields None.
    """
    parsed = urlparse(url_string)
    is_tracker = (
        parsed.netloc == "www.bing.com" and parsed.path == "/news/apiclick.aspx"
    )
    if not is_tracker:
        return url_string
    return dict(parse_qsl(parsed.query)).get("url", None)
# replace the http://*bing4.com/th?id=... by https://www.bing.com/th?id=...
def image_url_cleanup(url_string):
    """Replace http://*bing4.com/th?id=... by https://www.bing.com/th?id=...

    Returns *url_string* unchanged when it is not a bing4 thumbnail URL or
    carries no ``id`` parameter (the previous version raised TypeError on a
    missing id by concatenating str + None).
    """
    parsed_url = urlparse(url_string)
    if parsed_url.netloc.endswith("bing4.com") and parsed_url.path == "/th":
        query = dict(parse_qsl(parsed_url.query))
        image_id = query.get("id")
        if image_id is not None:
            return "https://www.bing.com/th?id=" + image_id
    return url_string
def _get_url(query, language, offset, time_range):
    """Build the full RSS search URL for the given query parameters."""
    encoded_query = urlencode({"q": query, "setmkt": language})
    if time_range in time_range_dict:
        search_path = search_string_with_time.format(
            query=encoded_query,
            offset=offset,
            interval=time_range_dict[time_range],
        )
    else:
        # e.g. setmkt=de-de&setlang=de
        search_path = search_string.format(query=encoded_query, offset=offset)
    return base_url + search_path
# do search-request
def request(query, params):
    """Fill in params['url'] for the outgoing Bing News search request."""
    time_range = params["time_range"]
    # Unknown time ranges are rejected outright — no URL is set.
    if time_range and time_range not in time_range_dict:
        return params
    offset = (params["pageno"] - 1) * 10 + 1
    if params["language"] == "all":
        language = "en-US"
    else:
        language = match_language(
            params["language"], supported_languages, language_aliases
        )
    params["url"] = _get_url(query, language, offset, time_range)
    return params
# get response from search-request
def response(resp):
    """Parse the RSS search response into a list of result dicts.

    Each result carries url/title/publishedDate/content, plus img_src when
    the feed item provides a thumbnail.
    """
    results = []
    rss = etree.fromstring(resp.content)
    ns = rss.nsmap
    # parse results
    for item in rss.xpath("./channel/item"):
        # url / title / content
        url = url_cleanup(eval_xpath_getindex(item, "./link/text()", 0, default=None))
        title = eval_xpath_getindex(item, "./title/text()", 0, default=url)
        content = eval_xpath_getindex(item, "./description/text()", 0, default="")
        # publishedDate: fall back to "now" when missing or unparsable
        publishedDate = eval_xpath_getindex(item, "./pubDate/text()", 0, default=None)
        try:
            publishedDate = parser.parse(publishedDate, dayfirst=False)
        except (TypeError, ValueError):
            publishedDate = datetime.now()
        # thumbnail (optional)
        thumbnail = eval_xpath_getindex(
            item, XPath("./News:Image/text()", namespaces=ns), 0, default=None
        )
        result = {
            "url": url,
            "title": title,
            "publishedDate": publishedDate,
            "content": content,
        }
        # Only attach an image when the feed provided one.
        if thumbnail is not None:
            result["img_src"] = image_url_cleanup(thumbnail)
        results.append(result)
    return results
util | Diff | import difflib
import io
def sumLen(lines):
    """Total length of all items in *lines*."""
    return sum(len(line) for line in lines)
def diff(old, new, limit=False):
    """Compute a compact action list that transforms *old* into *new*.

    Actions are ("=", length), ("-", length) and ("+", list_of_lines), where
    lengths are total character/byte counts (see sumLen). When *limit* is a
    number and the inserted data grows beyond it, returns False instead.
    """
    matcher = difflib.SequenceMatcher(None, old, new)
    actions = []
    size = 0
    for tag, old_from, old_to, new_from, new_to in matcher.get_opcodes():
        if tag == "insert":
            new_lines = new[new_from:new_to]
            actions.append(("+", new_lines))
            # Use sumLen like the other branches (was a hand-rolled
            # sum(map(len, ...)) duplicate).
            size += sumLen(new_lines)
        elif tag == "equal":
            actions.append(("=", sumLen(old[old_from:old_to])))
        elif tag == "delete":
            actions.append(("-", sumLen(old[old_from:old_to])))
        elif tag == "replace":
            actions.append(("-", sumLen(old[old_from:old_to])))
            new_lines = new[new_from:new_to]
            actions.append(("+", new_lines))
            size += sumLen(new_lines)
        if limit and size > limit:
            return False
    return actions
def patch(old_f, actions):
    """Apply a diff() action list to file-like *old_f*, returning a BytesIO.

    Actions: "=" copies N bytes, "-" skips N bytes, "+" writes the given
    lines. Action tags may arrive as bytes and are decoded first.

    @raise ValueError: on an unknown action tag.
    """
    new_f = io.BytesIO()
    for action, param in actions:
        if type(action) is bytes:
            action = action.decode()
        if action == "=":  # Same lines
            new_f.write(old_f.read(param))
        elif action == "-":  # Delete lines
            old_f.seek(param, 1)  # Seek from current position
            continue
        elif action == "+":  # Add lines
            for add_line in param:
                new_f.write(add_line)
        else:
            # Was `raise "..."` — raising a string is a TypeError in
            # Python 3; raise a proper exception instead.
            raise ValueError("Unknown action: %s" % action)
    return new_f
|
module | projectedChangeProjectionRange | import wx
from gui.fitCommands.helpers import restoreCheckedStates
from logbook import Logger
from service.fit import Fit
pyfalog = Logger(__name__)
class CalcChangeProjectedModuleProjectionRangeCommand(wx.Command):
    """Undoable command that sets the projection range of a projected module."""

    def __init__(self, fitID, position, projectionRange):
        wx.Command.__init__(self, True)
        self.fitID = fitID
        self.position = position
        self.projectionRange = projectionRange
        self.savedProjectionRange = None
        self.savedStateCheckChanges = None

    def Do(self):
        pyfalog.debug(
            "Doing change of projected module projection range at position {} to range {} on fit {}".format(
                self.position, self.projectionRange, self.fitID
            )
        )
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        module = fit.projectedModules[self.position]
        # Reject no-op changes so they don't pollute the undo stack.
        if module.projectionRange == self.projectionRange:
            return False
        self.savedProjectionRange = module.projectionRange
        module.projectionRange = self.projectionRange
        sFit.recalc(fit)
        self.savedStateCheckChanges = sFit.checkStates(fit, module)
        return True

    def Undo(self):
        pyfalog.debug(
            "Undoing change of projected module projection range at position {} to range {} on fit {}".format(
                self.position, self.projectionRange, self.fitID
            )
        )
        # Undo by issuing the same command with the saved previous range.
        undoCmd = CalcChangeProjectedModuleProjectionRangeCommand(
            fitID=self.fitID,
            position=self.position,
            projectionRange=self.savedProjectionRange,
        )
        success = undoCmd.Do()
        restoreCheckedStates(
            Fit.getInstance().getFit(self.fitID), self.savedStateCheckChanges
        )
        return success

    @property
    def needsGuiRecalc(self):
        # Before Do() has run there is nothing cached, so force a recalc.
        if self.savedStateCheckChanges is None:
            return True
        return any(len(container) > 0 for container in self.savedStateCheckChanges)
|
classes | transition | """
@file
@brief This file is for legacy support of OpenShot 1.x project files
@author Jonathan Thomas <jonathan@openshot.org>
@section LICENSE
Copyright (c) 2008-2018 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
import uuid
class transition:
    """This class represents a transition (or mask) on the timeline.

    (The previous docstring said "media clip" — a copy-paste error from the
    clip class.)
    """

    # ----------------------------------------------------------------------
    def __init__(
        self,
        name,
        position_on_track,
        length,
        resource,
        parent,
        type="transition",
        mask_value=50.0,
    ):
        """Constructor.

        name: display name of the transition
        position_on_track: start time in seconds on the timeline
        length: duration in seconds
        resource: any grey-scale image, or empty for a dissolve
        parent: the sequence this transition belongs to
        type: "transition" or "mask"
        mask_value: mask threshold (see mask settings below)
        """
        # init variables for clip object
        self.name = name
        self.position_on_track = float(
            position_on_track
        )  # the time in seconds where the transition starts on the timeline
        self.length = float(length)  # the length in seconds of this transition
        self.resource = resource  # Any grey-scale image, or leave empty for a dissolve
        self.softness = 0.3  # 0.0 = no softness. 1.0 = too soft.
        self.reverse = False
        self.unique_id = str(uuid.uuid1())
        self.parent = parent  # the sequence
        # mask settings
        self.type = type  # transition or mask
        self.mask_value = mask_value  # 0.0 to 1.0
        # init vars for drag n drop
        self.drag_x = 0.0
        self.drag_y = 0.0
|
preferences | tools | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Per-tool preferences.
"""
import re
import app
import documentstructure
import preferences
import qutil
import userguide
import widgets.dialog
import widgets.listedit
from PyQt5.QtCore import QSettings, Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (
QAbstractItemView,
QCheckBox,
QDoubleSpinBox,
QFontComboBox,
QHBoxLayout,
QLabel,
QPushButton,
QVBoxLayout,
QWidget,
)
class Tools(preferences.ScrolledGroupsPage):
    """Aggregates all per-tool preference groups on one scrollable page."""

    def __init__(self, dialog):
        super().__init__(dialog)
        layout = QVBoxLayout()
        self.scrolledWidget.setLayout(layout)
        # One preference group per tool, in display order.
        for group in (
            LogTool(self),
            MusicView(self),
            CharMap(self),
            DocumentList(self),
            Outline(self),
        ):
            layout.addWidget(group)
        layout.addStretch(1)
class LogTool(preferences.Group):
    """Preferences group for the LilyPond log tool: font and visibility options."""
    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        # Font family + size chooser on a single row.
        self.fontLabel = QLabel()
        self.fontChooser = QFontComboBox(currentFontChanged=self.changed)
        self.fontSize = QDoubleSpinBox(valueChanged=self.changed)
        self.fontSize.setRange(6.0, 32.0)
        self.fontSize.setSingleStep(0.5)
        self.fontSize.setDecimals(1)
        box = QHBoxLayout()
        box.addWidget(self.fontLabel)
        box.addWidget(self.fontChooser, 1)
        box.addWidget(self.fontSize)
        layout.addLayout(box)
        # Behavior checkboxes; all connect to self.changed for apply tracking.
        self.showlog = QCheckBox(toggled=self.changed)
        layout.addWidget(self.showlog)
        self.rawview = QCheckBox(toggled=self.changed)
        layout.addWidget(self.rawview)
        self.hideauto = QCheckBox(toggled=self.changed)
        layout.addWidget(self.hideauto)
        app.translateUI(self)
    def translateUI(self):
        """Set user-visible texts; called by app.translateUI on (re)translation."""
        self.setTitle(_("LilyPond Log"))
        self.fontLabel.setText(_("Font:"))
        self.showlog.setText(_("Show log when a job is started"))
        self.rawview.setText(_("Display plain log output"))
        self.rawview.setToolTip(
            _("If checked, Frescobaldi will not shorten filenames in the log output.")
        )
        self.hideauto.setText(_("Hide automatic engraving jobs"))
        self.hideauto.setToolTip(
            _(
                "If checked, Frescobaldi will not show the log for automatically\n"
                "started engraving jobs (LilyPond->Auto-engrave)."
            )
        )
    def loadSettings(self):
        """Populate the widgets from QSettings (group "log")."""
        s = QSettings()
        s.beginGroup("log")
        font = QFont(s.value("fontfamily", "monospace", str))
        font.setPointSizeF(s.value("fontsize", 9.0, float))
        # Block signals so loading does not mark the page as modified.
        with qutil.signalsBlocked(self.fontChooser, self.fontSize):
            self.fontChooser.setCurrentFont(font)
            self.fontSize.setValue(font.pointSizeF())
        self.showlog.setChecked(s.value("show_on_start", True, bool))
        self.rawview.setChecked(s.value("rawview", True, bool))
        self.hideauto.setChecked(s.value("hide_auto_engrave", False, bool))
    def saveSettings(self):
        """Persist the widget state to QSettings (group "log")."""
        s = QSettings()
        s.beginGroup("log")
        s.setValue("fontfamily", self.fontChooser.currentFont().family())
        s.setValue("fontsize", self.fontSize.value())
        s.setValue("show_on_start", self.showlog.isChecked())
        s.setValue("rawview", self.rawview.isChecked())
        s.setValue("hide_auto_engrave", self.hideauto.isChecked())
class MusicView(preferences.Group):
    """Music View preferences: PDF loading policy and per-document settings."""

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.newerFilesOnly = QCheckBox(toggled=self.changed)
        self.documentProperties = QCheckBox(toggled=self.changed)
        layout.addWidget(self.newerFilesOnly)
        layout.addWidget(self.documentProperties)
        app.translateUI(self)

    def translateUI(self):
        self.setTitle(_("Music View"))
        self.newerFilesOnly.setText(_("Only load updated PDF documents"))
        self.newerFilesOnly.setToolTip(
            _(
                "If checked, Frescobaldi will not open PDF documents that are not\n"
                "up-to-date (i.e. the source file has been modified later)."
            )
        )
        self.documentProperties.setText(_("Remember View settings per-document"))
        self.documentProperties.setToolTip(
            _(
                "If checked, every document in the Music View will remember its\n"
                "own layout setting, zoom factor, etc. If unchecked, the View will\n"
                "not change its settings when a different document is displayed."
            )
        )

    def loadSettings(self):
        settings = QSettings()
        settings.beginGroup("musicview")
        self.newerFilesOnly.setChecked(settings.value("newer_files_only", True, bool))
        self.documentProperties.setChecked(
            settings.value("document_properties", True, bool)
        )

    def saveSettings(self):
        settings = QSettings()
        settings.beginGroup("musicview")
        settings.setValue("newer_files_only", self.newerFilesOnly.isChecked())
        settings.setValue("document_properties", self.documentProperties.isChecked())
class CharMap(preferences.Group):
    """Special Characters tool preferences: the font used by the char map."""

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.fontLabel = QLabel()
        self.fontChooser = QFontComboBox(currentFontChanged=self.changed)
        self.fontSize = QDoubleSpinBox(valueChanged=self.changed)
        self.fontSize.setRange(6.0, 32.0)
        self.fontSize.setSingleStep(0.5)
        self.fontSize.setDecimals(1)
        fontRow = QHBoxLayout()
        fontRow.addWidget(self.fontLabel)
        fontRow.addWidget(self.fontChooser, 1)
        fontRow.addWidget(self.fontSize)
        layout.addLayout(fontRow)
        app.translateUI(self)

    def translateUI(self):
        self.setTitle(_("Special Characters"))
        self.fontLabel.setText(_("Font:"))

    def loadSettings(self):
        settings = QSettings()
        settings.beginGroup("charmaptool")
        font = self.font()
        family = settings.value("fontfamily", "", str)
        if family:
            font.setFamily(family)
        font.setPointSizeF(settings.value("fontsize", font.pointSizeF(), float))
        # Block signals so loading does not mark the page as modified.
        with qutil.signalsBlocked(self.fontChooser, self.fontSize):
            self.fontChooser.setCurrentFont(font)
            self.fontSize.setValue(font.pointSizeF())

    def saveSettings(self):
        settings = QSettings()
        settings.beginGroup("charmaptool")
        settings.setValue("fontfamily", self.fontChooser.currentFont().family())
        settings.setValue("fontsize", self.fontSize.value())
class DocumentList(preferences.Group):
    """Documents tool preferences: how the document list is grouped."""

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.groupCheck = QCheckBox(toggled=self.changed)
        layout.addWidget(self.groupCheck)
        app.translateUI(self)

    def translateUI(self):
        self.setTitle(_("Documents"))
        self.groupCheck.setText(_("Group documents by directory"))

    def loadSettings(self):
        settings = QSettings()
        settings.beginGroup("document_list")
        self.groupCheck.setChecked(settings.value("group_by_folder", False, bool))

    def saveSettings(self):
        settings = QSettings()
        settings.beginGroup("document_list")
        settings.setValue("group_by_folder", self.groupCheck.isChecked())
class Outline(preferences.Group):
    """Outline tool preferences: the regex patterns that build the outline.

    Two pattern lists are maintained: one matched outside comments and one
    matched everywhere (including comments).
    """

    def __init__(self, page):
        super().__init__(page)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.label = QLabel()
        self.patternList = OutlinePatterns()
        self.patternList.listBox.setDragDropMode(QAbstractItemView.InternalMove)
        self.defaultButton = QPushButton(clicked=self.reloadDefaults)
        self.patternList.layout().addWidget(self.defaultButton, 3, 1)
        self.patternList.layout().addWidget(self.patternList.listBox, 0, 0, 5, 1)
        self.patternList.changed.connect(self.changed)
        self.labelComments = QLabel()
        self.patternListComments = OutlinePatterns()
        self.patternListComments.listBox.setDragDropMode(QAbstractItemView.InternalMove)
        self.defaultButtonComments = QPushButton(clicked=self.reloadDefaultsComments)
        self.patternListComments.layout().addWidget(self.defaultButtonComments, 3, 1)
        self.patternListComments.layout().addWidget(
            self.patternListComments.listBox, 0, 0, 5, 1
        )
        self.patternListComments.changed.connect(self.changed)
        layout.addWidget(self.label)
        layout.addWidget(self.patternList)
        layout.addWidget(self.labelComments)
        layout.addWidget(self.patternListComments)
        app.translateUI(self)

    def translateUI(self):
        self.setTitle(_("Outline"))
        self.defaultButton.setText(_("Default"))
        self.defaultButton.setToolTip(_("Restores the built-in outline patterns."))
        self.label.setText(
            _(
                "Patterns to match in text (excluding comments) that are shown in outline:"
            )
        )
        self.defaultButtonComments.setText(_("Default"))
        self.defaultButtonComments.setToolTip(
            _("Restores the built-in outline patterns.")
        )
        self.labelComments.setText(
            _(
                "Patterns to match in text (including comments) that are shown in outline:"
            )
        )

    def reloadDefaults(self):
        """Restore the built-in patterns (excluding comments)."""
        self.patternList.setValue(documentstructure.default_outline_patterns)

    def reloadDefaultsComments(self):
        """Restore the built-in patterns (including comments)."""
        self.patternListComments.setValue(
            documentstructure.default_outline_patterns_comments
        )

    def loadSettings(self):
        s = QSettings()
        s.beginGroup("documentstructure")
        try:
            patterns = s.value(
                "outline_patterns", documentstructure.default_outline_patterns, str
            )
        except TypeError:
            patterns = []
        try:
            patterns_comments = s.value(
                "outline_patterns_comments",
                documentstructure.default_outline_patterns_comments,
                str,
            )
        except TypeError:
            patterns_comments = []
        self.patternList.setValue(patterns)
        self.patternListComments.setValue(patterns_comments)

    def saveSettings(self):
        s = QSettings()
        s.beginGroup("documentstructure")
        if self.patternList.value() != documentstructure.default_outline_patterns:
            s.setValue("outline_patterns", self.patternList.value())
        else:
            s.remove("outline_patterns")
        # BUG FIX: the comments pattern list was loaded but never saved,
        # silently discarding user edits. Persist it like the other list.
        if (
            self.patternListComments.value()
            != documentstructure.default_outline_patterns_comments
        ):
            s.setValue("outline_patterns_comments", self.patternListComments.value())
        else:
            s.remove("outline_patterns_comments")
class OutlinePatterns(widgets.listedit.ListEdit):
    """ListEdit whose item editor is a validated regular-expression dialog."""

    def openEditor(self, item):
        dlg = widgets.dialog.TextDialog(
            None, _("Enter a regular expression to match:"), app.caption("Outline")
        )
        userguide.addButton(dlg.buttonBox(), "outline_configure")
        dlg.setValidateFunction(is_regex)
        dlg.setText(item.text())
        # Guard clause: a cancelled dialog leaves the item untouched.
        if not dlg.exec_():
            return False
        item.setText(dlg.text())
        return True
def is_regex(text):
    """Return True if text is a valid regular expression."""
    try:
        re.compile(text, re.M)
        return True
    except re.error:
        return False
|
utils | curses_util | #
# Copyright (C) 2016 bendikro <bro.devel+deluge@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
try:
import curses
except ImportError:
pass
# Named key codes that the curses module does not provide itself.
KEY_BELL = 7 # CTRL-/ ^G (curses.keyname(KEY_BELL) == "^G")
KEY_TAB = 9
KEY_ENTER2 = 10  # line feed; distinct from curses.KEY_ENTER (keypad enter)
KEY_ESC = 27
KEY_SPACE = 32
KEY_BACKSPACE2 = 127  # DEL, sent as backspace by many terminals
# NOTE(review): the Alt/Ctrl + arrow/page codes below are terminal-dependent
# extended keycodes — presumably matching xterm-style terminfo; verify on the
# terminals Deluge supports.
KEY_ALT_AND_ARROW_UP = 564
KEY_ALT_AND_ARROW_DOWN = 523
KEY_ALT_AND_KEY_PPAGE = 553
KEY_ALT_AND_KEY_NPAGE = 548
KEY_CTRL_AND_ARROW_UP = 566
KEY_CTRL_AND_ARROW_DOWN = 525
def is_printable_chr(c):
    """True when *c* (an int keycode) is a printable ASCII character."""
    return 32 <= c <= 126
def is_int_chr(c):
    """True when *c* (an int keycode) is an ASCII digit '0'-'9'."""
    return 48 <= c <= 57
class Curser:
    # Cursor visibility levels accepted by curses.curs_set().
    # (The class name "Curser" is a historical typo kept for compatibility.)
    INVISIBLE = 0
    NORMAL = 1
    VERY_VISIBLE = 2
def safe_curs_set(visibility):
    """
    Args:
        visibility(int): 0, 1, or 2, for invisible, normal, or very visible

    curses.curs_set fails on monochrome terminals so use this
    to ignore errors
    """
    try:
        curses.curs_set(visibility)
    except curses.error:
        # Terminal cannot change cursor visibility; nothing to do.
        pass
class ReadState:
    # Outcome of processing one input key: ignored, consumed, or consumed
    # with a state change that requires a redraw.
    IGNORED = 0
    READ = 1
    CHANGED = 2
|
submitters | simpleFarmSubmitter | #!/usr/bin/env python
# coding:utf-8
import json
import logging
import os
import simpleFarm
from meshroom.core.desc import Level
from meshroom.core.submitter import BaseSubmitter
currentDir = os.path.dirname(os.path.realpath(__file__))
binDir = os.path.dirname(os.path.dirname(os.path.dirname(currentDir)))
class SimpleFarmSubmitter(BaseSubmitter):
    """Meshroom submitter that schedules node computations on a simpleFarm render farm."""

    # Farm requirement configuration, loaded once at import time.
    filepath = os.environ.get(
        "SIMPLEFARMCONFIG", os.path.join(currentDir, "simpleFarmConfig.json")
    )
    # 'with' ensures the config file handle is closed (the previous bare
    # open() leaked it).
    with open(filepath) as _configFile:
        config = json.load(_configFile)
    # Class-level defaults only; __init__ rebinds per-instance values so that
    # instances never mutate shared class state.
    reqPackages = []
    environment = {}
    ENGINE = ""
    DEFAULT_TAGS = {"prod": ""}

    def __init__(self, parent=None):
        """Read engine/share/prod settings and the rez package context from the environment."""
        super(SimpleFarmSubmitter, self).__init__(name="SimpleFarm", parent=parent)
        self.engine = os.environ.get("MESHROOM_SIMPLEFARM_ENGINE", "tractor")
        self.share = os.environ.get("MESHROOM_SIMPLEFARM_SHARE", "vfx")
        self.prod = os.environ.get("PROD", "mvg")
        # Fresh instance-level containers: the original code appended to the
        # class-level lists/dicts, leaking state across submitter instances.
        self.environment = {}
        if "REZ_REQUEST" in os.environ:
            self.reqPackages = []
            packages = os.environ.get("REZ_REQUEST", "").split()
            resolvedPackages = os.environ.get("REZ_RESOLVE", "").split()
            resolvedVersions = {}
            for r in resolvedPackages:
                # remove implicit packages
                if r.startswith("~"):
                    continue
                v = r.split("-")
                if len(v) == 2:
                    resolvedVersions[v[0]] = v[1]
            for p in packages:
                if p.startswith("~"):
                    continue
                name = p.split("-")[0]
                if name in resolvedVersions:
                    self.reqPackages.append("-".join([name, resolvedVersions[name]]))
                else:
                    # No single resolved version found (e.g. multi-dash version
                    # string); keep the raw request instead of raising KeyError.
                    self.reqPackages.append(p)
            logging.debug("REZ Packages: {}".format(str(self.reqPackages)))
        elif "REZ_MESHROOM_VERSION" in os.environ:
            self.reqPackages = [
                "meshroom-{}".format(os.environ.get("REZ_MESHROOM_VERSION", ""))
            ]
        else:
            self.reqPackages = None
        # Forward the rez package roots to the farm environment when present.
        for key in ("REZ_DEV_PACKAGES_ROOT", "REZ_PROD_PACKAGES_PATH"):
            if key in os.environ:
                self.environment[key] = os.environ[key]

    def createTask(self, meshroomFile, node):
        """Build one simpleFarm Task computing `node` from `meshroomFile`.

        Parallelized nodes are expanded into a farm-side iteration range
        (`--iteration @start` with start/end/step arguments).
        """
        tags = self.DEFAULT_TAGS.copy()  # copy to not modify default tags
        nbFrames = node.size
        arguments = {}
        parallelArgs = ""
        # Was a bare print(); use logging for consistency with the rest of the module.
        logging.debug("node: %s", node.name)
        if node.isParallelized:
            blockSize, fullSize, nbBlocks = node.nodeDesc.parallelization.getSizes(node)
            parallelArgs = " --iteration @start"
            arguments.update({"start": 0, "end": nbBlocks - 1, "step": 1})
        tags["nbFrames"] = nbFrames
        tags["prod"] = self.prod
        # Map the node's declared CPU/RAM/GPU needs onto farm service requirements.
        allRequirements = []
        for resource, level in (
            ("CPU", node.nodeDesc.cpu.name),
            ("RAM", node.nodeDesc.ram.name),
            ("GPU", node.nodeDesc.gpu.name),
        ):
            allRequirements.extend(self.config[resource].get(level, []))
        # With rez packages the executable is resolved from the package;
        # otherwise fall back to the binary shipped next to this plugin.
        exe = (
            "meshroom_compute"
            if self.reqPackages
            else os.path.join(binDir, "meshroom_compute")
        )
        task = simpleFarm.Task(
            name=node.name,
            command='{exe} --node {nodeName} "{meshroomFile}" {parallelArgs} --extern'.format(
                exe=exe,
                nodeName=node.name,
                meshroomFile=meshroomFile,
                parallelArgs=parallelArgs,
            ),
            tags=tags,
            rezPackages=self.reqPackages,
            requirements={"service": str(",".join(allRequirements))},
            **arguments,
        )
        return task

    def submit(self, nodes, edges, filepath, submitLabel="{projectName}"):
        """Submit a job graph computing `nodes` (with dependency `edges`) to the farm.

        Returns True when the submission succeeded.
        """
        projectName = os.path.splitext(os.path.basename(filepath))[0]
        name = submitLabel.format(projectName=projectName)
        comment = filepath
        # default=0 keeps an empty node list from raising ValueError.
        nbFrames = max((node.size for node in nodes), default=0)
        mainTags = {
            "prod": self.prod,
            "nbFrames": str(nbFrames),
            "comment": comment,
        }
        allRequirements = list(self.config.get("BASE", []))
        # Create Job Graph
        job = simpleFarm.Job(
            name,
            tags=mainTags,
            requirements={"service": str(",".join(allRequirements))},
            environment=self.environment,
        )
        nodeNameToTask = {}
        for node in nodes:
            task = self.createTask(filepath, node)
            job.addTask(task)
            nodeNameToTask[node.name] = task
        for u, v in edges:
            nodeNameToTask[u.name].dependsOn(nodeNameToTask[v.name])
        if self.engine == "tractor-dummy":
            # Dummy engine: run through the tractor code path but execute locally.
            job.submit(share=self.share, engine="tractor", execute=True)
            return True
        res = job.submit(share=self.share, engine=self.engine)
        return len(res) > 0
|
builders | matcher_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from app.object_detection.matchers import argmax_matcher, bipartite_matcher
from app.object_detection.protos import matcher_pb2
def build(matcher_config):
    """Builds a matcher object based on the matcher config.
    Args:
      matcher_config: A matcher.proto object containing the config for the desired
        Matcher.
    Returns:
      Matcher based on the config.
    Raises:
      ValueError: On empty matcher proto.
    """
    if not isinstance(matcher_config, matcher_pb2.Matcher):
        raise ValueError("matcher_config not of type matcher_pb2.Matcher.")
    matcher_type = matcher_config.WhichOneof("matcher_oneof")
    if matcher_type == "argmax_matcher":
        cfg = matcher_config.argmax_matcher
        if cfg.ignore_thresholds:
            matched_threshold = unmatched_threshold = None
        else:
            matched_threshold = cfg.matched_threshold
            unmatched_threshold = cfg.unmatched_threshold
        return argmax_matcher.ArgMaxMatcher(
            matched_threshold=matched_threshold,
            unmatched_threshold=unmatched_threshold,
            negatives_lower_than_unmatched=cfg.negatives_lower_than_unmatched,
            force_match_for_each_row=cfg.force_match_for_each_row,
        )
    if matcher_type == "bipartite_matcher":
        return bipartite_matcher.GreedyBipartiteMatcher()
    raise ValueError("Empty matcher.")
|
core | preprocessor_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.preprocessor."""
import numpy as np
import six
import tensorflow as tf
from app.object_detection.core import preprocessor
from app.object_detection.core import standard_fields as fields
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
class PreprocessorTest(tf.test.TestCase):
def createColorfulTestImage(self):
ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
imr = tf.concat([ch255, ch0, ch0], 3)
img = tf.concat([ch255, ch255, ch0], 3)
imb = tf.concat([ch255, ch0, ch255], 3)
imw = tf.concat([ch128, ch128, ch128], 3)
imu = tf.concat([imr, img], 2)
imd = tf.concat([imb, imw], 2)
im = tf.concat([imu, imd], 1)
return im
def createTestImages(self):
images_r = tf.constant(
[
[
[128, 128, 128, 128],
[0, 0, 128, 128],
[0, 128, 128, 128],
[192, 192, 128, 128],
]
],
dtype=tf.uint8,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[
[
[0, 0, 128, 128],
[0, 0, 128, 128],
[0, 128, 192, 192],
[192, 192, 128, 192],
]
],
dtype=tf.uint8,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[
[
[128, 128, 192, 0],
[0, 0, 128, 192],
[0, 128, 128, 0],
[192, 192, 192, 128],
]
],
dtype=tf.uint8,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def createEmptyTestBoxes(self):
boxes = tf.constant([[]], dtype=tf.float32)
return boxes
def createTestBoxes(self):
boxes = tf.constant(
[[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32
)
return boxes
def createTestLabelScores(self):
return tf.constant([1.0, 0.5], dtype=tf.float32)
def createTestLabelScoresWithMissingScore(self):
return tf.constant([0.5, np.nan], dtype=tf.float32)
def createTestMasks(self):
mask = np.array(
[
[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]],
]
)
return tf.constant(mask, dtype=tf.float32)
def createTestKeypoints(self):
keypoints = np.array(
[
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
]
)
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsInsideCrop(self):
keypoints = np.array(
[
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
]
)
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsOutsideCrop(self):
keypoints = np.array(
[
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
]
)
return tf.constant(keypoints, dtype=tf.float32)
def createKeypointFlipPermutation(self):
return np.array([0, 2, 1], dtype=np.int32)
def createTestLabels(self):
labels = tf.constant([1, 2], dtype=tf.int32)
return labels
def createTestBoxesOutOfImage(self):
boxes = tf.constant(
[[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32
)
return boxes
def expectedImagesAfterNormalization(self):
images_r = tf.constant(
[[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMaxImageAfterColorScale(self):
images_r = tf.constant(
[
[
[0.1, 0.1, 0.1, 0.1],
[-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.1, 0.1],
[0.6, 0.6, 0.1, 0.1],
]
],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[
[
[-0.9, -0.9, 0.1, 0.1],
[-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.6, 0.6],
[0.6, 0.6, 0.1, 0.6],
]
],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[
[
[0.1, 0.1, 0.6, -0.9],
[-0.9, -0.9, 0.1, 0.6],
[-0.9, 0.1, 0.1, -0.9],
[0.6, 0.6, 0.6, 0.1],
]
],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMinImageAfterColorScale(self):
images_r = tf.constant(
[
[
[-0.1, -0.1, -0.1, -0.1],
[-1, -1, -0.1, -0.1],
[-1, -0.1, -0.1, -0.1],
[0.4, 0.4, -0.1, -0.1],
]
],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[
[
[-1, -1, -0.1, -0.1],
[-1, -1, -0.1, -0.1],
[-1, -0.1, 0.4, 0.4],
[0.4, 0.4, -0.1, 0.4],
]
],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[
[
[-0.1, -0.1, 0.4, -1],
[-1, -1, -0.1, 0.4],
[-1, -0.1, -0.1, -1],
[0.4, 0.4, 0.4, -0.1],
]
],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterLeftRightFlip(self):
images_r = tf.constant(
[[[0, 0, 0, 0], [0, 0, -1, -1], [0, 0, 0, -1], [0, 0, 0.5, 0.5]]],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[[[0, 0, -1, -1], [0, 0, -1, -1], [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterUpDownFlip(self):
images_r = tf.constant(
[[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterRot90(self):
images_r = tf.constant(
[[[0, 0, 0, 0], [0, 0, 0, 0], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32,
)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant(
[[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]],
dtype=tf.float32,
)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant(
[[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32,
)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedBoxesAfterLeftRightFlip(self):
boxes = tf.constant(
[[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], dtype=tf.float32
)
return boxes
def expectedBoxesAfterUpDownFlip(self):
boxes = tf.constant(
[[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32
)
return boxes
def expectedBoxesAfterRot90(self):
boxes = tf.constant(
[[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32
)
return boxes
def expectedMasksAfterLeftRightFlip(self):
mask = np.array(
[
[[0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0]],
[[0.0, 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]],
]
)
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterUpDownFlip(self):
mask = np.array(
[
[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]],
]
)
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterRot90(self):
mask = np.array(
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [255.0, 255.0, 255.0]],
[[0.0, 0.0, 0.0], [255.0, 255.0, 255.0], [255.0, 255.0, 255.0]],
]
)
return tf.constant(mask, dtype=tf.float32)
def expectedLabelScoresAfterThresholding(self):
return tf.constant([1.0], dtype=tf.float32)
def expectedBoxesAfterThresholding(self):
return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)
def expectedLabelsAfterThresholding(self):
return tf.constant([1], dtype=tf.float32)
def expectedMasksAfterThresholding(self):
mask = np.array([[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedKeypointsAfterThresholding(self):
keypoints = np.array([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]])
return tf.constant(keypoints, dtype=tf.float32)
def expectedLabelScoresAfterThresholdingWithMissingScore(self):
return tf.constant([np.nan], dtype=tf.float32)
def expectedBoxesAfterThresholdingWithMissingScore(self):
return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)
def expectedLabelsAfterThresholdingWithMissingScore(self):
return tf.constant([2], dtype=tf.float32)
def testNormalizeImage(self):
preprocess_options = [
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 256,
"target_minval": -1,
"target_maxval": 1,
},
)
]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
images_expected = self.expectedImagesAfterNormalization()
with self.test_session() as sess:
(images_, images_expected_) = sess.run([images, images_expected])
images_shape_ = images_.shape
images_expected_shape_ = images_expected_.shape
expected_shape = [1, 4, 4, 3]
self.assertAllEqual(images_expected_shape_, images_shape_)
self.assertAllEqual(images_shape_, expected_shape)
self.assertAllClose(images_, images_expected_)
def testRetainBoxesAboveThreshold(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
(
retained_boxes,
retained_labels,
retained_label_scores,
) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, threshold=0.6
)
with self.test_session() as sess:
(
retained_boxes_,
retained_labels_,
retained_label_scores_,
expected_retained_boxes_,
expected_retained_labels_,
expected_retained_label_scores_,
) = sess.run(
[
retained_boxes,
retained_labels,
retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding(),
]
)
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(retained_label_scores_, expected_retained_label_scores_)
def testRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
_, _, _, retained_masks = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, masks, threshold=0.6
)
with self.test_session() as sess:
retained_masks_, expected_retained_masks_ = sess.run(
[retained_masks, self.expectedMasksAfterThresholding()]
)
self.assertAllClose(retained_masks_, expected_retained_masks_)
def testRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
(_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, keypoints=keypoints, threshold=0.6
)
with self.test_session() as sess:
(retained_keypoints_, expected_retained_keypoints_) = sess.run(
[retained_keypoints, self.expectedKeypointsAfterThresholding()]
)
self.assertAllClose(retained_keypoints_, expected_retained_keypoints_)
def testRetainBoxesAboveThresholdWithMissingScore(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScoresWithMissingScore()
(
retained_boxes,
retained_labels,
retained_label_scores,
) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, threshold=0.6
)
with self.test_session() as sess:
(
retained_boxes_,
retained_labels_,
retained_label_scores_,
expected_retained_boxes_,
expected_retained_labels_,
expected_retained_label_scores_,
) = sess.run(
[
retained_boxes,
retained_labels,
retained_label_scores,
self.expectedBoxesAfterThresholdingWithMissingScore(),
self.expectedLabelsAfterThresholdingWithMissingScore(),
self.expectedLabelScoresAfterThresholdingWithMissingScore(),
]
)
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(retained_label_scores_, expected_retained_label_scores_)
def testFlipBoxesLeftRight(self):
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_left_right(boxes)
expected_boxes = self.expectedBoxesAfterLeftRightFlip()
with self.test_session() as sess:
flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testFlipBoxesUpDown(self):
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_up_down(boxes)
expected_boxes = self.expectedBoxesAfterUpDownFlip()
with self.test_session() as sess:
flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testRot90Boxes(self):
boxes = self.createTestBoxes()
rotated_boxes = preprocessor._rot90_boxes(boxes)
expected_boxes = self.expectedBoxesAfterRot90()
with self.test_session() as sess:
rotated_boxes, expected_boxes = sess.run([rotated_boxes, expected_boxes])
self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten())
def testFlipMasksLeftRight(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_left_right(test_mask)
expected_mask = self.expectedMasksAfterLeftRightFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testFlipMasksUpDown(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_up_down(test_mask)
expected_mask = self.expectedMasksAfterUpDownFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testRot90Masks(self):
test_mask = self.createTestMasks()
rotated_mask = preprocessor._rot90_masks(test_mask)
expected_mask = self.expectedMasksAfterRot90()
with self.test_session() as sess:
rotated_mask, expected_mask = sess.run([rotated_mask, expected_mask])
self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten())
def testRandomHorizontalFlip(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected1 = self.expectedBoxesAfterLeftRightFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(
images_diff_,
images_diff_expected_,
boxes_diff_,
boxes_diff_expected_,
) = sess.run(
[images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]
)
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomHorizontalFlipWithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run(
[images_diff, images_diff_expected, boxes, boxes_expected]
)
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRunRandomHorizontalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints,
}
preprocess_options = [
(
preprocessor.random_horizontal_flip,
{"keypoint_flip_permutation": keypoint_flip_permutation},
)
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True
)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map
)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
def testRandomVerticalFlip(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected1 = self.expectedBoxesAfterUpDownFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(
images_diff_,
images_diff_expected_,
boxes_diff_,
boxes_diff_expected_,
) = sess.run(
[images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]
)
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlipWithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run(
[images_diff, images_diff_expected, boxes, boxes_expected]
)
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRunRandomVerticalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints,
}
preprocess_options = [
(
preprocessor.random_vertical_flip,
{"keypoint_flip_permutation": keypoint_flip_permutation},
)
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True
)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map
)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
def testRandomRotation90(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected1 = self.expectedBoxesAfterRot90()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(
images_diff_,
images_diff_expected_,
boxes_diff_,
boxes_diff_expected_,
) = sess.run(
[images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]
)
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomRotation90WithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run(
[images_diff, images_diff_expected, boxes, boxes_expected]
)
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRunRandomRotation90WithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True
)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map
)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
def testRandomPixelValueScale(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_min = tf.to_float(images) * 0.9 / 255.0
images_max = tf.to_float(images) * 1.1 / 255.0
images = tensor_dict[fields.InputDataFields.image]
values_greater = tf.greater_equal(images, images_min)
values_less = tf.less_equal(images, images_max)
values_true = tf.fill([1, 4, 4, 3], True)
with self.test_session() as sess:
(values_greater_, values_less_, values_true_) = sess.run(
[values_greater, values_less, values_true]
)
self.assertAllClose(values_greater_, values_true_)
self.assertAllClose(values_less_, values_true_)
def testRandomImageScale(self):
preprocess_options = [(preprocessor.random_image_scale, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_scaled = tensor_dict[fields.InputDataFields.image]
images_original_shape = tf.shape(images_original)
images_scaled_shape = tf.shape(images_scaled)
with self.test_session() as sess:
(images_original_shape_, images_scaled_shape_) = sess.run(
[images_original_shape, images_scaled_shape]
)
self.assertTrue(images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
self.assertTrue(images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
self.assertTrue(images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
self.assertTrue(images_original_shape_[2] * 2.0 >= images_scaled_shape_[2])
def testRandomRGBtoGray(self):
preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_gray = tensor_dict[fields.InputDataFields.image]
images_gray_r, images_gray_g, images_gray_b = tf.split(
value=images_gray, num_or_size_splits=3, axis=3
)
images_r, images_g, images_b = tf.split(
value=images_original, num_or_size_splits=3, axis=3
)
images_r_diff1 = tf.squared_difference(
tf.to_float(images_r), tf.to_float(images_gray_r)
)
images_r_diff2 = tf.squared_difference(
tf.to_float(images_gray_r), tf.to_float(images_gray_g)
)
images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
images_g_diff1 = tf.squared_difference(
tf.to_float(images_g), tf.to_float(images_gray_g)
)
images_g_diff2 = tf.squared_difference(
tf.to_float(images_gray_g), tf.to_float(images_gray_b)
)
images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
images_b_diff1 = tf.squared_difference(
tf.to_float(images_b), tf.to_float(images_gray_b)
)
images_b_diff2 = tf.squared_difference(
tf.to_float(images_gray_b), tf.to_float(images_gray_r)
)
images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
with self.test_session() as sess:
(images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
[images_r_diff, images_g_diff, images_b_diff, image_zero1]
)
self.assertAllClose(images_r_diff_, image_zero1_)
self.assertAllClose(images_g_diff_, image_zero1_)
self.assertAllClose(images_b_diff_, image_zero1_)
def testRandomAdjustBrightness(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_bright = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_bright_shape = tf.shape(images_bright)
with self.test_session() as sess:
(image_original_shape_, image_bright_shape_) = sess.run(
[image_original_shape, image_bright_shape]
)
self.assertAllEqual(image_original_shape_, image_bright_shape_)
def testRandomAdjustContrast(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_contrast = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_contrast_shape = tf.shape(images_contrast)
with self.test_session() as sess:
(image_original_shape_, image_contrast_shape_) = sess.run(
[image_original_shape, image_contrast_shape]
)
self.assertAllEqual(image_original_shape_, image_contrast_shape_)
def testRandomAdjustHue(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_adjust_hue, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_hue = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_hue_shape = tf.shape(images_hue)
with self.test_session() as sess:
(image_original_shape_, image_hue_shape_) = sess.run(
[image_original_shape, image_hue_shape]
)
self.assertAllEqual(image_original_shape_, image_hue_shape_)
def testRandomDistortColor(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_distort_color, {}))
images_original = self.createTestImages()
images_original_shape = tf.shape(images_original)
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_distorted_color = tensor_dict[fields.InputDataFields.image]
images_distorted_color_shape = tf.shape(images_distorted_color)
with self.test_session() as sess:
(images_original_shape_, images_distorted_color_shape_) = sess.run(
[images_original_shape, images_distorted_color_shape]
)
self.assertAllEqual(images_original_shape_, images_distorted_color_shape_)
def testRandomJitterBoxes(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes, {}))
boxes = self.createTestBoxes()
boxes_shape = tf.shape(boxes)
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
distorted_boxes_shape = tf.shape(distorted_boxes)
with self.test_session() as sess:
(boxes_shape_, distorted_boxes_shape_) = sess.run(
[boxes_shape, distorted_boxes_shape]
)
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
def testRandomCropImage(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options
)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes
]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(3, distorted_images.get_shape()[3])
with self.test_session() as sess:
(
boxes_rank_,
distorted_boxes_rank_,
images_rank_,
distorted_images_rank_,
) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank]
)
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageGrayscale(self):
preprocessing_options = [
(preprocessor.rgb_to_gray, {}),
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
),
(preprocessor.random_crop_image, {}),
]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options
)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes
]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(1, distorted_images.get_shape()[3])
with self.test_session() as sess:
session_results = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank]
)
(
boxes_rank_,
distorted_boxes_rank_,
images_rank_,
distorted_images_rank_,
) = session_results
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithBoxOutOfImage(self):
preprocessing_options = []
preprocessing_options.append(
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
)
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxesOutOfImage()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options
)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes
]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
with self.test_session() as sess:
(
boxes_rank_,
distorted_boxes_rank_,
images_rank_,
distorted_images_rank_,
) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank]
)
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithRandomCoefOne(self):
preprocessing_options = [
(
preprocessor.normalize_image,
{
"original_minval": 0,
"original_maxval": 255,
"target_minval": 0,
"target_maxval": 1,
},
)
]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_image, {"random_coef": 1.0})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options
)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes
]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes
]
distorted_label_scores = distorted_tensor_dict[
fields.InputDataFields.groundtruth_label_scores
]
boxes_shape = tf.shape(boxes)
distorted_boxes_shape = tf.shape(distorted_boxes)
images_shape = tf.shape(images)
distorted_images_shape = tf.shape(distorted_images)
with self.test_session() as sess:
(
boxes_shape_,
distorted_boxes_shape_,
images_shape_,
distorted_images_shape_,
images_,
distorted_images_,
boxes_,
distorted_boxes_,
labels_,
distorted_labels_,
label_scores_,
distorted_label_scores_,
) = sess.run(
[
boxes_shape,
distorted_boxes_shape,
images_shape,
distorted_images_shape,
images,
distorted_images,
boxes,
distorted_boxes,
labels,
distorted_labels,
label_scores,
distorted_label_scores,
]
)
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
self.assertAllEqual(images_shape_, distorted_images_shape_)
self.assertAllClose(images_, distorted_images_)
self.assertAllClose(boxes_, distorted_boxes_)
self.assertAllEqual(labels_, distorted_labels_)
self.assertAllEqual(label_scores_, distorted_label_scores_)
    def testRandomCropWithMockSampleDistortedBoundingBox(self):
        """Pins random_crop_image's box/label output for a fixed crop window.

        tf.image.sample_distorted_bounding_box is mocked to return a fixed
        crop (offset [6, 143], size 190x237) so the re-normalized boxes and
        the surviving labels can be asserted against exact values.
        """
        preprocessing_options = [
            (
                preprocessor.normalize_image,
                {
                    "original_minval": 0,
                    "original_maxval": 255,
                    "target_minval": 0,
                    "target_maxval": 1,
                },
            )
        ]
        images = self.createColorfulTestImage()
        boxes = tf.constant(
            [[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], [0.3, 0.1, 0.4, 0.7]],
            dtype=tf.float32,
        )
        labels = tf.constant([1, 7, 11], dtype=tf.int32)
        tensor_dict = {
            fields.InputDataFields.image: images,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
        }
        tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
        images = tensor_dict[fields.InputDataFields.image]
        preprocessing_options = [(preprocessor.random_crop_image, {})]
        # The crop ops must be built while the mock is active so the fixed
        # window (not a random one) is baked into the graph.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options
            )
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            # Only labels 7 and 11 survive the crop; their boxes are
            # re-normalized to the cropped window's coordinates.
            expected_boxes = tf.constant(
                [
                    [0.178947, 0.07173, 0.75789469, 0.66244733],
                    [0.28421, 0.0, 0.38947365, 0.57805908],
                ],
                dtype=tf.float32,
            )
            expected_labels = tf.constant([7, 11], dtype=tf.int32)
            with self.test_session() as sess:
                (
                    distorted_boxes_,
                    distorted_labels_,
                    expected_boxes_,
                    expected_labels_,
                ) = sess.run(
                    [distorted_boxes, distorted_labels, expected_boxes, expected_labels]
                )
                self.assertAllClose(distorted_boxes_, expected_boxes_)
                self.assertAllEqual(distorted_labels_, expected_labels_)
    def testStrictRandomCropImageWithLabelScores(self):
        """Checks _strict_random_crop_image keeps label scores box-aligned.

        The sampled crop window is mocked to a fixed 190x237 region, making
        the cropped image shape and the surviving (box, score) pairs exact.
        """
        image = self.createColorfulTestImage()[0]
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        label_scores = self.createTestLabelScores()
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            (
                new_image,
                new_boxes,
                new_labels,
                new_label_scores,
            ) = preprocessor._strict_random_crop_image(
                image, boxes, labels, label_scores
            )
            with self.test_session() as sess:
                new_image, new_boxes, new_labels, new_label_scores = sess.run(
                    [new_image, new_boxes, new_labels, new_label_scores]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                self.assertAllEqual(new_image.shape, [190, 237, 3])
                self.assertAllEqual(new_label_scores, [1.0, 0.5])
                self.assertAllClose(new_boxes.flatten(), expected_boxes.flatten())
    def testStrictRandomCropImageWithMasks(self):
        """Checks _strict_random_crop_image crops instance masks with the image.

        The crop window is mocked to a fixed 190x237 region, so the cropped
        mask shape must match the cropped image's spatial dimensions.
        """
        image = self.createColorfulTestImage()[0]
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            (
                new_image,
                new_boxes,
                new_labels,
                new_masks,
            ) = preprocessor._strict_random_crop_image(
                image, boxes, labels, masks=masks
            )
            with self.test_session() as sess:
                new_image, new_boxes, new_labels, new_masks = sess.run(
                    [new_image, new_boxes, new_labels, new_masks]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                self.assertAllEqual(new_image.shape, [190, 237, 3])
                self.assertAllEqual(new_masks.shape, [2, 190, 237])
                self.assertAllClose(new_boxes.flatten(), expected_boxes.flatten())
    def testStrictRandomCropImageWithKeypoints(self):
        """Checks _strict_random_crop_image transforms keypoints with the crop.

        The crop window is mocked to a fixed region; keypoints that fall
        outside the crop come back as NaN, the rest are re-normalized.
        """
        image = self.createColorfulTestImage()[0]
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        keypoints = self.createTestKeypoints()
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            (
                new_image,
                new_boxes,
                new_labels,
                new_keypoints,
            ) = preprocessor._strict_random_crop_image(
                image, boxes, labels, keypoints=keypoints
            )
            with self.test_session() as sess:
                new_image, new_boxes, new_labels, new_keypoints = sess.run(
                    [new_image, new_boxes, new_labels, new_keypoints]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                # First instance's keypoints fall outside the crop -> NaN.
                expected_keypoints = np.array(
                    [
                        [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
                        [
                            [0.38947368, 0.07173],
                            [0.49473682, 0.24050637],
                            [0.60000002, 0.40928277],
                        ],
                    ],
                    dtype=np.float32,
                )
                self.assertAllEqual(new_image.shape, [190, 237, 3])
                self.assertAllClose(new_boxes.flatten(), expected_boxes.flatten())
                self.assertAllClose(
                    new_keypoints.flatten(), expected_keypoints.flatten()
                )
    def testRunRandomCropImageWithMasks(self):
        """End-to-end random_crop_image via preprocess() with instance masks.

        The crop window is mocked to a fixed 190x237 region so image shape,
        mask shape, surviving labels and clipped boxes are all deterministic.
        """
        image = self.createColorfulTestImage()
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
        tensor_dict = {
            fields.InputDataFields.image: image,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
            fields.InputDataFields.groundtruth_instance_masks: masks,
        }
        preprocessor_arg_map = preprocessor.get_default_func_arg_map(
            include_instance_masks=True
        )
        preprocessing_options = [(preprocessor.random_crop_image, {})]
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
            )
            distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            distorted_masks = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_instance_masks
            ]
            with self.test_session() as sess:
                (
                    distorted_image_,
                    distorted_boxes_,
                    distorted_labels_,
                    distorted_masks_,
                ) = sess.run(
                    [
                        distorted_image,
                        distorted_boxes,
                        distorted_labels,
                        distorted_masks,
                    ]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
                self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
                self.assertAllEqual(distorted_labels_, [1, 2])
                self.assertAllClose(
                    distorted_boxes_.flatten(), expected_boxes.flatten()
                )
    def testRunRandomCropImageWithKeypointsInsideCrop(self):
        """End-to-end random_crop_image with keypoints fully inside the crop.

        With the crop window mocked to a fixed region, every keypoint stays
        inside the crop and must come back re-normalized (no NaNs).
        """
        image = self.createColorfulTestImage()
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        keypoints = self.createTestKeypointsInsideCrop()
        tensor_dict = {
            fields.InputDataFields.image: image,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
            fields.InputDataFields.groundtruth_keypoints: keypoints,
        }
        preprocessor_arg_map = preprocessor.get_default_func_arg_map(
            include_keypoints=True
        )
        preprocessing_options = [(preprocessor.random_crop_image, {})]
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
            )
            distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            distorted_keypoints = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_keypoints
            ]
            with self.test_session() as sess:
                (
                    distorted_image_,
                    distorted_boxes_,
                    distorted_labels_,
                    distorted_keypoints_,
                ) = sess.run(
                    [
                        distorted_image,
                        distorted_boxes,
                        distorted_labels,
                        distorted_keypoints,
                    ]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                expected_keypoints = np.array(
                    [
                        [
                            [0.38947368, 0.07173],
                            [0.49473682, 0.24050637],
                            [0.60000002, 0.40928277],
                        ],
                        [
                            [0.38947368, 0.07173],
                            [0.49473682, 0.24050637],
                            [0.60000002, 0.40928277],
                        ],
                    ]
                )
                self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
                self.assertAllEqual(distorted_labels_, [1, 2])
                self.assertAllClose(
                    distorted_boxes_.flatten(), expected_boxes.flatten()
                )
                self.assertAllClose(
                    distorted_keypoints_.flatten(), expected_keypoints.flatten()
                )
    def testRunRandomCropImageWithKeypointsOutsideCrop(self):
        """End-to-end random_crop_image with keypoints outside the crop.

        With the crop window mocked to a fixed region, all keypoints fall
        outside the crop and must come back as NaN.
        """
        image = self.createColorfulTestImage()
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        keypoints = self.createTestKeypointsOutsideCrop()
        tensor_dict = {
            fields.InputDataFields.image: image,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
            fields.InputDataFields.groundtruth_keypoints: keypoints,
        }
        preprocessor_arg_map = preprocessor.get_default_func_arg_map(
            include_keypoints=True
        )
        preprocessing_options = [(preprocessor.random_crop_image, {})]
        # Graph construction happens inside the mock so the fixed window is
        # used instead of a random sample.
        with mock.patch.object(
            tf.image, "sample_distorted_bounding_box"
        ) as mock_sample_distorted_bounding_box:
            mock_sample_distorted_bounding_box.return_value = (
                tf.constant([6, 143, 0], dtype=tf.int32),
                tf.constant([190, 237, -1], dtype=tf.int32),
                tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32),
            )
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
            )
            distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            distorted_keypoints = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_keypoints
            ]
            with self.test_session() as sess:
                (
                    distorted_image_,
                    distorted_boxes_,
                    distorted_labels_,
                    distorted_keypoints_,
                ) = sess.run(
                    [
                        distorted_image,
                        distorted_boxes,
                        distorted_labels,
                        distorted_keypoints,
                    ]
                )
                expected_boxes = np.array(
                    [
                        [0.0, 0.0, 0.75789469, 1.0],
                        [0.23157893, 0.24050637, 0.75789469, 1.0],
                    ],
                    dtype=np.float32,
                )
                expected_keypoints = np.array(
                    [
                        [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
                        [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
                    ]
                )
                self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
                self.assertAllEqual(distorted_labels_, [1, 2])
                self.assertAllClose(
                    distorted_boxes_.flatten(), expected_boxes.flatten()
                )
                self.assertAllClose(
                    distorted_keypoints_.flatten(), expected_keypoints.flatten()
                )
def testRunRetainBoxesAboveThreshold(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
}
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {"threshold": 0.6})
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True
)
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
)
retained_boxes = retained_tensor_dict[fields.InputDataFields.groundtruth_boxes]
retained_labels = retained_tensor_dict[
fields.InputDataFields.groundtruth_classes
]
retained_label_scores = retained_tensor_dict[
fields.InputDataFields.groundtruth_label_scores
]
with self.test_session() as sess:
(
retained_boxes_,
retained_labels_,
retained_label_scores_,
expected_retained_boxes_,
expected_retained_labels_,
expected_retained_label_scores_,
) = sess.run(
[
retained_boxes,
retained_labels,
retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding(),
]
)
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(retained_label_scores_, expected_retained_label_scores_)
def testRunRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_instance_masks: masks,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True, include_instance_masks=True
)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {"threshold": 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
)
retained_masks = retained_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks
]
with self.test_session() as sess:
(retained_masks_, expected_masks_) = sess.run(
[retained_masks, self.expectedMasksAfterThresholding()]
)
self.assertAllClose(retained_masks_, expected_masks_)
def testRunRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_keypoints: keypoints,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True, include_keypoints=True
)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {"threshold": 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
)
retained_keypoints = retained_tensor_dict[
fields.InputDataFields.groundtruth_keypoints
]
with self.test_session() as sess:
(retained_keypoints_, expected_keypoints_) = sess.run(
[retained_keypoints, self.expectedKeypointsAfterThresholding()]
)
self.assertAllClose(retained_keypoints_, expected_keypoints_)
    def testRunRandomCropToAspectRatioWithMasks(self):
        """End-to-end random_crop_to_aspect_ratio with instance masks.

        preprocessor._random_integer is mocked to 0 so the crop offset is
        deterministic and the surviving box/label/mask can be asserted.
        """
        image = self.createColorfulTestImage()
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
        tensor_dict = {
            fields.InputDataFields.image: image,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
            fields.InputDataFields.groundtruth_instance_masks: masks,
        }
        preprocessor_arg_map = preprocessor.get_default_func_arg_map(
            include_instance_masks=True
        )
        preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
        # Graph construction happens inside the mock so the fixed offset is
        # baked into the crop ops.
        with mock.patch.object(preprocessor, "_random_integer") as mock_random_integer:
            mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
            )
            distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            distorted_masks = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_instance_masks
            ]
            with self.test_session() as sess:
                (
                    distorted_image_,
                    distorted_boxes_,
                    distorted_labels_,
                    distorted_masks_,
                ) = sess.run(
                    [
                        distorted_image,
                        distorted_boxes,
                        distorted_labels,
                        distorted_masks,
                    ]
                )
                # Only one box/label/mask survives the square crop.
                expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
                self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
                self.assertAllEqual(distorted_labels_, [1])
                self.assertAllClose(
                    distorted_boxes_.flatten(), expected_boxes.flatten()
                )
                self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])
    def testRunRandomCropToAspectRatioWithKeypoints(self):
        """End-to-end random_crop_to_aspect_ratio with keypoints.

        preprocessor._random_integer is mocked to 0 so the crop offset is
        deterministic and the surviving box/label/keypoints can be asserted.
        """
        image = self.createColorfulTestImage()
        boxes = self.createTestBoxes()
        labels = self.createTestLabels()
        keypoints = self.createTestKeypoints()
        tensor_dict = {
            fields.InputDataFields.image: image,
            fields.InputDataFields.groundtruth_boxes: boxes,
            fields.InputDataFields.groundtruth_classes: labels,
            fields.InputDataFields.groundtruth_keypoints: keypoints,
        }
        preprocessor_arg_map = preprocessor.get_default_func_arg_map(
            include_keypoints=True
        )
        preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
        # Graph construction happens inside the mock so the fixed offset is
        # baked into the crop ops.
        with mock.patch.object(preprocessor, "_random_integer") as mock_random_integer:
            mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
            distorted_tensor_dict = preprocessor.preprocess(
                tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
            )
            distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
            distorted_boxes = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_boxes
            ]
            distorted_labels = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_classes
            ]
            distorted_keypoints = distorted_tensor_dict[
                fields.InputDataFields.groundtruth_keypoints
            ]
            with self.test_session() as sess:
                (
                    distorted_image_,
                    distorted_boxes_,
                    distorted_labels_,
                    distorted_keypoints_,
                ) = sess.run(
                    [
                        distorted_image,
                        distorted_boxes,
                        distorted_labels,
                        distorted_keypoints,
                    ]
                )
                # Only the first box/label survives the square crop; its
                # keypoints are rescaled to the new width.
                expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
                expected_keypoints = np.array(
                    [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32
                )
                self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
                self.assertAllEqual(distorted_labels_, [1])
                self.assertAllClose(
                    distorted_boxes_.flatten(), expected_boxes.flatten()
                )
                self.assertAllClose(
                    distorted_keypoints_.flatten(), expected_keypoints.flatten()
                )
def testRunRandomPadToAspectRatioWithMasks(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True
)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map
)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes
]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes
]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks
]
with self.test_session() as sess:
(
distorted_image_,
distorted_boxes_,
distorted_labels_,
distorted_masks_,
) = sess.run(
[distorted_image, distorted_boxes, distorted_labels, distorted_masks]
)
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32
)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])
def testRunRandomPadToAspectRatioWithKeypoints(self):
    """random_pad_to_aspect_ratio rescales boxes and keypoints consistently."""
    tensor_dict = {
        fields.InputDataFields.image: self.createColorfulTestImage(),
        fields.InputDataFields.groundtruth_boxes: self.createTestBoxes(),
        fields.InputDataFields.groundtruth_classes: self.createTestLabels(),
        fields.InputDataFields.groundtruth_keypoints: self.createTestKeypoints(),
    }
    arg_map = preprocessor.get_default_func_arg_map(include_keypoints=True)
    out = preprocessor.preprocess(
        tensor_dict,
        [(preprocessor.random_pad_to_aspect_ratio, {})],
        func_arg_map=arg_map,
    )
    fetches = [
        out[fields.InputDataFields.image],
        out[fields.InputDataFields.groundtruth_boxes],
        out[fields.InputDataFields.groundtruth_classes],
        out[fields.InputDataFields.groundtruth_keypoints],
    ]
    with self.test_session() as sess:
        image_, boxes_, labels_, keypoints_ = sess.run(fetches)
    expected_boxes = np.array(
        [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32
    )
    expected_keypoints = np.array(
        [
            [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
            [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
        ],
        dtype=np.float32,
    )
    self.assertAllEqual(image_.shape, [1, 400, 400, 3])
    self.assertAllEqual(labels_, [1, 2])
    self.assertAllClose(boxes_.flatten(), expected_boxes.flatten())
    self.assertAllClose(keypoints_.flatten(), expected_keypoints.flatten())
def testRandomPadImage(self):
    """random_pad_image grows the canvas (at most 2x) and shrinks relative boxes.

    Bug fix: the shape assertions previously passed ``(...).all`` — a bound
    method object, which is always truthy — to ``assertTrue``, so they could
    never fail.  The compared values are scalar shape entries, so plain
    scalar comparisons are asserted instead.
    """
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        )
    ]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_pad_image, {})]
    padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    padded_images = padded_tensor_dict[fields.InputDataFields.image]
    padded_boxes = padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    padded_boxes_shape = tf.shape(padded_boxes)
    images_shape = tf.shape(images)
    padded_images_shape = tf.shape(padded_images)
    with self.test_session() as sess:
        (
            boxes_shape_,
            padded_boxes_shape_,
            images_shape_,
            padded_images_shape_,
            boxes_,
            padded_boxes_,
        ) = sess.run(
            [
                boxes_shape,
                padded_boxes_shape,
                images_shape,
                padded_images_shape,
                boxes,
                padded_boxes,
            ]
        )
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    # Padded image is between 1x and 2x the original in each spatial dim.
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
    # Normalized box extents can only shrink when the canvas grows.
    self.assertTrue(
        np.all(
            (boxes_[:, 2] - boxes_[:, 0])
            >= (padded_boxes_[:, 2] - padded_boxes_[:, 0])
        )
    )
    self.assertTrue(
        np.all(
            (boxes_[:, 3] - boxes_[:, 1])
            >= (padded_boxes_[:, 3] - padded_boxes_[:, 1])
        )
    )
def testRandomCropPadImageWithRandomCoefOne(self):
    """random_crop_pad_image with random_coef=1.0 only pads (up to 2x canvas).

    Bug fix: the shape assertions previously passed ``(...).all`` — a bound
    method object, which is always truthy — to ``assertTrue``, so they could
    never fail.  The compared values are scalar shape entries, so plain
    scalar comparisons are asserted instead.
    """
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        )
    ]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    # random_coef=1.0 presumably keeps the original (uncropped) image —
    # confirm against the preprocessor.random_crop_pad_image docs.
    preprocessing_options = [
        (preprocessor.random_crop_pad_image, {"random_coef": 1.0})
    ]
    padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    padded_images = padded_tensor_dict[fields.InputDataFields.image]
    padded_boxes = padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    padded_boxes_shape = tf.shape(padded_boxes)
    images_shape = tf.shape(images)
    padded_images_shape = tf.shape(padded_images)
    with self.test_session() as sess:
        (
            boxes_shape_,
            padded_boxes_shape_,
            images_shape_,
            padded_images_shape_,
            boxes_,
            padded_boxes_,
        ) = sess.run(
            [
                boxes_shape,
                padded_boxes_shape,
                images_shape,
                padded_images_shape,
                boxes,
                padded_boxes,
            ]
        )
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    # Padded image is between 1x and 2x the original in each spatial dim.
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
    # Normalized box extents can only shrink when the canvas grows.
    self.assertTrue(
        np.all(
            (boxes_[:, 2] - boxes_[:, 0])
            >= (padded_boxes_[:, 2] - padded_boxes_[:, 0])
        )
    )
    self.assertTrue(
        np.all(
            (boxes_[:, 3] - boxes_[:, 1])
            >= (padded_boxes_[:, 3] - padded_boxes_[:, 1])
        )
    )
def testRandomCropToAspectRatio(self):
    """Cropping to aspect_ratio=2.0 halves the test image's height, width unchanged."""
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, [])
    images = tensor_dict[fields.InputDataFields.image]
    crop_op = [(preprocessor.random_crop_to_aspect_ratio, {"aspect_ratio": 2.0})]
    cropped = preprocessor.preprocess(tensor_dict, crop_op)
    cropped_images = cropped[fields.InputDataFields.image]
    cropped_boxes = cropped[fields.InputDataFields.groundtruth_boxes]
    shapes = [tf.shape(t) for t in (boxes, cropped_boxes, images, cropped_images)]
    with self.test_session() as sess:
        boxes_shape_, cropped_boxes_shape_, images_shape_, cropped_images_shape_ = (
            sess.run(shapes)
        )
    self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
    self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
    self.assertEqual(images_shape_[2], cropped_images_shape_[2])
def testRandomPadToAspectRatio(self):
    """Padding to aspect_ratio=2.0 keeps height and doubles the test image's width."""
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, [])
    images = tensor_dict[fields.InputDataFields.image]
    pad_op = [(preprocessor.random_pad_to_aspect_ratio, {"aspect_ratio": 2.0})]
    padded = preprocessor.preprocess(tensor_dict, pad_op)
    padded_images = padded[fields.InputDataFields.image]
    padded_boxes = padded[fields.InputDataFields.groundtruth_boxes]
    shapes = [tf.shape(t) for t in (boxes, padded_boxes, images, padded_images)]
    with self.test_session() as sess:
        boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_ = (
            sess.run(shapes)
        )
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    self.assertEqual(images_shape_[1], padded_images_shape_[1])
    self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
def testRandomBlackPatches(self):
    """random_black_patches must not change the image shape."""
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        ),
        (preprocessor.random_black_patches, {"size_to_image_ratio": 0.5}),
    ]
    images = self.createTestImages()
    blacked = preprocessor.preprocess(
        {fields.InputDataFields.image: images}, preprocessing_options
    )[fields.InputDataFields.image]
    with self.test_session() as sess:
        images_shape_, blacked_shape_ = sess.run(
            [tf.shape(images), tf.shape(blacked)]
        )
    self.assertAllEqual(images_shape_, blacked_shape_)
def testRandomResizeMethod(self):
    """random_resize_method resizes to exactly the requested (75, 150) target."""
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        ),
        (preprocessor.random_resize_method, {"target_size": (75, 150)}),
    ]
    images = self.createTestImages()
    resized = preprocessor.preprocess(
        {fields.InputDataFields.image: images}, preprocessing_options
    )[fields.InputDataFields.image]
    expected_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
    with self.test_session() as sess:
        expected_, actual_ = sess.run([expected_shape, tf.shape(resized)])
    self.assertAllEqual(expected_, actual_)
def testResizeImageWithMasks(self):
    """resize_image resizes both image and instance masks to (50, 100)."""
    height, width = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3]],      # input image shapes
        [[50, 100, 3], [50, 100, 3]],    # expected image shapes
        [[15, 60, 40], [10, 15, 30]],    # input mask shapes
        [[15, 50, 100], [10, 50, 100]],  # expected mask shapes
    )
    for image_shape, want_image, masks_shape, want_masks in cases:
        out_image, out_masks = preprocessor.resize_image(
            tf.random_uniform(image_shape),
            tf.random_uniform(masks_shape),
            new_height=height,
            new_width=width,
        )
        with self.test_session() as sess:
            got_image, got_masks = sess.run(
                [tf.shape(out_image), tf.shape(out_masks)]
            )
        self.assertAllEqual(got_image, want_image)
        self.assertAllEqual(got_masks, want_masks)
def testResizeImageWithNoInstanceMask(self):
    """resize_image also handles mask tensors with zero instances."""
    height, width = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3]],     # input image shapes
        [[50, 100, 3], [50, 100, 3]],   # expected image shapes
        [[0, 60, 40], [0, 15, 30]],     # input (empty) mask shapes
        [[0, 50, 100], [0, 50, 100]],   # expected mask shapes
    )
    for image_shape, want_image, masks_shape, want_masks in cases:
        out_image, out_masks = preprocessor.resize_image(
            tf.random_uniform(image_shape),
            tf.random_uniform(masks_shape),
            new_height=height,
            new_width=width,
        )
        with self.test_session() as sess:
            got_image, got_masks = sess.run(
                [tf.shape(out_image), tf.shape(out_masks)]
            )
        self.assertAllEqual(got_image, want_image)
        self.assertAllEqual(got_masks, want_masks)
def testResizeToRangePreservesStaticSpatialShape(self):
    """resize_to_range yields fully static output shapes for static inputs."""
    min_dim, max_dim = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3], [15, 50, 3]],
        [[75, 50, 3], [50, 100, 3], [30, 100, 3]],
    )
    for in_shape, want_shape in cases:
        out_image = preprocessor.resize_to_range(
            tf.random_uniform(in_shape),
            min_dimension=min_dim,
            max_dimension=max_dim,
        )
        self.assertAllEqual(out_image.get_shape().as_list(), want_shape)
def testResizeToRangeWithDynamicSpatialShape(self):
    """resize_to_range handles spatial dims known only at run time."""
    min_dim, max_dim = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3], [15, 50, 3]],
        [[75, 50, 3], [50, 100, 3], [30, 100, 3]],
    )
    for in_shape, want_shape in cases:
        in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
        out_shape = tf.shape(
            preprocessor.resize_to_range(
                in_image, min_dimension=min_dim, max_dimension=max_dim
            )
        )
        with self.test_session() as sess:
            got_shape = sess.run(
                out_shape, feed_dict={in_image: np.random.randn(*in_shape)}
            )
        self.assertAllEqual(got_shape, want_shape)
def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
    """resize_to_range keeps static shapes for both image and masks."""
    min_dim, max_dim = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3]],     # input image shapes
        [[75, 50, 3], [50, 100, 3]],    # expected image shapes
        [[15, 60, 40], [10, 15, 30]],   # input mask shapes
        [[15, 75, 50], [10, 50, 100]],  # expected mask shapes
    )
    for image_shape, want_image, masks_shape, want_masks in cases:
        out_image, out_masks = preprocessor.resize_to_range(
            tf.random_uniform(image_shape),
            tf.random_uniform(masks_shape),
            min_dimension=min_dim,
            max_dimension=max_dim,
        )
        self.assertAllEqual(out_masks.get_shape().as_list(), want_masks)
        self.assertAllEqual(out_image.get_shape().as_list(), want_image)
def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
    """Tests image resizing with fed masks, checking output sizes.

    Bug fix: ``in_masks`` was previously overwritten with a
    ``tf.random_uniform`` tensor immediately after the placeholder was
    created, leaving the placeholder dead and feeding a non-placeholder
    tensor below.  The overwrite is removed so the mask placeholder is
    actually exercised.
    """
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
    min_dim = 50
    max_dim = 100
    expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
    expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
    for (
        in_image_shape,
        expected_image_shape,
        in_masks_shape,
        expected_mask_shape,
    ) in zip(
        in_image_shape_list,
        expected_image_shape_list,
        in_masks_shape_list,
        expected_masks_shape_list,
    ):
        in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
        in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
        out_image, out_masks = preprocessor.resize_to_range(
            in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim
        )
        out_image_shape = tf.shape(out_image)
        out_masks_shape = tf.shape(out_masks)
        with self.test_session() as sess:
            out_image_shape, out_masks_shape = sess.run(
                [out_image_shape, out_masks_shape],
                feed_dict={
                    in_image: np.random.randn(*in_image_shape),
                    in_masks: np.random.randn(*in_masks_shape),
                },
            )
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
    """resize_to_range also resizes empty (0-instance) mask tensors."""
    min_dim, max_dim = 50, 100
    cases = zip(
        [[60, 40, 3], [15, 30, 3]],    # input image shapes
        [[75, 50, 3], [50, 100, 3]],   # expected image shapes
        [[0, 60, 40], [0, 15, 30]],    # input (empty) mask shapes
        [[0, 75, 50], [0, 50, 100]],   # expected mask shapes
    )
    for image_shape, want_image, masks_shape, want_masks in cases:
        out_image, out_masks = preprocessor.resize_to_range(
            tf.random_uniform(image_shape),
            tf.random_uniform(masks_shape),
            min_dimension=min_dim,
            max_dimension=max_dim,
        )
        with self.test_session() as sess:
            got_image, got_masks = sess.run(
                [tf.shape(out_image), tf.shape(out_masks)]
            )
        self.assertAllEqual(got_image, want_image)
        self.assertAllEqual(got_masks, want_masks)
def testResizeToRange4DImageTensor(self):
    """resize_to_range rejects batched (4-D) image tensors."""
    batched_image = tf.random_uniform([1, 200, 300, 3])
    with self.assertRaises(ValueError):
        preprocessor.resize_to_range(batched_image, 500, 600)
def testResizeToRangeSameMinMax(self):
    """With min_dimension == max_dimension the output is exactly that square size."""
    min_dim = max_dim = 320
    for in_shape in ([312, 312, 3], [299, 299, 3]):
        out_shape = tf.shape(
            preprocessor.resize_to_range(
                tf.random_uniform(in_shape),
                min_dimension=min_dim,
                max_dimension=max_dim,
            )
        )
        with self.test_session() as sess:
            self.assertAllEqual(sess.run(out_shape), [320, 320, 3])
def testResizeToMinDimensionTensorShapes(self):
    """resize_to_min_dimension: only images below min_dim get upscaled.

    Bug fix: ``in_masks`` was previously overwritten with a
    ``tf.random_uniform`` tensor immediately after the placeholder was
    created, leaving the placeholder dead and feeding a non-placeholder
    tensor below.  The overwrite is removed so the mask placeholder is
    actually exercised.
    """
    in_image_shape_list = [[60, 55, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 55], [10, 15, 30]]
    min_dim = 50
    expected_image_shape_list = [[60, 55, 3], [50, 100, 3]]
    expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]]
    for (
        in_image_shape,
        expected_image_shape,
        in_masks_shape,
        expected_mask_shape,
    ) in zip(
        in_image_shape_list,
        expected_image_shape_list,
        in_masks_shape_list,
        expected_masks_shape_list,
    ):
        in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
        in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
        out_image, out_masks = preprocessor.resize_to_min_dimension(
            in_image, in_masks, min_dimension=min_dim
        )
        out_image_shape = tf.shape(out_image)
        out_masks_shape = tf.shape(out_masks)
        with self.test_session() as sess:
            out_image_shape, out_masks_shape = sess.run(
                [out_image_shape, out_masks_shape],
                feed_dict={
                    in_image: np.random.randn(*in_image_shape),
                    in_masks: np.random.randn(*in_masks_shape),
                },
            )
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self):
    """resize_to_min_dimension also resizes empty (0-instance) mask tensors."""
    min_dim = 50
    cases = zip(
        [[60, 40, 3], [15, 30, 3]],    # input image shapes
        [[75, 50, 3], [50, 100, 3]],   # expected image shapes
        [[0, 60, 40], [0, 15, 30]],    # input (empty) mask shapes
        [[0, 75, 50], [0, 50, 100]],   # expected mask shapes
    )
    for image_shape, want_image, masks_shape, want_masks in cases:
        out_image, out_masks = preprocessor.resize_to_min_dimension(
            tf.random_uniform(image_shape),
            tf.random_uniform(masks_shape),
            min_dimension=min_dim,
        )
        with self.test_session() as sess:
            got_image, got_masks = sess.run(
                [tf.shape(out_image), tf.shape(out_masks)]
            )
        self.assertAllEqual(got_image, want_image)
        self.assertAllEqual(got_masks, want_masks)
def testResizeToMinDimensionRaisesErrorOn4DImage(self):
    """resize_to_min_dimension rejects batched (4-D) image tensors."""
    batched_image = tf.random_uniform([1, 200, 300, 3])
    with self.assertRaises(ValueError):
        preprocessor.resize_to_min_dimension(batched_image, 500)
def testScaleBoxesToPixelCoordinates(self):
    """Normalized boxes scale by the 60x40 image's height/width."""
    image = tf.random_uniform([60, 40, 3])
    boxes = tf.constant([[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]])
    _, scaled_boxes = preprocessor.scale_boxes_to_pixel_coordinates(
        image, boxes=boxes
    )
    with self.test_session() as sess:
        scaled_boxes_ = sess.run(scaled_boxes)
    self.assertAllClose(
        scaled_boxes_, [[6.0, 8.0, 24.0, 24.0], [30.0, 12.0, 54.0, 28.0]]
    )
def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
    """Boxes and keypoints both scale to pixel coordinates of a 60x40 image."""
    image = tf.random_uniform([60, 40, 3])
    _, out_boxes, out_keypoints = preprocessor.scale_boxes_to_pixel_coordinates(
        image, boxes=self.createTestBoxes(), keypoints=self.createTestKeypoints()
    )
    with self.test_session() as sess:
        boxes_, keypoints_ = sess.run([out_boxes, out_keypoints])
    self.assertAllClose(
        boxes_, [[0.0, 10.0, 45.0, 40.0], [15.0, 20.0, 45.0, 40.0]]
    )
    self.assertAllClose(
        keypoints_,
        [
            [[6.0, 4.0], [12.0, 8.0], [18.0, 12.0]],
            [[24.0, 16.0], [30.0, 20.0], [36.0, 24.0]],
        ],
    )
def testSubtractChannelMean(self):
    """Each channel of a zero image is shifted down by its configured mean."""
    means = [1, 2, 3]
    with self.test_session():
        image = tf.zeros((240, 320, 3))
        shifted = preprocessor.subtract_channel_mean(image, means=means).eval()
    for channel, mean in enumerate(means):
        self.assertTrue((shifted[:, :, channel] == -mean).all())
def testOneHotEncoding(self):
    """Multi-label encoding: indices 1, 2 and 4 are set out of 5 classes."""
    with self.test_session():
        labels = tf.constant([1, 4, 2], dtype=tf.int32)
        encoded = preprocessor.one_hot_encoding(labels, num_classes=5).eval()
    self.assertAllEqual([0, 1, 1, 0, 1], encoded)
def testSSDRandomCrop(self):
    """ssd_random_crop preserves the ranks of image and box tensors."""
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        ),
        (preprocessor.ssd_random_crop, {}),
    ]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: self.createTestLabels(),
    }
    distorted = preprocessor.preprocess(tensor_dict, preprocessing_options)
    ranks = [
        tf.rank(boxes),
        tf.rank(distorted[fields.InputDataFields.groundtruth_boxes]),
        tf.rank(images),
        tf.rank(distorted[fields.InputDataFields.image]),
    ]
    with self.test_session() as sess:
        boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_ = (
            sess.run(ranks)
        )
    self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
    self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropPad(self):
    """ssd_random_crop_pad preserves the ranks of image and box tensors."""
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        ),
        (preprocessor.ssd_random_crop_pad, {}),
    ]
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: self.createTestLabels(),
    }
    distorted = preprocessor.preprocess(tensor_dict, preprocessing_options)
    ranks = [
        tf.rank(boxes),
        tf.rank(distorted[fields.InputDataFields.groundtruth_boxes]),
        tf.rank(images),
        tf.rank(distorted[fields.InputDataFields.image]),
    ]
    with self.test_session() as sess:
        boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_ = (
            sess.run(ranks)
        )
    self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
    self.assertAllEqual(images_rank_, distorted_images_rank_)
def _testSSDRandomCropFixedAspectRatio(
    self, include_label_scores, include_instance_masks, include_keypoints
):
    """Shared driver: ssd_random_crop_fixed_aspect_ratio keeps tensor ranks.

    Optional groundtruth fields (label scores, instance masks, keypoints)
    are added to the input dict — and mirrored in the func arg map —
    according to the three flags.
    """
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    preprocessing_options = [
        (
            preprocessor.normalize_image,
            {
                "original_minval": 0,
                "original_maxval": 255,
                "target_minval": 0,
                "target_maxval": 1,
            },
        ),
        (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}),
    ]
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: self.createTestLabels(),
    }
    if include_label_scores:
        tensor_dict[fields.InputDataFields.groundtruth_label_scores] = (
            self.createTestLabelScores()
        )
    if include_instance_masks:
        tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = (
            self.createTestMasks()
        )
    if include_keypoints:
        tensor_dict[fields.InputDataFields.groundtruth_keypoints] = (
            self.createTestKeypoints()
        )
    arg_map = preprocessor.get_default_func_arg_map(
        include_label_scores=include_label_scores,
        include_instance_masks=include_instance_masks,
        include_keypoints=include_keypoints,
    )
    distorted = preprocessor.preprocess(
        tensor_dict, preprocessing_options, func_arg_map=arg_map
    )
    ranks = [
        tf.rank(boxes),
        tf.rank(distorted[fields.InputDataFields.groundtruth_boxes]),
        tf.rank(images),
        tf.rank(distorted[fields.InputDataFields.image]),
    ]
    with self.test_session() as sess:
        boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_ = (
            sess.run(ranks)
        )
    self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
    self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatio(self):
    """Fixed-aspect-ratio crop with no optional groundtruth fields."""
    self._testSSDRandomCropFixedAspectRatio(
        include_label_scores=False,
        include_instance_masks=False,
        include_keypoints=False,
    )
def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self):
    """Fixed-aspect-ratio crop with instance masks and keypoints included."""
    self._testSSDRandomCropFixedAspectRatio(
        include_label_scores=False,
        include_instance_masks=True,
        include_keypoints=True,
    )
def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self):
    """Fixed-aspect-ratio crop with label scores, masks and keypoints included."""
    self._testSSDRandomCropFixedAspectRatio(
        include_label_scores=True,
        include_instance_masks=True,
        include_keypoints=True,
    )
# Allow running this test file directly: delegates to the TF test runner.
if __name__ == "__main__":
    tf.test.main()
|
Generator | rotation | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2021 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# Technical Debt. This generator currently assumes 3+2 axis rotation of CA.
# The main generator function should be extended to include other flavors of 3+2
import math
from enum import Enum
import FreeCAD
import Path
__title__ = "Rotation Path Generator"
__author__ = "sliptonic (Brad Collette)"
__url__ = "https://www.freecad.org"
__doc__ = "Generates the rotation toolpath"
# Log-verbosity toggle for this module: flip the literal to True while
# debugging to get DEBUG output plus module tracking instead of INFO.
if False:
    Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
    Path.Log.trackModule(Path.Log.thisModule())
else:
    Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
class refAxis(Enum):
    # Reference unit vectors for the X/Y/Z axes, used by relAngle() and
    # the rotation helpers below to pick projection planes.
    x = FreeCAD.Vector(1, 0, 0)
    y = FreeCAD.Vector(0, 1, 0)
    z = FreeCAD.Vector(0, 0, 1)
def relAngle(vec, ref):
    """Return the signed angle (degrees) between *vec* and a reference axis.

    *vec* is copied, projected onto a plane chosen from *ref* following the
    x -> y -> z -> x cycle, and compared against ``ref.value``.  The sign of
    the result comes from the dot product of the plane normal with the
    rotation axis.
    """
    Path.Log.debug("vec: {} ref: {}".format(vec, ref))
    working = vec * 1  # multiply by 1 to copy; leaves the caller's vector intact
    if ref == refAxis.x:
        plane_normal = refAxis.y.value
    elif ref == refAxis.y:
        plane_normal = refAxis.z.value
    else:
        plane_normal = refAxis.x.value
    working.projectToPlane(FreeCAD.Vector(0, 0, 0), plane_normal)
    rotation = FreeCAD.Rotation(working, ref.value)
    angle = math.degrees(rotation.Angle) * plane_normal.dot(rotation.Axis)
    Path.Log.debug("relative ang: {}".format(angle))
    return angle
def __getCRotation(normalVector, cMin=-360, cMax=360):
    """Return all C-axis (about Z) angles, within [cMin, cMax], that align
    *normalVector* with the +y or -y axis.

    A base angle admits up to four equivalent poses (rotating either way
    toward either +y or -y), so a list is returned.  Candidate order is
    preserved from the original scan so downstream tie-breaking is stable.
    """
    Path.Log.debug(
        "normalVector: {} cMin: {} cMax: {}".format(normalVector, cMin, cMax)
    )
    base = relAngle(normalVector, refAxis.y)
    if base == 0:
        candidates = [base, 180]
    elif base == 180:
        candidates = [base, 0]
    elif base >= 0:
        candidates = [base, base - 180, 180 + base, base - 360]
    else:
        candidates = [base, base + 180, -180 + base, base + 360]
    # keep only the poses that respect the rotation limits
    return [c for c in candidates if cMin <= c <= cMax]
def __getARotation(normalVector, aMin=-360, aMax=360):
    """Return the A-axis (about +X) rotation angle for *normalVector*.

    The sign of the returned value indicates the rotation direction.
    Returns None when the angle falls outside (aMin, aMax].
    NOTE(review): the lower bound is exclusive while the upper bound is
    inclusive — kept as-is, but worth confirming it is intentional.
    """
    angle = relAngle(normalVector, refAxis.z)
    return angle if aMin < angle <= aMax else None
def generate(normalVector, aMin=-360, aMax=360, cMin=-360, cMax=360, compound=False):
    """Generate G-code rotation commands aligning *normalVector* with +Z.

    The pose is a C rotation (about Z, aligning the vector with the Y axis)
    followed by an A rotation (about X).  The min/max arguments bound the
    allowed travel of each rotary axis; defaults assume continuous rotation.

    Returns a list of Path.Command objects for the valid solution with the
    shortest combined travel.  With compound=True a single G0 moves both
    axes; otherwise one G0 per axis is emitted.

    Raises ValueError when no pose satisfies the axis limits.

    The normalVector input from a typical face (f) can be obtained like this:
        u, v = f.ParameterRange[:2]
        n = f.normalAt(u, v)
        plm = obj.getGlobalPlacement()
        rot = plm.Rotation
        normalVector = rot.multVec(n)
    """
    Path.Log.track(
        "\n=============\n normalVector: {}\n aMin: {}\n aMax: {}\n cMin: {}\n cMax: {}".format(
            normalVector, aMin, aMax, cMin, cMax
        )
    )
    # Calculate all C rotation candidates within limits
    cResults = __getCRotation(normalVector, cMin, cMax)
    Path.Log.debug("C Rotation results {}".format(cResults))
    solutions = []
    for result in cResults:
        # rotate the vector by the candidate C angle, then check whether a
        # valid A rotation exists for the rotated vector
        rot = FreeCAD.Rotation(FreeCAD.Vector(0, 0, 1), result)
        newvec = rot.multVec(normalVector)
        aResult = __getARotation(newvec, aMin, aMax)
        Path.Log.debug(
            "\n=====\nFor C Rotation: {}\n Calculated A {}\n".format(result, aResult)
        )
        if aResult is not None:
            solutions.append({"A": aResult, "C": result})
    if len(solutions) == 0:  # No valid solution found
        raise ValueError("No valid rotation solution found")
    # Pick the pose with the shortest combined transit.  min() returns the
    # first of any ties, matching the original linear scan's behavior.
    best = min(solutions, key=lambda s: math.fabs(s["A"]) + math.fabs(s["C"]))
    Path.Log.debug("best result: {}".format(best))
    # format and return rotation commands
    commands = []
    if compound:
        commands.append(Path.Command("G0", best))
    else:
        # one command per axis (a stray debug print() was removed here)
        for axis, angle in best.items():
            commands.append(Path.Command("G0", {axis: angle}))
    return commands
|
base | downloader | # -*- coding: utf-8 -*-
import mimetypes
import os
import re
from pyload.core.network.exceptions import Fail
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.utils import format, parse
from pyload.core.utils.old import safejoin
from ..helpers import exists
from .hoster import BaseHoster
class BaseDownloader(BaseHoster):
    # Plugin metadata consumed by pyLoad's plugin manager.
    __name__ = "BaseDownloader"
    __type__ = "downloader"
    __version__ = "0.82"
    __status__ = "stable"
    __pattern__ = r"^unmatchable$"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
    ]
    __description__ = """Base downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    @property
    def last_download(self):
        """Path of the most recently downloaded file, or "" if it no longer exists on disk."""
        return self._last_download if exists(self._last_download) else ""

    @last_download.setter
    def last_download(self, value):
        # Only accept paths that actually exist on disk; anything else resets to "".
        if isinstance(value, str) and exists(value):
            self._last_download = value
        else:
            self._last_download = ""

    def init_base(self):
        """Initialize downloader-specific state (called once at plugin creation)."""
        #: Enable simultaneous processing of multiple downloads
        self.limit_dl = 0
        #: Download chunks limit
        self.chunk_limit = None
        #: Enable download resuming if the hoster supports resuming
        self.resume_download = False
        #: Location where the last call to download was saved
        self._last_download = ""
        #: Re match of the last call to `check_download`
        self.last_check = None
        #: Restart flag
        self.restart_free = False # TODO: Recheck in 0.6.x
        #: Download is possible with premium account only, don't fallback to free download
        self.no_fallback = False

    def setup_base(self):
        """Reset per-download state; premium accounts get unlimited chunks and resume."""
        self._last_download = ""
        self.last_check = None
        self.restart_free = False
        self.no_fallback = False
        if self.account:
            self.chunk_limit = -1 #: -1 for unlimited
            self.resume_download = True
        else:
            self.chunk_limit = 1
            self.resume_download = False

    def load_account(self):
        # A pending free-mode restart drops the account so the next attempt
        # runs without premium credentials.
        if self.restart_free:
            self.account = False
        else:
            super().load_account()
            # self.restart_free = False

    def _process(self, thread):
        """Run the full download life cycle on *thread*.

        Initializes and sets up the plugin, runs `process()`, then verifies the
        result. A premium failure falls back to a free-mode restart when the
        `fallback` option allows it; otherwise the failure propagates.
        """
        self.thread = thread
        try:
            self._initialize()
            self._setup()
            # TODO: Enable in 0.6.x
            # self.pyload.addon_manager.download_preparing(self.pyfile)
            # self.check_status()
            self.check_duplicates()
            self.pyfile.set_status("starting")
            try:
                self.log_info(self._("Processing url: ") + self.pyfile.url)
                self.process(self.pyfile)
                self.check_status()
                self._check_download()
            except Fail as exc: # TODO: Move to DownloadThread in 0.6.x
                self.log_warning(
                    self._("Premium download failed")
                    if self.premium
                    else self._("Free download failed"),
                    str(exc),
                )
                if (
                    not self.no_fallback
                    and self.config.get("fallback", True)
                    and self.premium
                ):
                    self.restart(premium=False)
                else:
                    raise
        finally:
            self._finalize()

    # TODO: Remove in 0.6.x
    def _finalize(self):
        """Dispatch download/package processed|failed events once the package settles."""
        pypack = self.pyfile.package()
        self.pyload.addon_manager.dispatch_event("download_processed", self.pyfile)
        try:
            # NOTE(review): presumably statuses 3/7 mean "still pending" -- confirm
            # against pyload's file status codes.
            unfinished = any(
                fdata.get("status") in (3, 7)
                for fid, fdata in pypack.get_children().items()
                if fid != self.pyfile.id
            )
            if unfinished:
                return
            self.pyload.addon_manager.dispatch_event("package_processed", pypack)
            # NOTE(review): presumably statuses 1/6/8/9/14 are failure states -- confirm.
            failed = any(
                fdata.get("status") in (1, 6, 8, 9, 14)
                for fid, fdata in pypack.get_children().items()
            )
            if not failed:
                return
            self.pyload.addon_manager.dispatch_event("package_failed", pypack)
        finally:
            self.check_status()

    def isresource(self, url, redirect=True, resumable=None):
        """Probe *url* with header-only requests, following redirects.

        :param redirect: True to follow redirects, or an int capping their number
        :param resumable: overrides `self.resume_download` when not None
        :return: the final URL when it looks like a downloadable file, else False
        """
        resource = False
        maxredirs = 5
        if resumable is None:
            resumable = self.resume_download
        # Exact type check: a bool `redirect` falls through to the elif branch.
        if type(redirect) == int:
            maxredirs = max(redirect, 1)
        elif redirect:
            maxredirs = (
                self.config.get("maxredirs", plugin="UserAgentSwitcher") or maxredirs
            )
        header = self.load(url, just_header=True)
        for i in range(1, maxredirs):
            if not redirect or header.get("connection") == "close":
                resumable = False
            if "content-disposition" in header:
                # An attachment header is a sure sign of a direct file link.
                resource = url
            elif header.get("location"):
                location = self.fixurl(header.get("location"), url)
                code = header.get("code")
                if code in (301, 302) or resumable:
                    self.log_debug(f"Redirect #{i} to: {location}")
                    header = self.load(location, just_header=True)
                    url = location
                    continue
            else:
                # No disposition and no redirect: decide from the MIME type,
                # guessed from the content-type header or the file extension.
                contenttype = header.get("content-type")
                extension = os.path.splitext(parse.name(url))[-1]
                if contenttype:
                    mimetype = contenttype.split(";")[0].strip()
                elif extension:
                    mimetype = (
                        mimetypes.guess_type(extension, False)[0]
                        or "application/octet-stream"
                    )
                else:
                    mimetype = None
                if mimetype and (resource or "html" not in mimetype):
                    resource = url
                else:
                    resource = False
        return resource

    def _on_notification(self, notification):
        """Progress callback from the HTTP layer; mirrors updates onto the pyfile."""
        if "progress" in notification:
            self.pyfile.set_progress(notification["progress"])
        if "disposition" in notification:
            self.pyfile.set_name(notification["disposition"])

    def _download(
        self, url, filename, get, post, ref, cookies, disposition, resume, chunks
    ):
        """Low-level wrapper around `req.http_download`; returns the saved file name.

        Resolves the effective chunk count from the global config and the plugin
        limit, translates IOError into a plugin failure, and discards 404/410
        responses that were written to disk.
        """
        # TODO: Safe-filename check in HTTPDownload in 0.6.x
        filename = os.fsdecode(filename)
        resume = self.resume_download if resume is None else bool(resume)
        dl_chunks = self.pyload.config.get("download", "chunks")
        chunk_limit = chunks or self.chunk_limit or -1
        # -1 means unlimited: take the concrete limit when one side is unlimited,
        # otherwise the smaller of the two limits wins.
        if -1 in (dl_chunks, chunk_limit):
            chunks = max(dl_chunks, chunk_limit)
        else:
            chunks = min(dl_chunks, chunk_limit)
        try:
            newname = self.req.http_download(
                url,
                filename,
                size=self.pyfile.size,
                get=get,
                post=post,
                ref=ref,
                cookies=cookies,
                chunks=chunks,
                resume=resume,
                status_notify=self._on_notification,
                disposition=disposition,
            )
        except IOError as exc:
            self.log_error(str(exc))
            self.fail(self._("IOError {}").format(exc.errno))
        except BadHeader as exc:
            self.req.http.code = exc.code
            raise
        else:
            if self.req.code in (404, 410):
                # Server said the file is gone; drop whatever body was stored.
                bad_file = os.path.join(os.path.dirname(filename), newname)
                if self.remove(bad_file):
                    return ""
            else:
                self.log_info(self._("File saved"))
            return newname
        finally:
            self.pyfile.size = self.req.size
            self.captcha.correct()

    def download(
        self,
        url,
        get={},
        post={},
        ref=True,
        cookies=True,
        disposition=True,
        resume=None,
        chunks=None,
    ):
        """
        Downloads the content at url to download folder.

        NOTE(review): `get`/`post` are mutable default arguments; safe only while
        no callee mutates them -- confirm before relying on that.

        :param url:
        :param get:
        :param post:
        :param ref:
        :param cookies:
        :param disposition: if True and server provides content-disposition header\
        the filename will be changed if needed
        :return: The location where the file was saved
        """
        self.check_status()
        if self.pyload.debug:
            self.log_debug(
                "DOWNLOAD URL " + url,
                *[
                    "{}={}".format(key, value)
                    for key, value in locals().items()
                    if key not in ("self", "url", "_[1]")
                ],
            )
        dl_basename = parse.name(self.pyfile.name)
        self.pyfile.name = dl_basename
        self.check_duplicates()
        self.pyfile.set_status("downloading")
        dl_url = self.fixurl(url)
        dl_folder = self.pyload.config.get("general", "storage_folder")
        dl_dirname = safejoin(dl_folder, self.pyfile.package().folder)
        dl_filename = safejoin(dl_dirname, self.pyfile.name)
        os.makedirs(dl_dirname, exist_ok=True)
        self.set_permissions(dl_dirname)
        self.pyload.addon_manager.dispatch_event(
            "download_start", self.pyfile, dl_url, dl_filename
        )
        self.check_status()
        newname = self._download(
            dl_url, dl_filename, get, post, ref, cookies, disposition, resume, chunks
        )
        # A server-provided filename (content-disposition) wins over our guess.
        if disposition and newname:
            self.pyfile.name = newname
            dl_filename = safejoin(dl_dirname, newname)
        self.set_permissions(dl_filename)
        self.last_download = dl_filename
        return dl_filename

    def scan_download(self, rules, read_size=1_048_576):
        """
        Checks the content of the last downloaded file, re match is saved to
        `last_check`
        :param rules: dict with names and rules to match (compiled regexp or strings)
        :param read_size: size to read and scan
        :return: dictionary key of the first rule that matched
        """
        if not self.last_download:
            self.log_warning(self._("No file to scan"))
            return
        dl_file = os.fsdecode(self.last_download) # TODO: Recheck in 0.6.x
        with open(dl_file, mode="rb") as fp:
            content = fp.read(read_size)
        for name, rule in rules.items():
            if isinstance(rule, bytes):
                if rule in content:
                    return name
            elif isinstance(rule, str):
                # str rules cannot be matched against binary content; fail loudly
                # so the plugin author fixes the rule.
                raise TypeError(f"Cannot check binary data with string rule '{name}'")
            elif hasattr(rule, "search"):
                m = rule.search(content)
                if m is not None:
                    self.last_check = m
                    return name
            elif callable(rule):
                # NOTE(review): callable rules return their own result, not `name`.
                return rule(content)

    def _check_download(self):
        """Post-download sanity check: captcha retry, missing file, empty file."""
        def _is_empty_file(content):
            # "Empty" means every byte is whitespace or a repeat of the first byte
            # (e.g. a file of all zeros).
            firstbyte = content[0:1]
            whitespaces_count = len(re.findall(rb"[%s\s]" % firstbyte, content))
            return whitespaces_count == len(content)
        self.log_info(self._("Checking download..."))
        self.pyfile.set_custom_status(self._("checking"))
        if not self.last_download:
            if self.captcha.task:
                # No file but an open captcha task: the captcha answer was likely wrong.
                self.retry_captcha()
            else:
                self.error(self._("No file downloaded"))
        elif self.scan_download({"Empty file": _is_empty_file}):
            if self.remove(self.last_download):
                self.last_download = ""
            self.error(self._("Empty file"))
        else:
            self.pyload.addon_manager.dispatch_event("download_check", self.pyfile)
            self.check_status()
            self.log_info(self._("File is OK"))

    def out_of_traffic(self):
        """Return True when the account's remaining traffic cannot cover this file.

        No-account downloads and unlimited (-1) accounts never run out; unknown
        traffic info (None) is treated as exhausted.
        """
        if not self.account:
            return False
        traffic = self.account.get_data("trafficleft")
        if traffic is None:
            return True
        elif traffic == -1:
            return False
        else:
            size = self.pyfile.size
            self.log_info(
                self._("Filesize: {}").format(format.size(size)),
                self._("Traffic left for user `{}`: {}").format(
                    self.account.user, format.size(traffic)
                ),
            )
            return size > traffic

    # def check_size(self, file_size, size_tolerance=1 << 10, delete=False):
    # """
    # Checks the file size of the last downloaded file
    # :param file_size: expected file size
    # :param size_tolerance: size check tolerance
    # """
    # self.log_info(self._("Checking file size..."))
    # if not self.last_download:
    # self.log_warning(self._("No file to check"))
    # return
    # dl_file = encode(self.last_download)
    # dl_size = os.stat(dl_file).st_size
    # try:
    # if dl_size == 0:
    # delete = True
    # self.fail(self._("Empty file"))
    # elif file_size > 0:
    # diff = abs(file_size - dl_size)
    # if diff > size_tolerance:
    # self.fail(self._("File size mismatch | Expected file size: {} bytes | Downloaded file size: {} bytes").format((file_size), dl_size))
    # elif diff != 0:
    # self.log_warning(self._("File size is not equal to expected download size, but does not exceed the tolerance threshold"))
    # self.log_debug(f"Expected file size: {file_size} bytes"
    # "Downloaded file size: {} bytes".format(dl_size)
    # "Tolerance threshold: {} bytes".format(size_tolerance))
    # else:
    # delete = False
    # self.log_info(self._("File size match"))
    # finally:
    # if delete:
    # self.remove(dl_file, try_trash=False)

    # def check_hash(self, type, digest, delete=False):
    # hashtype = type.strip('-').upper()
    # self.log_info(self._("Checking file hashsum {}...").format(hashtype))
    # if not self.last_download:
    # self.log_warning(self._("No file to check"))
    # return
    # dl_file = encode(self.last_download)
    # try:
    # dl_hash = digest
    # file_hash = compute_checksum(dl_file, hashtype)
    # if not file_hash:
    # self.fail(self._("Unsupported hashing algorithm: ") + hashtype)
    # elif dl_hash == file_hash:
    # delete = False
    # self.log_info(self._("File hashsum {} match").format(hashtype))
    # else:
    # self.fail(self._("File hashsum {} mismatch | Expected file hashsum: {} | Downloaded file hashsum: {}").format(hashtype, dl_hash, file_hash))
    # finally:
    # if delete:
    # self.remove(dl_file, try_trash=False)

    def check_duplicates(self):
        """
        Checks if same file was downloaded within same package.
        :raises Skip:
        """
        pack_folder = self.pyfile.package().folder
        # First look at the in-memory file cache: an identically named file in the
        # same package folder that is already active or finished means skip.
        for pyfile in list(self.pyload.files.cache.values()):
            if (
                pyfile != self.pyfile
                and pyfile.name == self.pyfile.name
                and pyfile.package().folder == pack_folder
            ):
                if pyfile.status in (
                    0,
                    12,
                    5,
                    7,
                ): #: finished / downloading / waiting / starting
                    self.skip(pyfile.pluginname)
        dl_folder = self.pyload.config.get("general", "storage_folder")
        dl_file = os.path.join(dl_folder, pack_folder, self.pyfile.name)
        if not exists(dl_file):
            return
        if os.stat(dl_file).st_size == 0:
            # NOTE(review): the zero-byte file found on disk is `dl_file`, but the
            # removal targets `self.last_download` -- looks suspicious; confirm.
            if self.remove(self.last_download):
                self.last_download = ""
            return
        if self.pyload.config.get("download", "skip_existing"):
            plugin = self.pyload.db.find_duplicates(
                self.pyfile.id, pack_folder, self.pyfile.name
            )
            msg = plugin[0] if plugin else self._("File exists")
            self.skip(msg)
        else:
            # Same file exists but, it does not belong to our pack, add a trailing
            # counter
            name, ext = os.path.splitext(self.pyfile.name)
            m = re.match(r"(.+?)(?:\((\d+)\))?$", name)
            dl_n = int(m.group(2) or "0")
            while True:
                name = "{} ({}){}".format(m.group(1), dl_n + 1, ext)
                dl_file = os.path.join(dl_folder, pack_folder, name)
                if not exists(dl_file):
                    break
                dl_n += 1
            self.pyfile.name = name

    #: Deprecated method (Recheck in 0.6.x)
    def check_for_same_files(self, *args, **kwargs):
        """Deprecated no-op (see the note above)."""
        pass
|
models | alert_group | import datetime
import logging
import typing
import urllib
from collections import namedtuple
from functools import partial
from urllib.parse import urljoin
from apps.alerts.constants import AlertGroupState
from apps.alerts.escalation_snapshot import EscalationSnapshotMixin
from apps.alerts.escalation_snapshot.escalation_snapshot_mixin import (
START_ESCALATION_DELAY,
)
from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
from apps.alerts.incident_appearance.renderers.slack_renderer import (
AlertGroupSlackRenderer,
)
from apps.alerts.incident_log_builder import IncidentLogBuilder
from apps.alerts.signals import (
alert_group_action_triggered_signal,
alert_group_created_signal,
)
from apps.alerts.tasks import (
acknowledge_reminder_task,
send_alert_group_signal,
unsilence_task,
)
from apps.metrics_exporter.metrics_cache_manager import MetricsCacheManager
from apps.slack.slack_formatter import SlackFormatter
from apps.user_management.models import User
from celery import uuid as celery_uuid
from common.public_primary_keys import (
generate_public_primary_key,
increase_public_primary_key_length,
)
from common.utils import clean_markup, str_or_backup
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import IntegrityError, models, transaction
from django.db.models import JSONField, Q, QuerySet
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
from .alert_group_counter import AlertGroupCounter
if typing.TYPE_CHECKING:
from apps.alerts.models import (
Alert,
AlertGroupLogRecord,
AlertReceiveChannel,
ResolutionNote,
ResolutionNoteSlackMessage,
)
from apps.base.models import UserNotificationPolicyLogRecord
from apps.slack.models import SlackMessage
from django.db.models.manager import RelatedManager
# Module-level logger; DEBUG level is set explicitly for this module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def generate_public_primary_key_for_alert_group():
    """Produce a unique public primary key for a new AlertGroup.

    Starts from a freshly generated key with the "I" prefix and, on each
    collision with an existing AlertGroup, asks for a longer key until a
    free one is found.
    """
    prefix = "I"
    candidate = generate_public_primary_key(prefix)
    collisions = 0
    while AlertGroup.objects.filter(public_primary_key=candidate).exists():
        candidate = increase_public_primary_key_length(
            failure_counter=collisions, prefix=prefix, model_name="AlertGroup"
        )
        collisions += 1
    return candidate
class LogRecordUser(typing.TypedDict):
    """Serialized user info attached to a rendered log record entry."""

    username: str
    pk: str
    avatar: str
    avatar_full: str
class LogRecords(typing.TypedDict):
    """Serialized representation of a single alert group log record."""

    time: str  # humanized delta relative to now
    action: str  # human-friendly description
    realm: typing.Literal["user_notification", "alert_group", "resolution_note"]
    type: int  # depending on realm, check type choices
    created_at: str  # timestamp
    author: LogRecordUser
class Permalinks(typing.TypedDict):
    """Links to an alert group's representations; Slack/Telegram may be absent."""

    slack: typing.Optional[str]
    telegram: typing.Optional[str]
    web: str
class AlertGroupQuerySet(models.QuerySet):
    """Queryset that numbers alert groups per organization and handles grouping."""

    def create(self, **kwargs):
        # Every alert group gets a sequential number within its organization,
        # taken from the optimistically-locked AlertGroupCounter.
        organization = kwargs["channel"].organization
        inside_organization_number = (
            AlertGroupCounter.objects.get_value(organization=organization) + 1
        )
        return super().create(
            **kwargs, inside_organization_number=inside_organization_number
        )

    def get_or_create_grouping(self, channel, channel_filter, group_data):
        """
        This method is similar to default Django QuerySet.get_or_create(), please see the original get_or_create method.
        The difference is that this method is trying to get an object using multiple queries with different filters.
        Also, "create" is invoked without transaction.atomic to reduce number of ConcurrentUpdateError's which can be
        raised in AlertGroupQuerySet.create() due to optimistic locking of AlertGroupCounter model.

        Returns a ``(alert_group, created)`` tuple.
        """
        search_params = {
            "channel": channel,
            "channel_filter": channel_filter,
            "distinction": group_data.group_distinction,
        }
        # Try to return the last open group
        # Note that (channel, channel_filter, distinction, is_open_for_grouping) is in unique_together
        try:
            return self.get(**search_params, is_open_for_grouping__isnull=False), False
        except self.model.DoesNotExist:
            pass
        # If it's an "OK" alert, try to return the latest resolved group
        if group_data.is_resolve_signal:
            try:
                return self.filter(**search_params, resolved=True).latest(), False
            except self.model.DoesNotExist:
                pass
        # Create a new group if we couldn't group it to any existing ones
        try:
            alert_group = self.create(
                **search_params,
                is_open_for_grouping=True,
                web_title_cache=group_data.web_title_cache,
            )
            alert_group_created_signal.send(
                sender=self.__class__, alert_group=alert_group
            )
            return (alert_group, True)
        except IntegrityError:
            # The unique constraint fired: another worker created the same open
            # group concurrently -- return that one instead.
            try:
                return (
                    self.get(**search_params, is_open_for_grouping__isnull=False),
                    False,
                )
            except self.model.DoesNotExist:
                pass
            raise
class AlertGroupSlackRenderingMixin:
    """
    Ideally this mixin should not exist. Instead of this instance of AlertGroupSlackRenderer should be created and used
    but slack rendering is distributed throughout the codebase.
    """

    @cached_property
    def slack_renderer(self):
        # Built lazily and memoized for the lifetime of this instance.
        return AlertGroupSlackRenderer(self)

    def render_slack_attachments(self):
        """Render this alert group as Slack message attachments."""
        return self.slack_renderer.render_alert_group_attachments()

    def render_slack_blocks(self):
        """Render this alert group as Slack Block Kit blocks."""
        return self.slack_renderer.render_alert_group_blocks()

    @property
    def slack_templated_first_alert(self):
        # Presumably the templated form of the group's first alert -- see
        # AlertGroupSlackRenderer.alert_renderer for the actual semantics.
        return self.slack_renderer.alert_renderer.templated_alert
class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
alerts: "RelatedManager['Alert']"
dependent_alert_groups: "RelatedManager['AlertGroup']"
channel: "AlertReceiveChannel"
log_records: "RelatedManager['AlertGroupLogRecord']"
personal_log_records: "RelatedManager['UserNotificationPolicyLogRecord']"
resolution_notes: "RelatedManager['ResolutionNote']"
resolution_note_slack_messages: "RelatedManager['ResolutionNoteSlackMessage']"
resolved_by_alert: typing.Optional["Alert"]
root_alert_group: typing.Optional["AlertGroup"]
slack_log_message: typing.Optional["SlackMessage"]
slack_messages: "RelatedManager['SlackMessage']"
users: "RelatedManager['User']"
objects: models.Manager["AlertGroup"] = AlertGroupQuerySet.as_manager()
(
NEW,
ACKNOWLEDGED,
RESOLVED,
SILENCED,
) = range(4)
# exists for status filter in API
STATUS_CHOICES = (
(NEW, "New"),
(ACKNOWLEDGED, "Acknowledged"),
(RESOLVED, "Resolved"),
(SILENCED, "Silenced"),
)
GroupData = namedtuple(
"GroupData",
[
"is_resolve_signal",
"group_distinction",
"web_title_cache",
"is_acknowledge_signal",
],
)
(
SOURCE,
USER,
NOT_YET,
LAST_STEP,
ARCHIVED,
WIPED,
DISABLE_MAINTENANCE,
NOT_YET_STOP_AUTORESOLVE,
) = range(8)
SOURCE_CHOICES = (
(SOURCE, "source"),
(USER, "user"),
(NOT_YET, "not yet"),
(LAST_STEP, "last escalation step"),
(ARCHIVED, "archived"), # deprecated. don't use
(WIPED, "wiped"),
(DISABLE_MAINTENANCE, "stop maintenance"),
(NOT_YET_STOP_AUTORESOLVE, "not yet, autoresolve disabled"),
)
ACKNOWLEDGE = "acknowledge"
RESOLVE = "resolve"
SILENCE = "silence"
RESTART = "restart"
BULK_ACTIONS = [
ACKNOWLEDGE,
RESOLVE,
SILENCE,
RESTART,
]
public_primary_key = models.CharField(
max_length=20,
validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
unique=True,
default=generate_public_primary_key_for_alert_group,
)
channel = models.ForeignKey(
"alerts.AlertReceiveChannel",
on_delete=models.CASCADE,
related_name="alert_groups",
)
# Distinction is a difference between groups inside the same channel.
# For example different types of alerts from the same channel should go to different groups.
# Distinction is what describes their difference.
distinction = models.CharField(
max_length=100, null=True, default=None, db_index=True
)
web_title_cache = models.TextField(null=True, default=None)
inside_organization_number = models.IntegerField(default=0)
channel_filter = models.ForeignKey(
"alerts.ChannelFilter",
on_delete=models.SET_DEFAULT,
related_name="alert_groups",
null=True,
default=None,
)
resolved = models.BooleanField(default=False)
resolved_by = models.IntegerField(choices=SOURCE_CHOICES, default=NOT_YET)
resolved_by_user = models.ForeignKey(
"user_management.User",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="resolved_alert_groups",
)
resolved_by_alert = models.ForeignKey(
"alerts.Alert",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="resolved_alert_groups",
)
resolved_at = models.DateTimeField(blank=True, null=True)
acknowledged = models.BooleanField(default=False)
acknowledged_on_source = models.BooleanField(default=False)
acknowledged_at = models.DateTimeField(blank=True, null=True)
acknowledged_by = models.IntegerField(choices=SOURCE_CHOICES, default=NOT_YET)
acknowledged_by_user = models.ForeignKey(
"user_management.User",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="acknowledged_alert_groups",
)
acknowledged_by_confirmed = models.DateTimeField(null=True, default=None)
is_escalation_finished = models.BooleanField(default=False)
started_at = models.DateTimeField(auto_now_add=True, db_index=True)
slack_message_sent = models.BooleanField(default=False)
active_escalation_id = models.CharField(
max_length=100, null=True, default=None
) # ID generated by celery
active_resolve_calculation_id = models.CharField(
max_length=100, null=True, default=None
) # ID generated by celery
SILENCE_DELAY_OPTIONS = (
(1800, "30 minutes"),
(3600, "1 hour"),
(7200, "2 hours"),
(10800, "3 hours"),
(14400, "4 hours"),
(21600, "6 hours"),
(43200, "12 hours"),
(57600, "16 hours"),
(72000, "20 hours"),
(86400, "24 hours"),
(-1, "Forever"),
)
silenced = models.BooleanField(default=False)
silenced_at = models.DateTimeField(null=True)
silenced_by_user = models.ForeignKey(
"user_management.User",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="silenced_alert_groups",
)
silenced_until = models.DateTimeField(blank=True, null=True)
unsilence_task_uuid = models.CharField(max_length=100, null=True, default=None)
restarted_at = models.DateTimeField(blank=True, null=True, default=None)
response_time = models.DurationField(null=True, default=None)
@property
def is_silenced_forever(self):
return self.silenced and self.silenced_until is None
@property
def is_silenced_for_period(self):
return self.silenced and self.silenced_until is not None
@property
def status(self):
if self.resolved:
return AlertGroup.RESOLVED
elif self.acknowledged:
return AlertGroup.ACKNOWLEDGED
elif self.silenced:
return AlertGroup.SILENCED
else:
return AlertGroup.NEW
(
ACCOUNT_INACTIVE,
CHANNEL_ARCHIVED,
NO_REASON,
RATE_LIMITED,
CHANNEL_NOT_SPECIFIED,
RESTRICTED_ACTION,
) = range(6)
REASONS_TO_SKIP_ESCALATIONS = (
(ACCOUNT_INACTIVE, "account_inactive"),
(CHANNEL_ARCHIVED, "channel_archived"),
(NO_REASON, "no_reason"),
(RATE_LIMITED, "rate_limited"),
(CHANNEL_NOT_SPECIFIED, "channel_not_specified"),
(RESTRICTED_ACTION, "restricted_action"),
)
reason_to_skip_escalation = models.IntegerField(
choices=REASONS_TO_SKIP_ESCALATIONS, default=NO_REASON
)
root_alert_group = models.ForeignKey(
"alerts.AlertGroup",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="dependent_alert_groups",
)
last_unique_unacknowledge_process_id = models.CharField(
max_length=100, null=True, default=None
)
wiped_at = models.DateTimeField(null=True, default=None)
wiped_by = models.ForeignKey(
"user_management.User",
on_delete=models.SET_NULL,
null=True,
default=None,
related_name="wiped_alert_groups",
)
slack_log_message = models.OneToOneField(
"slack.SlackMessage",
on_delete=models.SET_NULL,
null=True,
default=None,
)
prevent_posting_alerts = models.BooleanField(default=False)
maintenance_uuid = models.CharField(
max_length=100, unique=True, null=True, default=None
)
raw_escalation_snapshot = JSONField(null=True, default=None)
# This field is used for constraints so we can use get_or_create() in concurrent calls
# https://docs.djangoproject.com/en/3.2/ref/models/querysets/#get-or-create
# Combined with unique_together below, it allows only one alert group with
# the combination (alert_receive_channel_id, channel_filter_id, distinction, is_open_for_grouping=True)
# If is_open_for_grouping=None, then we can have as many combinations of
# (alert_receive_channel_id, channel_filter_id, distinction, is_open_for_grouping=None) as we want
# We just don't care about that because we'll use only get_or_create(...is_open_for_grouping=True...)
# https://code.djangoproject.com/ticket/28545
is_open_for_grouping = models.BooleanField(default=None, null=True, blank=True)
is_restricted = models.BooleanField(default=False, null=True)
    @staticmethod
    def get_silenced_state_filter():
        """
        models.Value(0/1) is used instead of True/False because django translates that into
        WHERE bool_field=0/1 instead of WHERE bool_field/NOT bool_field
        which works much faster in mysql
        """
        # Silenced state: silenced flag set while neither acknowledged nor resolved.
        return (
            Q(silenced=models.Value("1"))
            & Q(acknowledged=models.Value("0"))
            & Q(resolved=models.Value("0"))
        )
    @staticmethod
    def get_new_state_filter():
        """
        models.Value(0/1) is used instead of True/False because django translates that into
        WHERE bool_field=0/1 instead of WHERE bool_field/NOT bool_field
        which works much faster in mysql
        """
        # New state: none of the silenced/acknowledged/resolved flags set.
        return (
            Q(silenced=models.Value("0"))
            & Q(acknowledged=models.Value("0"))
            & Q(resolved=models.Value("0"))
        )
    @staticmethod
    def get_acknowledged_state_filter():
        """
        models.Value(0/1) is used instead of True/False because django translates that into
        WHERE bool_field=0/1 instead of WHERE bool_field/NOT bool_field
        which works much faster in mysql
        """
        # Acknowledged state: acknowledged but not yet resolved.
        return Q(acknowledged=models.Value("1")) & Q(resolved=models.Value("0"))
    @staticmethod
    def get_resolved_state_filter():
        """
        models.Value(0/1) is used instead of True/False because django translates that into
        WHERE bool_field=0/1 instead of WHERE bool_field/NOT bool_field
        which works much faster in mysql
        """
        # Resolved state: the resolved flag alone decides.
        return Q(resolved=models.Value("1"))
    class Meta:
        get_latest_by = "pk"
        # Allows at most one open-for-grouping alert group per
        # (channel, filter, distinction) -- NULL rows are exempt, which is how
        # the is_open_for_grouping trick works.
        unique_together = [
            "channel_id",
            "channel_filter_id",
            "distinction",
            "is_open_for_grouping",
        ]
        # Composite index covering the common per-channel status/root filters.
        indexes = [
            models.Index(
                fields=[
                    "channel_id",
                    "resolved",
                    "acknowledged",
                    "silenced",
                    "root_alert_group_id",
                ]
            ),
        ]
def __str__(self):
return f"{self.pk}: {self.web_title_cache}"
@property
def is_maintenance_incident(self):
return self.maintenance_uuid is not None
    def stop_maintenance(self, user: User) -> None:
        """Finish the maintenance this alert group represents.

        Prefers disabling maintenance on the integration that owns this
        maintenance_uuid; if no such integration exists anymore, resolves
        the group itself instead.
        """
        from apps.alerts.models import AlertReceiveChannel

        try:
            integration_on_maintenance = AlertReceiveChannel.objects.get(
                maintenance_uuid=self.maintenance_uuid
            )
            integration_on_maintenance.force_disable_maintenance(user)
            return
        except AlertReceiveChannel.DoesNotExist:
            pass

        self.resolve_by_disable_maintenance()
@property
def skip_escalation_in_slack(self):
return self.reason_to_skip_escalation in (
AlertGroup.CHANNEL_ARCHIVED,
AlertGroup.ACCOUNT_INACTIVE,
AlertGroup.RATE_LIMITED,
AlertGroup.CHANNEL_NOT_SPECIFIED,
)
    def is_alert_a_resolve_signal(self, alert):
        """Hook: return True if *alert* should resolve this group. Must be overridden."""
        raise NotImplementedError
@property
def slack_permalink(self) -> typing.Optional[str]:
return None if self.slack_message is None else self.slack_message.permalink
@property
def telegram_permalink(self) -> typing.Optional[str]:
"""
This property will attempt to access an attribute, `prefetched_telegram_messages`, representing a list of
prefetched telegram messages. If this attribute does not exist, it falls back to performing a query.
See `apps.public_api.serializers.incidents.IncidentSerializer.PREFETCH_RELATED` as an example.
"""
from apps.telegram.models.message import TelegramMessage
if hasattr(self, "prefetched_telegram_messages"):
return (
self.prefetched_telegram_messages[0].link
if self.prefetched_telegram_messages
else None
)
main_telegram_message = self.telegram_messages.filter(
chat_id__startswith="-", message_type=TelegramMessage.ALERT_GROUP_MESSAGE
).first()
return main_telegram_message.link if main_telegram_message else None
@property
def permalinks(self) -> Permalinks:
return {
"slack": self.slack_permalink,
"telegram": self.telegram_permalink,
"web": self.web_link,
}
@property
def web_link(self) -> str:
return urljoin(
self.channel.organization.web_link,
f"alert-groups/{self.public_primary_key}",
)
@property
def declare_incident_link(self) -> str:
"""Generate a link for AlertGroup to declare Grafana Incident by click"""
incident_link = urljoin(
self.channel.organization.grafana_url,
"a/grafana-incident-app/incidents/declare/",
)
caption = urllib.parse.quote_plus("OnCall Alert Group")
title = (
urllib.parse.quote_plus(self.web_title_cache)
if self.web_title_cache
else DEFAULT_BACKUP_TITLE
)
title = title[
:2000
] # set max title length to avoid exceptions with too long declare incident link
link = urllib.parse.quote_plus(self.web_link)
return urljoin(incident_link, f"?caption={caption}&url={link}&title={title}")
@property
def happened_while_maintenance(self):
return (
self.root_alert_group is not None
and self.root_alert_group.maintenance_uuid is not None
)
def get_paged_users(self) -> QuerySet[User]:
from apps.alerts.models import AlertGroupLogRecord
users_ids = set()
for log_record in self.log_records.filter(
type__in=(
AlertGroupLogRecord.TYPE_DIRECT_PAGING,
AlertGroupLogRecord.TYPE_UNPAGE_USER,
)
):
# filter paging events, track still active escalations
info = log_record.get_step_specific_info()
user_id = info.get("user") if info else None
if user_id is not None:
users_ids.add(
user_id
) if log_record.type == AlertGroupLogRecord.TYPE_DIRECT_PAGING else users_ids.discard(
user_id
)
return User.objects.filter(public_primary_key__in=users_ids)
def _get_response_time(self):
"""Return response_time based on current alert group status."""
response_time = None
timestamps = (
self.acknowledged_at,
self.resolved_at,
self.silenced_at,
self.wiped_at,
)
min_timestamp = min((ts for ts in timestamps if ts), default=None)
if min_timestamp:
response_time = min_timestamp - self.started_at
return response_time
def _update_metrics(self, organization_id, previous_state, state):
"""Update metrics cache for response time and state as needed."""
updated_response_time = self.response_time
if previous_state != AlertGroupState.FIRING or self.restarted_at:
# only consider response time from the first action
updated_response_time = None
MetricsCacheManager.metrics_update_cache_for_alert_group(
self.channel_id,
organization_id=organization_id,
old_state=previous_state,
new_state=state,
response_time=updated_response_time,
started_at=self.started_at,
)
    def acknowledge_by_user(
        self, user: User, action_source: typing.Optional[str] = None
    ) -> None:
        """Acknowledge this group on behalf of *user* and propagate to dependents.

        Un-silences/un-resolves first if needed (logging each transition), stops
        escalation, updates metric caches, and emits the action-triggered signal.
        """
        from apps.alerts.models import AlertGroupLogRecord

        initial_state = self.state
        logger.debug(f"Started acknowledge_by_user for alert_group {self.pk}")
        # if incident was silenced or resolved, unsilence/unresolve it without starting escalation
        if self.silenced:
            self.un_silence()
            self.log_records.create(
                type=AlertGroupLogRecord.TYPE_UN_SILENCE,
                author=user,
                silence_delay=None,
                reason="Acknowledge button",
            )
        if self.resolved:
            self.unresolve()
            self.log_records.create(
                type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
                author=user,
                reason="Acknowledge button",
            )
        self.acknowledge(acknowledged_by_user=user, acknowledged_by=AlertGroup.USER)
        # Update alert group state and response time metrics cache
        self._update_metrics(
            organization_id=user.organization_id,
            previous_state=initial_state,
            state=self.state,
        )
        self.stop_escalation()
        self.start_ack_reminder_if_needed()

        log_record = self.log_records.create(
            type=AlertGroupLogRecord.TYPE_ACK, author=user
        )

        logger.debug(
            f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
            f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
        )

        alert_group_action_triggered_signal.send(
            sender=self.acknowledge_by_user,
            log_record=log_record.pk,
            action_source=action_source,
        )

        # Acknowledging a root group acknowledges every dependent group as well.
        for dependent_alert_group in self.dependent_alert_groups.all():
            dependent_alert_group.acknowledge_by_user(user, action_source=action_source)

        logger.debug(f"Finished acknowledge_by_user for alert_group {self.pk}")
def acknowledge_by_source(self):
    """Acknowledge this alert group from the alert source (no user) and cascade to dependents."""
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    # if incident was silenced, unsilence it without starting escalation
    if self.silenced:
        self.un_silence()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            silence_delay=None,
            reason="Acknowledge by source",
        )
    self.acknowledge(acknowledged_by=AlertGroup.SOURCE)
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=self.channel.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    self.stop_escalation()
    log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_ACK)
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: alert"
    )
    alert_group_action_triggered_signal.send(
        sender=self.acknowledge_by_source,
        log_record=log_record.pk,
        action_source=None,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.acknowledge_by_source()
def un_acknowledge_by_user(
    self, user: User, action_source: typing.Optional[str] = None
) -> None:
    """Remove the acknowledged state on behalf of ``user`` and cascade to dependents.

    Restarts escalation for root alert groups and updates the state metric cache.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    logger.debug(f"Started un_acknowledge_by_user for alert_group {self.pk}")
    self.unacknowledge()
    # Update alert group state metric cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    # Only root alert groups escalate; dependents follow their root.
    if self.is_root_alert_group:
        self.start_escalation_if_needed()
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_UN_ACK, author=user
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.un_acknowledge_by_user,
        log_record=log_record.pk,
        action_source=action_source,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.un_acknowledge_by_user(
            user, action_source=action_source
        )
    logger.debug(f"Finished un_acknowledge_by_user for alert_group {self.pk}")
def resolve_by_user(
    self, user: User, action_source: typing.Optional[str] = None
) -> None:
    """Resolve this alert group on behalf of ``user`` and cascade to dependents.

    A silenced group is first un-silenced without restarting escalation.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    # if incident was silenced, unsilence it without starting escalation
    if self.silenced:
        self.un_silence()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            silence_delay=None,
            reason="Resolve button",
        )
    self.resolve(resolved_by=AlertGroup.USER, resolved_by_user=user)
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    self.stop_escalation()
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_RESOLVED, author=user
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.resolve_by_user,
        log_record=log_record.pk,
        action_source=action_source,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.resolve_by_user(user, action_source=action_source)
def resolve_by_source(self):
    """Resolve this alert group from the alert source (no user) and cascade to dependents."""
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    # if incident was silenced, unsilence it without starting escalation
    if self.silenced:
        self.un_silence()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            silence_delay=None,
            reason="Resolve by source",
        )
    self.resolve(resolved_by=AlertGroup.SOURCE)
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=self.channel.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    self.stop_escalation()
    log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: alert"
    )
    alert_group_action_triggered_signal.send(
        sender=self.resolve_by_source,
        log_record=log_record.pk,
        action_source=None,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.resolve_by_source()
def resolve_by_last_step(self):
    """Resolve this alert group via the final escalation step and cascade to dependents."""
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    self.resolve(resolved_by=AlertGroup.LAST_STEP)
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=self.channel.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    self.stop_escalation()
    log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: resolve step"
    )
    alert_group_action_triggered_signal.send(
        sender=self.resolve_by_last_step,
        log_record=log_record.pk,
        action_source=None,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.resolve_by_last_step()
def resolve_by_disable_maintenance(self):
    """Resolve this alert group because maintenance was disabled; cascade to dependents.

    NOTE(review): unlike the other resolve_* methods this one does not update the
    metrics cache — presumably maintenance incidents are excluded from metrics;
    confirm before changing.
    """
    from apps.alerts.models import AlertGroupLogRecord

    self.resolve(resolved_by=AlertGroup.DISABLE_MAINTENANCE)
    self.stop_escalation()
    log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: disable maintenance"
    )
    alert_group_action_triggered_signal.send(
        sender=self.resolve_by_disable_maintenance,
        log_record=log_record.pk,
        action_source=None,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.resolve_by_disable_maintenance()
def un_resolve_by_user(
    self, user: User, action_source: typing.Optional[str] = None
) -> None:
    """Un-resolve this alert group on behalf of ``user`` and cascade to dependents.

    No-op for wiped alert groups (``wiped_at`` set): wiping is irreversible here.
    """
    from apps.alerts.models import AlertGroupLogRecord

    if self.wiped_at is None:
        # Captured before mutation so the metrics cache sees the real transition.
        initial_state = self.state
        self.unresolve()
        # Update alert group state metric cache
        self._update_metrics(
            organization_id=user.organization_id,
            previous_state=initial_state,
            state=self.state,
        )
        log_record = self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user
        )
        # Only root alert groups escalate; dependents follow their root.
        if self.is_root_alert_group:
            self.start_escalation_if_needed()
        logger.debug(
            f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
            f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
            f"action source: {action_source}"
        )
        alert_group_action_triggered_signal.send(
            sender=self.un_resolve_by_user,
            log_record=log_record.pk,
            action_source=action_source,
        )
        for dependent_alert_group in self.dependent_alert_groups.all():
            dependent_alert_group.un_resolve_by_user(
                user, action_source=action_source
            )
def attach_by_user(
    self,
    user: User,
    root_alert_group: "AlertGroup",
    action_source: typing.Optional[str] = None,
) -> None:
    """Attach this alert group to ``root_alert_group`` on behalf of ``user``.

    Attachment is only allowed when the target is itself a root group and is
    not resolved; otherwise a TYPE_FAILED_ATTACHMENT log record is created.
    On success this group's ack/silence state is synced to the root's, its
    escalation is stopped, and attached/attached-to log records are created
    on both groups.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Only attach to an unresolved root group (no chains of dependents).
    if root_alert_group.root_alert_group is None and not root_alert_group.resolved:
        self.root_alert_group = root_alert_group
        self.save(update_fields=["root_alert_group"])
        self.stop_escalation()
        # Mirror the root group's acknowledged/silenced state on this group.
        if root_alert_group.acknowledged and not self.acknowledged:
            self.acknowledge_by_user(user, action_source=action_source)
        elif not root_alert_group.acknowledged and self.acknowledged:
            self.un_acknowledge_by_user(user, action_source=action_source)
        if root_alert_group.silenced and not self.silenced:
            self.silence_by_user(
                user, action_source=action_source, silence_delay=None
            )
        if not root_alert_group.silenced and self.silenced:
            self.un_silence_by_user(user, action_source=action_source)
        log_record = self.log_records.create(
            type=AlertGroupLogRecord.TYPE_ATTACHED,
            author=user,
            root_alert_group=root_alert_group,
            reason="Attach dropdown",
        )
        logger.debug(
            f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
            f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
            f"action source: {action_source}"
        )
        alert_group_action_triggered_signal.send(
            sender=self.attach_by_user,
            log_record=log_record.pk,
            action_source=action_source,
        )
        # The root group gets its own log record pointing back at this group.
        log_record_for_root_incident = root_alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_ATTACHED,
            author=user,
            dependent_alert_group=self,
            reason="Attach dropdown",
        )
        logger.debug(
            f"send alert_group_action_triggered_signal for alert_group {root_alert_group.pk}, "
            f"log record {log_record_for_root_incident.pk} with type "
            f"'{log_record_for_root_incident.get_type_display()}', action source: {action_source}"
        )
        alert_group_action_triggered_signal.send(
            sender=self.attach_by_user,
            log_record=log_record_for_root_incident.pk,
            action_source=action_source,
        )
    else:
        # Target was not attachable: record the failure so the UI can report it.
        log_record = self.log_records.create(
            type=AlertGroupLogRecord.TYPE_FAILED_ATTACHMENT,
            author=user,
            root_alert_group=root_alert_group,
            reason="Failed to attach dropdown",
        )
        logger.debug(
            f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
            f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
            f"action source: {action_source}"
        )
        alert_group_action_triggered_signal.send(
            sender=self.attach_by_user,
            log_record=log_record.pk,
            action_source=action_source,
        )
def un_attach_by_user(
    self, user: User, action_source: typing.Optional[str] = None
) -> None:
    """Detach this alert group from its root group on behalf of ``user``.

    Restarts escalation for this (now root) group and writes unattached log
    records on both this group and the former root.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Keep a reference to the former root before clearing the link.
    root_alert_group: AlertGroup = self.root_alert_group
    self.root_alert_group = None
    self.save(update_fields=["root_alert_group"])
    self.start_escalation_if_needed()
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_UNATTACHED,
        author=user,
        root_alert_group=root_alert_group,
        reason="Unattach button",
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.un_attach_by_user,
        log_record=log_record.pk,
        action_source=action_source,
    )
    # The former root gets its own log record pointing back at this group.
    log_record_for_root_incident = root_alert_group.log_records.create(
        type=AlertGroupLogRecord.TYPE_UNATTACHED,
        author=user,
        dependent_alert_group=self,
        reason="Unattach dropdown",
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {root_alert_group.pk}, "
        f"log record {log_record_for_root_incident.pk} "
        f"with type '{log_record_for_root_incident.get_type_display()}', action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.un_attach_by_user,
        log_record=log_record_for_root_incident.pk,
        action_source=action_source,
    )
def un_attach_by_delete(self):
    """Detach this alert group because its root group was deleted; restart escalation."""
    from apps.alerts.models import AlertGroupLogRecord

    self.root_alert_group = None
    self.save(update_fields=["root_alert_group"])
    self.start_escalation_if_needed()
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_UNATTACHED,
        reason="Unattach by deleting root incident",
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: delete"
    )
    alert_group_action_triggered_signal.send(
        sender=self.un_attach_by_delete,
        log_record=log_record.pk,
        action_source=None,
    )
def silence_by_user(
    self,
    user: User,
    silence_delay: typing.Optional[int],
    action_source: typing.Optional[str] = None,
) -> None:
    """Silence this alert group on behalf of ``user`` and cascade to dependents.

    Any resolved/acknowledged/already-silenced state is cleared first (without
    restarting escalation). ``silence_delay`` is the duration in seconds;
    ``None`` or <= 0 silences forever (no unsilence task is scheduled).
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    if self.resolved:
        self.unresolve()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
            author=user,
            reason="Silence button",
        )
    if self.acknowledged:
        self.unacknowledge()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_ACK,
            author=user,
            reason="Silence button",
        )
    if self.silenced:
        # Re-silencing: drop the previous silence first so the log is coherent.
        self.un_silence()
        self.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            silence_delay=None,
            reason="Silence button",
        )
    now = timezone.now()
    if silence_delay is not None and silence_delay > 0:
        # Silence for a fixed period: push escalation ETA past the silence
        # window and schedule the automatic unsilence task.
        silence_delay_timedelta = datetime.timedelta(seconds=silence_delay)
        silenced_until = now + silence_delay_timedelta
        if self.is_root_alert_group:
            self.update_next_step_eta(
                datetime.timedelta(seconds=silence_delay + START_ESCALATION_DELAY)
            )
        self.start_unsilence_task(countdown=silence_delay)
    else:
        # Silence forever: no end time, no unsilence task.
        silence_delay_timedelta = None
        silenced_until = None
    self.silence(
        silenced_at=now,
        silenced_until=silenced_until,
        silenced_by_user=user,
        raw_escalation_snapshot=self.raw_escalation_snapshot,
    )
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_SILENCE,
        author=user,
        silence_delay=silence_delay_timedelta,
        reason="Silence button",
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.silence_by_user,
        log_record=log_record.pk,
        action_source=action_source,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.silence_by_user(user, silence_delay, action_source)
def un_silence_by_user(
    self, user: User, action_source: typing.Optional[str] = None
) -> None:
    """Remove the silenced state on behalf of ``user`` and cascade to dependents.

    Restarts escalation for root alert groups and updates the state metric cache.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    self.un_silence()
    # Update alert group state metric cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    # Only root alert groups escalate; dependents follow their root.
    if self.is_root_alert_group:
        self.start_escalation_if_needed()
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_UN_SILENCE,
        author=user,
        silence_delay=None,
        # NOTE: legacy comment — looks like some time ago there was no TYPE_UN_SILENCE
        reason="Unsilence button",
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: {action_source}"
    )
    alert_group_action_triggered_signal.send(
        sender=self.un_silence_by_user,
        log_record=log_record.pk,
        action_source=action_source,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.un_silence_by_user(user, action_source=action_source)
def wipe_by_user(self, user: User) -> None:
    """Wipe this alert group: resolve it, clear grouping data and wipe each alert.

    Wiping marks the group resolved (resolved_by=WIPED, only on the first wipe),
    stamps ``wiped_at``/``wiped_by``, and cascades to dependent groups.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before any mutation so the metrics cache sees the real transition.
    initial_state = self.state
    # Resolve only on the first wipe; repeated wipes must not re-resolve.
    if not self.wiped_at:
        self.resolve(resolved_by=AlertGroup.WIPED)
    self.stop_escalation()
    self.distinction = ""
    self.web_title_cache = None
    self.wiped_at = timezone.now()
    self.wiped_by = user
    update_fields = ["distinction", "web_title_cache", "wiped_at", "wiped_by"]
    if self.response_time is None:
        self.response_time = self._get_response_time()
        update_fields += ["response_time"]
    # Wipe every individual alert's payload as well.
    for alert in self.alerts.all():
        alert.wipe(wiped_by=self.wiped_by, wiped_at=self.wiped_at)
    self.save(update_fields=update_fields)
    # Update alert group state and response time metrics cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=self.state,
    )
    log_record = self.log_records.create(
        type=AlertGroupLogRecord.TYPE_WIPED,
        author=user,
    )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: wipe"
    )
    alert_group_action_triggered_signal.send(
        sender=self.wipe_by_user,
        log_record=log_record.pk,
        action_source=None,
    )
    for dependent_alert_group in self.dependent_alert_groups.all():
        dependent_alert_group.wipe_by_user(user)
def delete_by_user(self, user: User):
    """Hard-delete this alert group on behalf of ``user`` and detach its dependents.

    The delete signal is sent synchronously (force_sync) before the row is
    removed; dependents are then unattached and restart their own escalation.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # Captured before deletion so the metrics cache can record the transition to None.
    initial_state = self.state
    self.stop_escalation()
    # prevent creating multiple logs
    # filter instead of get_or_create cause it can be multiple logs of this type due deleting error
    log_record = self.log_records.filter(
        type=AlertGroupLogRecord.TYPE_DELETED
    ).last()
    if not log_record:
        log_record = self.log_records.create(
            type=AlertGroupLogRecord.TYPE_DELETED,
            author=user,
        )
    logger.debug(
        f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
        f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
        f"action source: delete"
    )
    alert_group_action_triggered_signal.send(
        sender=self.delete_by_user,
        log_record=log_record.pk,
        action_source=None,  # TODO: Action source is none - it is suspicious
        # this flag forces synchrony call for action handler in representatives
        # (for now it is actual only for Slack representative)
        force_sync=True,
    )
    # Materialize dependents before the delete invalidates the reverse relation.
    dependent_alerts = list(self.dependent_alert_groups.all())
    self.hard_delete()
    # Update alert group state metric cache
    self._update_metrics(
        organization_id=user.organization_id,
        previous_state=initial_state,
        state=None,
    )
    for dependent_alert_group in dependent_alerts:  # unattach dependent incidents
        dependent_alert_group.un_attach_by_delete()
def hard_delete(self):
    """Permanently delete this alert group together with all related records."""
    from apps.alerts.models import ResolutionNote

    # Remove child rows first so nothing related to this group survives.
    self.alerts.all().delete()
    self.slack_messages.all().delete()
    self.personal_log_records.all().delete()
    self.log_records.all().delete()
    self.invitations.all().delete()
    # Resolution notes may be soft-deleted; purge those too.
    ResolutionNote.objects_with_deleted.filter(alert_group=self).delete()
    self.resolution_note_slack_messages.all().delete()
    self.delete()
@staticmethod
def _bulk_acknowledge(
    user: User, alert_groups_to_acknowledge: "QuerySet[AlertGroup]"
) -> None:
    """Acknowledge the given alert groups in bulk on behalf of ``user``.

    Mutates each group in Python, persists with a single ``bulk_update``, then
    writes log records and schedules per-group signals on transaction commit.
    Expected to run inside ``transaction.atomic()`` (see ``bulk_acknowledge``).
    """
    from apps.alerts.models import AlertGroupLogRecord

    # it is needed to unresolve those alert_groups which were resolved to build proper log.
    # NOTE(review): filtering on models.Value("1") presumably matches boolean True
    # across backends — confirm before changing.
    alert_groups_to_unresolve_before_acknowledge = (
        alert_groups_to_acknowledge.filter(resolved=models.Value("1"))
    )
    # it is needed to unsilence those alert_groups which were silenced to build proper log.
    alert_groups_to_unsilence_before_acknowledge = (
        alert_groups_to_acknowledge.filter(silenced=models.Value("1"))
    )
    # convert current qs to list to prevent changes by update
    alert_groups_to_acknowledge_list = list(alert_groups_to_acknowledge)
    alert_groups_to_unresolve_before_acknowledge_list = list(
        alert_groups_to_unresolve_before_acknowledge
    )
    alert_groups_to_unsilence_before_acknowledge_list = list(
        alert_groups_to_unsilence_before_acknowledge
    )
    # Record pre-mutation states so the metrics cache sees the real transitions.
    previous_states = []
    for alert_group in alert_groups_to_acknowledge_list:
        previous_states.append(alert_group.state)
        alert_group.acknowledged = True
        alert_group.resolved = False
        alert_group.resolved_at = None
        alert_group.resolved_by = AlertGroup.NOT_YET
        alert_group.resolved_by_user = None
        alert_group.silenced_until = None
        alert_group.silenced_by_user = None
        alert_group.silenced_at = None
        alert_group.silenced = False
        alert_group.acknowledged_at = timezone.now()
        alert_group.acknowledged_by_user = user
        alert_group.acknowledged_by = AlertGroup.USER
        alert_group.is_escalation_finished = True
        if alert_group.response_time is None:
            alert_group.response_time = alert_group._get_response_time()
    fields_to_update = [
        "acknowledged",
        "resolved",
        "resolved_at",
        "resolved_by",
        "resolved_by_user",
        "silenced_until",
        "silenced_by_user",
        "silenced_at",
        "silenced",
        "acknowledged_at",
        "acknowledged_by_user",
        "acknowledged_by",
        "is_escalation_finished",
        "response_time",
    ]
    AlertGroup.objects.bulk_update(
        alert_groups_to_acknowledge_list, fields=fields_to_update, batch_size=100
    )
    # Log records for the implicit un-resolve / un-silence transitions.
    for alert_group in alert_groups_to_unresolve_before_acknowledge_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
            author=user,
            reason="Bulk action acknowledge",
        )
    for alert_group in alert_groups_to_unsilence_before_acknowledge_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            reason="Bulk action acknowledge",
        )
    for alert_group, previous_state in zip(
        alert_groups_to_acknowledge_list, previous_states
    ):
        # update metrics cache
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=previous_state,
            state=AlertGroupState.ACKNOWLEDGED,
        )
        alert_group.start_ack_reminder_if_needed()
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_ACK, author=user
        )
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
@staticmethod
def bulk_acknowledge(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
    """Acknowledge the selected alert groups (and all their dependents) atomically."""
    root_alert_groups_to_acknowledge = alert_groups.filter(
        ~Q(
            acknowledged=True, resolved=False
        ),  # don't need to ack acknowledged incidents once again
        root_alert_group__isnull=True,
        maintenance_uuid__isnull=True,  # don't ack maintenance incident
    )
    # Find all dependent alert_groups to update them in one query
    # convert qs to list to prevent changes by update
    root_alert_group_pks = list(
        root_alert_groups_to_acknowledge.values_list("pk", flat=True)
    )
    dependent_alert_groups_to_acknowledge = AlertGroup.objects.filter(
        root_alert_group__pk__in=root_alert_group_pks
    )
    with transaction.atomic():
        AlertGroup._bulk_acknowledge(user, root_alert_groups_to_acknowledge)
        AlertGroup._bulk_acknowledge(user, dependent_alert_groups_to_acknowledge)
@staticmethod
def _bulk_resolve(
    user: User, alert_groups_to_resolve: "QuerySet[AlertGroup]"
) -> None:
    """Resolve the given alert groups in bulk on behalf of ``user``.

    Mutates each group in Python, persists with a single ``bulk_update``, then
    writes log records and schedules per-group signals on transaction commit.
    Expected to run inside ``transaction.atomic()`` (see ``bulk_resolve``).
    """
    from apps.alerts.models import AlertGroupLogRecord

    # it is needed to unsilence those alert_groups which were silenced to build proper log.
    alert_groups_to_unsilence_before_resolve = alert_groups_to_resolve.filter(
        silenced=models.Value("1")
    )
    # convert current qs to list to prevent changes by update
    alert_groups_to_resolve_list = list(alert_groups_to_resolve)
    alert_groups_to_unsilence_before_resolve_list = list(
        alert_groups_to_unsilence_before_resolve
    )
    # Record pre-mutation states so the metrics cache sees the real transitions.
    previous_states = []
    for alert_group in alert_groups_to_resolve_list:
        previous_states.append(alert_group.state)
        alert_group.resolved = True
        alert_group.resolved_at = timezone.now()
        alert_group.is_open_for_grouping = None
        alert_group.resolved_by_user = user
        alert_group.resolved_by = AlertGroup.USER
        alert_group.is_escalation_finished = True
        alert_group.silenced_until = None
        alert_group.silenced_by_user = None
        alert_group.silenced_at = None
        alert_group.silenced = False
        if alert_group.response_time is None:
            alert_group.response_time = alert_group._get_response_time()
    fields_to_update = [
        "resolved",
        "resolved_at",
        "resolved_by",
        "resolved_by_user",
        "is_open_for_grouping",
        "silenced_until",
        "silenced_by_user",
        "silenced_at",
        "silenced",
        "is_escalation_finished",
        "response_time",
    ]
    AlertGroup.objects.bulk_update(
        alert_groups_to_resolve_list, fields=fields_to_update, batch_size=100
    )
    # Log records for the implicit un-silence transitions.
    for alert_group in alert_groups_to_unsilence_before_resolve_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            reason="Bulk action resolve",
        )
    for alert_group, previous_state in zip(
        alert_groups_to_resolve_list, previous_states
    ):
        # update metrics cache
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=previous_state,
            state=AlertGroupState.RESOLVED,
        )
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_RESOLVED, author=user
        )
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
@staticmethod
def bulk_resolve(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
    """Resolve the selected alert groups (and all their dependents) atomically.

    Maintenance incidents are handled via ``stop_maintenance`` instead; when
    the organization requires resolution notes, groups without one are skipped.
    """
    # stop maintenance for maintenance incidents
    alert_groups_to_stop_maintenance = alert_groups.filter(
        resolved=False, maintenance_uuid__isnull=False
    )
    for alert_group in alert_groups_to_stop_maintenance:
        alert_group.stop_maintenance(user)
    root_alert_groups_to_resolve = alert_groups.filter(
        resolved=False,
        root_alert_group__isnull=True,
        maintenance_uuid__isnull=True,
    )
    if not root_alert_groups_to_resolve.exists():
        return
    # we know this is an AlertGroup because of the .exists() check just above
    first_alert_group: AlertGroup = root_alert_groups_to_resolve.first()
    organization = first_alert_group.channel.organization
    if organization.is_resolution_note_required:
        # Only resolve groups that have at least one non-deleted resolution note.
        root_alert_groups_to_resolve = root_alert_groups_to_resolve.filter(
            Q(resolution_notes__isnull=False, resolution_notes__deleted_at=None)
        )
    # convert qs to list to prevent changes by update
    root_alert_group_pks = list(
        root_alert_groups_to_resolve.values_list("pk", flat=True)
    )
    dependent_alert_groups_to_resolve = AlertGroup.objects.filter(
        root_alert_group__pk__in=root_alert_group_pks
    )
    with transaction.atomic():
        AlertGroup._bulk_resolve(user, root_alert_groups_to_resolve)
        AlertGroup._bulk_resolve(user, dependent_alert_groups_to_resolve)
@staticmethod
def _bulk_restart_unack(
    user: User, alert_groups_to_restart_unack: "QuerySet[AlertGroup]"
) -> None:
    """Restart (un-acknowledge) the given acknowledged alert groups in bulk.

    Resets ack/resolve/silence state to firing in one ``update()``, then writes
    log records, restarts escalation for roots, and schedules signals on commit.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # convert current qs to list to prevent changes by update
    alert_groups_to_restart_unack_list = list(alert_groups_to_restart_unack)
    alert_groups_to_restart_unack.update(
        acknowledged=False,
        acknowledged_at=None,
        acknowledged_by_user=None,
        acknowledged_by=AlertGroup.NOT_YET,
        resolved=False,
        resolved_at=None,
        is_open_for_grouping=None,
        resolved_by_user=None,
        resolved_by=AlertGroup.NOT_YET,
        silenced_until=None,
        silenced_by_user=None,
        silenced_at=None,
        silenced=False,
        restarted_at=timezone.now(),
    )
    # unacknowledge alert groups
    for alert_group in alert_groups_to_restart_unack_list:
        # update metrics cache (note alert_group.state is the original alert group's state)
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=alert_group.state,
            state=AlertGroupState.FIRING,
        )
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_ACK,
            author=user,
            reason="Bulk action restart",
        )
        # Only root alert groups escalate; dependents follow their root.
        if alert_group.is_root_alert_group:
            alert_group.start_escalation_if_needed()
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
@staticmethod
def _bulk_restart_unresolve(
    user: User, alert_groups_to_restart_unresolve: "QuerySet[AlertGroup]"
) -> None:
    """Restart (un-resolve) the given resolved alert groups in bulk.

    Resets ack/resolve/silence state to firing in one ``update()``, then writes
    log records, restarts escalation for roots, and schedules signals on commit.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # convert current qs to list to prevent changes by update
    alert_groups_to_restart_unresolve_list = list(alert_groups_to_restart_unresolve)
    alert_groups_to_restart_unresolve.update(
        acknowledged=False,
        acknowledged_at=None,
        acknowledged_by_user=None,
        acknowledged_by=AlertGroup.NOT_YET,
        resolved=False,
        resolved_at=None,
        is_open_for_grouping=None,
        resolved_by_user=None,
        resolved_by=AlertGroup.NOT_YET,
        silenced_until=None,
        silenced_by_user=None,
        silenced_at=None,
        silenced=False,
        restarted_at=timezone.now(),
    )
    # unresolve alert groups
    for alert_group in alert_groups_to_restart_unresolve_list:
        # update metrics cache (note alert_group.state is the original alert group's state)
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=alert_group.state,
            state=AlertGroupState.FIRING,
        )
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
            author=user,
            reason="Bulk action restart",
        )
        # Only root alert groups escalate; dependents follow their root.
        if alert_group.is_root_alert_group:
            alert_group.start_escalation_if_needed()
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
@staticmethod
def _bulk_restart_unsilence(
    user: User, alert_groups_to_restart_unsilence: "QuerySet[AlertGroup]"
) -> None:
    """Restart (un-silence) the given silenced alert groups in bulk.

    Resets state to firing in one ``update()``, then writes log records,
    restarts escalation and schedules signals on commit. Callers pass only
    root alert groups here (see ``bulk_restart``), so no root check is made
    before ``start_escalation_if_needed``.
    """
    from apps.alerts.models import AlertGroupLogRecord

    # convert current qs to list to prevent changes by update
    alert_groups_to_restart_unsilence_list = list(alert_groups_to_restart_unsilence)
    alert_groups_to_restart_unsilence.update(
        acknowledged=False,
        acknowledged_at=None,
        acknowledged_by_user=None,
        acknowledged_by=AlertGroup.NOT_YET,
        resolved=False,
        resolved_at=None,
        is_open_for_grouping=None,
        resolved_by_user=None,
        resolved_by=AlertGroup.NOT_YET,
        silenced_until=None,
        silenced_by_user=None,
        silenced_at=None,
        silenced=False,
        restarted_at=timezone.now(),
    )
    # unsilence alert groups
    for alert_group in alert_groups_to_restart_unsilence_list:
        # update metrics cache (note alert_group.state is the original alert group's state)
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=alert_group.state,
            state=AlertGroupState.FIRING,
        )
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            reason="Bulk action restart",
        )
        alert_group.start_escalation_if_needed()
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
@staticmethod
def bulk_restart(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
    """Restart the selected alert groups back to the firing state.

    Handles three disjoint cases in order: acknowledged (un-ack), resolved
    (un-resolve) and silenced (un-silence). Maintenance incidents are excluded
    from the un-ack path; dependents of restarted roots are restarted too.
    """
    root_alert_groups_unack = alert_groups.filter(
        resolved=False,
        acknowledged=True,
        root_alert_group__isnull=True,
        maintenance_uuid__isnull=True,  # don't restart maintenance incident
    )
    # convert qs to list to prevent changes by update
    root_alert_group_pks = list(
        root_alert_groups_unack.values_list("pk", flat=True)
    )
    dependent_alert_groups_unack = AlertGroup.objects.filter(
        root_alert_group__pk__in=root_alert_group_pks
    )
    with transaction.atomic():
        AlertGroup._bulk_restart_unack(user, root_alert_groups_unack)
        AlertGroup._bulk_restart_unack(user, dependent_alert_groups_unack)
    root_alert_groups_unresolve = alert_groups.filter(
        resolved=True, root_alert_group__isnull=True
    )
    # convert qs to list to prevent changes by update
    root_alert_group_pks = list(
        root_alert_groups_unresolve.values_list("pk", flat=True)
    )
    dependent_alert_groups_unresolve = AlertGroup.objects.filter(
        root_alert_group__pk__in=root_alert_group_pks
    )
    with transaction.atomic():
        AlertGroup._bulk_restart_unresolve(user, root_alert_groups_unresolve)
        AlertGroup._bulk_restart_unresolve(user, dependent_alert_groups_unresolve)
    # Silenced groups are neither acked nor resolved, so only roots remain here.
    alert_groups_to_restart_unsilence = alert_groups.filter(
        resolved=False,
        acknowledged=False,
        silenced=True,
        root_alert_group__isnull=True,
    )
    AlertGroup._bulk_restart_unsilence(user, alert_groups_to_restart_unsilence)
@staticmethod
def _bulk_silence(
    user: User, alert_groups_to_silence: "QuerySet[AlertGroup]", silence_delay: int
) -> None:
    """Silence the given alert groups in bulk on behalf of ``user``.

    ``silence_delay`` is the duration in seconds; ``None``/<= 0 silences
    forever. Mutates each group in Python, persists with one ``bulk_update``,
    then writes log records and schedules signals/unsilence tasks on commit.
    Expected to run inside ``transaction.atomic()`` (see ``bulk_silence``).
    """
    from apps.alerts.models import AlertGroupLogRecord

    now = timezone.now()
    silence_for_period = silence_delay is not None and silence_delay > 0
    if silence_for_period:
        silence_delay_timedelta = datetime.timedelta(seconds=silence_delay)
        silenced_until = now + silence_delay_timedelta
    else:
        # Silence forever: no end time, no unsilence task.
        silence_delay_timedelta = None
        silenced_until = None
    # Querysets for the implicit state transitions, evaluated to lists below
    # before the bulk update changes the underlying rows.
    alert_groups_to_unsilence_before_silence = alert_groups_to_silence.filter(
        silenced=True, acknowledged=False, resolved=False
    )
    alert_groups_to_unacknowledge_before_silence = alert_groups_to_silence.filter(
        resolved=False, acknowledged=True
    )
    alert_groups_to_unresolve_before_silence = alert_groups_to_silence.filter(
        resolved=True
    )
    # convert current qs to list to prevent changes by update
    alert_groups_to_silence_list = list(alert_groups_to_silence)
    alert_groups_to_unsilence_before_silence_list = list(
        alert_groups_to_unsilence_before_silence
    )
    alert_groups_to_unacknowledge_before_silence_list = list(
        alert_groups_to_unacknowledge_before_silence
    )
    alert_groups_to_unresolve_before_silence_list = list(
        alert_groups_to_unresolve_before_silence
    )
    # Record pre-mutation states so the metrics cache sees the real transitions.
    previous_states = []
    for alert_group in alert_groups_to_silence_list:
        previous_states.append(alert_group.state)
        alert_group.acknowledged = False
        alert_group.acknowledged_at = None
        alert_group.acknowledged_by_user = None
        alert_group.acknowledged_by = AlertGroup.NOT_YET
        alert_group.resolved = False
        alert_group.resolved_at = None
        alert_group.resolved_by_user = None
        alert_group.resolved_by = AlertGroup.NOT_YET
        alert_group.silenced = True
        alert_group.silenced_at = now
        alert_group.silenced_until = silenced_until
        alert_group.silenced_by_user = user
        if not silence_for_period:
            alert_group.is_escalation_finished = True
        else:
            # Push the escalation ETA past the silence window.
            alert_group.update_next_step_eta(
                datetime.timedelta(seconds=silence_delay + START_ESCALATION_DELAY)
            )
        if alert_group.response_time is None:
            alert_group.response_time = alert_group._get_response_time()
    # raw_escalation_snapshot is included because update_next_step_eta mutates it.
    fields_to_update = [
        "acknowledged",
        "acknowledged_at",
        "acknowledged_by_user",
        "acknowledged_by",
        "resolved",
        "resolved_at",
        "resolved_by_user",
        "resolved_by",
        "silenced",
        "silenced_at",
        "silenced_until",
        "silenced_by_user",
        "is_escalation_finished",
        "raw_escalation_snapshot",
        "response_time",
    ]
    AlertGroup.objects.bulk_update(
        alert_groups_to_silence_list, fields=fields_to_update, batch_size=100
    )
    # create log records
    for alert_group in alert_groups_to_unresolve_before_silence_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
            author=user,
            reason="Bulk action silence",
        )
    for alert_group in alert_groups_to_unsilence_before_silence_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_SILENCE,
            author=user,
            reason="Bulk action silence",
        )
    for alert_group in alert_groups_to_unacknowledge_before_silence_list:
        alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_UN_ACK,
            author=user,
            reason="Bulk action silence",
        )
    for alert_group, previous_state in zip(
        alert_groups_to_silence_list, previous_states
    ):
        # update metrics cache
        alert_group._update_metrics(
            organization_id=user.organization_id,
            previous_state=previous_state,
            state=AlertGroupState.SILENCED,
        )
        log_record = alert_group.log_records.create(
            type=AlertGroupLogRecord.TYPE_SILENCE,
            author=user,
            silence_delay=silence_delay_timedelta,
            reason="Bulk action silence",
        )
        # Defer the signal until the surrounding transaction commits.
        transaction.on_commit(partial(send_alert_group_signal.delay, log_record.pk))
        if silence_for_period and alert_group.is_root_alert_group:
            alert_group.start_unsilence_task(countdown=silence_delay)
@staticmethod
def bulk_silence(
user: User, alert_groups: "QuerySet[AlertGroup]", silence_delay: int
) -> None:
root_alert_groups_to_silence = alert_groups.filter(
root_alert_group__isnull=True,
maintenance_uuid__isnull=True, # don't silence maintenance incident
)
# convert qs to list to prevent changes by update
root_alert_group_pks = list(
root_alert_groups_to_silence.values_list("pk", flat=True)
)
dependent_alert_groups_to_silence = alert_groups.filter(
root_alert_group__pk__in=root_alert_group_pks
)
with transaction.atomic():
AlertGroup._bulk_silence(user, root_alert_groups_to_silence, silence_delay)
AlertGroup._bulk_silence(
user, dependent_alert_groups_to_silence, silence_delay
)
    def start_ack_reminder_if_needed(self) -> None:
        """Schedule the acknowledge-reminder Celery task.

        No-op for non-root alert groups or when the organization's
        "Remind every N hours" setting maps to a falsy countdown.
        """
        from apps.user_management.models import Organization
        if not self.is_root_alert_group:
            return
        # Check if the "Remind every N hours" setting is enabled
        countdown = Organization.ACKNOWLEDGE_REMIND_DELAY[
            self.channel.organization.acknowledge_remind_timeout
        ]
        if not countdown:
            return
        # Fresh process id saved before scheduling; presumably lets a running
        # reminder task detect it has been superseded — TODO confirm against
        # acknowledge_reminder_task.
        self.last_unique_unacknowledge_process_id = celery_uuid()
        self.save(update_fields=["last_unique_unacknowledge_process_id"])
        acknowledge_reminder_task.apply_async(
            (self.pk, self.last_unique_unacknowledge_process_id), countdown=countdown
        )
def start_unsilence_task(self, countdown):
task_id = celery_uuid()
self.unsilence_task_uuid = task_id
self.save(update_fields=["unsilence_task_uuid"])
unsilence_task.apply_async((self.pk,), task_id=task_id, countdown=countdown)
    @property
    def is_root_alert_group(self):
        # A "root" alert group has no parent; grouped (attached) ones do.
        return self.root_alert_group is None
def acknowledge(self, **kwargs):
if not self.acknowledged:
self.acknowledged = True
self.acknowledged_at = timezone.now()
for k, v in kwargs.items():
setattr(self, k, v)
update_fields = ["acknowledged", "acknowledged_at", *kwargs.keys()]
if self.response_time is None:
self.response_time = self._get_response_time()
update_fields += ["response_time"]
self.save(update_fields=update_fields)
def unacknowledge(self):
self.un_silence()
if self.acknowledged:
self.acknowledged = False
self.acknowledged_at = None
self.acknowledged_by_user = None
self.acknowledged_by = AlertGroup.NOT_YET
self.save(
update_fields=[
"acknowledged",
"acknowledged_at",
"acknowledged_by_user",
"acknowledged_by",
]
)
def resolve(self, **kwargs):
if not self.resolved:
self.resolved = True
self.resolved_at = timezone.now()
self.is_open_for_grouping = None
for k, v in kwargs.items():
setattr(self, k, v)
update_fields = [
"resolved",
"resolved_at",
"is_open_for_grouping",
*kwargs.keys(),
]
if self.response_time is None:
self.response_time = self._get_response_time()
update_fields += ["response_time"]
self.save(update_fields=update_fields)
def unresolve(self):
self.unacknowledge()
if self.resolved:
self.resolved = False
self.resolved_at = None
self.resolved_by = AlertGroup.NOT_YET
self.resolved_by_user = None
self.save(
update_fields=[
"resolved",
"resolved_at",
"resolved_by",
"resolved_by_user",
]
)
def silence(self, **kwargs):
if not self.silenced:
self.silenced = True
if "silenced_at" not in kwargs:
kwargs["silenced_at"] = timezone.now()
for k, v in kwargs.items():
setattr(self, k, v)
update_fields = ["silenced", *kwargs.keys()]
if self.response_time is None:
self.response_time = self._get_response_time()
update_fields += ["response_time"]
self.save(update_fields=update_fields)
def un_silence(self):
self.silenced_until = None
self.silenced_by_user = None
self.silenced_at = None
self.silenced = False
self.unsilence_task_uuid = None
self.restarted_at = timezone.now()
self.save(
update_fields=[
"silenced_until",
"silenced",
"silenced_by_user",
"silenced_at",
"unsilence_task_uuid",
"restarted_at",
]
)
@property
def long_verbose_name(self):
title = str_or_backup(
self.slack_templated_first_alert.title, DEFAULT_BACKUP_TITLE
)
return title
@property
def long_verbose_name_without_formatting(self):
sf = SlackFormatter(self.channel.organization)
title = self.long_verbose_name
title = sf.format(title)
title = clean_markup(title)
return title
def get_resolve_text(self, mention_user=False):
if self.resolved_by == AlertGroup.SOURCE:
return "Resolved by alert source"
elif self.resolved_by == AlertGroup.LAST_STEP:
return "Resolved automatically"
elif self.resolved_by == AlertGroup.WIPED:
return "Resolved by wipe"
elif self.resolved_by == AlertGroup.DISABLE_MAINTENANCE:
return "Resolved by stop maintenance"
else:
if self.resolved_by_user is not None:
user_text = self.resolved_by_user.get_username_with_slack_verbal(
mention=mention_user
)
return f"Resolved by {user_text}"
else:
return "Resolved"
def get_acknowledge_text(self, mention_user=False):
if self.acknowledged_by == AlertGroup.SOURCE:
return "Acknowledged by alert source"
elif (
self.acknowledged_by == AlertGroup.USER
and self.acknowledged_by_user is not None
):
user_text = self.acknowledged_by_user.get_username_with_slack_verbal(
mention=mention_user
)
return f"Acknowledged by {user_text}"
else:
return "Acknowledged"
def render_after_resolve_report_json(self) -> list[LogRecords]:
from apps.alerts.models import AlertGroupLogRecord, ResolutionNote
from apps.base.models import UserNotificationPolicyLogRecord
log_builder = IncidentLogBuilder(self)
log_records_list = log_builder.get_log_records_list(with_resolution_notes=True)
result_log_report = list()
for log_record in log_records_list:
if type(log_record) == AlertGroupLogRecord:
result_log_report.append(log_record.render_log_line_json())
elif type(log_record) == UserNotificationPolicyLogRecord:
result_log_report.append(log_record.rendered_notification_log_line_json)
elif type(log_record) == ResolutionNote:
result_log_report.append(log_record.render_log_line_json())
return result_log_report
    @property
    def has_resolution_notes(self):
        # True when at least one resolution note is attached to this group.
        return self.resolution_notes.exists()
@property
def state(self):
if self.resolved:
return AlertGroupState.RESOLVED
elif self.acknowledged:
return AlertGroupState.ACKNOWLEDGED
elif self.silenced:
return AlertGroupState.SILENCED
else:
return AlertGroupState.FIRING
@property
def notify_in_slack_enabled(self):
channel_filter = self.channel_filter_with_respect_to_escalation_snapshot
if channel_filter is not None:
return channel_filter.notify_in_slack
else:
return True
@property
def is_presented_in_slack(self):
return self.slack_message and self.channel.organization.slack_team_identity
@property
def slack_channel_id(self) -> str | None:
if not self.channel.organization.slack_team_identity:
return None
if self.slack_message:
return self.slack_message.channel_id
if self.channel_filter:
return self.channel_filter.slack_channel_id_or_general_log_id
return None
    @property
    def slack_message(self) -> typing.Optional["SlackMessage"]:
        # Oldest Slack message for this alert group (the original post),
        # or None when it was never posted to Slack.
        return self.slack_messages.order_by("created_at").first()
    @cached_property
    def last_stop_escalation_log(self):
        """Most recent log record (highest pk) that stopped escalation —
        a resolve, acknowledge or silence action — or None.

        Cached for the lifetime of this model instance.
        """
        from apps.alerts.models import AlertGroupLogRecord
        stop_escalation_log = (
            self.log_records.filter(
                type__in=[
                    AlertGroupLogRecord.TYPE_RESOLVED,
                    AlertGroupLogRecord.TYPE_ACK,
                    AlertGroupLogRecord.TYPE_SILENCE,
                ]
            )
            .order_by("pk")
            .last()
        )
        return stop_escalation_log
def alerts_count_gt(self, max_alerts) -> bool:
"""
alerts_count_gt checks if there are more than max_alerts alerts in given alert group.
It's optimized for alert groups with big number of alerts and relatively small max_alerts.
"""
count = self.alerts.all()[: max_alerts + 1].count()
return count > max_alerts
@receiver(post_save, sender=AlertGroup)
def listen_for_alertgroup_model_save(sender, instance, created, *args, **kwargs):
    """post_save hook: seed the state/response-time metrics cache for every
    newly created, non-maintenance alert group."""
    if created and not instance.is_maintenance_incident:
        # Update alert group state and response time metrics cache
        instance._update_metrics(
            organization_id=instance.channel.organization_id,
            previous_state=None,
            state=AlertGroupState.FIRING,
        )
# NOTE: the explicit `post_save.connect(listen_for_alertgroup_model_save,
# AlertGroup)` that used to follow was removed — @receiver above already
# performs that exact registration, and Django's Signal.connect() skips
# duplicate (receiver, sender) registrations, so the call was redundant.
from Foundation import *
from PyObjCTools.TestSupport import *
try:
    # Py2/Py3 compatibility: Python 3 has no builtin `unicode`, so alias it
    # to `str` for the isinstance checks below.
    unicode
except NameError:
    unicode = str
class TestNSURLProtectionSpace(TestCase):
    """Verify NSURLProtectionSpace constants exist as NSStrings and that the
    BOOL-returning methods are bridged with a boolean result type."""
    def testConstants(self):
        # Proxy-type and authentication-method constants available on all
        # supported macOS versions.
        self.assertIsInstance(NSURLProtectionSpaceHTTPProxy, unicode)
        self.assertIsInstance(NSURLProtectionSpaceHTTPSProxy, unicode)
        self.assertIsInstance(NSURLProtectionSpaceFTPProxy, unicode)
        self.assertIsInstance(NSURLProtectionSpaceSOCKSProxy, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodDefault, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodHTTPBasic, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodHTTPDigest, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodHTMLForm, unicode)
    @min_os_level("10.5")
    def testConstants10_5(self):
        # Constants introduced in macOS 10.5.
        self.assertIsInstance(NSURLProtectionSpaceHTTP, unicode)
        self.assertIsInstance(NSURLProtectionSpaceHTTPS, unicode)
        self.assertIsInstance(NSURLProtectionSpaceFTP, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodNTLM, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodNegotiate, unicode)
    @min_os_level("10.6")
    def testConstants10_6(self):
        # Constants introduced in macOS 10.6.
        self.assertIsInstance(NSURLAuthenticationMethodClientCertificate, unicode)
        self.assertIsInstance(NSURLAuthenticationMethodServerTrust, unicode)
    def testMethods(self):
        # Objective-C BOOL results must be bridged to Python booleans.
        self.assertResultIsBOOL(NSURLProtectionSpace.receivesCredentialSecurely)
        self.assertResultIsBOOL(NSURLProtectionSpace.isProxy)
# Allow running this test file directly (main comes from PyObjCTools.TestSupport).
if __name__ == "__main__":
    main()
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState
# Number of transition-frequency categories tracked (see Latin1ClassModel).
FREQ_CAT_NUM = 4
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes
# Maps each of the 256 Latin-1 byte values to one of the character classes
# above; eight entries per row, the row comment giving the byte range.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,  # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,  # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,  # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,  # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,  # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,  # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,  # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,  # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,  # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,  # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,  # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,  # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,  # F8 - FF
)
# Likelihood of each (previous class -> current class) transition, indexed
# as [prev * CLASS_NUM + cur].
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Heuristic prober for ISO-8859-1 (Latin-1) text.

    Scores transitions between character classes of consecutive bytes using
    Latin1ClassModel; an "illegal" transition (frequency category 0) rules
    Latin-1 out.  Confidence is deliberately scaled down so more specific
    probers win ties.
    """
    def __init__(self):
        super(Latin1Prober, self).__init__()
        self._last_char_class = None
        self._freq_counter = None
        self.reset()
    def reset(self):
        # Start each run as if preceded by an "other" character, with all
        # frequency-category counters zeroed.
        self._last_char_class = OTH
        self._freq_counter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)
    @property
    def charset_name(self):
        return "ISO-8859-1"
    @property
    def language(self):
        # No single language is associated with Latin-1.
        return ""
    def feed(self, byte_str):
        """Consume bytes, tallying class-transition frequencies.

        Sets state to NOT_ME on the first illegal transition.
        """
        byte_str = self.filter_with_english_letters(byte_str)
        for c in byte_str:
            char_class = Latin1_CharToClass[c]
            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
            if freq == 0:
                self._state = ProbingState.NOT_ME
                break
            self._freq_counter[freq] += 1
            self._last_char_class = char_class
        return self.state
    def get_confidence(self):
        """Confidence in [0, 1): likely-count minus penalized unlikely-count."""
        if self.state == ProbingState.NOT_ME:
            return 0.01
        total = sum(self._freq_counter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "Very unlikely" transitions are penalized 20x.
            confidence = (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
from __future__ import unicode_literals
import errno
import os
import random
import re
import socket
import time
from ..compat import compat_str, compat_urllib_error
from ..utils import (
ContentTooShortError,
XAttrMetadataError,
XAttrUnavailableError,
encodeFilename,
int_or_none,
sanitize_open,
sanitized_Request,
write_xattr,
)
from .common import FileDownloader
class HttpFD(FileDownloader):
    """Plain HTTP(S) downloader supporting resume, chunked ranges and retries."""
    def real_download(self, filename, info_dict):
        """Download ``info_dict['url']`` to ``filename``.

        Returns True on success, False on a fatal error.  Honors the
        ``continuedl``, ``retries``, ``test``, ``http_chunk_size``,
        ``buffersize``, ``min_filesize``/``max_filesize``, ``noresizebuffer``,
        ``xattr_set_filesize`` and ``updatetime`` params.
        """
        url = info_dict["url"]
        # Attribute-style wrapper around a dict; holds the mutable download
        # state shared by the nested helper functions below.
        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__
        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None
        # Do not include the Accept-Encoding header
        headers = {"Youtubedl-no-compression": "True"}
        add_headers = info_dict.get("http_headers")
        if add_headers:
            headers.update(add_headers)
        is_test = self.params.get("test", False)
        # In test mode only a fixed-size prefix is fetched; otherwise the
        # chunk size comes from downloader options / params (0 = no chunking).
        chunk_size = (
            self._TEST_FILE_SIZE
            if is_test
            else (
                info_dict.get("downloader_options", {}).get("http_chunk_size")
                or self.params.get("http_chunk_size")
                or 0
            )
        )
        ctx.open_mode = "wb"
        ctx.resume_len = 0
        ctx.data_len = None
        ctx.block_size = self.params.get("buffersize", 1024)
        ctx.start_time = time.time()
        ctx.chunk_size = None
        if self.params.get("continuedl", True):
            # Establish possible resume length
            ctx.resume_len = info_dict.get("frag_resume_len")
            if ctx.resume_len is None:
                ctx.resume_len = self.filesize_or_none(ctx.tmpfilename) or 0
        ctx.is_resume = ctx.resume_len > 0
        count = 0
        retries = self.params.get("retries", 0)
        # Control-flow exceptions used to communicate between the nested
        # helpers and the retry loop at the bottom of this method.
        class SucceedDownload(Exception):
            pass
        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error
        class NextFragment(Exception):
            pass
        def set_range(req, start, end):
            # Add a "Range: bytes=start-[end]" header to the request.
            range_header = "bytes=%d-" % start
            if end:
                range_header += compat_str(end)
            req.add_header("Range", range_header)
        def establish_connection():
            """Open the HTTP connection, negotiating resume/range behavior."""
            # Randomize chunk size slightly to look less mechanical.
            ctx.chunk_size = (
                random.randint(int(chunk_size * 0.95), chunk_size)
                if not is_test and chunk_size
                else chunk_size
            )
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = "ab"
            elif ctx.chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False
            range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
            if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                range_end = ctx.data_len - 1
            has_range = range_start is not None
            ctx.has_range = has_range
            request = sanitized_Request(url, None, headers)
            if has_range:
                set_range(request, range_start, range_end)
            # Establish connection
            try:
                try:
                    ctx.data = self.ydl.urlopen(request)
                except (compat_urllib_error.URLError,) as err:
                    # reason may not be available, e.g. for urllib2.HTTPError on python 2.6
                    reason = getattr(err, "reason", None)
                    if isinstance(reason, socket.timeout):
                        raise RetryDownload(err)
                    raise err
                # When trying to resume, Content-Range HTTP header of response has to be checked
                # to match the value of requested Range HTTP header. This is due to webservers
                # that don't support resuming and serve a whole file with no Content-Range
                # set in response despite requested Range (see
                # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get("Content-Range")
                    if content_range:
                        content_range_m = re.search(
                            r"bytes (\d+)-(\d+)?(?:/(\d+))?", content_range
                        )
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m:
                            if range_start == int(content_range_m.group(1)):
                                content_range_end = int_or_none(
                                    content_range_m.group(2)
                                )
                                content_len = int_or_none(content_range_m.group(3))
                                accept_content_len = (
                                    # Non-chunked download
                                    not ctx.chunk_size
                                    # Chunked download and requested piece or
                                    # its part is promised to be served
                                    or content_range_end == range_end
                                    or content_len < range_end
                                )
                                if accept_content_len:
                                    ctx.data_len = content_len
                                    return
                    # Content-Range is either not present or invalid. Assuming remote webserver is
                    # trying to send the whole file, resume is not possible, so wiping the local file
                    # and performing entire redownload
                    if range_start > 0:
                        self.report_unable_to_resume()
                    ctx.resume_len = 0
                    ctx.open_mode = "wb"
                ctx.data_len = int_or_none(ctx.data.info().get("Content-length", None))
                return
            except (compat_urllib_error.HTTPError,) as err:
                if err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(
                            sanitized_Request(url, None, headers)
                        )
                        content_length = ctx.data.info()["Content-Length"]
                    except (compat_urllib_error.HTTPError,) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if content_length is not None and (
                            ctx.resume_len - 100
                            < int(content_length)
                            < ctx.resume_len + 100
                        ):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress(
                                {
                                    "filename": ctx.filename,
                                    "status": "finished",
                                    "downloaded_bytes": ctx.resume_len,
                                    "total_bytes": ctx.resume_len,
                                }
                            )
                            raise SucceedDownload()
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = "wb"
                            return
                elif err.code < 500 or err.code >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except socket.error as err:
                if err.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise
                raise RetryDownload(err)
        def download():
            """Stream the response body to disk; returns True/False, or raises
            one of the control-flow exceptions above."""
            data_len = ctx.data.info().get("Content-length", None)
            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE
            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get("min_filesize")
                max_data_len = self.params.get("max_filesize")
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen(
                        "\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting."
                        % (data_len, min_data_len)
                    )
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen(
                        "\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting."
                        % (data_len, max_data_len)
                    )
                    return False
            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()
            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring
            def retry(e):
                # Record how far we got, close the stream, and signal a retry.
                to_stdout = ctx.tmpfilename == "-"
                if ctx.stream is not None:
                    if not to_stdout:
                        ctx.stream.close()
                    ctx.stream = None
                ctx.resume_len = (
                    byte_counter
                    if to_stdout
                    else os.path.getsize(encodeFilename(ctx.tmpfilename))
                )
                raise RetryDownload(e)
            while True:
                try:
                    # Download and write
                    data_block = ctx.data.read(
                        block_size
                        if data_len is None
                        else min(block_size, data_len - byte_counter)
                    )
                # socket.timeout is a subclass of socket.error but may not have
                # errno set
                except socket.timeout as e:
                    retry(e)
                except socket.error as e:
                    # SSLError on python 2 (inherits socket.error) may have
                    # no errno set but this error message
                    if (
                        e.errno in (errno.ECONNRESET, errno.ETIMEDOUT)
                        or getattr(e, "message", None) == "The read operation timed out"
                    ):
                        retry(e)
                    raise
                byte_counter += len(data_block)
                # exit loop when download is finished
                if len(data_block) == 0:
                    break
                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = sanitize_open(
                            ctx.tmpfilename, ctx.open_mode
                        )
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except (OSError, IOError) as err:
                        self.report_error("unable to open for writing: %s" % str(err))
                        return False
                    if (
                        self.params.get("xattr_set_filesize", False)
                        and data_len is not None
                    ):
                        try:
                            write_xattr(
                                ctx.tmpfilename,
                                "user.ytdl.filesize",
                                str(data_len).encode("utf-8"),
                            )
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error(
                                "unable to set filesize xattr: %s" % str(err)
                            )
                try:
                    ctx.stream.write(data_block)
                except (IOError, OSError) as err:
                    self.to_stderr("\n")
                    self.report_error("unable to write data: %s" % str(err))
                    return False
                # Apply rate limit
                self.slow_down(start, now, byte_counter - ctx.resume_len)
                # end measuring of one loop run
                now = time.time()
                after = now
                # Adjust block size
                if not self.params.get("noresizebuffer", False):
                    block_size = self.best_block_size(after - before, len(data_block))
                before = after
                # Progress message
                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                eta = self.calc_eta(
                    speed, ctx.data_len and (ctx.data_len - byte_counter)
                )
                self._hook_progress(
                    {
                        "status": "downloading",
                        "downloaded_bytes": byte_counter,
                        "total_bytes": ctx.data_len,
                        "tmpfilename": ctx.tmpfilename,
                        "filename": ctx.filename,
                        "eta": eta,
                        "speed": speed,
                        "elapsed": now - ctx.start_time,
                    }
                )
                if data_len is not None and byte_counter == data_len:
                    break
            # Chunked mode: more of the file remains — ask for the next range.
            if (
                not is_test
                and ctx.chunk_size
                and ctx.data_len is not None
                and byte_counter < ctx.data_len
            ):
                ctx.resume_len = byte_counter
                # ctx.block_size = block_size
                raise NextFragment()
            if ctx.stream is None:
                self.to_stderr("\n")
                self.report_error("Did not get any data blocks")
                return False
            if ctx.tmpfilename != "-":
                ctx.stream.close()
            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                if count <= retries:
                    retry(err)
                raise err
            self.try_rename(ctx.tmpfilename, ctx.filename)
            # Update file modification time
            if self.params.get("updatetime", True):
                info_dict["filetime"] = self.try_utime(
                    ctx.filename, ctx.data.info().get("last-modified", None)
                )
            self._hook_progress(
                {
                    "downloaded_bytes": byte_counter,
                    "total_bytes": byte_counter,
                    "filename": ctx.filename,
                    "status": "finished",
                    "elapsed": time.time() - ctx.start_time,
                }
            )
            return True
        # Retry loop: RetryDownload restarts (counted), NextFragment continues
        # with the next chunk (not counted), SucceedDownload short-circuits.
        while count <= retries:
            try:
                establish_connection()
                return download()
            except RetryDownload as e:
                count += 1
                if count <= retries:
                    self.report_retry(e.source_error, count, retries)
                continue
            except NextFragment:
                continue
            except SucceedDownload:
                return True
        self.report_error("giving up after %s retries" % retries)
        return False
# This file is part of MyPaint.
# Copyright (C) 2009-2018 by the MyPaint Development Team.
# Copyright (C) 2008-2014 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Background tile chooser dialog"""
## Imports
from __future__ import division, print_function
import logging
import os
import sys
from gettext import gettext as _
import lib.pixbuf
from lib import helpers, tiledsurface
from lib.gibindings import GdkPixbuf, Gtk
from lib.pycompat import unicode, xrange
from . import pixbuflist, windowing
logger = logging.getLogger(__name__)
## Settings and consts
# Background tile size in pixels; backgrounds are handled as N x N tiles.
N = tiledsurface.N
# Filename (inside BACKGROUNDS_SUBDIR) of the user's saved default background.
DEFAULT_BACKGROUND = "default.png"
# Stock background file; presumably used when no default exists — its use is
# not visible in this part of the file.
FALLBACK_BACKGROUND = "mrmamurk/mamurk_e_1.png"
# Subdirectory (of both stock and user data dirs) holding background images.
BACKGROUNDS_SUBDIR = "backgrounds"
# Custom dialog response id for the "Save as Default" button.
RESPONSE_SAVE_AS_DEFAULT = 1
# Size limit passed to load_background() as its bloatmax default.
BLOAT_MAX_SIZE = 1024
## Class defs
class BackgroundWindow(windowing.Dialog):
    """Modal dialog for choosing the canvas background.

    Offers a "Pattern" tab (tiled images from stock/user dirs) and a
    "Color" tab (flat color, optionally saved as a new pattern).
    """
    def __init__(self):
        from gui import application
        app = application.get_app()
        assert app is not None
        windowing.Dialog.__init__(self, app=app, title=_("Background"), modal=True)
        self.add_button(_("Save as Default"), RESPONSE_SAVE_AS_DEFAULT)
        self.add_button(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)
        self._current_background_pixbuf = None  # set when changed
        # Set up window.
        self.connect("response", self._response_cb)
        notebook = self.nb = Gtk.Notebook()
        self.vbox.pack_start(notebook, True, True, 0)
        # Set up patterns tab.
        patterns_scroll = Gtk.ScrolledWindow()
        patterns_scroll.set_policy(
            Gtk.PolicyType.NEVER,
            Gtk.PolicyType.AUTOMATIC,
        )
        notebook.append_page(patterns_scroll, Gtk.Label(label=_("Pattern")))
        self.bgl = BackgroundList(self)
        patterns_scroll.add(self.bgl)
        self.connect("realize", self._realize_cb)
        self.connect("show", self._show_cb)
        self.connect("hide", self._hide_cb)
        # Set up colors tab.
        color_vbox = Gtk.VBox()
        notebook.append_page(color_vbox, Gtk.Label(label=_("Color")))
        self.cs = Gtk.ColorSelection()
        self.cs.connect("color-changed", self._color_changed_cb)
        color_vbox.pack_start(self.cs, True, True, 0)
        b = Gtk.Button(label=_("Add color to Patterns"))
        b.connect("clicked", self._add_color_to_patterns_cb)
        color_vbox.pack_start(b, False, True, 0)
    def _realize_cb(self, dialog):
        # Background pixbufs are loaded lazily, on first realize.
        if not self.bgl.initialized:
            self.bgl.initialize()
    def _show_cb(self, dialog):
        # "Save as Default" stays disabled until the user picks something.
        self._current_background_pixbuf = None
        self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, False)
    def _hide_cb(self, dialog):
        self._current_background_pixbuf = None
    def _response_cb(self, dialog, response, *args):
        if response == RESPONSE_SAVE_AS_DEFAULT:
            self._save_as_default_cb()
        elif response == Gtk.ResponseType.ACCEPT:
            self.hide()
    def _color_changed_cb(self, widget):
        # Live-preview the flat color as the document background.
        pixbuf = self._get_selected_color_pixbuf()
        self.set_background(pixbuf)
    def _get_selected_color_pixbuf(self):
        """Return an N x N pixbuf filled with the currently selected color."""
        rgb = self.cs.get_current_color()
        rgb = (rgb.red, rgb.green, rgb.blue)
        rgb = (c / 0xFFFF for c in rgb)
        pixbuf = new_blank_pixbuf(rgb, N, N)
        return pixbuf
    def _save_as_default_cb(self):
        """Write the currently chosen background as the user's default."""
        pixbuf = self._current_background_pixbuf
        assert pixbuf is not None, "BG pixbuf was not changed."
        path = os.path.join(
            self.app.user_datapath,
            BACKGROUNDS_SUBDIR,
            DEFAULT_BACKGROUND,
        )
        lib.pixbuf.save(pixbuf, path, "png")
        self.hide()
    def set_background(self, pixbuf):
        """Apply ``pixbuf`` as the document background and enable saving it."""
        doc = self.app.doc.model
        doc.layer_stack.set_background(pixbuf, make_default=True)
        self._current_background_pixbuf = pixbuf
        self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, True)
    def _add_color_to_patterns_cb(self, widget):
        """Save the selected color as a new pattern file and select it."""
        pixbuf = self._get_selected_color_pixbuf()
        # Find the first unused color%02d.png filename in the user dir.
        i = 1
        while True:
            filename = os.path.join(
                self.app.user_datapath, BACKGROUNDS_SUBDIR, "color%02d.png" % i
            )
            if not os.path.exists(filename):
                break
            i += 1
        lib.pixbuf.save(pixbuf, filename, "png")
        self.bgl.backgrounds.append(pixbuf)
        self.bgl.update()
        self.bgl.set_selected(pixbuf)
        self.nb.set_current_page(0)
class BackgroundList(pixbuflist.PixbufList):
_SUFFIXES = (".jpg", ".jpeg", ".png")
def __init__(self, win):
pixbuflist.PixbufList.__init__(
self,
None,
N,
N,
namefunc=self._get_tooltip,
pixbuffunc=self._get_preview_pixbuf,
)
self.app = win.app
self.win = win
stock_path = os.path.join(self.app.datapath, BACKGROUNDS_SUBDIR)
user_path = os.path.join(self.app.user_datapath, BACKGROUNDS_SUBDIR)
if not os.path.isdir(user_path):
os.mkdir(user_path)
self._background_files = self._list_dir(stock_path)
self._background_files.sort()
self._background_files += self._list_dir(user_path)
# Exclude DEFAULT_BACKGROUND from the list shown to the user
for filename in reversed(self._background_files):
file_basename = os.path.basename(filename)
if file_basename.lower() == DEFAULT_BACKGROUND:
self._background_files.remove(filename)
self._pixbuf_tooltip = {}
self._pixbufs_scaled = {} # lazily loaded by self.initialize()
self.backgrounds = []
self.item_selected += self._item_selected_cb
@classmethod
def _list_dir(cls, path):
"""Recursively find images by suffix"""
contents = []
for dir_path, dir_subdirs, dir_files in os.walk(path):
for file_name in dir_files:
is_matched = False
file_name_lowercase = file_name.lower()
for suffix in cls._SUFFIXES:
if not file_name_lowercase.endswith(suffix):
continue
is_matched = True
break
if is_matched:
file_path = os.path.join(dir_path, file_name)
contents.append(file_path)
contents.sort(key=os.path.getmtime)
return contents
@property
def initialized(self):
return len(self.backgrounds) != 0
def initialize(self):
self.backgrounds = self._load_pixbufs(self._background_files)
self.set_itemlist(self.backgrounds)
def _load_pixbufs(self, files, exclude_default=False):
pixbufs = []
load_errors = []
for filename in files:
is_matched = False
for suffix in self._SUFFIXES:
if not filename.lower().endswith(suffix):
continue
is_matched = True
break
if not is_matched:
logger.warning(
"Excluding %r: not in %r",
filename,
self._SUFFIXES,
)
continue
pixbuf, errors = load_background(filename)
if errors:
for err in errors:
logger.error("Error loading %r: %r", filename, err)
load_errors.append(err)
continue
if os.path.basename(filename).lower() == DEFAULT_BACKGROUND:
if exclude_default:
logger.warning(
"Excluding %r: is default background (%r)",
filename,
DEFAULT_BACKGROUND,
)
continue
pixbufs.append(pixbuf)
tooltip = _filename_to_display(filename)
self._pixbuf_tooltip[pixbuf] = tooltip
if load_errors:
msg = "\n\n".join(load_errors)
self.app.message_dialog(
text=_("One or more backgrounds could not be loaded"),
title=_("Error loading backgrounds"),
secondary_text=_(
"Please remove the unloadable files, or "
"check your libgdkpixbuf installation."
),
long_text=msg,
message_type=Gtk.MessageType.WARNING,
modal=True,
)
logger.info(
"Loaded %d of %d background(s), with %d error(s)",
len(pixbufs),
len(files),
len(errors),
)
return pixbufs
    def _get_preview_pixbuf(self, pixbuf):
        """Return an NxN preview thumbnail for *pixbuf*, memoized.

        Previews are built lazily and cached in self._pixbufs_scaled,
        keyed by the source pixbuf object.
        """
        if pixbuf in self._pixbufs_scaled:
            return self._pixbufs_scaled[pixbuf]
        w, h = pixbuf.get_width(), pixbuf.get_height()
        # Already exactly tile-sized: no scaling needed.
        if w == N and h == N:
            return pixbuf
        assert w >= N
        assert h >= N
        # Scale so the smaller dimension fills the preview, clamped to a
        # minimum of 0.25.
        # NOTE(review): if w/h are ints and this module does not use true
        # division (Python 2), N / min(w, h) truncates — confirm a
        # `from __future__ import division` exists at the top of the file.
        scale = max(0.25, N / min(w, h))
        scaled = new_blank_pixbuf((0, 0, 0), N, N)
        pixbuf.composite(
            dest=scaled,
            dest_x=0,
            dest_y=0,
            dest_width=N,
            dest_height=N,
            offset_x=0,
            offset_y=0,
            scale_x=scale,
            scale_y=scale,
            interp_type=GdkPixbuf.InterpType.BILINEAR,
            overall_alpha=255,
        )
        # Overlay the app's "plus" pixmap on top of the scaled preview.
        self.app.pixmaps.plus.composite(
            dest=scaled,
            dest_x=0,
            dest_y=0,
            dest_width=N,
            dest_height=N,
            offset_x=0,
            offset_y=0,
            scale_x=1.0,
            scale_y=1.0,
            interp_type=GdkPixbuf.InterpType.BILINEAR,
            overall_alpha=255,
        )
        self._pixbufs_scaled[pixbuf] = scaled
        return scaled
def _get_tooltip(self, pixbuf):
return self._pixbuf_tooltip.get(pixbuf, None)
    def _item_selected_cb(self, self_, pixbuf):
        # Selection callback: apply the chosen pixbuf as the window's
        # background.
        self.win.set_background(pixbuf)
## Helpers
def _filename_to_display(s):
    """Best-effort conversion of a filename to Unicode.

    Tries to be correct about Windows/POSIX weirdness without
    obsessing too much.
    """
    if isinstance(s, unicode):
        return s
    # On Windows sys.getfilesystemencoding() is unreliable here, so
    # always assume UTF-8; elsewhere trust the filesystem encoding.
    if sys.platform == "win32":
        encoding = "UTF-8"
    else:
        encoding = sys.getfilesystemencoding()
    return s.decode(encoding, "replace")
def new_blank_pixbuf(rgb, w, h):
    """Create a w×h pixbuf with every pixel set to one colour.

    :param tuple rgb: Fill colour (``R,G,B``, floats).
    :param int w: Width for the new pixbuf.
    :param int h: Height for the new pixbuf.

    The returned pixbuf has no alpha channel.
    """
    pixbuf = GdkPixbuf.Pixbuf.new(
        GdkPixbuf.Colorspace.RGB,
        False,
        8,
        w,
        h,
    )
    # Convert each float channel to a clamped 0..255 byte.
    r, g, b = [helpers.clamp(int(round(0xFF * c)), 0, 0xFF) for c in rgb]
    # Pixbuf.fill() takes a packed 32-bit RGBA value.
    pixbuf.fill((r << 24) | (g << 16) | (b << 8) | 0xFF)
    return pixbuf
def load_background(filename, bloatmax=BLOAT_MAX_SIZE):
    """Load a pixbuf, testing it for suitability as a background

    :param str filename: Full path to the filename to load.
    :param int bloatmax: Repeat up to this size
    :rtype: tuple

    The returned tuple is a pair ``(PIXBUF, ERRORS)``,
    where ``ERRORS`` is a list of localized strings
    describing the errors encountered,
    and ``PIXBUF`` contains the loaded background pixbuf.
    If there were errors, ``PIXBUF`` is None.

    The MyPaint rendering engine can only manage
    background layers which fit into its tile structure.
    Formerly, only background images with dimensions
    which were exact multiples of the tile size were permitted.
    We have a couple of workarounds now:

    * "Bloating" the background by repetition (pixel-perfect)
    * Scaling the image down to fit (distorts the image)
    """
    filename_display = _filename_to_display(filename)
    load_errors = []
    try:
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
    except Exception as ex:
        logger.error("Failed to load background %r: %s", filename, ex)
        # Fixed: template previously contained a literal "(unknown)"
        # instead of the {filename} placeholder, so the filename kwarg
        # passed to .format() was silently ignored.
        msg = unicode(
            _('Gdk-Pixbuf couldn\'t load "{filename}", and reported "{error}"')
        )
        load_errors.append(
            msg.format(
                filename=filename_display,
                error=repr(ex),
            )
        )
        return (None, load_errors)
    # Validity check
    w, h = pixbuf.get_width(), pixbuf.get_height()
    if w == 0 or h == 0:
        # Fixed: same missing {filename} placeholder as above.
        msg = unicode(_("{filename} has zero size (w={w}, h={h})"))
        load_errors.append(
            msg.format(
                filename=filename_display,
                w=w,
                h=h,
            )
        )
        return (None, load_errors)
    # Flatten: composite onto an opaque black backdrop so the engine
    # never sees an alpha channel.
    if pixbuf.get_has_alpha():
        logger.warning(
            "%r has an alpha channel, which should be removed manually",
            filename,
        )
        new_pixbuf = new_blank_pixbuf((0, 0, 0), w, h)
        pixbuf.composite(
            dest=new_pixbuf,
            dest_x=0,
            dest_y=0,
            dest_width=w,
            dest_height=h,
            offset_x=0,
            offset_y=0,
            scale_x=1.0,
            scale_y=1.0,
            interp_type=GdkPixbuf.InterpType.NEAREST,
            overall_alpha=255,
        )
        pixbuf = new_pixbuf
        logger.debug(
            "Flattened %s by compositing it onto a black backdrop",
            filename,
        )
    # Attempt to fit the image into our grid.
    exact_fit = (w % N, h % N) == (0, 0)
    if not exact_fit:
        logger.warning(
            "%r (%dx%d) does not fit the %dx%d tile grid exactly",
            filename,
            w,
            h,
            N,
            N,
        )
        # First workaround: repeat the image (pixel-perfect) up to
        # bloatmax so the result lands closer to a tile multiple.
        repeats_x = _best_nrepeats_for_scaling(w, bloatmax)
        repeats_y = _best_nrepeats_for_scaling(h, bloatmax)
        if repeats_x > 1 or repeats_y > 1:
            logger.info(
                "Tiling %r to %dx%d (was: %dx%d, repeats: %d vert, %d horiz)",
                filename,
                w * repeats_x,
                h * repeats_y,
                w,
                h,
                repeats_x,
                repeats_y,
            )
            pixbuf = _tile_pixbuf(pixbuf, repeats_x, repeats_y)
            w, h = pixbuf.get_width(), pixbuf.get_height()
        # Second workaround: scale down to the nearest tile multiple
        # (distorts the image slightly).
        if (w % N != 0) or (h % N != 0):
            orig_w, orig_h = w, h
            w = max(1, w // N) * N
            h = max(1, h // N) * N
            logger.info(
                "Scaling %r to %dx%d (was: %dx%d)",
                filename,
                w,
                h,
                orig_w,
                orig_h,
            )
            pixbuf = pixbuf.scale_simple(
                dest_width=w,
                dest_height=h,
                interp_type=GdkPixbuf.InterpType.BILINEAR,
            )
    assert (w % N == 0) and (h % N == 0)
    if load_errors:
        pixbuf = None
    return pixbuf, load_errors
def _tile_pixbuf(pixbuf, repeats_x, repeats_y):
    """Build a new pixbuf tiling *pixbuf* repeats_x × repeats_y times."""
    w, h = pixbuf.get_width(), pixbuf.get_height()
    tiled = new_blank_pixbuf((0, 0, 0), repeats_x * w, repeats_y * h)
    # Copy the source once per grid cell.
    for col in xrange(repeats_x):
        for row in xrange(repeats_y):
            pixbuf.copy_area(0, 0, w, h, tiled, w * col, h * row)
    return tiled
def _best_nrepeats_for_scaling(src_size, max_dest_size):
    """Choose how many repeats of src_size best fit the tile grid.

    Candidate counts are tried while the running total stays within
    max_dest_size; because the bound is tested before the increment,
    the final candidate may overshoot by up to one src_size.  The
    count whose total is closest to a multiple of the tile size N
    wins, and an exact multiple stops the search immediately.
    """
    best_count, best_rem = 1, N
    count, total = 0, 0
    while total <= max_dest_size:
        count += 1
        total += src_size
        rem = total % N
        if rem < best_rem:
            best_count, best_rem = count, rem
            if rem == 0:
                break
    return best_count
|
vidcutter | _main_ | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
#
# VidCutter - media cutter & joiner
#
# copyright © 2018 Pete Alexandrou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import logging
import logging.handlers
import os
import shutil
import signal
import sys
import traceback
from typing import Callable, Optional
import vidcutter
import vidcutter.libs.mpv as mpv
from PyQt5.QtCore import (
QCommandLineOption,
QCommandLineParser,
QDir,
QFileInfo,
QProcess,
QProcessEnvironment,
QSettings,
QSize,
QStandardPaths,
Qt,
QTimerEvent,
pyqtSlot,
)
from PyQt5.QtGui import (
QCloseEvent,
QContextMenuEvent,
QDragEnterEvent,
QDropEvent,
QGuiApplication,
QMouseEvent,
QResizeEvent,
QSurfaceFormat,
qt_set_sequence_auto_mnemonic,
)
from PyQt5.QtWidgets import QMainWindow, QMessageBox, QSizePolicy, qApp
from vidcutter.libs.singleapplication import SingleApplication
from vidcutter.libs.widgets import VCMessageBox
from vidcutter.videoconsole import ConsoleHandler, ConsoleWidget, VideoLogger
from vidcutter.videocutter import VideoCutter
# Windows-only UI integration: taskbar progress support.
if sys.platform == "win32":
    # noinspection PyUnresolvedReferences
    from PyQt5.QtWinExtras import QWinTaskbarButton
    from vidcutter.libs.taskbarprogress import TaskbarProgress

# Restore the OS default actions so Ctrl+C / SIGTERM terminate the
# process immediately instead of going through Python's handlers.
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
class MainWindow(QMainWindow):
    """Top-level VidCutter window.

    Owns application-wide concerns: command-line parsing, persistent
    settings, logging, UI scaling, and the central VideoCutter widget.
    """

    # Exit code that tells main() to relaunch the application.
    EXIT_CODE_REBOOT = 666
    # Project file used to hand the current session over to a reboot.
    TEMP_PROJECT_FILE = "vidcutter_reboot.vcp"
    # Scratch directory; removed by cleanup() when the app quits.
    WORKING_FOLDER = os.path.join(QDir.tempPath(), "vidcutter")

    def __init__(self):
        super(MainWindow, self).__init__()
        self.video, self.resizeTimer = "", 0
        # Order matters: settings are loaded before the logger so that
        # verboseLogs can influence handler selection.
        self.parse_cmdline()
        self.init_settings()
        self.init_logger()
        self.init_scale()
        self.init_cutter()
        self.setWindowTitle(qApp.applicationName())
        self.setContentsMargins(0, 0, 0, 0)
        self.statusBar().showMessage("Ready")
        self.statusBar().setStyleSheet("border: none; padding: 0; margin: 0;")
        self.setAcceptDrops(True)
        self.show()
        if sys.platform == "win32" and TaskbarProgress.isValidWinVer():
            self.win_taskbar_button = QWinTaskbarButton(self)
            self.win_taskbar_button.setWindow(self.windowHandle())
            self.win_taskbar_button.progress().setVisible(True)
            self.win_taskbar_button.progress().setValue(0)
        self.console.setGeometry(
            int(self.x() - (self.width() / 2)),
            self.y() + int(self.height() / 3),
            750,
            300,
        )
        # Resume a project saved by reboot() in a previous run.
        if not self.video and os.path.isfile(
            os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
        ):
            self.video = os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
        if self.video:
            self.file_opener(self.video)

    def init_scale(self) -> None:
        """Pick a UI scale preset from the primary screen width."""
        screen_size = qApp.desktop().availableGeometry(-1)
        self.scale = "LOW" if screen_size.width() <= 1024 else "NORMAL"
        self.setMinimumSize(self.get_size(self.scale))
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

    @pyqtSlot(str)
    def file_opener(self, filename: str) -> None:
        """Open a media file, or a ``.vcp`` project file.

        A temp reboot project is deleted once successfully opened.
        """
        try:
            if QFileInfo(filename).suffix() == "vcp":
                self.cutter.openProject(project_file=filename)
                if filename == os.path.join(
                    QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE
                ):
                    os.remove(
                        os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
                    )
            else:
                self.cutter.loadMedia(filename)
        except (FileNotFoundError, PermissionError):
            # Fixed: QMessageBox.critical() requires a string message;
            # sys.exc_info()[0] is the exception *class* and raised a
            # TypeError inside the handler.
            QMessageBox.critical(self, "Error loading file", str(sys.exc_info()[1]))
            logging.exception("Error loading file")
            qApp.restoreOverrideCursor()
            # NOTE(review): no restart() is defined in this class —
            # confirm it exists elsewhere (reboot() may be intended).
            self.restart()

    @staticmethod
    def get_size(mode: str = "NORMAL") -> QSize:
        """Return the minimum window size for a scale preset."""
        modes = {
            "LOW": QSize(800, 425),
            "NORMAL": QSize(930, 680),
            "HIGH": QSize(1850, 1300),
        }
        return modes[mode]

    @staticmethod
    def _fallback_config_path() -> str:
        """Per-platform config directory used when Qt cannot supply one.

        Extracted from the formerly duplicated branches in
        init_logger() and init_settings().
        """
        if sys.platform == "win32":
            return os.path.join(
                QDir.homePath(), "AppData", "Local", qApp.applicationName().lower()
            )
        if sys.platform == "darwin":
            return os.path.join(
                QDir.homePath(),
                "Library",
                "Preferences",
                qApp.applicationName().lower(),
            )
        return os.path.join(
            QDir.homePath(), ".config", qApp.applicationName().lower()
        )

    def init_logger(self) -> None:
        """Configure rotating-file, console-widget and stdout logging."""
        try:
            log_path = self.get_app_config_path()
        except AttributeError:
            log_path = self._fallback_config_path()
        os.makedirs(log_path, exist_ok=True)
        self.console = ConsoleWidget(self)
        self.consoleLogger = ConsoleHandler(self.console)
        handlers = [
            logging.handlers.RotatingFileHandler(
                os.path.join(log_path, "%s.log" % qApp.applicationName().lower()),
                maxBytes=1000000,
                backupCount=1,
            ),
            self.consoleLogger,
        ]
        # Mirror log output to stdout when debugging is requested.
        if self.parser.isSet(self.debug_option) or self.verboseLogs:
            # noinspection PyTypeChecker
            handlers.append(logging.StreamHandler())
        logging.setLoggerClass(VideoLogger)
        logging.basicConfig(
            handlers=handlers,
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M",
            level=logging.INFO,
        )
        logging.captureWarnings(capture=True)
        sys.excepthook = MainWindow.log_uncaught_exceptions
        if os.getenv("DEBUG", False):
            logging.info("appconfig folder: {}".format(log_path))

    def init_settings(self) -> None:
        """Load persisted settings: geometry, theme, volume, logging."""
        try:
            settings_path = self.get_app_config_path()
        except AttributeError:
            settings_path = self._fallback_config_path()
        os.makedirs(settings_path, exist_ok=True)
        settings_file = "{}.ini".format(qApp.applicationName().lower())
        self.settings = QSettings(
            os.path.join(settings_path, settings_file), QSettings.IniFormat
        )
        if self.settings.value("geometry") is not None:
            self.restoreGeometry(self.settings.value("geometry"))
        if self.settings.value("windowState") is not None:
            self.restoreState(self.settings.value("windowState"))
        self.theme = self.settings.value("theme", "light", type=str)
        self.startupvol = self.settings.value("volume", 100, type=int)
        self.verboseLogs = self.settings.value("verboseLogs", "off", type=str) in {
            "on",
            "true",
        }

    @staticmethod
    def log_uncaught_exceptions(cls, exc, tb) -> None:
        """sys.excepthook replacement: log traceback and exception."""
        logging.critical("".join(traceback.format_tb(tb)))
        logging.critical("{0}: {1}".format(cls, exc))

    def parse_cmdline(self) -> None:
        """Parse positional video/project arguments and --debug."""
        self.parser = QCommandLineParser()
        self.parser.setApplicationDescription(
            "\nVidCutter - the simplest + fastest media cutter & joiner"
        )
        self.parser.addPositionalArgument("video", "Preload video file", "[video]")
        self.parser.addPositionalArgument(
            "project", "Open VidCutter project file (.vcp)", "[project]"
        )
        self.debug_option = QCommandLineOption(
            ["debug"],
            "debug mode; verbose console output & logging. "
            "This will basically output what is being logged to file to the "
            "console stdout. Mainly useful for debugging problems with your "
            "system video and/or audio stack and codec configuration.",
        )
        self.parser.addOption(self.debug_option)
        self.parser.addVersionOption()
        self.parser.addHelpOption()
        self.parser.process(qApp)
        self.args = self.parser.positionalArguments()
        if self.parser.isSet(self.debug_option):
            os.environ["DEBUG"] = "1"
        if len(self.args) > 0:
            file_path = QFileInfo(self.args[0]).absoluteFilePath()
            if not os.path.exists(file_path):
                sys.stderr.write("\nERROR: File not found: %s\n" % file_path)
                self.close()
                qApp.exit(1)
            self.video = file_path

    def init_cutter(self) -> None:
        """Create the central VideoCutter widget and wire its errors."""
        self.cutter = VideoCutter(self)
        self.cutter.errorOccurred.connect(self.errorHandler)
        self.setCentralWidget(self.cutter)
        qApp.setWindowIcon(VideoCutter.getAppIcon(encoded=False))

    @staticmethod
    def get_bitness() -> int:
        """Return the interpreter's pointer width in bits (32/64)."""
        from struct import calcsize
        return calcsize("P") * 8

    @pyqtSlot()
    def reboot(self) -> None:
        """Save state and exit with the sentinel reboot exit code."""
        if self.cutter.mediaAvailable:
            self.cutter.saveProject(reboot=True)
        self.save_settings()
        qApp.exit(MainWindow.EXIT_CODE_REBOOT)

    def save_settings(self) -> None:
        """Persist folder, window geometry and state to QSettings."""
        self.settings.setValue("lastFolder", self.cutter.lastFolder)
        self.settings.setValue("geometry", self.saveGeometry())
        self.settings.setValue("windowState", self.saveState())
        self.settings.sync()

    @pyqtSlot(bool)
    def lock_gui(self, locked: bool = True) -> None:
        """Disable (or re-enable) the UI while long work is in flight."""
        if locked:
            qApp.setOverrideCursor(Qt.WaitCursor)
            self.cutter.cliplist.setEnabled(False)
            self.setEnabled(False)
        else:
            self.setEnabled(True)
            self.cutter.cliplist.setEnabled(True)
            qApp.restoreOverrideCursor()
        qApp.processEvents()

    @property
    def flatpak(self) -> bool:
        """True when running from a Flatpak sandbox on Linux."""
        return sys.platform.startswith("linux") and QFileInfo(
            __file__
        ).absolutePath().startswith("/app/")

    def get_app_config_path(self) -> str:
        """Return the writable per-user config directory."""
        if self.flatpak:
            confpath = QProcessEnvironment.systemEnvironment().value(
                "XDG_CONFIG_HOME", ""
            )
            if len(confpath):
                return confpath
            else:
                return os.path.join(
                    QDir.homePath(), ".var", "app", vidcutter.__desktopid__, "config"
                )
        return QStandardPaths.writableLocation(
            QStandardPaths.AppConfigLocation
        ).replace(qApp.applicationName(), qApp.applicationName().lower())

    @staticmethod
    def get_path(path: str = None, override: bool = False) -> str:
        """Resolve a resource path: Qt resource by default, filesystem
        (incl. PyInstaller bundle dir) when override is True."""
        if override:
            if getattr(sys, "frozen", False) and getattr(sys, "_MEIPASS", False):
                # noinspection PyProtectedMember, PyUnresolvedReferences
                return os.path.join(sys._MEIPASS, path)
            return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), path)
        return ":{}".format(path)

    @pyqtSlot(str)
    def errorHandler(self, msg: str, title: str = None) -> None:
        """Show a modal error dialog and log the message."""
        qApp.restoreOverrideCursor()
        QMessageBox.critical(
            self, "An error occurred" if title is None else title, msg, QMessageBox.Ok
        )
        logging.error(msg)

    @staticmethod
    @pyqtSlot()
    def cleanup():
        """Remove the scratch working folder (best effort)."""
        shutil.rmtree(MainWindow.WORKING_FOLDER, ignore_errors=True)

    def contextMenuEvent(self, event: QContextMenuEvent) -> None:
        if event.reason() in {QContextMenuEvent.Mouse, QContextMenuEvent.Keyboard}:
            self.cutter.appmenu.popup(event.globalPos())
        super(MainWindow, self).contextMenuEvent(event)

    def mousePressEvent(self, event: QMouseEvent) -> None:
        # Clicking empty space clears clip selection and counter focus.
        if event.button() == Qt.LeftButton and self.cutter.mediaAvailable:
            self.cutter.cliplist.clearSelection()
            self.cutter.timeCounter.clearFocus()
            self.cutter.frameCounter.clearFocus()
            # noinspection PyBroadException
            try:
                if hasattr(self.cutter, "notify"):
                    self.cutter.notify.close()
            except BaseException:
                pass
        event.accept()

    def dragEnterEvent(self, event: QDragEnterEvent) -> None:
        if event.mimeData().hasUrls():
            event.accept()

    def dropEvent(self, event: QDropEvent) -> None:
        # Open the first dropped file.
        filename = event.mimeData().urls()[0].toLocalFile()
        self.file_opener(filename)
        event.accept()

    def resizeEvent(self, event: QResizeEvent) -> None:
        """Debounce thumbnail regeneration while the window resizes."""
        try:
            if (
                self.isEnabled()
                and self.cutter.mediaAvailable
                and self.cutter.thumbnailsButton.isChecked()
            ):
                if self.cutter.seekSlider.thumbnailsOn:
                    self.cutter.sliderWidget.setLoader(True)
                    self.cutter.sliderWidget.hideThumbs()
                if self.resizeTimer:
                    self.killTimer(self.resizeTimer)
                self.resizeTimer = self.startTimer(500)
        except AttributeError:
            pass

    def timerEvent(self, event: QTimerEvent) -> None:
        # Fires once the debounce window elapses: rebuild thumbnails.
        try:
            self.cutter.seekSlider.reloadThumbs()
            self.killTimer(self.resizeTimer)
            self.resizeTimer = 0
        except AttributeError:
            pass

    def closeEvent(self, event: QCloseEvent) -> Optional[Callable]:
        """Confirm exit during processing / unsaved work, then shut down."""
        event.accept()
        try:
            if not self.isEnabled():
                exitwarn = VCMessageBox(
                    "Warning",
                    "Media is currently being processed",
                    "Are you sure you want to exit now?",
                    parent=self,
                )
                exitwarn.addButton("Yes", QMessageBox.NoRole)
                cancelbutton = exitwarn.addButton("No", QMessageBox.RejectRole)
                exitwarn.exec_()
                res = exitwarn.clickedButton()
                if res == cancelbutton:
                    event.ignore()
                    return
            noexit, callback = self.cutter.saveWarning()
            if noexit:
                event.ignore()
                if callback is not None:
                    return callback()
                else:
                    return
        except AttributeError:
            logging.exception("warning dialogs on app exit exception", exc_info=True)
        self.console.deleteLater()
        if hasattr(self, "cutter"):
            self.save_settings()
            try:
                if hasattr(self.cutter.videoService, "smartcut_jobs"):
                    [
                        self.cutter.videoService.cleanup(job.files.values())
                        for job in self.cutter.videoService.smartcut_jobs
                    ]
                if hasattr(self.cutter, "mpvWidget"):
                    self.cutter.mpvWidget.shutdown()
            except AttributeError:
                pass
        try:
            qApp.exit(0)
        except mpv.MPVError:
            pass
def main():
    """Application entry point.

    Configures Qt attributes (which must be set before the QApplication
    is created), runs the event loop, and relaunches the process when
    the window exits with EXIT_CODE_REBOOT.
    """
    qt_set_sequence_auto_mnemonic(False)
    if hasattr(Qt, "AA_EnableHighDpiScaling"):
        QGuiApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    if hasattr(Qt, "AA_Use96Dpi"):
        QGuiApplication.setAttribute(Qt.AA_Use96Dpi, True)
    if hasattr(Qt, "AA_ShareOpenGLContexts"):
        fmt = QSurfaceFormat()
        fmt.setDepthBufferSize(24)
        QSurfaceFormat.setDefaultFormat(fmt)
        QGuiApplication.setAttribute(Qt.AA_ShareOpenGLContexts, True)
    # if sys.platform == 'darwin':
    #     qApp.setStyle('Fusion')
    app = SingleApplication(vidcutter.__appid__, sys.argv)
    app.setApplicationName(vidcutter.__appname__)
    app.setApplicationVersion(vidcutter.__version__)
    app.setDesktopFileName(vidcutter.__desktopid__)
    app.setOrganizationDomain(vidcutter.__domain__)
    app.setQuitOnLastWindowClosed(True)
    win = MainWindow()
    win.stylename = app.style().objectName().lower()
    app.setActivationWindow(win)
    app.messageReceived.connect(win.file_opener)
    app.aboutToQuit.connect(MainWindow.cleanup)
    exit_code = app.exec_()
    # Relaunch the same executable when a reboot was requested.
    if exit_code == MainWindow.EXIT_CODE_REBOOT:
        if sys.platform == "win32":
            if hasattr(win.cutter, "mpvWidget"):
                win.close()
            QProcess.startDetached('"{}"'.format(qApp.applicationFilePath()))
        else:
            QProcess.startDetached(" ".join(sys.argv))
    sys.exit(exit_code)
# Script entry point.
if __name__ == "__main__":
    main()
|
port | shared | # =============================================================================
# Copyright (C) 2014 Ryan Holmes
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
from abc import ABCMeta, abstractmethod
from logbook import Logger
from service.market import Market
pyfalog = Logger(__name__)
class UserCancelException(Exception):
    """Raised when the user cancels during port processing."""
    pass
class IPortUser(metaclass=ABCMeta):
    """Callback interface used to report fit import/export progress."""

    # Event flags passed as `action` to on_port_processing().
    ID_PULSE = 1                     # pulse the progress bar
    ID_UPDATE = ID_PULSE << 1        # replace the message with `data`
    ID_DONE = ID_PULSE << 2          # import finished: open the fits
    ID_ERROR = ID_PULSE << 3         # an error was raised: display it
    PROCESS_IMPORT = ID_PULSE << 4   # marks an import process
    PROCESS_EXPORT = ID_PULSE << 5   # marks an export process

    @abstractmethod
    def on_port_processing(self, action, data=None):
        """Progress callback invoked while fits are being processed.

        XML files can contain multiple ships with multiple fits, whereas
        EFT cfg files contain many fits of a single ship.  When iterating
        through the files, the message is updated at the start of each
        file, and the progress bar is pulsed for every fit processed.

        action: flag describing how to treat `data` —
            None: pulse the progress bar
            1: replace the message with `data`
            other: close the dialog and handle per `action`
                (-1 open fits, -2 display error)

        Returns True to continue processing, False to cancel.
        """
        pass

    def on_port_process_start(self):
        pass
def processing_notify(iportuser, flag, data):
    """Forward a progress event; raise UserCancelException on cancel."""
    keep_going = iportuser.on_port_processing(flag, data)
    if not keep_going:
        raise UserCancelException
def fetchItem(typeName, eagerCat=False):
    """Look up a market item by type name.

    :param typeName: item type name to look up
    :param eagerCat: when True, eagerly load the item's group/category
    :returns: the item if found and published on the market, else None
    """
    sMkt = Market.getInstance()
    eager = "group.category" if eagerCat else None
    try:
        item = sMkt.getItem(typeName, eager=eager)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        # Narrowed from a bare ``except:`` so that other BaseException
        # subclasses (e.g. GeneratorExit) are no longer swallowed.
        pyfalog.warning(
            'service.port.shared: unable to fetch item "{}"'.format(typeName)
        )
        return None
    if item is not None and sMkt.getPublicityByItem(item):
        return item
    return None
|
commands | createuser | # -*- coding: utf-8 -*-
"""
Management utility to create users
Example usage:
manage.py createuser \
--username test \
--email test@test.test \
"""
import getpass
import sys
from django.conf import settings
from django.contrib.auth import get_user_model, models
from django.contrib.auth.password_validation import validate_password
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.functional import cached_property
from django.utils.text import capfirst
class NotRunningInTTYException(Exception):
    """Raised when interactive input is needed but stdin is not a TTY."""
    pass
class Command(BaseCommand):
    """Management command that creates a user from args and/or prompts."""

    help = "Used to create a user"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Resolve the active user model up front so argument names and
        # validation follow custom user models.
        self.UserModel = get_user_model()
        self.username_field = self.UserModel._meta.get_field(
            self.UserModel.USERNAME_FIELD
        )

    def add_arguments(self, parser):
        """Register the command-line options for user creation."""
        parser.add_argument(
            f"--{self.UserModel.USERNAME_FIELD}",
            help="Specifies the login for a user.",
        )
        parser.add_argument(
            "--email",
            dest="email",
            default="",
            help="Specifies the email for the user. Optional.",
        )
        parser.add_argument(
            "--password",
            dest="password",
            help="Specifies the password for the user. Optional.",
        )
        parser.add_argument(
            "--read-only",
            action="store_true",
            default=False,
            help="Specifies read-only privileges for the user. Default is False.",
        )
        parser.add_argument(
            "--is-staff",
            dest="is_staff",
            action="store_true",
            default=False,
            help="Specifies the staff status for the user. Default is False.",
        )

    def handle(self, *args, **options):
        """Validate the username, collect a password, and create the user.

        Read-only users are added to the configured read-only group;
        all other users are made superusers.
        """
        username = options.get(self.UserModel.USERNAME_FIELD)
        password = options.get("password")
        is_read_only = options.get("read_only")
        user_data = {}
        user_password = options.get("password")
        verbose_field_name = self.username_field.verbose_name
        try:
            error_msg = self._validate_username(
                username, verbose_field_name, DEFAULT_DB_ALIAS
            )
            if error_msg:
                raise CommandError(error_msg)
            user_data[self.UserModel.USERNAME_FIELD] = username
            # Prompt for a password interactively (if password not set via arg)
            while password is None:
                password = getpass.getpass()
                password2 = getpass.getpass("Password (again): ")
                if password.strip() == "":
                    self.stderr.write("Error: Blank passwords aren't allowed.")
                    password = None
                    continue
                if password != password2:
                    self.stderr.write("Error: Your passwords didn't match.")
                    password = None
                    continue
                try:
                    validate_password(password, self.UserModel(**user_data))
                except exceptions.ValidationError as err:
                    self.stderr.write("\n".join(err.messages))
                    response = input(
                        "Bypass password validation and create user anyway? [y/N]: "
                    )
                    if response.lower() != "y":
                        password = None
                        continue
                user_password = password
            user = self.UserModel._default_manager.db_manager(
                DEFAULT_DB_ALIAS
            ).create_user(**user_data, password=user_password)
            user.email = options.get("email")
            user.is_staff = options.get("is_staff")
            if is_read_only:
                # Read-only users are plain accounts in the configured
                # read-only group rather than superusers.
                user.is_superuser = False
                user.save()
                group = models.Group.objects.get(
                    name=settings.BABY_BUDDY["READ_ONLY_GROUP_NAME"]
                )
                user.groups.add(group)
            else:
                user.is_superuser = True
                user.save()
            if options.get("verbosity") > 0:
                self.stdout.write(f"User {username} created successfully.")
        except KeyboardInterrupt:
            self.stderr.write("\nOperation cancelled.")
            sys.exit(1)
        except exceptions.ValidationError as e:
            raise CommandError("; ".join(e.messages))
        except NotRunningInTTYException:
            # NOTE(review): this exception is caught but never raised in
            # the code visible here — confirm a TTY check exists upstream.
            self.stdout.write(
                "User creation skipped due to not running in a TTY. "
                "You can run `manage.py createuser` in your project "
                "to create one manually."
            )

    @cached_property
    def username_is_unique(self):
        """
        Check if username is unique.
        """
        if self.username_field.unique:
            return True
        # Fall back to single-field unique constraints on the username.
        return any(
            len(unique_constraint.fields) == 1
            and unique_constraint.fields[0] == self.username_field.name
            for unique_constraint in self.UserModel._meta.total_unique_constraints
        )

    def _validate_username(self, username, verbose_field_name, database):
        """
        Validate username. If invalid, return a string error message.
        """
        if self.username_is_unique:
            try:
                self.UserModel._default_manager.db_manager(database).get_by_natural_key(
                    username
                )
            except self.UserModel.DoesNotExist:
                pass
            else:
                return f"Error: The {verbose_field_name} is already taken."
        if not username:
            return f"{capfirst(verbose_field_name)} cannot be blank."
        try:
            self.username_field.clean(username, None)
        except exceptions.ValidationError as e:
            return "; ".join(e.messages)
|
mylar | getcomics | # -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import datetime
import gzip
import json
import os
import re
import sys
import time
import urllib
import zipfile
from threading import Thread
import cfscrape
import logger
import mylar
import requests
from bs4 import BeautifulSoup
from mylar import db
from StringIO import StringIO
class GC(object):
    def __init__(self, query=None, issueid=None, comicid=None, oneoff=False):
        """Prepare a GetComics (DDL) search/scrape session."""
        self.valreturn = []
        # Base site URL for all searches and page fetches.
        self.url = 'https://getcomics.info'
        self.query = query
        self.comicid = comicid
        self.issueid = issueid
        self.oneoff = oneoff
        # The search-results page is cached here before parsing.
        self.local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, "getcomics.html")
        self.headers = {'Accept-encoding': 'gzip', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Referer': 'https://getcomics.info/'}
    def search(self):
        """Run the site search for self.query, cache the HTML, parse it.

        Uses cfscrape to obtain Cloudflare clearance tokens before the
        request; the response is streamed to self.local_filename.
        """
        with cfscrape.create_scraper() as s:
            cf_cookievalue, cf_user_agent = s.get_tokens(self.url, headers=self.headers)
            t = s.get(self.url+'/', params={'s': self.query}, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True, timeout=30)
            with open(self.local_filename, 'wb') as f:
                for chunk in t.iter_content(chunk_size=1024):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()
        return self.search_results()
    def loadsite(self, id, link):
        """Fetch a result page and cache it as ``getcomics-<id>.html``.

        NOTE(review): the ``id`` parameter shadows the builtin; kept
        as-is for call-site compatibility.
        """
        title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
        with cfscrape.create_scraper() as s:
            # The clearance cookie is kept on self for later reuse.
            self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers)
            t = s.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True, timeout=30)
            with open(title+'.html', 'wb') as f:
                for chunk in t.iter_content(chunk_size=1024):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()
    def search_results(self):
        """Parse the cached search-results HTML into a feed-style dict.

        Returns ``{'entries': [...]}`` where each entry describes one
        result article (title, date, size, pack/issue info, link).
        """
        results = {}
        resultlist = []
        soup = BeautifulSoup(open(self.local_filename), 'html.parser')
        resultline = soup.find("span", {"class": "cover-article-count"}).get_text(strip=True)
        logger.info('There are %s results' % re.sub('Articles', '', resultline).strip())
        for f in soup.findAll("article"):
            id = f['id']
            lk = f.find('a')
            link = lk['href']
            titlefind = f.find("h1", {"class": "post-title"})
            title = titlefind.get_text(strip=True)
            # Normalise en-dashes to plain hyphens.
            title = re.sub(u'\u2013', '-', title).strip()
            filename = title
            issues = None
            pack = False
            #see if it's a pack type
            issfind_st = title.find('#')
            issfind_en = title.find('-', issfind_st)
            if issfind_en != -1:
                if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]):
                    iss_en = title.find(' ', issfind_en+2)
                    if iss_en != -1:
                        issues = title[issfind_st+1:iss_en]
                        pack = True
                if title[issfind_en+1].isdigit():
                    iss_en = title.find(' ', issfind_en+1)
                    if iss_en != -1:
                        issues = title[issfind_st+1:iss_en]
                        pack = True
            # if it's a pack - remove the issue-range and the possible issue years (cause it most likely will span) and pass thru as separate items
            if pack is True:
                title = re.sub(issues, '', title).strip()
                if title.endswith('#'):
                    title = title[:-1].strip()
            else:
                # Skip weekly bundle posts for non-pack results.
                if any(['Marvel Week+' in title, 'INDIE Week+' in title, 'Image Week' in title, 'DC Week+' in title]):
                    continue
            # Walk up to three text nodes to pick out the Year and Size.
            # NOTE(review): if the expected markup is missing, `year` and
            # `size` may be unbound (or carried over from the previous
            # article) when referenced below — confirm against live pages.
            option_find = f.find("p", {"style": "text-align: center;"})
            i = 0
            while (i <= 2 and option_find is not None):
                option_find = option_find.findNext(text=True)
                if 'Year' in option_find:
                    year = option_find.findNext(text=True)
                    year = re.sub('\|', '', year).strip()
                    if pack is True and '-' in year:
                        title = re.sub('\('+year+'\)', '', title).strip()
                else:
                    size = option_find.findNext(text=True)
                    if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]):
                        # Normalise MB/GB units to single-letter suffixes.
                        if 'MB' in size:
                            size = re.sub('MB', 'M', size).strip()
                        elif 'GB' in size:
                            size = re.sub('GB', 'G', size).strip()
                        if '//' in size:
                            nwsize = size.find('//')
                            size = re.sub('\[', '', size[:nwsize]).strip()
                    else:
                        size = '0M'
                i+=1
            dateline = f.find('time')
            datefull = dateline['datetime']
            datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d"))
            resultlist.append({"title": title,
                               "pubdate": datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b %Y %H:%M:%S'),
                               "filename": filename,
                               "size": re.sub(' ', '', size).strip(),
                               "pack": pack,
                               "issues": issues,
                               "link": link,
                               "year": year,
                               "id": re.sub('post-', '', id).strip(),
                               "site": 'DDL'})
            logger.fdebug('%s [%s]' % (title, size))
        results['entries'] = resultlist
        return results
def parse_downloadresults(self, id, mainlink):
    """Parse a previously cached GetComics page for download links and queue them.

    Reads the html cached as ``getcomics-<id>.html`` in the cache dir,
    extracts series/year/size from the centered info paragraph, finds the
    direct "Download Now" button and/or per-volume pack links, writes one
    ``ddl_info`` row per item and pushes each onto ``mylar.DDL_QUEUE``.

    :param id: identifier of the cached page / ddl_info rows
    :param mainlink: original GetComics page url (kept for reference)
    :return: dict ``{'success': True}`` or ``{'success': False}``
    """
    myDB = db.DBConnection()
    series = None
    year = None
    size = None
    # cached copy of the page written earlier for this id
    title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
    soup = BeautifulSoup(open(title+'.html'), 'html.parser')
    orig_find = soup.find("p", {"style": "text-align: center;"})
    i = 0
    option_find = orig_find
    possible_more = None
    # walk successive text nodes of the info paragraph: first node is the
    # series title, 'Year' / 'Size' labels precede their values
    while True: #i <= 10:
        prev_option = option_find
        option_find = option_find.findNext(text=True)
        if i == 0 and series is None:
            series = option_find
        elif 'Year' in option_find:
            year = option_find.findNext(text=True)
            year = re.sub('\|', '', year).strip()
        else:
            if 'Size' in prev_option:
                size = option_find #.findNext(text=True)
                # sibling after the paragraph may hold extra pack links
                possible_more = orig_find.next_sibling
                break
        i+=1
    logger.fdebug('Now downloading: %s [%s] / %s ... this can take a while (go get some take-out)...' % (series, year, size))
    link = None
    # primary download button lives in an "aio-pulse" div; take the first
    for f in soup.findAll("div", {"class": "aio-pulse"}):
        lk = f.find('a')
        if lk['title'] == 'Download Now':
            link = {"series": series,
                    "site": lk['title'],
                    "year": year,
                    "issues": None,
                    "size": size,
                    "link": lk['href']}
            break #get the first link just to test
    links = []
    if link is None and possible_more.name == 'ul':
        # no direct button: page lists one entry per volume in a <ul>
        try:
            bb = possible_more.findAll('li')
        except:
            pass
        else:
            for x in bb:
                linkline = x.find('a')
                if linkline:
                    if 'go.php' in linkline['href']:
                        volume = x.findNext(text=True)
                        # normalize en-dash to plain hyphen
                        if u'\u2013' in volume:
                            volume = re.sub(u'\u2013', '-', volume)
                        #volume label contains series, issue(s), year(s), and size
                        series_st = volume.find('(')
                        issues_st = volume.find('#')
                        series = volume[:series_st]
                        if any([issues_st == -1, series_st == -1]):
                            issues = None
                        else:
                            series = volume[:issues_st].strip()
                            issues = volume[issues_st+1:series_st].strip()
                        year_end = volume.find(')', series_st+1)
                        year = re.sub('[\(\)]', '', volume[series_st+1: year_end]).strip()
                        size_end = volume.find(')', year_end+1)
                        size = re.sub('[\(\)]', '', volume[year_end+1: size_end]).strip()
                        linked = linkline['href']
                        site = linkline.findNext(text=True)
                        # only queue links served by the main server
                        if site == 'Main Server':
                            links.append({"series": series,
                                          "site": site,
                                          "year": year,
                                          "issues": issues,
                                          "size": size,
                                          "link": linked})
    else:
        # direct button found: additionally look for TPB extras under <h3>
        check_extras = soup.findAll("h3")
        for sb in check_extras:
            header = sb.findNext(text=True)
            if header == 'TPBs':
                nxt = sb.next_sibling
                if nxt.name == 'ul':
                    bb = nxt.findAll('li')
                    for x in bb:
                        volume = x.findNext(text=True)
                        if u'\u2013' in volume:
                            volume = re.sub(u'\u2013', '-', volume)
                        series_st = volume.find('(')
                        issues_st = volume.find('#')
                        series = volume[:issues_st].strip()
                        issues = volume[issues_st:series_st].strip()
                        year_end = volume.find(')', series_st+1)
                        year = re.sub('[\(\)\|]', '', volume[series_st+1: year_end]).strip()
                        size_end = volume.find(')', year_end+1)
                        size = re.sub('[\(\)\|]', '', volume[year_end+1: size_end]).strip()
                        linkline = x.find('a')
                        linked = linkline['href']
                        site = linkline.findNext(text=True)
                        links.append({"series": series,
                                      "volume": volume,
                                      "site": site,
                                      "year": year,
                                      "issues": issues,
                                      "size": size,
                                      "link": linked})
    if all([link is None, len(links) == 0]):
        logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
        return {'success': False}
    if all([link is not None, len(links) == 0]):
        logger.info('only one item discovered, changing queue length to accomodate: %s [%s]' % (link, type(link)))
        links = [link]
    elif len(links) > 0:
        if link is not None:
            links.append(link)
            logger.fdebug('[DDL-QUEUE] Making sure we download the original item in addition to the extra packs.')
        if len(links) > 1:
            logger.fdebug('[DDL-QUEUER] This pack has been broken up into %s separate packs - queueing each in sequence for your enjoyment.' % len(links))
    cnt = 1
    # queue every discovered item; suffix the id with -N when more than one
    for x in links:
        if len(links) == 1:
            mod_id = id
        else:
            mod_id = id+'-'+str(cnt)
        #logger.fdebug('[%s] %s (%s) %s [%s][%s]' % (x['site'], x['series'], x['year'], x['issues'], x['size'], x['link']))
        ctrlval = {'id': mod_id}
        vals = {'series': x['series'],
                'year': x['year'],
                'size': x['size'],
                'issues': x['issues'],
                'issueid': self.issueid,
                'comicid': self.comicid,
                'link': x['link'],
                'mainlink': mainlink,
                'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),
                'status': 'Queued'}
        myDB.upsert('ddl_info', vals, ctrlval)
        mylar.DDL_QUEUE.put({'link': x['link'],
                             'mainlink': mainlink,
                             'series': x['series'],
                             'year': x['year'],
                             'size': x['size'],
                             'comicid': self.comicid,
                             'issueid': self.issueid,
                             'oneoff': self.oneoff,
                             'id': mod_id,
                             'resume': None})
        cnt+=1
    return {'success': True}
def downloadit(self, id, link, mainlink, resume=None):
    """Download one queued DDL item to DDL_LOCATION, honoring a byte-resume.

    Serialized by the ``mylar.DDL_LOCK`` flag: only one DDL download may run
    at a time. Zip payloads are extracted into a same-named directory.

    :param id: ddl_info row id for this item
    :param link: direct download url
    :param mainlink: original GetComics page url (used for CF tokens)
    :param resume: byte offset to resume from, or None for a fresh download
    :return: dict with success/filename/path, or None if the lock is held
    """
    #logger.info('[%s] %s -- mainlink: %s' % (id, link, mainlink))
    if mylar.DDL_LOCK is True:
        logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.')
        return
    else:
        mylar.DDL_LOCK = True
    myDB = db.DBConnection()
    filename = None
    try:
        with cfscrape.create_scraper() as s:
            if resume is not None:
                logger.info('[DDL-RESUME] Attempting to resume from: %s bytes' % resume)
                # standard HTTP Range header to continue a partial download
                self.headers['Range'] = 'bytes=%d-' % resume
            cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers, timeout=30)
            t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True, timeout=30)
            filename = os.path.basename(urllib.unquote(t.url).decode('utf-8'))
            if 'GetComics.INFO' in filename:
                filename = re.sub('GetComics.INFO', '', filename, re.I).strip()
            try:
                remote_filesize = int(t.headers['Content-length'])
                logger.fdebug('remote filesize: %s' % remote_filesize)
            except Exception as e:
                # no Content-length: retry once with the go.php-urls variant
                if 'go.php-urls' not in link:
                    link = re.sub('go.php-url=', 'go.php-urls', link)
                    t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True, timeout=30)
                    filename = os.path.basename(urllib.unquote(t.url).decode('utf-8'))
                    if 'GetComics.INFO' in filename:
                        filename = re.sub('GetComics.INFO', '', filename, re.I).strip()
                    try:
                        remote_filesize = int(t.headers['Content-length'])
                        logger.fdebug('remote filesize: %s' % remote_filesize)
                    except Exception as e:
                        logger.warn('[WARNING] Unable to retrieve remote file size - this is usually due to the page being behind a different click-bait/ad page. Error returned as : %s' % e)
                        logger.warn('[WARNING] Considering this particular download as invalid and will ignore this result.')
                        remote_filesize = 0
                        mylar.DDL_LOCK = False
                        return ({"success": False,
                                 "filename": filename,
                                 "path": None})
                else:
                    logger.warn('[WARNING] Unable to retrieve remote file size - this is usually due to the page being behind a different click-bait/ad page. Error returned as : %s' % e)
                    logger.warn('[WARNING] Considering this particular download as invalid and will ignore this result.')
                    remote_filesize = 0
                    mylar.DDL_LOCK = False
                    return ({"success": False,
                             "filename": filename,
                             "path": None})
            #write the filename to the db for tracking purposes...
            myDB.upsert('ddl_info', {'filename': filename, 'remote_filesize': remote_filesize}, {'id': id})
            if mylar.CONFIG.DDL_LOCATION is not None and not os.path.isdir(mylar.CONFIG.DDL_LOCATION):
                checkdirectory = mylar.filechecker.validateAndCreateDirectory(mylar.CONFIG.DDL_LOCATION, True)
                if not checkdirectory:
                    logger.warn('[ABORTING] Error trying to validate/create DDL download directory: %s.' % mylar.CONFIG.DDL_LOCATION)
                    return ({"success": False,
                             "filename": filename,
                             "path": None})
            path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
            if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
                buf = StringIO(t.content)
                # NOTE(review): this GzipFile is immediately shadowed by the
                # `with open(...) as f` below, so the decompressed stream is
                # never actually used - confirm whether this branch is dead.
                f = gzip.GzipFile(fileobj=buf)
            if resume is not None:
                # append to the partial file when resuming
                with open(path, 'ab') as f:
                    for chunk in t.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
                            f.flush()
            else:
                with open(path, 'wb') as f:
                    for chunk in t.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
                            f.flush()
    except Exception as e:
        logger.error('[ERROR] %s' % e)
        mylar.DDL_LOCK = False
        return ({"success": False,
                 "filename": filename,
                 "path": None})
    else:
        mylar.DDL_LOCK = False
        if os.path.isfile(path):
            if path.endswith('.zip'):
                # extract zips into a directory named after the zip (sans ext)
                new_path = os.path.join(mylar.CONFIG.DDL_LOCATION, re.sub('.zip', '', filename).strip())
                logger.info('Zip file detected. Unzipping into new modified path location: %s' % new_path)
                try:
                    zip_f = zipfile.ZipFile(path, 'r')
                    zip_f.extractall(new_path)
                    zip_f.close()
                except Exception as e:
                    logger.warn('[ERROR: %s] Unable to extract zip file: %s' % (e, new_path))
                    return ({"success": False,
                             "filename": filename,
                             "path": None})
                else:
                    try:
                        os.remove(path)
                    except Exception as e:
                        logger.warn('[ERROR: %s] Unable to remove zip file from %s after extraction.' % (e, path))
                    # filename None signals the caller the payload is a directory
                    filename = None
            else:
                new_path = path
            return ({"success": True,
                     "filename": filename,
                     "path": new_path})
def issue_list(self, pack):
    """Expand a pack description string into a sorted list of issue numbers.

    Tokens are separated by '+' and/or ','. A token like '1-3' expands to
    the inclusive range 1..3; a token containing 'TPBs' is kept as the
    literal string 'TPBs'; anything else is parsed as an int.

    Fixes: the original computed the list but never returned it (and only
    printed it via Py2 `print` statements).

    :param pack: e.g. '1-3, 5 + TPBs'
    :return: sorted list of ints (with any 'TPBs' marker sorted last)
    """
    tokens = pack.replace('+', ' ').replace(',', ' ').split()
    pack_issues = []
    for tok in tokens:
        if '-' in tok:
            start, _, end = tok.partition('-')
            # inclusive range: '1-3' -> 1, 2, 3
            pack_issues.extend(range(int(start), int(end) + 1))
        elif 'TPBs' not in tok:
            pack_issues.append(int(tok))
        else:
            pack_issues.append('TPBs')
    # numbers first in ascending order, any 'TPBs' strings after them
    # (avoids the Py3 TypeError from comparing int with str)
    pack_issues.sort(key=lambda x: (isinstance(x, str), x))
    return pack_issues
#if __name__ == '__main__':
# ab = GC(sys.argv[1]) #'justice league aquaman') #sys.argv[0])
# #c = ab.search()
# b = ab.loadsite('test', sys.argv[2])
# c = ab.parse_downloadresults('test', '60MB')
# #c = ab.issue_list(sys.argv[2])
|
migrations | 0027_persons_and_groups_on_events | from infi.clickhouse_orm import migrations
from posthog.clickhouse.client.migration_tools import run_sql_with_exceptions
from posthog.client import sync_execute
from posthog.models.event.sql import (
EVENTS_TABLE_JSON_MV_SQL,
KAFKA_EVENTS_TABLE_JSON_SQL,
)
from posthog.settings import CLICKHOUSE_CLUSTER
# ALTER TABLE template adding denormalized person/group columns to an
# events table; formatted with the table name and cluster in
# add_columns_to_required_tables below.
ADD_COLUMNS_BASE_SQL = """
ALTER TABLE {table}
ON CLUSTER '{cluster}'
ADD COLUMN IF NOT EXISTS person_id UUID,
ADD COLUMN IF NOT EXISTS person_properties VARCHAR,
ADD COLUMN IF NOT EXISTS group0_properties VARCHAR,
ADD COLUMN IF NOT EXISTS group1_properties VARCHAR,
ADD COLUMN IF NOT EXISTS group2_properties VARCHAR,
ADD COLUMN IF NOT EXISTS group3_properties VARCHAR,
ADD COLUMN IF NOT EXISTS group4_properties VARCHAR
"""
def add_columns_to_required_tables(_):
    """Run the column-adding ALTER against each of the events tables.

    The unused argument is the database handle passed by RunPython.
    """
    for table_name in ("events", "writable_events", "sharded_events"):
        sync_execute(
            ADD_COLUMNS_BASE_SQL.format(table=table_name, cluster=CLICKHOUSE_CLUSTER)
        )
# Migration plan: drop the JSON materialized view and its Kafka source
# table, add the new columns to all events tables, then recreate the
# Kafka table and materialized view so they pick up the new columns.
operations = [
    run_sql_with_exceptions(
        f"DROP TABLE IF EXISTS events_json_mv ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
    ),
    run_sql_with_exceptions(
        f"DROP TABLE IF EXISTS kafka_events_json ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
    ),
    migrations.RunPython(add_columns_to_required_tables),
    run_sql_with_exceptions(KAFKA_EVENTS_TABLE_JSON_SQL()),
    run_sql_with_exceptions(EVENTS_TABLE_JSON_MV_SQL()),
]
|
clientScripts | dip_generation_helper | #!/usr/bin/env python
import argparse
import csv
import archivematicaFunctions
import django
from agentarchives import archivesspace
from custom_handlers import get_script_logger
from django.db.models import Q
from main import models
# dashboard
# archivematicaCommon
# Third party dependencies, alphabetical by import source
# initialize Django (required for Django 1.7)
django.setup()
from django.db import transaction

# NOTE(review): the logger name says "moveTransfer" but this script is the
# DIP generation helper - looks like a copy/paste leftover; confirm before
# renaming since log filtering may key off it.
logger = get_script_logger("archivematica.mcp.client.moveTransfer")
def create_archivesspace_client():
    """
    Create an ArchivesSpace client instance.

    Connection details come from the 'upload-archivesspace_v0.0' dashboard
    settings. Returns the client, or None when authentication or the
    connection fails (both cases are logged).
    """
    # TODO use same code as views_as.py?
    config = models.DashboardSetting.objects.get_dict("upload-archivesspace_v0.0")

    try:
        return archivesspace.ArchivesSpaceClient(
            host=config["base_url"],
            user=config["user"],
            passwd=config["passwd"],
            repository=config["repository"],
        )
    except archivesspace.AuthenticationError:
        logger.error(
            "Unable to authenticate to ArchivesSpace server using the default user! Check administrative settings."
        )
    except archivesspace.ConnectionError:
        logger.error(
            "Unable to connect to ArchivesSpace server at the default location! Check administrative settings."
        )
    return None
def parse_archivesspaceids_csv(files):
    """
    Parse filename and reference ID from archivesspaceids.csv files.

    :param files: List of paths to archivesspaceids.csv files
    :return: Dict with {filename: reference ID}
    """
    file_info = {}
    # SIP is last, so takes priority (later files overwrite earlier entries)
    for csv_path in files:
        with open(csv_path) as f:
            for row in csv.reader(f):
                # skip blank lines and malformed rows missing the ref ID
                # column (the original raised IndexError on those)
                if len(row) < 2:
                    continue
                file_info[row[0]] = row[1]
    return file_info
def parse_archivesspace_ids(sip_path, sip_uuid):
    """
    Parse an archivesspaceids.csv to pre-populate the matching GUI.

    :param sip_path: Path to the SIP to check for an archivesspaceids.csv
    :param sip_uuid: UUID of the SIP to auto-populate ArchivesSpace IDs for
    :return: 0 on success, 1 on failure
    """
    # Check for archivesspaceids.csv
    csv_paths = archivematicaFunctions.find_metadata_files(
        sip_path, "archivesspaceids.csv"
    )
    if not csv_paths:
        logger.info("No archivesspaceids.csv files found, exiting")
        return 0

    file_info = parse_archivesspaceids_csv(csv_paths)
    if not file_info:
        logger.info("No information found in archivesspaceids.csv files")
        return 1
    logger.info("File info: %s", file_info)
    # Create client
    client = create_archivesspace_client()
    if not client:
        return 1
    for filename, ref_id in file_info.items():
        # Get file object (for fileUUID, to see if in DIP)
        logger.debug('Getting file object: filename="%s" ref_id="%s"', filename, ref_id)
        try:
            # original location may be recorded relative to the transfer or
            # the SIP, with or without the objects/ prefix - try all four
            f = models.File.objects.get(
                Q(originallocation="%transferDirectory%" + filename)
                | Q(originallocation="%transferDirectory%objects/" + filename)
                | Q(originallocation="%SIPDirectory%" + filename)
                | Q(originallocation="%SIPDirectory%objects/" + filename),
                sip_id=sip_uuid,
            )
        except models.File.DoesNotExist:
            logger.error("%s not found in database, skipping", filename)
            continue
        except models.File.MultipleObjectsReturned:
            logger.error(
                "Multiple entries for %s found in database, skipping", filename
            )
            continue
        logger.debug("File: %s", f)
        # Query ref_id to client for resource_id
        resource = client.find_by_id("archival_objects", "ref_id", ref_id)
        try:
            resource_id = resource[0]["id"]
        except IndexError:
            logger.error("ArchivesSpace did not return an ID for %s", ref_id)
            logger.error("Returned %s", resource)
            continue
        logger.debug("Resource ID: %s", resource_id)
        # Add to ArchivesSpaceDIPObjectResourcePairing
        models.ArchivesSpaceDIPObjectResourcePairing.objects.create(
            dipuuid=sip_uuid, fileuuid=f.uuid, resourceid=resource_id
        )
    # Check if any files were processed?
    return 0
def call(jobs):
    """Job entry point: run the DIP helper(s) for each job in one transaction."""
    parser = argparse.ArgumentParser(description="Parse metadata for DIP helpers")
    parser.add_argument("--sipUUID", required=True, help="%SIPUUID%")
    parser.add_argument("--sipPath", required=True, help="%SIPDirectory%")

    with transaction.atomic():
        for job in jobs:
            with job.JobContext(logger=logger):
                args = parser.parse_args(job.args[1:])

                # Return non-zero if any of the helpers fail
                rc = parse_archivesspace_ids(args.sipPath, args.sipUUID)
                # rc = rc or another_dip_helper(args.sipPath, args.sipUUID)

                job.set_status(rc)
|
heartbeat | alertmanager | from pathlib import PurePath
from apps.integrations.metadata.heartbeat._heartbeat_text_creator import (
HeartBeatTextCreator,
)
# Integration name derived from this module's filename ("alertmanager"),
# used to build the standard heartbeat notification texts.
integration_verbal = PurePath(__file__).stem
creator = HeartBeatTextCreator(integration_verbal)
heartbeat_text = creator.get_heartbeat_texts()

heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
heartbeat_expired_message = heartbeat_text.heartbeat_expired_message

# Alertmanager-format webhook payload sent when a heartbeat is missed:
# one "firing" alert named OnCallHeartBeatMissing.
heartbeat_expired_payload = {
    "alerts": [
        {
            "endsAt": "",
            "labels": {
                "alertname": "OnCallHeartBeatMissing",
            },
            "status": "firing",
            "startsAt": "",
            "annotations": {
                "title": heartbeat_expired_title,
                "description": heartbeat_expired_message,
            },
            "fingerprint": "fingerprint",
            "generatorURL": "",
        },
    ],
    "status": "firing",
    "version": "4",
    "groupKey": '{}:{alertname="OnCallHeartBeatMissing"}',
    "receiver": "",
    "numFiring": 1,
    "externalURL": "",
    "groupLabels": {"alertname": "OnCallHeartBeatMissing"},
    "numResolved": 0,
    "commonLabels": {"alertname": "OnCallHeartBeatMissing"},
    "truncatedAlerts": 0,
    "commonAnnotations": {
        "title": heartbeat_expired_title,
        "description": heartbeat_expired_message,
    },
}
heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
heartbeat_restored_message = heartbeat_text.heartbeat_restored_message

# Counterpart payload sent when the heartbeat comes back: the same alert
# with status "resolved" (numFiring/numResolved flipped accordingly).
heartbeat_restored_payload = {
    "alerts": [
        {
            "endsAt": "",
            "labels": {
                "alertname": "OnCallHeartBeatMissing",
            },
            "status": "resolved",
            "startsAt": "",
            "annotations": {
                "title": heartbeat_restored_title,
                "description": heartbeat_restored_message,
            },
            "fingerprint": "fingerprint",
            "generatorURL": "",
        },
    ],
    "status": "resolved",
    "version": "4",
    "groupKey": '{}:{alertname="OnCallHeartBeatMissing"}',
    "receiver": "",
    "numFiring": 0,
    "externalURL": "",
    "groupLabels": {"alertname": "OnCallHeartBeatMissing"},
    "numResolved": 1,
    "commonLabels": {"alertname": "OnCallHeartBeatMissing"},
    "truncatedAlerts": 0,
    "commonAnnotations": {
        "title": heartbeat_restored_title,
        "description": heartbeat_restored_message,
    },
}
|
blocks | proxyport | from gaphor.core.modeling.properties import attribute
from gaphor.diagram.presentation import AttachedPresentation, Named
from gaphor.diagram.shapes import (
Box,
IconBox,
Text,
TextAlign,
VerticalAlign,
draw_border,
)
from gaphor.diagram.support import represents
from gaphor.SysML import sysml
from gaphor.UML.recipes import stereotypes_str
def text_position(position):
    """Map a connected side name to a text style dict.

    "left" aligns text left (otherwise right); "bottom" anchors it at the
    bottom (otherwise top).
    """
    if position == "left":
        horizontal = TextAlign.LEFT
    else:
        horizontal = TextAlign.RIGHT
    if position == "bottom":
        vertical = VerticalAlign.BOTTOM
    else:
        vertical = VerticalAlign.TOP
    return {"text-align": horizontal, "vertical-align": vertical}
@represents(sysml.ProxyPort)
class ProxyPortItem(Named, AttachedPresentation[sysml.ProxyPort]):
    """Diagram presentation for a SysML ProxyPort: a small bordered box with
    a <<proxy>> stereotype label and the port's (optionally typed) name."""

    def __init__(self, diagram, id=None):
        super().__init__(diagram, id, width=16, height=16)
        # re-render when the port's name, its type's name, or the
        # show_type toggle changes
        self.watch("subject[NamedElement].name").watch(
            "subject[TypedElement].type.name"
        ).watch("show_type")

    # when truthy, the label is rendered as "name: TypeName"
    show_type: attribute[int] = attribute("show_type", int, default=False)

    def update_shapes(self):
        # text is positioned opposite the side the port is attached to
        self.shape = IconBox(
            Box(draw=draw_border),
            Text(
                text=lambda: stereotypes_str(
                    self.subject, [self.diagram.gettext("proxy")]
                )
            ),
            Text(text=self._format_name),
            style=text_position(self.connected_side()),
        )

    def _format_name(self):
        # empty label while no model element is attached
        if not self.subject:
            return ""

        name = self.subject.name or ""
        if self.show_type and self.subject.type:
            return f"{name}: {self.subject.type.name or ''}"
        return name
|
extractor | golem | # coding: utf-8
from __future__ import unicode_literals
from ..compat import compat_str, compat_urlparse
from ..utils import determine_ext
from .common import InfoExtractor
class GolemIE(InfoExtractor):
    """Extractor for videos hosted on video.golem.de."""

    _VALID_URL = r"^https?://video\.golem\.de/.+?/(?P<id>.+?)/"
    _TEST = {
        "url": "http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html",
        "md5": "c1a2c0a3c863319651c7c992c5ee29bf",
        "info_dict": {
            "id": "14095",
            "format_id": "high",
            "ext": "mp4",
            "title": "iPhone 6 und 6 Plus - Test",
            "duration": 300.44,
            "filesize": 65309548,
        },
    }

    _PREFIX = "http://video.golem.de"

    def _real_extract(self, url):
        """Fetch the per-video XML config and build the info dict from it."""
        video_id = self._match_id(url)
        config = self._download_xml(
            "https://video.golem.de/xml/{0}.xml".format(video_id), video_id
        )

        info = {
            "id": video_id,
            "title": config.findtext("./title", "golem"),
            "duration": self._float(config.findtext("./playtime"), "duration"),
        }

        formats = []
        # each child of the config root describes one format; its tag name
        # (e.g. "high") doubles as the format id
        for e in config:
            url = e.findtext("./url")
            if not url:
                continue

            formats.append(
                {
                    "format_id": compat_str(e.tag),
                    "url": compat_urlparse.urljoin(self._PREFIX, url),
                    "height": self._int(e.get("height"), "height"),
                    "width": self._int(e.get("width"), "width"),
                    "filesize": self._int(e.findtext("filesize"), "filesize"),
                    "ext": determine_ext(e.findtext("./filename")),
                }
            )
        self._sort_formats(formats)
        info["formats"] = formats

        thumbnails = []
        # thumbnails are listed as <teaser> elements with relative urls
        for e in config.findall(".//teaser"):
            url = e.findtext("./url")
            if not url:
                continue
            thumbnails.append(
                {
                    "url": compat_urlparse.urljoin(self._PREFIX, url),
                    "width": self._int(e.get("width"), "thumbnail width"),
                    "height": self._int(e.get("height"), "thumbnail height"),
                }
            )
        info["thumbnails"] = thumbnails

        return info
|
femobjects | result_mechanical | # ***************************************************************************
# * Copyright (c) 2016 Qingfeng Xia <qingfeng.xia()eng.ox.ac.uk> *
# * Copyright (c) 2016 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM result mechanical document object"
__author__ = "Qingfeng Xia, Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## @package result_mechanical
# \ingroup FEM
# \brief mechanical result object
from . import base_fempythonobject
class ResultMechanical(base_fempythonobject.BaseFemPythonObject):
    """
    The Fem::ResultMechanical's Proxy python type, add result specific properties
    """

    Type = "Fem::ResultMechanical"

    def __init__(self, obj):
        """Add all mechanical result properties to the document object *obj*."""
        super(ResultMechanical, self).__init__(obj)

        obj.addProperty(
            "App::PropertyString",
            "ResultType",
            "Base",
            "Type of the result",
            1,  # the 1 set the property to ReadOnly
        )
        obj.ResultType = str(self.Type)

        # for frequency analysis
        obj.addProperty("App::PropertyInteger", "Eigenmode", "Data", "", True)

        # NOTE(review): the tooltip "User Defined Results" looks like a
        # copy/paste leftover; this property holds the eigenmode frequency.
        obj.addProperty(
            "App::PropertyFloat",
            "EigenmodeFrequency",
            "Data",
            "User Defined Results",
            True,
        )

        # node results
        # set read only or hide a property:
        # https://forum.freecad.org/viewtopic.php?f=18&t=13460&start=10#p108072
        # do not show up in propertyEditor of comboView
        obj.addProperty(
            "App::PropertyVectorList",
            "DisplacementVectors",
            "NodeData",
            "List of displacement vectors",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "Peeq",
            "NodeData",
            "List of equivalent plastic strain values",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "MohrCoulomb",
            "NodeData",
            "List of Mohr Coulomb stress values",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "ReinforcementRatio_x",
            "NodeData",
            "Reinforcement ratio x-direction",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "ReinforcementRatio_y",
            "NodeData",
            "Reinforcement ratio y-direction",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "ReinforcementRatio_z",
            "NodeData",
            "Reinforcement ratio z-direction",
            True,
        )

        # these three principal vectors are used only if there is a reinforced mat obj
        # https://forum.freecad.org/viewtopic.php?f=18&t=33106&p=416006#p416006
        obj.addProperty(
            "App::PropertyVectorList",
            "PS1Vector",
            "NodeData",
            "List of 1st Principal Stress Vectors",
            True,
        )

        obj.addProperty(
            "App::PropertyVectorList",
            "PS2Vector",
            "NodeData",
            "List of 2nd Principal Stress Vectors",
            True,
        )

        obj.addProperty(
            "App::PropertyVectorList",
            "PS3Vector",
            "NodeData",
            "List of 3rd Principal Stress Vectors",
            True,
        )

        # readonly in propertyEditor of comboView
        obj.addProperty(
            "App::PropertyFloatList",
            "DisplacementLengths",
            "NodeData",
            "List of displacement lengths",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "vonMises",
            "NodeData",
            "List of von Mises equivalent stresses",
            True,
        )

        obj.addProperty("App::PropertyFloatList", "PrincipalMax", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "PrincipalMed", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "PrincipalMin", "NodeData", "", True)

        obj.addProperty(
            "App::PropertyFloatList",
            "MaxShear",
            "NodeData",
            "List of Maximum Shear stress values",
            True,
        )

        # fluid (1D flow) results
        obj.addProperty(
            "App::PropertyFloatList",
            "MassFlowRate",
            "NodeData",
            "List of mass flow rate values",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "NetworkPressure",
            "NodeData",
            "List of network pressure values",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "UserDefined",
            "NodeData",
            "User Defined Results",
            True,
        )

        obj.addProperty(
            "App::PropertyFloatList",
            "Temperature",
            "NodeData",
            "Temperature field",
            True,
        )

        # per-node stress and strain tensor components
        obj.addProperty("App::PropertyFloatList", "NodeStressXX", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStressYY", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStressZZ", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStressXY", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStressXZ", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStressYZ", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainXX", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainYY", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainZZ", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainXY", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainXZ", "NodeData", "", True)

        obj.addProperty("App::PropertyFloatList", "NodeStrainYZ", "NodeData", "", True)

        obj.addProperty(
            "App::PropertyFloatList", "CriticalStrainRatio", "NodeData", "", True
        )

        # initialize the Stats with the appropriate count of items
        # see fill_femresult_stats in femresult/resulttools.py
        zero_list = 26 * [0]
        obj.Stats = zero_list

    def onDocumentRestored(self, obj):
        """Migrate properties of result objects saved by older versions."""
        # migrate old result objects, because property "StressValues"
        # was renamed to "vonMises" in commit 8b68ab7
        if hasattr(obj, "StressValues") is True:
            obj.addProperty(
                "App::PropertyFloatList",
                "vonMises",
                "NodeData",
                "List of von Mises equivalent stresses",
                True,
            )
            obj.vonMises = obj.StressValues
            obj.removeProperty("StressValues")

        # migrate old result objects, because property "Stats"
        # consisting of min, avg, max values was reduced to min, max in commit c2a57b3e
        if len(obj.Stats) == 39:
            temp = obj.Stats
            # drop every "avg" entry (index 3*i+1 of each min/avg/max triple)
            for i in range(12, -1, -1):
                del temp[3 * i + 1]
            obj.Stats = temp
|
server | packages | """Package management."""
import abc
import ast
import collections
import logging
import os
from pathlib import Path
from tempfile import mkdtemp
from uuid import UUID, uuid4
import storageService as storage_service
from django.conf import settings
from django.utils import timezone
from main import models
from server.db import auto_close_old_connections
from server.jobs import JobChain
from server.processing_config import processing_configuration_file_exists
from server.utils import uuid_from_path
logger = logging.getLogger("archivematica.mcp.server.packages")
# Triple describing where a package type enters the workflow: the watched
# directory plus the chain/link to jump straight to (skipping the
# Accept/Reject prompt - see the comment above PACKAGE_TYPE_STARTING_POINTS).
StartingPoint = collections.namedtuple("StartingPoint", "watched_dir chain link")


def _get_setting(name):
    """Return the Django setting *name* (plain attribute lookup on settings)."""
    return getattr(settings, name)
# Each package type has its corresponding watched directory and its
# associated chain, e.g. a "standard" transfer triggers the chain with UUID
# "fffd5342-2337-463f-857a-b2c8c3778c6d". This is stored in the
# WatchedDirectory model. These starting chains all have in common that they
# prompt the user to Accept/Reject the transfer.
#
# In this module we don't want to prompt the user. Instead we want to jump
# directly into action, this is whatever happens when the transfer is
# accepted. The following dictionary points to the chains and links where
# this is happening. Presumably this could be written in a generic way querying
# the workflow data but in the first iteration we've decided to do it this way.
# There is also the hope that the watched directories can be deprecated in the
# near future.
PACKAGE_TYPE_STARTING_POINTS = {
    "standard": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/standardTransfer"
        ),
        chain="6953950b-c101-4f4c-a0c3-0cd0684afe5e",
        link="045c43ae-d6cf-44f7-97d6-c8a602748565",
    ),
    "zipfile": StartingPoint(
        watched_dir=os.path.join(
            # consistency fix: every other entry reads the setting via
            # _get_setting(); this one accessed settings.WATCH_DIRECTORY
            # directly (same value, inconsistent style)
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/zippedDirectory"
        ),
        chain="f3caceff-5ad5-4bad-b98c-e73f8cd03450",
        link="541f5994-73b0-45bb-9cb5-367c06a21be7",
    ),
    "unzipped bag": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/baggitDirectory"
        ),
        chain="c75ef451-2040-4511-95ac-3baa0f019b48",
        link="154dd501-a344-45a9-97e3-b30093da35f5",
    ),
    "zipped bag": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/baggitZippedDirectory"
        ),
        chain="167dc382-4ab1-4051-8e22-e7f1c1bf3e6f",
        link="3229e01f-adf3-4294-85f7-4acb01b3fbcf",
    ),
    "dspace": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/Dspace"
        ),
        chain="1cb2ef0e-afe8-45b5-8d8f-a1e120f06605",
        link="bda96b35-48c7-44fc-9c9e-d7c5a05016c1",
    ),
    "maildir": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/maildir"
        ),
        chain="d381cf76-9313-415f-98a1-55c91e4d78e0",
        link="da2d650e-8ce3-4b9a-ac97-8ca4744b019f",
    ),
    "TRIM": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/TRIM"
        ),
        chain="e4a59e3e-3dba-4eb5-9cf1-c1fb3ae61fa9",
        link="2483c25a-ade8-4566-a259-c6c37350d0d6",
    ),
    "dataverse": StartingPoint(
        watched_dir=os.path.join(
            _get_setting("WATCH_DIRECTORY"), "activeTransfers/dataverseTransfer"
        ),
        # Approve Dataverse Transfer Chain
        chain="10c00bc8-8fc2-419f-b593-cf5518695186",
        # Chain link setting transfer-type: Dataverse
        link="0af6b163-5455-4a76-978b-e35cc9ee445f",
    ),
}
# %token% -> absolute-path substitutions applied to workflow-defined paths
# for every package, resolved from the configured shared directories.
BASE_REPLACEMENTS = {
    r"%tmpDirectory%": os.path.join(_get_setting("SHARED_DIRECTORY"), "tmp", ""),
    r"%processingDirectory%": _get_setting("PROCESSING_DIRECTORY"),
    r"%watchDirectoryPath%": _get_setting("WATCH_DIRECTORY"),
    r"%rejectedDirectory%": _get_setting("REJECTED_DIRECTORY"),
}
def get_approve_transfer_chain_id(transfer_type):
    """Return chain ID to approve a transfer given its type.

    :raises ValueError: if *transfer_type* is not a known package type.
    """
    starting_point = PACKAGE_TYPE_STARTING_POINTS.get(transfer_type)
    if starting_point is None:
        raise ValueError("Unknown transfer type")
    return starting_point.chain
def _file_is_an_archive(filepath):
filepath = filepath.lower()
return (
filepath.endswith(".zip")
or filepath.endswith(".tgz")
or filepath.endswith(".tar.gz")
)
def _pad_destination_filepath_if_it_already_exists(filepath, original=None, attempt=0):
"""
Return a version of the filepath that does not yet exist, padding with numbers
as necessary and reattempting until a non-existent filepath is found
:param filepath: `Path` or string of the desired destination filepath
:param original: `Path` or string of the original filepath (before padding attempts)
:param attempt: Number
:returns: `Path` object, padded as necessary
"""
if original is None:
original = filepath
filepath = Path(filepath)
original = Path(original)
attempt = attempt + 1
if not filepath.exists():
return filepath
if filepath.is_dir():
return _pad_destination_filepath_if_it_already_exists(
f"{original.as_posix()}_{attempt}",
original,
attempt,
)
# need to work out basename
basedirectory = original.parent
basename = original.name
# do more complex padding to preserve file extension
period_position = basename.index(".")
non_extension = basename[0:period_position]
extension = basename[period_position:]
new_basename = f"{non_extension}_{attempt}{extension}"
new_filepath = basedirectory / new_basename
return _pad_destination_filepath_if_it_already_exists(
new_filepath, original, attempt
)
def _check_filepath_exists(filepath):
if filepath == "":
return "No filepath provided."
if not os.path.exists(filepath):
return f"Filepath {filepath} does not exist."
if ".." in filepath: # check for trickery
return "Illegal path."
return None
# module-level memo for the default transfer-source location UUID
_default_location_uuid = None


@auto_close_old_connections()
def _default_transfer_source_location_uuid():
    """Return (and memoize) the UUID of the default TS storage location."""
    global _default_location_uuid
    if _default_location_uuid is None:
        location = storage_service.get_default_location("TS")
        _default_location_uuid = location["uuid"]
    return _default_location_uuid
@auto_close_old_connections()
def _copy_from_transfer_sources(paths, relative_destination):
    """Copy files from source locations to the currently processing location.

    Any files in locations not associated with this pipeline will be ignored.

    :param list paths: List of paths. Each path should be formatted
        ``<uuid of location>:<full path in location>``.
    :param str relative_destination: Path relative to the currently processing
        space to move the files to.
    :raises Exception: when a path references an unknown location, or when any
        of the storage-service copy calls reports an error.
    """
    # The single "CP" (currently processing) location is the copy target.
    processing_location = storage_service.get_first_location(purpose="CP")
    transfer_sources = storage_service.get_location(purpose="TS")
    # Group the requested files by their source location UUID.
    files = {ts["uuid"]: {"location": ts, "files": []} for ts in transfer_sources}
    for item in paths:
        location, path = LocationPath(item).parts()
        if location is None:
            # No "<uuid>:" prefix given; fall back to the default TS location.
            location = _default_transfer_source_location_uuid()
        if location not in files:
            raise Exception(
                "Location %(location)s is not associated"
                " with this pipeline" % {"location": location}
            )
        # Strip the location's own path prefix (first occurrence only) so
        # ``source`` is relative to the source location's root.
        source = path.replace(str(files[location]["location"]["path"]), "", 1).lstrip(
            "/"
        )
        # Use the last segment of the path for the destination - basename for a
        # file, or the last folder if not. Keep the trailing / for folders.
        last_segment = (
            os.path.basename(source.rstrip("/")) + "/"
            if source.endswith("/")
            else os.path.basename(source)
        )
        destination = os.path.join(
            str(processing_location["path"]),
            relative_destination,
            last_segment,
        ).replace("%sharedPath%", "")
        files[location]["files"].append({"source": source, "destination": destination})
        logger.debug("source: %s, destination: %s", source, destination)
    # One storage-service call per source location; collect errors and report
    # them all at once instead of failing on the first.
    message = []
    for item in files.values():
        reply, error = storage_service.copy_files(
            item["location"], processing_location, item["files"]
        )
        if reply is None:
            message.append(str(error))
    if message:
        raise Exception(
            "The following errors occurred: %(message)s"
            % {"message": ", ".join(message)}
        )
@auto_close_old_connections()
def _move_to_internal_shared_dir(filepath, dest, transfer):
    """Move package to an internal Archivematica directory.

    The side effect of this function is to update the transfer object with the
    final location. This is important so other components can continue the
    processing. When relying on watched directories to start a transfer (see
    _start_package_transfer), this also matters because Transfer is going
    to look up the object in the database based on the location.

    :param filepath: current on-disk path of the package.
    :param dest: destination directory to move the package into.
    :param transfer: ``models.Transfer`` updated (and saved) with the new
        ``currentlocation`` on success.
    :raises Exception: when the filepath is invalid or the rename fails.
    """
    error = _check_filepath_exists(filepath)
    if error:
        raise Exception(error)
    filepath = Path(filepath)
    dest = Path(dest)
    # Confine destination to subdir of originals; pad to avoid clobbering.
    basename = filepath.name
    dest = _pad_destination_filepath_if_it_already_exists(dest / basename)
    try:
        filepath.rename(dest)
    except OSError as e:
        # Bug fix: the message previously used logger-style positional args,
        # so the Exception carried an unformatted tuple instead of a string.
        raise Exception(f"Error moving from {filepath} to {dest} ({e})") from e
    else:
        # Store the DB form of the path (%sharedPath% placeholder restored).
        transfer.currentlocation = dest.as_posix().replace(
            _get_setting("SHARED_DIRECTORY"), r"%sharedPath%", 1
        )
        transfer.save()
@auto_close_old_connections()
def create_package(
    package_queue,
    executor,
    name,
    type_,
    accession,
    access_system_id,
    path,
    metadata_set_id,
    user_id,
    workflow,
    auto_approve=True,
    processing_config=None,
):
    """Launch transfer and return its object immediately.

    ``auto_approve`` changes significantly the way that the transfer is
    initiated. See ``_start_package_transfer_with_auto_approval`` and
    ``_start_package_transfer`` for more details.

    :param package_queue: queue the first job is scheduled on (auto-approval path).
    :param executor: executor that runs the transfer-start task.
    :param name: transfer name (required).
    :param type_: key in ``PACKAGE_TYPE_STARTING_POINTS``; ``None`` and
        "disk image" fall back to "standard".
    :param accession: optional accession ID stored on the Transfer.
    :param access_system_id: optional access system ID stored on the Transfer.
    :param path: source path of the content (required).
    :param metadata_set_id: optional ``TransferMetadataSet`` primary key;
        unknown IDs are silently ignored.
    :param user_id: ID of the initiating user; must be int-convertible.
    :param workflow: workflow used when auto-approving.
    :param auto_approve: start processing immediately when True (must be bool).
    :param processing_config: processing configuration name; "default" is used
        when the named configuration file does not exist.
    :returns: the newly created ``models.Transfer``.
    :raises ValueError: on invalid name, type, path, auto_approve or user_id.
    """
    if not name:
        raise ValueError("No transfer name provided.")
    if type_ is None or type_ == "disk image":
        type_ = "standard"
    if type_ not in PACKAGE_TYPE_STARTING_POINTS:
        raise ValueError(f"Unexpected type of package provided '{type_}'")
    if not path:
        raise ValueError("No path provided.")
    # Idiom fix: "isinstance(...) is False" -> "not isinstance(...)".
    if not isinstance(auto_approve, bool):
        raise ValueError("Unexpected value in auto_approve parameter")
    try:
        int(user_id)
    except (TypeError, ValueError):
        raise ValueError("Unexpected value in user_id parameter")
    # Create Transfer object.
    kwargs = {"uuid": str(uuid4())}
    if accession is not None:
        kwargs["accessionid"] = accession
    if access_system_id is not None:
        kwargs["access_system_id"] = access_system_id
    if metadata_set_id is not None:
        try:
            kwargs["transfermetadatasetrow"] = models.TransferMetadataSet.objects.get(
                id=metadata_set_id
            )
        except models.TransferMetadataSet.DoesNotExist:
            # A stale or unknown metadata set ID is ignored rather than fatal.
            pass
    transfer = models.Transfer.objects.create(**kwargs)
    if not processing_configuration_file_exists(processing_config):
        processing_config = "default"
    transfer.set_processing_configuration(processing_config)
    transfer.update_active_agent(user_id)
    logger.debug("Transfer object created: %s", transfer.pk)
    # TODO: use tempfile.TemporaryDirectory as a context manager in Py3.
    tmpdir = mkdtemp(dir=os.path.join(_get_setting("SHARED_DIRECTORY"), "tmp"))
    starting_point = PACKAGE_TYPE_STARTING_POINTS.get(type_)
    logger.debug(
        "Package %s: starting transfer (%s)", transfer.pk, (name, type_, path, tmpdir)
    )
    params = (transfer, name, path, tmpdir, starting_point)
    if auto_approve:
        params = params + (workflow, package_queue)
        result = executor.submit(_start_package_transfer_with_auto_approval, *params)
    else:
        result = executor.submit(_start_package_transfer, *params)
    # Make the temp dir group-accessible once the start task has finished.
    result.add_done_callback(lambda f: os.chmod(tmpdir, 0o770))
    return transfer
def _capture_transfer_failure(fn):
"""Silence errors during transfer/ingest."""
def wrap(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as err:
# The main purpose of this decorator is to update the Transfer with
# the new state (fail). If the Transfer does not exist we give up.
if isinstance(err, models.Transfer.DoesNotExist):
raise
else:
logger.exception("Exception occurred during transfer processing")
return wrap
def _determine_transfer_paths(name, path, tmpdir):
    """Work out the three paths a new transfer needs.

    :returns: tuple ``(transfer_rel, filepath, path)`` where ``transfer_rel``
        is the transfer directory relative to the shared directory.
    """
    if _file_is_an_archive(path):
        # Archives land whole inside tmpdir; the transfer dir is tmpdir itself.
        local_path = LocationPath(path).path
        filepath = os.path.join(tmpdir, os.path.basename(local_path))
        transfer_dir = tmpdir
    else:
        # The trailing "." asks the copier for the directory's contents,
        # not the directory itself.
        path = os.path.join(path, ".")
        filepath = os.path.join(tmpdir, name)
        transfer_dir = filepath
    transfer_rel = transfer_dir.replace(_get_setting("SHARED_DIRECTORY"), "", 1)
    return transfer_rel, filepath, path
@_capture_transfer_failure
def _start_package_transfer_with_auto_approval(
    transfer, name, path, tmpdir, starting_point, workflow, package_queue
):
    """Start a new transfer the new way.

    This method does not rely on the activeTransfer watched directory. It
    blocks until the process completes. It does not prompt the user to accept
    the transfer because we go directly into the next chain link.

    Steps: resolve paths, copy content from the transfer sources, move the
    package into the processing directory, then schedule the first workflow
    job directly on ``package_queue``.
    """
    transfer_rel, filepath, path = _determine_transfer_paths(name, path, tmpdir)
    logger.debug(
        "Package %s: determined vars" " transfer_rel=%s, filepath=%s, path=%s",
        transfer.pk,
        transfer_rel,
        filepath,
        path,
    )
    logger.debug(
        "Package %s: copying chosen contents from transfer sources" " (from=%s, to=%s)",
        transfer.pk,
        path,
        transfer_rel,
    )
    _copy_from_transfer_sources([path], transfer_rel)
    logger.debug("Package %s: moving package to processing directory", transfer.pk)
    # Also updates transfer.currentlocation so later lookups find the package.
    _move_to_internal_shared_dir(
        filepath, _get_setting("PROCESSING_DIRECTORY"), transfer
    )
    logger.debug("Package %s: starting workflow processing", transfer.pk)
    # Build the job chain at the configured starting point and enqueue the
    # first job — no user approval step in between.
    unit = Transfer(path, transfer.pk)
    job_chain = JobChain(
        unit,
        workflow.get_chain(starting_point.chain),
        workflow,
        starting_link=workflow.get_link(starting_point.link),
    )
    package_queue.schedule_job(next(job_chain))
@_capture_transfer_failure
def _start_package_transfer(transfer, name, path, tmpdir, starting_point):
    """Start a new transfer the old way.

    This means copying the transfer into one of the standard watched dirs.
    MCPServer will continue the processing and prompt the user once the
    contents in the watched directory are detected by the watched directory
    observer.
    """
    transfer_rel, filepath, path = _determine_transfer_paths(name, path, tmpdir)
    logger.debug(
        "Package %s: determined vars" " transfer_rel=%s, filepath=%s, path=%s",
        transfer.pk,
        transfer_rel,
        filepath,
        path,
    )
    logger.debug(
        "Package %s: copying chosen contents from transfer sources" " (from=%s, to=%s)",
        transfer.pk,
        path,
        transfer_rel,
    )
    _copy_from_transfer_sources([path], transfer_rel)
    logger.debug(
        "Package %s: moving package to activeTransfers dir (from=%s," " to=%s)",
        transfer.pk,
        filepath,
        starting_point.watched_dir,
    )
    # Dropping the package into the watched dir is what triggers processing;
    # this call also updates transfer.currentlocation in the database.
    _move_to_internal_shared_dir(filepath, starting_point.watched_dir, transfer)
class LocationPath:
    """A path optionally qualified by a storage-location UUID.

    Input strings look like ``<uuid><sep><path>`` or just ``<path>``; the
    default separator is ":".
    """

    # Class-level defaults: uuid stays None when the input has no separator.
    uuid, path = None, None

    def __init__(self, path, sep=":"):
        self.sep = sep
        head, found_sep, tail = path.partition(self.sep)
        if found_sep:
            # "<uuid><sep><path>" form.
            self.uuid, self.path = head, tail
        else:
            # Bare path, no location qualifier.
            self.path = head

    def __repr__(self):
        return "{} (uuid={!r}, sep={!r}, path={!r})".format(
            self.__class__, self.uuid, self.sep, self.path
        )

    def parts(self):
        """Return the ``(uuid, path)`` pair; uuid is None when absent."""
        return self.uuid, self.path
def get_file_replacement_mapping(file_obj, unit_directory):
    """Build the ``%var%`` replacement dict for a single file record."""
    current = file_obj.currentlocation
    directory = os.path.dirname(current)
    stem, extension = os.path.splitext(current)
    bare_name = os.path.basename(stem)
    # Resolve the unit placeholder(s) into a real on-disk path.
    absolute_path = current.replace(r"%SIPDirectory%", unit_directory)
    absolute_path = absolute_path.replace(r"%transferDirectory%", unit_directory)
    mapping = BASE_REPLACEMENTS.copy()
    mapping[r"%fileUUID%"] = file_obj.pk
    mapping[r"%originalLocation%"] = file_obj.originallocation
    mapping[r"%currentLocation%"] = current
    mapping[r"%fileGrpUse%"] = file_obj.filegrpuse
    mapping[r"%fileDirectory%"] = directory
    mapping[r"%fileName%"] = bare_name
    mapping[r"%fileExtension%"] = extension[1:]
    mapping[r"%fileExtensionWithDot%"] = extension
    # TODO: standardize duplicates — three variables carry the same value.
    mapping[r"%relativeLocation%"] = absolute_path
    mapping[r"%inputFile%"] = absolute_path
    mapping[r"%fileFullName%"] = absolute_path
    return mapping
class Package(metaclass=abc.ABCMeta):
    """A `Package` can be a Transfer, a SIP, or a DIP.

    Subclasses provide ``queryset``/``reload`` and the class attributes
    ``REPLACEMENT_PATH_STRING`` and ``UNIT_VARIABLE_TYPE``.
    """

    def __init__(self, current_path, uuid):
        # Resolve the %sharedPath% placeholder into the real shared directory.
        self._current_path = current_path.replace(
            r"%sharedPath%", _get_setting("SHARED_DIRECTORY")
        )
        # Accept either a UUID instance or a string representation.
        if uuid and not isinstance(uuid, UUID):
            uuid = UUID(uuid)
        self.uuid = uuid

    def __repr__(self):
        return '{class_name}("{current_path}", {uuid})'.format(
            class_name=self.__class__.__name__,
            uuid=self.uuid,
            current_path=self.current_path,
        )

    @classmethod
    @auto_close_old_connections()
    def cleanup_old_db_entries(cls):
        """Update the status of any in progress package.

        This command is run on startup.
        TODO: we could try to recover, instead of just failing.
        """
        completed_at = timezone.now()
        statuses = (models.PACKAGE_STATUS_UNKNOWN, models.PACKAGE_STATUS_PROCESSING)
        # Mark transfers and SIPs left mid-flight by a previous run as failed.
        models.Transfer.objects.filter(status__in=statuses).update(
            status=models.PACKAGE_STATUS_FAILED,
            completed_at=completed_at,
        )
        models.SIP.objects.filter(status__in=statuses).update(
            status=models.PACKAGE_STATUS_FAILED,
            completed_at=completed_at,
        )

    @abc.abstractmethod
    def queryset(self):
        """Return the Django queryset selecting this package's DB row."""
        raise NotImplementedError

    def change_status(self, status, **defaults):
        """Change the status of the package.

        Use one of the possible values in ``models.PACKAGE_STATUS_CHOICES``.
        Extra keyword arguments are passed through to ``update()``.
        """
        with auto_close_old_connections():
            self.queryset().update(status=status, **defaults)

    def mark_as_done(self):
        """Change the status of the package to Done."""
        self.change_status(models.PACKAGE_STATUS_DONE, completed_at=timezone.now())

    def mark_as_processing(self):
        """Change the status of the package to Processing."""
        self.change_status(models.PACKAGE_STATUS_PROCESSING)

    @property
    def current_path(self):
        """The real (no shared dir vars) path to the package."""
        return self._current_path

    @current_path.setter
    def current_path(self, value):
        """The real (no shared dir vars) path to the package."""
        self._current_path = value.replace(
            r"%sharedPath%", _get_setting("SHARED_DIRECTORY")
        )

    @property
    def current_path_for_db(self):
        """The path to the package, as stored in the database."""
        return self.current_path.replace(
            _get_setting("SHARED_DIRECTORY"), r"%sharedPath%", 1
        )

    @property
    def package_name(self):
        """Directory basename with the trailing "-<uuid>" suffix removed."""
        basename = os.path.basename(self.current_path.rstrip("/"))
        return basename.replace("-" + str(self.uuid), "")

    @property
    @auto_close_old_connections()
    def base_queryset(self):
        """Files linked to this package; Transfer overrides the FK used."""
        return models.File.objects.filter(sip_id=self.uuid)

    @property
    def context(self):
        """Returns a `PackageContext` for this package."""
        # This needs to be reloaded from the db every time, because new values
        # could have been added by a client script.
        # TODO: pass context changes back from client
        return PackageContext.load_from_db(self.uuid)

    @abc.abstractmethod
    def reload(self):
        """Refresh cached attributes from the database row."""
        pass

    def get_replacement_mapping(self, filter_subdir_path=None):
        """Return the ``%var%`` -> value mapping used by client scripts."""
        mapping = BASE_REPLACEMENTS.copy()
        mapping.update(
            {
                r"%SIPUUID%": str(self.uuid),
                r"%SIPName%": self.package_name,
                r"%SIPLogsDirectory%": os.path.join(self.current_path, "logs", ""),
                r"%SIPObjectsDirectory%": os.path.join(
                    self.current_path, "objects", ""
                ),
                r"%SIPDirectory%": self.current_path,
                r"%SIPDirectoryBasename%": os.path.basename(
                    os.path.abspath(self.current_path)
                ),
                r"%relativeLocation%": self.current_path_for_db,
            }
        )
        return mapping

    def files(
        self, filter_filename_start=None, filter_filename_end=None, filter_subdir=None
    ):
        """Generator that yields all files associated with the package or that
        should be associated with a package.

        Yields one replacement mapping per file: first from the database
        queryset, then from an on-disk walk for files not yet recorded.
        ``filter_filename_start`` is not implemented and raises.
        """
        with auto_close_old_connections():
            queryset = self.base_queryset
            if filter_filename_start:
                # TODO: regex filter
                raise NotImplementedError("filter_filename_start is not implemented")
            if filter_filename_end:
                queryset = queryset.filter(
                    currentlocation__endswith=filter_filename_end
                )
            if filter_subdir:
                filter_path = "".join([self.REPLACEMENT_PATH_STRING, filter_subdir])
                queryset = queryset.filter(currentlocation__startswith=filter_path)
            start_path = self.current_path
            if filter_subdir:
                start_path = start_path + filter_subdir
            files_returned_already = set()
            if queryset.exists():
                for file_obj in queryset.iterator():
                    file_obj_mapped = get_file_replacement_mapping(
                        file_obj, self.current_path
                    )
                    # Skip DB rows whose file is gone from disk.
                    if not os.path.exists(file_obj_mapped.get("%inputFile%")):
                        continue
                    files_returned_already.add(file_obj_mapped.get("%inputFile%"))
                    yield file_obj_mapped
            # Walk the disk for files the database does not know about yet.
            for basedir, subdirs, files in os.walk(start_path):
                for file_name in files:
                    if (
                        filter_filename_start
                        and not file_name.startswith(filter_filename_start)
                    ) or (
                        filter_filename_end
                        and not file_name.endswith(filter_filename_end)
                    ):
                        continue
                    file_path = os.path.join(basedir, file_name)
                    if file_path not in files_returned_already:
                        yield {
                            r"%relativeLocation%": file_path,
                            r"%fileUUID%": "None",
                            r"%fileGrpUse%": "",
                        }

    @auto_close_old_connections()
    def set_variable(self, key, value, chain_link_id):
        """Sets a UnitVariable, which tracks choices made by users during processing."""
        # TODO: refactor this concept
        # Falsy values (None, 0, "") are normalized to the empty string.
        if not value:
            value = ""
        else:
            value = str(value)
        unit_var, created = models.UnitVariable.objects.update_or_create(
            unittype=self.UNIT_VARIABLE_TYPE,
            unituuid=self.uuid,
            variable=key,
            defaults=dict(variablevalue=value, microservicechainlink=chain_link_id),
        )
        if created:
            message = "New UnitVariable %s created for %s: %s (MSCL: %s)"
        else:
            message = "Existing UnitVariable %s for %s updated to %s (MSCL" " %s)"
        logger.info(message, key, self.uuid, value, chain_link_id)
class SIPDIP(Package):
    """SIPDIP captures behavior shared between SIP- and DIP-type packages that
    share the same model in Archivematica.
    """

    def queryset(self):
        """DB row for this package (SIPs and DIPs share the SIP model)."""
        return models.SIP.objects.filter(pk=self.uuid)

    @classmethod
    @auto_close_old_connections()
    def get_or_create_from_db_by_path(cls, path, watched_dir_path=None):
        """Matches a directory to a database SIP by its appended UUID, or path."""
        # Store paths in DB form, i.e. with the %sharedPath% placeholder.
        path = path.replace(_get_setting("SHARED_DIRECTORY"), r"%sharedPath%", 1)
        package_type = cls.UNIT_VARIABLE_TYPE
        sip_uuid = uuid_from_path(path)
        created = True
        if sip_uuid:
            # Path carries a "-<uuid>" suffix: match (or create) by UUID.
            sip_obj, created = models.SIP.objects.get_or_create(
                uuid=sip_uuid,
                defaults={
                    "sip_type": package_type,
                    "currentpath": path,
                    "diruuids": False,
                },
            )
            # TODO: we thought this path was unused but some tests have proved
            # us wrong (see issue #1141) - needs to be investigated.
            if package_type == "SIP" and (not created and sip_obj.currentpath != path):
                sip_obj.currentpath = path
                sip_obj.save()
        else:
            # No UUID in the path: fall back to matching by the path itself.
            try:
                sip_obj = models.SIP.objects.get(currentpath=path)
                created = False
            except models.SIP.DoesNotExist:
                sip_obj = models.SIP.objects.create(
                    uuid=uuid4(),
                    currentpath=path,
                    sip_type=package_type,
                    diruuids=False,
                )
        if package_type == "SIP" and watched_dir_path == "/system/reingestAIP/":
            # SIP package is a partial (objects or metadata-only) reingest.
            # Full reingests use a different workflow chain.
            sip_obj.set_partial_reingest()
        logger.info(
            "%s %s %s (%s)",
            package_type,
            sip_obj.uuid,
            "created" if created else "updated",
            path,
        )
        return cls(path, sip_obj.uuid)
class DIP(SIPDIP):
    """DIP-type package; shares the SIP database model via SIPDIP."""

    REPLACEMENT_PATH_STRING = r"%SIPDirectory%"
    UNIT_VARIABLE_TYPE = "DIP"
    JOB_UNIT_TYPE = "unitDIP"

    def reload(self):
        """DIPs keep no extra cached DB state, so there is nothing to refresh."""
        pass

    def get_replacement_mapping(self, filter_subdir_path=None):
        """Extend the base mapping with the DIP unit type and, when a subdir
        filter is given, a %sharedPath%-relative location."""
        mapping = super().get_replacement_mapping(filter_subdir_path=filter_subdir_path)
        mapping[r"%unitType%"] = "DIP"
        if filter_subdir_path:
            mapping[r"%relativeLocation%"] = filter_subdir_path.replace(
                _get_setting("SHARED_DIRECTORY"), r"%sharedPath%", 1
            )
        return mapping
class Transfer(Package):
    """Transfer-type package, backed by ``models.Transfer``."""

    REPLACEMENT_PATH_STRING = r"%transferDirectory%"
    UNIT_VARIABLE_TYPE = "Transfer"
    JOB_UNIT_TYPE = "unitTransfer"

    def queryset(self):
        """Return the queryset selecting this transfer's DB row."""
        return models.Transfer.objects.filter(pk=self.uuid)

    @classmethod
    @auto_close_old_connections()
    def get_or_create_from_db_by_path(cls, path, watched_dir_path=None):
        """Matches a directory to a database Transfer by its appended UUID, or path."""
        # Store paths in DB form, i.e. with the %sharedPath% placeholder.
        path = path.replace(_get_setting("SHARED_DIRECTORY"), r"%sharedPath%", 1)
        transfer_uuid = uuid_from_path(path)
        created = True
        if transfer_uuid:
            # Path carries a "-<uuid>" suffix: match (or create) by UUID.
            transfer_obj, created = models.Transfer.objects.get_or_create(
                uuid=transfer_uuid, defaults={"currentlocation": path}
            )
            # TODO: we thought this path was unused but some tests have proved
            # us wrong (see issue #1141) - needs to be investigated.
            if not created and transfer_obj.currentlocation != path:
                transfer_obj.currentlocation = path
                transfer_obj.save()
        else:
            # No UUID in the path: fall back to matching by the path itself.
            try:
                transfer_obj = models.Transfer.objects.get(currentlocation=path)
                created = False
            except models.Transfer.DoesNotExist:
                transfer_obj = models.Transfer.objects.create(
                    uuid=uuid4(), currentlocation=path
                )
        logger.info(
            "Transfer %s %s (%s)",
            transfer_obj.uuid,
            "created" if created else "updated",
            path,
        )
        return cls(path, transfer_obj.uuid)

    @property
    @auto_close_old_connections()
    def base_queryset(self):
        """Files linked to this transfer (by transfer FK, not SIP FK)."""
        return models.File.objects.filter(transfer_id=self.uuid)

    @auto_close_old_connections()
    def reload(self):
        """Refresh current path and processing configuration from the DB."""
        transfer = models.Transfer.objects.get(uuid=self.uuid)
        self.current_path = transfer.currentlocation
        self.processing_configuration = transfer.processing_configuration

    def get_replacement_mapping(self, filter_subdir_path=None):
        """Extend the base mapping with transfer-specific variables."""
        mapping = super().get_replacement_mapping(filter_subdir_path=filter_subdir_path)
        mapping.update(
            {
                self.REPLACEMENT_PATH_STRING: self.current_path,
                r"%unitType%": "Transfer",
                r"%processingConfiguration%": self.processing_configuration,
            }
        )
        return mapping
class SIP(SIPDIP):
    """SIP-type package, backed by ``models.SIP``."""

    REPLACEMENT_PATH_STRING = r"%SIPDirectory%"
    UNIT_VARIABLE_TYPE = "SIP"
    JOB_UNIT_TYPE = "unitSIP"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Populated by reload() from the database row.
        self.aip_filename = None
        self.sip_type = None

    @auto_close_old_connections()
    def reload(self):
        """Refresh path, AIP filename and SIP type from the database."""
        row = models.SIP.objects.get(uuid=self.uuid)
        self.current_path = row.currentpath
        self.aip_filename = row.aip_filename or ""
        self.sip_type = row.sip_type

    def get_replacement_mapping(self, filter_subdir_path=None):
        """Extend the base mapping with SIP-specific variables."""
        mapping = super().get_replacement_mapping(filter_subdir_path=filter_subdir_path)
        mapping[r"%unitType%"] = "SIP"
        mapping[r"%AIPFilename%"] = self.aip_filename
        mapping[r"%SIPType%"] = self.sip_type
        return mapping
class PackageContext:
    """Ordered mapping of processing choices made earlier for a package."""

    def __init__(self, *items):
        # OrderedDict accepts the (key, value) pairs directly.
        self._data = collections.OrderedDict(items)

    def __repr__(self):
        return f"PackageContext({dict(self._data.items())!r})"

    def __iter__(self):
        # Iterating a context yields (key, value) pairs.
        return iter(self._data.items())

    def __len__(self):
        return len(self._data)

    def __contains__(self, key):
        return key in self._data

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    @classmethod
    @auto_close_old_connections()
    def load_from_db(cls, uuid):
        """Build a context from the unit's "replacementDict" UnitVariables."""
        context = cls()
        # TODO: we shouldn't need one UnitVariable per chain, with all the same values
        queryset = models.UnitVariable.objects.filter(
            unituuid=uuid, variable="replacementDict"
        )
        # Distinct helps here, at least
        for (raw_value,) in queryset.values_list("variablevalue").distinct():
            try:
                parsed = ast.literal_eval(raw_value)
            except (ValueError, SyntaxError):
                logger.exception(
                    "Failed to eval unit variable value %s", raw_value
                )
            else:
                context.update(parsed)
        return context

    def copy(self):
        """Return a shallow copy of this context."""
        duplicate = PackageContext()
        duplicate._data = self._data.copy()
        return duplicate

    def update(self, mapping):
        """Merge *mapping*'s items into this context (last write wins)."""
        for key, value in mapping.items():
            self._data[key] = value
|
femexamples | square_pipe_end_twisted_nodeforces | # ***************************************************************************
# * Copyright (c) 2020 Sudhanshu Dubey <sudhanshu.thethunder@gmail.com> *
# * Copyright (c) 2021 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import Fem
import FreeCAD
import ObjectsFem
import Part
from FreeCAD import Vector
from . import manager
from .manager import get_meshname, init_doc
def get_information():
    """Describe this FEM example (mesh, constraints, solvers, material)."""
    return dict(
        name="Square Pipe End Twisted Nodeforces",
        meshtype="face",
        meshelement="Tria6",
        constraints=["force", "fixed"],
        solvers=["calculix", "ccxtools"],
        material="solid",
        equations=["mechanical"],
    )
def get_explanation(header=""):
    """Return *header* followed by the usage instructions for this example."""
    instructions = """
To run the example from Python console use:
from femexamples.square_pipe_end_twisted_nodeforces import setup
setup()
See forum topic post:
...
"""
    return header + instructions
def setup(doc=None, solvertype="ccxtools"):
# init FreeCAD document
if doc is None:
doc = init_doc()
# explanation object
# just keep the following line and change text string in get_explanation method
manager.add_explanation_obj(
doc, get_explanation(manager.get_header(get_information()))
)
# geometric object
# name is important because the other method in this module use obj name
l1 = Part.makeLine((-142.5, -142.5, 0), (142.5, -142.5, 0))
l2 = Part.makeLine((142.5, -142.5, 0), (142.5, 142.5, 0))
l3 = Part.makeLine((142.5, 142.5, 0), (-142.5, 142.5, 0))
l4 = Part.makeLine((-142.5, 142.5, 0), (-142.5, -142.5, 0))
wire = Part.Wire([l1, l2, l3, l4])
shape = wire.extrude(Vector(0, 0, 1000))
geom_obj = doc.addObject("Part::Feature", "SquareTube")
geom_obj.Shape = shape
points_forces = []
points_forces.append(Part.Vertex(-142.5, 142.5, 0.0))
points_forces.append(Part.Vertex(-142.5, -142.5, 0.0))
points_forces.append(Part.Vertex(-142.5, 95.0, 0.0))
points_forces.append(Part.Vertex(-142.5, 47.5, 0.0))
points_forces.append(Part.Vertex(-142.5, 0.0, 0.0))
points_forces.append(Part.Vertex(-142.5, -47.5, 0.0))
points_forces.append(Part.Vertex(-142.5, -95.0, 0.0))
points_forces.append(Part.Vertex(142.5, -142.5, 0.0))
points_forces.append(Part.Vertex(-95.0, -142.5, 0.0))
points_forces.append(Part.Vertex(-47.5, -142.5, 0.0))
points_forces.append(Part.Vertex(0.0, -142.5, 0.0))
points_forces.append(Part.Vertex(47.5, -142.5, 0.0))
points_forces.append(Part.Vertex(95.0, -142.5, 0.0))
points_forces.append(Part.Vertex(142.5, 142.5, 0.0))
points_forces.append(Part.Vertex(142.5, -95.0, 0.0))
points_forces.append(Part.Vertex(142.5, -47.5, 0.0))
points_forces.append(Part.Vertex(142.5, 0.0, 0.0))
points_forces.append(Part.Vertex(142.5, 47.5, 0.0))
points_forces.append(Part.Vertex(142.5, 95.0, 0.0))
points_forces.append(Part.Vertex(95.0, 142.5, 0.0))
points_forces.append(Part.Vertex(47.5, 142.5, 0.0))
points_forces.append(Part.Vertex(0.0, 142.5, 0.0))
points_forces.append(Part.Vertex(-47.5, 142.5, 0.0))
points_forces.append(Part.Vertex(-95.0, 142.5, 0.0))
points_forces.append(Part.Vertex(-142.5, 118.75, 0.0))
points_forces.append(Part.Vertex(-142.5, -118.75, 0.0))
points_forces.append(Part.Vertex(-142.5, 71.25, 0.0))
points_forces.append(Part.Vertex(-142.5, 23.75, 0.0))
points_forces.append(Part.Vertex(-142.5, -23.75, 0.0))
points_forces.append(Part.Vertex(-142.5, -71.25, 0.0))
points_forces.append(Part.Vertex(118.75, -142.5, 0.0))
points_forces.append(Part.Vertex(-71.25, -142.5, 0.0))
points_forces.append(Part.Vertex(-118.75, -142.5, 0.0))
points_forces.append(Part.Vertex(-23.75, -142.5, 0.0))
points_forces.append(Part.Vertex(23.75, -142.5, 0.0))
points_forces.append(Part.Vertex(71.25, -142.5, 0.0))
points_forces.append(Part.Vertex(142.5, 118.75, 0.0))
points_forces.append(Part.Vertex(142.5, -71.25, 0.0))
points_forces.append(Part.Vertex(142.5, -118.75, 0.0))
points_forces.append(Part.Vertex(142.5, -23.75, 0.0))
points_forces.append(Part.Vertex(142.5, 23.75, 0.0))
points_forces.append(Part.Vertex(142.5, 71.25, 0.0))
points_forces.append(Part.Vertex(71.25, 142.5, 0.0))
points_forces.append(Part.Vertex(118.75, 142.5, 0.0))
points_forces.append(Part.Vertex(23.75, 142.5, 0.0))
points_forces.append(Part.Vertex(-23.75, 142.5, 0.0))
points_forces.append(Part.Vertex(-71.25, 142.5, 0.0))
points_forces.append(Part.Vertex(-118.75, 142.5, 0.0))
points_fixes = []
points_fixes.append(Part.Vertex(-142.5, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 95.0, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 47.5, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 0.0, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -47.5, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -95.0, 1000.0))
points_fixes.append(Part.Vertex(142.5, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-95.0, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-47.5, -142.5, 1000.0))
points_fixes.append(Part.Vertex(0.0, -142.5, 1000.0))
points_fixes.append(Part.Vertex(47.5, -142.5, 1000.0))
points_fixes.append(Part.Vertex(95.0, -142.5, 1000.0))
points_fixes.append(Part.Vertex(142.5, 142.5, 1000.0))
points_fixes.append(Part.Vertex(142.5, -95.0, 1000.0))
points_fixes.append(Part.Vertex(142.5, -47.5, 1000.0))
points_fixes.append(Part.Vertex(142.5, 0.0, 1000.0))
points_fixes.append(Part.Vertex(142.5, 47.5, 1000.0))
points_fixes.append(Part.Vertex(142.5, 95.0, 1000.0))
points_fixes.append(Part.Vertex(95.0, 142.5, 1000.0))
points_fixes.append(Part.Vertex(47.5, 142.5, 1000.0))
points_fixes.append(Part.Vertex(0.0, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-47.5, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-95.0, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 118.75, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -118.75, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 71.25, 1000.0))
points_fixes.append(Part.Vertex(-142.5, 23.75, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -23.75, 1000.0))
points_fixes.append(Part.Vertex(-142.5, -71.25, 1000.0))
points_fixes.append(Part.Vertex(118.75, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-71.25, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-118.75, -142.5, 1000.0))
points_fixes.append(Part.Vertex(-23.75, -142.5, 1000.0))
points_fixes.append(Part.Vertex(23.75, -142.5, 1000.0))
points_fixes.append(Part.Vertex(71.25, -142.5, 1000.0))
points_fixes.append(Part.Vertex(142.5, 118.75, 1000.0))
points_fixes.append(Part.Vertex(142.5, -71.25, 1000.0))
points_fixes.append(Part.Vertex(142.5, -118.75, 1000.0))
points_fixes.append(Part.Vertex(142.5, -23.75, 1000.0))
points_fixes.append(Part.Vertex(142.5, 23.75, 1000.0))
points_fixes.append(Part.Vertex(142.5, 71.25, 1000.0))
points_fixes.append(Part.Vertex(71.25, 142.5, 1000.0))
points_fixes.append(Part.Vertex(118.75, 142.5, 1000.0))
points_fixes.append(Part.Vertex(23.75, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-23.75, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-71.25, 142.5, 1000.0))
points_fixes.append(Part.Vertex(-118.75, 142.5, 1000.0))
geoforces_obj = doc.addObject("Part::Feature", "Forces")
geoforces_obj.Shape = Part.makeCompound(points_forces)
geofixes_obj = doc.addObject("Part::Feature", "Fixes")
geofixes_obj.Shape = Part.makeCompound(points_fixes)
doc.recompute()
if FreeCAD.GuiUp:
geoforces_obj.ViewObject.PointColor = (1.0, 0.0, 0.0, 0.0)
geoforces_obj.ViewObject.PointSize = 10.0
geofixes_obj.ViewObject.PointColor = (1.0, 0.0, 0.0, 0.0)
geofixes_obj.ViewObject.PointSize = 10.0
geom_obj.ViewObject.Document.activeView().viewAxonometric()
geom_obj.ViewObject.Document.activeView().fitAll()
# analysis
analysis = ObjectsFem.makeAnalysis(doc, "Analysis")
# solver
if solvertype == "calculix":
solver_obj = ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX")
elif solvertype == "ccxtools":
solver_obj = ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools")
solver_obj.WorkingDir = ""
else:
FreeCAD.Console.PrintWarning(
"Unknown or unsupported solver type: {}. "
"No solver object was created.\n".format(solvertype)
)
if solvertype == "calculix" or solvertype == "ccxtools":
solver_obj.SplitInputWriter = False
solver_obj.AnalysisType = "static"
solver_obj.GeometricalNonlinearity = "linear"
solver_obj.ThermoMechSteadyState = False
solver_obj.MatrixSolverType = "default"
solver_obj.IterationsControlParameterTimeUse = False
analysis.addObject(solver_obj)
# shell thickness
thickness_obj = ObjectsFem.makeElementGeometry2D(doc, 15.0, "ShellThickness")
analysis.addObject(thickness_obj)
# material
material_obj = ObjectsFem.makeMaterialSolid(doc, "FemMaterial")
mat = material_obj.Material
mat["Name"] = "Steel-Generic"
mat["YoungsModulus"] = "200000 MPa"
mat["PoissonRatio"] = "0.30"
material_obj.Material = mat
analysis.addObject(material_obj)
# constraint fixed
con_fixed = ObjectsFem.makeConstraintFixed(doc, "ConstraintFixed")
con_fixed.References = [
(geofixes_obj, "Vertex6"),
(geofixes_obj, "Vertex15"),
(geofixes_obj, "Vertex5"),
(geofixes_obj, "Vertex29"),
(geofixes_obj, "Vertex42"),
(geofixes_obj, "Vertex30"),
(geofixes_obj, "Vertex9"),
(geofixes_obj, "Vertex31"),
(geofixes_obj, "Vertex33"),
(geofixes_obj, "Vertex32"),
(geofixes_obj, "Vertex3"),
(geofixes_obj, "Vertex34"),
(geofixes_obj, "Vertex46"),
(geofixes_obj, "Vertex1"),
(geofixes_obj, "Vertex36"),
(geofixes_obj, "Vertex11"),
(geofixes_obj, "Vertex38"),
(geofixes_obj, "Vertex12"),
(geofixes_obj, "Vertex39"),
(geofixes_obj, "Vertex13"),
(geofixes_obj, "Vertex40"),
(geofixes_obj, "Vertex16"),
(geofixes_obj, "Vertex35"),
(geofixes_obj, "Vertex14"),
(geofixes_obj, "Vertex47"),
(geofixes_obj, "Vertex20"),
(geofixes_obj, "Vertex37"),
(geofixes_obj, "Vertex18"),
(geofixes_obj, "Vertex41"),
(geofixes_obj, "Vertex17"),
(geofixes_obj, "Vertex10"),
(geofixes_obj, "Vertex26"),
(geofixes_obj, "Vertex43"),
(geofixes_obj, "Vertex21"),
(geofixes_obj, "Vertex44"),
(geofixes_obj, "Vertex19"),
(geofixes_obj, "Vertex4"),
(geofixes_obj, "Vertex28"),
(geofixes_obj, "Vertex48"),
(geofixes_obj, "Vertex22"),
(geofixes_obj, "Vertex8"),
(geofixes_obj, "Vertex23"),
(geofixes_obj, "Vertex7"),
(geofixes_obj, "Vertex24"),
(geofixes_obj, "Vertex45"),
(geofixes_obj, "Vertex27"),
(geofixes_obj, "Vertex2"),
(geofixes_obj, "Vertex25"),
]
analysis.addObject(con_fixed)
# con_force1
con_force1 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce1")
con_force1.References = [(geoforces_obj, "Vertex1"), (geoforces_obj, "Vertex14")]
con_force1.Force = 5555.56
con_force1.Direction = (geom_obj, ["Edge9"])
con_force1.Reversed = False
analysis.addObject(con_force1)
# con_force2
con_force2 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce2")
con_force2.References = [(geoforces_obj, "Vertex2"), (geoforces_obj, "Vertex8")]
con_force2.Force = 5555.56
con_force2.Direction = (geom_obj, ["Edge3"])
con_force2.Reversed = False
analysis.addObject(con_force2)
# con_force3
con_force3 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce3")
con_force3.References = [
(geoforces_obj, "Vertex20"),
(geoforces_obj, "Vertex21"),
(geoforces_obj, "Vertex22"),
(geoforces_obj, "Vertex23"),
(geoforces_obj, "Vertex24"),
]
con_force3.Force = 27777.78
con_force3.Direction = (geom_obj, ["Edge9"])
con_force3.Reversed = False
analysis.addObject(con_force3)
# con_force4
con_force4 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce4")
con_force4.References = [
(geoforces_obj, "Vertex9"),
(geoforces_obj, "Vertex10"),
(geoforces_obj, "Vertex11"),
(geoforces_obj, "Vertex12"),
(geoforces_obj, "Vertex13"),
]
con_force4.Force = 27777.78
con_force4.Direction = (geom_obj, ["Edge3"])
con_force4.Reversed = False
analysis.addObject(con_force4)
# con_force5
con_force5 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce5")
con_force5.References = [
(geoforces_obj, "Vertex43"),
(geoforces_obj, "Vertex44"),
(geoforces_obj, "Vertex45"),
(geoforces_obj, "Vertex46"),
(geoforces_obj, "Vertex47"),
(geoforces_obj, "Vertex48"),
]
con_force5.Force = 66666.67
con_force5.Direction = (geom_obj, ["Edge9"])
con_force5.Reversed = False
analysis.addObject(con_force5)
# con_force6
con_force6 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce6")
con_force6.References = [
(geoforces_obj, "Vertex31"),
(geoforces_obj, "Vertex32"),
(geoforces_obj, "Vertex33"),
(geoforces_obj, "Vertex34"),
(geoforces_obj, "Vertex35"),
(geoforces_obj, "Vertex36"),
]
con_force6.Force = 66666.67
con_force6.Direction = (geom_obj, ["Edge3"])
con_force6.Reversed = False
analysis.addObject(con_force6)
# con_force7
con_force7 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce7")
con_force7.References = [(geoforces_obj, "Vertex1"), (geoforces_obj, "Vertex2")]
con_force7.Force = 5555.56
con_force7.Direction = (geom_obj, ["Edge11"])
con_force7.Reversed = False
analysis.addObject(con_force7)
# con_force8
con_force8 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce8")
con_force8.References = [(geoforces_obj, "Vertex8"), (geoforces_obj, "Vertex14")]
con_force8.Force = 5555.56
con_force8.Direction = (geom_obj, ["Edge6"])
con_force8.Reversed = False
analysis.addObject(con_force8)
# con_force9
con_force9 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce9")
con_force9.References = [
(geoforces_obj, "Vertex3"),
(geoforces_obj, "Vertex4"),
(geoforces_obj, "Vertex5"),
(geoforces_obj, "Vertex6"),
(geoforces_obj, "Vertex7"),
]
con_force9.Force = 27777.78
con_force9.Direction = (geom_obj, ["Edge11"])
con_force9.Reversed = False
analysis.addObject(con_force9)
# con_force10
con_force10 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce10")
con_force10.References = [
(geoforces_obj, "Vertex15"),
(geoforces_obj, "Vertex16"),
(geoforces_obj, "Vertex17"),
(geoforces_obj, "Vertex18"),
(geoforces_obj, "Vertex19"),
]
con_force10.Force = 27777.78
con_force10.Direction = (geom_obj, ["Edge6"])
con_force10.Reversed = False
analysis.addObject(con_force10)
# con_force11
con_force11 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce11")
con_force11.References = [
(geoforces_obj, "Vertex25"),
(geoforces_obj, "Vertex26"),
(geoforces_obj, "Vertex27"),
(geoforces_obj, "Vertex28"),
(geoforces_obj, "Vertex29"),
(geoforces_obj, "Vertex30"),
]
con_force11.Force = 66666.67
con_force11.Direction = (geom_obj, ["Edge11"])
con_force11.Reversed = False
analysis.addObject(con_force11)
# con_force12
con_force12 = ObjectsFem.makeConstraintForce(doc, name="ConstraintForce12")
con_force12.References = [
(geoforces_obj, "Vertex37"),
(geoforces_obj, "Vertex38"),
(geoforces_obj, "Vertex39"),
(geoforces_obj, "Vertex40"),
(geoforces_obj, "Vertex41"),
(geoforces_obj, "Vertex42"),
]
con_force12.Force = 66666.67
con_force12.Direction = (geom_obj, ["Edge6"])
con_force12.Reversed = False
analysis.addObject(con_force12)
# mesh
from .meshes.mesh_square_pipe_end_twisted_tria6 import create_elements, create_nodes
fem_mesh = Fem.FemMesh()
control = create_nodes(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating nodes.\n")
control = create_elements(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating elements.\n")
femmesh_obj = analysis.addObject(ObjectsFem.makeMeshGmsh(doc, get_meshname()))[0]
femmesh_obj.FemMesh = fem_mesh
femmesh_obj.Part = geom_obj
femmesh_obj.SecondOrderLinear = False
doc.recompute()
return doc
|
femmesh | meshsetsgetter | # ***************************************************************************
# * Copyright (c) 2021 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM sets getter"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## \addtogroup FEM
# @{
import time
import FreeCAD
from femmesh import meshtools
from femtools.femutils import type_of_obj
class MeshSetsGetter:
def __init__(
self,
analysis_obj,
solver_obj,
mesh_obj,
member,
):
# class attributes from parameter values
self.analysis = analysis_obj
self.solver_obj = solver_obj # TODO without _obj
self.mesh_object = mesh_obj # TODO without _object
self.member = member
# more attributes
self.analysis_type = self.solver_obj.AnalysisType
self.document = self.analysis.Document
self.fc_ver = FreeCAD.Version()
self.ccx_nall = "Nall"
self.ccx_eall = "Eall"
self.ccx_evolumes = "Evolumes"
self.ccx_efaces = "Efaces"
self.ccx_eedges = "Eedges"
self.mat_geo_sets = []
self.theshape = None
if self.mesh_object:
if hasattr(self.mesh_object, "Shape"):
self.theshape = self.mesh_object.Shape
elif hasattr(self.mesh_object, "Part"):
self.theshape = self.mesh_object.Part
else:
FreeCAD.Console.PrintWarning(
"A finite mesh without a link to a Shape was given. "
"Happen on pure mesh objects. "
"Not all methods do work without this link.\n"
)
# ATM only used in meshtools.get_femelement_direction1D_set
# TODO somehow this is not smart, pur mesh objects might be used often
if self.member.geos_beamsection and (
type_of_obj(self.solver_obj) == "Fem::SolverCcxTools"
or type_of_obj(self.solver_obj) == "Fem::SolverCalculix"
):
FreeCAD.Console.PrintError(
"The mesh does not know the geometry it is made from. "
"Beam rotations can not retrieved but they are needed "
"for writing CalculiX solver input. "
"There might be problems in retrieving mesh data.\n"
)
# Z88 will run but CalculiX not
self.femmesh = self.mesh_object.FemMesh
else:
FreeCAD.Console.PrintWarning(
"No finite element mesh object was given to the writer class. "
"In rare cases this might not be an error.\n"
)
self.femnodes_mesh = {}
self.femelement_table = {}
self.constraint_conflict_nodes = []
self.femnodes_ele_table = {}
self.femelements_edges_only = []
self.femelements_faces_only = []
self.femelement_volumes_table = {}
self.femelement_faces_table = {}
self.femelement_edges_table = {}
self.femelement_count_test = True
self.mat_geo_sets = []
# ********************************************************************************************
# ********************************************************************************************
# use set for node sets to be sure all nodes are unique
# use sorted to be sure the order is the same on different runs
# be aware a sorted set returns a list, because set are not sorted by default
# - done in return value of meshtools.get_femnodes_by_femobj_with_references
# TODO FIXME might be appropriate for element sets and surfaceface sets too
# ********************************************************************************************
# ********************************************************************************************
# get all known sets
def get_mesh_sets(self):
FreeCAD.Console.PrintMessage("\n") # because of time print in separate line
FreeCAD.Console.PrintMessage(
"Get mesh data for constraints, materials and element geometry...\n"
)
FreeCAD.Console.PrintLog(
"MeshSetsGetter: Get mesh data for "
"node sets (groups), surface sets (groups) and element sets (groups)\n"
)
time_start = time.process_time()
# materials and element geometry element sets getter
self.get_element_sets_material_and_femelement_geometry()
# constraints element sets getter
self.get_constraints_centrif_elements()
# constraints node sets getter
self.get_constraints_fixed_nodes()
self.get_constraints_displacement_nodes()
self.get_constraints_planerotation_nodes()
# constraints surface sets getter
self.get_constraints_contact_faces()
self.get_constraints_tie_faces()
self.get_constraints_sectionprint_faces()
self.get_constraints_transform_nodes()
self.get_constraints_temperature_nodes()
# constraints sets with constraint data
self.get_constraints_force_nodeloads()
self.get_constraints_pressure_faces()
self.get_constraints_heatflux_faces()
setstime = round((time.process_time() - time_start), 3)
FreeCAD.Console.PrintMessage(
"Getting mesh data time: {} seconds.\n".format(setstime)
)
# ********************************************************************************************
# ********************************************************************************************
# node sets
def get_constraints_fixed_nodes(self):
if not self.member.cons_fixed:
return
# get nodes
for femobj in self.member.cons_fixed:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj["Nodes"]:
self.constraint_conflict_nodes.append(node)
# if mixed mesh with solids the node set needs to be split
# because solid nodes do not have rotational degree of freedom
if self.femmesh.Volumes and (
len(self.member.geos_shellthickness) > 0
or len(self.member.geos_beamsection) > 0
):
FreeCAD.Console.PrintMessage("We need to find the solid nodes.\n")
if not self.femelement_volumes_table:
self.femelement_volumes_table = meshtools.get_femelement_volumes_table(
self.femmesh
)
for femobj in self.member.cons_fixed:
# femobj --> dict, FreeCAD document object is femobj["Object"]
nds_solid = []
nds_faceedge = []
for n in femobj["Nodes"]:
solid_node = False
for ve in self.femelement_volumes_table:
if n in self.femelement_volumes_table[ve]:
solid_node = True
nds_solid.append(n)
break
if not solid_node:
nds_faceedge.append(n)
femobj["NodesSolid"] = set(nds_solid)
femobj["NodesFaceEdge"] = set(nds_faceedge)
def get_constraints_displacement_nodes(self):
if not self.member.cons_displacement:
return
# get nodes
for femobj in self.member.cons_displacement:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj["Nodes"]:
self.constraint_conflict_nodes.append(node)
def get_constraints_planerotation_nodes(self):
if not self.member.cons_planerotation:
return
# get nodes
for femobj in self.member.cons_planerotation:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
def get_constraints_transform_nodes(self):
if not self.member.cons_transform:
return
# get nodes
for femobj in self.member.cons_transform:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
def get_constraints_temperature_nodes(self):
if not self.member.cons_temperature:
return
# get nodes
for femobj in self.member.cons_temperature:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
def get_constraints_fluidsection_nodes(self):
if not self.member.geos_fluidsection:
return
# get nodes
for femobj in self.member.geos_fluidsection:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
femobj["Nodes"] = meshtools.get_femnodes_by_femobj_with_references(
self.femmesh, femobj
)
    def get_constraints_force_nodeloads(self):
        """Compute femobj["NodeLoadTable"] for every force constraint.

        First pass: depending on the reference shape type prepare the
        femnodes_mesh and femelement_table lookup tables. Second pass:
        let meshtools distribute the total force onto the mesh nodes
        according to the finite element definition.
        """
        if not self.member.cons_force:
            return
        # check shape type of reference shape
        for femobj in self.member.cons_force:
            # femobj --> dict, FreeCAD document object is femobj["Object"]
            print_obj_info(femobj["Object"], log=True)
            if femobj["RefShapeType"] == "Vertex":
                # point loads are mapped directly, no tables needed
                FreeCAD.Console.PrintLog(
                    " load on vertices --> The femelement_table "
                    "and femnodes_mesh are not needed for node load calculation.\n"
                )
            elif (
                femobj["RefShapeType"] == "Face"
                and meshtools.is_solid_femmesh(self.femmesh)
                and not meshtools.has_no_face_data(self.femmesh)
            ):
                FreeCAD.Console.PrintLog(
                    " solid_mesh with face data --> The femelement_table is not "
                    "needed but the femnodes_mesh is needed for node load calculation.\n"
                )
                if not self.femnodes_mesh:
                    self.femnodes_mesh = self.femmesh.Nodes
            else:
                FreeCAD.Console.PrintLog(
                    " mesh without needed data --> The femelement_table "
                    "and femnodes_mesh are not needed for node load calculation.\n"
                )
                # NOTE(review): despite the log text above, both tables ARE
                # built in this branch - the message looks misleading, confirm
                if not self.femnodes_mesh:
                    self.femnodes_mesh = self.femmesh.Nodes
                if not self.femelement_table:
                    self.femelement_table = meshtools.get_femelement_table(self.femmesh)
        # get node loads
        FreeCAD.Console.PrintLog(
            " Finite element mesh nodes will be retrieved by searching "
            "the appropriate nodes in the finite element mesh.\n"
        )
        FreeCAD.Console.PrintLog(
            " The appropriate finite element mesh node load values will "
            "be calculated according to the finite element definition.\n"
        )
        for femobj in self.member.cons_force:
            # femobj --> dict, FreeCAD document object is femobj["Object"]
            frc_obj = femobj["Object"]
            print_obj_info(frc_obj)
            if frc_obj.Force == 0:
                FreeCAD.Console.PrintMessage(" Warning --> Force = 0\n")
            if femobj["RefShapeType"] == "Vertex":  # point load on vertices
                femobj["NodeLoadTable"] = meshtools.get_force_obj_vertex_nodeload_table(
                    self.femmesh, frc_obj
                )
            elif femobj["RefShapeType"] == "Edge":  # line load on edges
                femobj["NodeLoadTable"] = meshtools.get_force_obj_edge_nodeload_table(
                    self.femmesh, self.femelement_table, self.femnodes_mesh, frc_obj
                )
            elif femobj["RefShapeType"] == "Face":  # area load on faces
                femobj["NodeLoadTable"] = meshtools.get_force_obj_face_nodeload_table(
                    self.femmesh, self.femelement_table, self.femnodes_mesh, frc_obj
                )
# ********************************************************************************************
# ********************************************************************************************
# faces sets
    def get_constraints_pressure_faces(self):
        """Compute femobj["PressureFaces"] for every pressure constraint.

        Result layout (kept compatible with the deprecated version):
        [(some_string, [[ele_id, ele_face_id], ...])]
        """
        if not self.member.cons_pressure:
            return
        # TODO see comments in get_constraints_force_nodeloads()
        # it applies here too. Mhh it applies to all constraints ...
        """
        # deprecated version
        # get the faces and face numbers
        for femobj in self.member.cons_pressure:
            # femobj --> dict, FreeCAD document object is femobj["Object"]
            femobj["PressureFaces"] = meshtools.get_pressure_obj_faces_depreciated(
                self.femmesh,
                femobj
            )
            # print(femobj["PressureFaces"])
        """
        # lazily build the mesh lookup tables needed by meshtools
        if not self.femnodes_mesh:
            self.femnodes_mesh = self.femmesh.Nodes
        if not self.femelement_table:
            self.femelement_table = meshtools.get_femelement_table(self.femmesh)
        if not self.femnodes_ele_table:
            self.femnodes_ele_table = meshtools.get_femnodes_ele_table(
                self.femnodes_mesh, self.femelement_table
            )
        for femobj in self.member.cons_pressure:
            # femobj --> dict, FreeCAD document object is femobj["Object"]
            print_obj_info(femobj["Object"])
            pressure_faces = meshtools.get_pressure_obj_faces(
                self.femmesh, self.femelement_table, self.femnodes_ele_table, femobj
            )
            # the data model is for compatibility reason with deprecated version
            # get_pressure_obj_faces_depreciated returns the face ids in a tuple per ref_shape
            # some_string was the reference_shape_element_string in deprecated method
            # [(some_string, [ele_id, ele_face_id], [ele_id, ele_face_id], ...])]
            some_string = "{}: face load".format(femobj["Object"].Name)
            femobj["PressureFaces"] = [(some_string, pressure_faces)]
            FreeCAD.Console.PrintLog("{}\n".format(femobj["PressureFaces"]))
def get_constraints_contact_faces(self):
if not self.member.cons_contact:
return
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femelement_table:
self.femelement_table = meshtools.get_femelement_table(self.femmesh)
if not self.femnodes_ele_table:
self.femnodes_ele_table = meshtools.get_femnodes_ele_table(
self.femnodes_mesh, self.femelement_table
)
for femobj in self.member.cons_contact:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
contact_slave_faces, contact_master_faces = meshtools.get_contact_obj_faces(
self.femmesh, self.femelement_table, self.femnodes_ele_table, femobj
)
# [ele_id, ele_face_id], [ele_id, ele_face_id], ...]
# whereas the ele_face_id might be ccx specific
femobj["ContactSlaveFaces"] = contact_slave_faces
femobj["ContactMasterFaces"] = contact_master_faces
# FreeCAD.Console.PrintLog("{}\n".format(femobj["ContactSlaveFaces"]))
# FreeCAD.Console.PrintLog("{}\n".format(femobj["ContactMasterFaces"]))
    # information regarding element face constraints
    # forum post: https://forum.freecad.org/viewtopic.php?f=18&t=42783&p=370286#p366723
    # contact: master and slave could be the same face: rubber of a damper
    # tie: master and slave have to be separate faces, as far as I understand
    # section print: only the element faces of solid elements
    # from one side of the geometric face are needed
def get_constraints_tie_faces(self):
if not self.member.cons_tie:
return
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femelement_table:
self.femelement_table = meshtools.get_femelement_table(self.femmesh)
if not self.femnodes_ele_table:
self.femnodes_ele_table = meshtools.get_femnodes_ele_table(
self.femnodes_mesh, self.femelement_table
)
for femobj in self.member.cons_tie:
# femobj --> dict, FreeCAD document object is femobj["Object"]
print_obj_info(femobj["Object"])
slave_faces, master_faces = meshtools.get_tie_obj_faces(
self.femmesh, self.femelement_table, self.femnodes_ele_table, femobj
)
# [ele_id, ele_face_id], [ele_id, ele_face_id], ...]
# whereas the ele_face_id might be ccx specific
femobj["TieSlaveFaces"] = slave_faces
femobj["TieMasterFaces"] = master_faces
# FreeCAD.Console.PrintLog("{}\n".format(femobj["ContactSlaveFaces"]))
# FreeCAD.Console.PrintLog("{}\n".format(femobj["ContactMasterFaces"]))
    def get_constraints_sectionprint_faces(self):
        """Compute femobj["SectionPrintFaces"] for every section print
        constraint: the volume element faces touching the referenced face."""
        if not self.member.cons_sectionprint:
            return
        # TODO: use meshtools to get the surfaces
        # see constraint contact or constraint tie
        for femobj in self.member.cons_sectionprint:
            # femobj --> dict, FreeCAD document object is femobj["Object"]
            sectionprint_obj = femobj["Object"]
            # warn on more than one reference, but process all of them anyway
            if len(sectionprint_obj.References) > 1:
                FreeCAD.Console.PrintError(
                    "Only one reference shape allowed for a section print "
                    "but {} found: {}\n".format(
                        len(sectionprint_obj.References), sectionprint_obj.References
                    )
                )
            for o, elem_tup in sectionprint_obj.References:
                for elem in elem_tup:
                    # there should only be one reference for each section print object
                    # in the gui this is checked
                    ref_shape = o.Shape.getElement(elem)
                    if ref_shape.ShapeType == "Face":
                        v = self.mesh_object.FemMesh.getccxVolumesByFace(ref_shape)
                        if len(v) > 0:
                            femobj["SectionPrintFaces"] = v
                            # volume elements found
                            FreeCAD.Console.PrintLog(
                                "{}, surface {}, {} touching volume elements found\n".format(
                                    sectionprint_obj.Label,
                                    sectionprint_obj.Name,
                                    len(v),
                                )
                            )
                        else:
                            # no volume elements found, shell elements not allowed
                            FreeCAD.Console.PrintError(
                                "{}, surface {}, Error: "
                                "No volume elements found!\n".format(
                                    sectionprint_obj.Label, sectionprint_obj.Name
                                )
                            )
                    else:
                        # in Gui only Faces can be added
                        FreeCAD.Console.PrintError(
                            "Wrong reference shape type for {} "
                            "Only Faces are allowed, but a {} was found.\n".format(
                                sectionprint_obj.Name, ref_shape.ShapeType
                            )
                        )
def get_constraints_heatflux_faces(self):
if not self.member.cons_heatflux:
return
# TODO: use meshtools to get the surfaces (or move to mesh tools)
# see constraint contact or constraint tie and constraint force
# heatflux_obj_face_table: see force_obj_node_load_table
# [
# ("refshape_name:elemname", face_table),
# ...,
# ("refshape_name:elemname", face_table)
# ]
for femobj in self.member.cons_heatflux:
# femobj --> dict, FreeCAD document object is femobj["Object"]
heatflux_obj = femobj["Object"]
femobj["HeatFluxFaceTable"] = []
for o, elem_tup in heatflux_obj.References:
for elem in elem_tup:
ho = o.Shape.getElement(elem)
if ho.ShapeType == "Face":
elem_info = "{}:{}".format(o.Name, elem)
face_table = self.mesh_object.FemMesh.getccxVolumesByFace(ho)
femobj["HeatFluxFaceTable"].append((elem_info, face_table))
# ********************************************************************************************
# ********************************************************************************************
# element sets constraints
def get_constraints_centrif_elements(self):
# get element ids and write them into the femobj
if not self.member.cons_centrif:
return
if (
len(self.member.cons_centrif) == 1
and not self.member.cons_centrif[0]["Object"].References
):
self.member.cons_centrif[0]["FEMElements"] = self.ccx_evolumes
else:
self.get_solid_element_sets(self.member.cons_centrif)
# ********************************************************************************************
# ********************************************************************************************
# element sets material and element geometry
def get_solid_element_sets(self, femobjs):
# get element ids and write them into the femobj
all_found = False
if self.femmesh.GroupCount:
all_found = meshtools.get_femelement_sets_from_group_data(
self.femmesh, femobjs
)
FreeCAD.Console.PrintMessage(all_found)
FreeCAD.Console.PrintMessage("\n")
if all_found is False:
if not self.femelement_table:
self.femelement_table = meshtools.get_femelement_table(self.femmesh)
# we're going to use the binary search for get_femelements_by_femnodes()
# thus we need the parameter values self.femnodes_ele_table
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femnodes_ele_table:
self.femnodes_ele_table = meshtools.get_femnodes_ele_table(
self.femnodes_mesh, self.femelement_table
)
control = meshtools.get_femelement_sets(
self.femmesh, self.femelement_table, femobjs, self.femnodes_ele_table
)
# we only need to set it, if it is still True
if (self.femelement_count_test is True) and (control is False):
self.femelement_count_test = False
def get_element_geometry2D_elements(self):
# get element ids and write them into the objects
FreeCAD.Console.PrintMessage("Shell thicknesses\n")
if not self.femelement_faces_table:
self.femelement_faces_table = meshtools.get_femelement_faces_table(
self.femmesh
)
meshtools.get_femelement_sets(
self.femmesh, self.femelement_faces_table, self.member.geos_shellthickness
)
def get_element_geometry1D_elements(self):
# get element ids and write them into the objects
FreeCAD.Console.PrintMessage("Beam sections\n")
if not self.femelement_edges_table:
self.femelement_edges_table = meshtools.get_femelement_edges_table(
self.femmesh
)
meshtools.get_femelement_sets(
self.femmesh, self.femelement_edges_table, self.member.geos_beamsection
)
def get_element_rotation1D_elements(self):
# get for each geometry edge direction the element ids and rotation norma
FreeCAD.Console.PrintMessage("Beam rotations\n")
if self.theshape is None:
FreeCAD.Console.PrintError(
"Beam rotations set can not be retrieved, "
"because the mesh does not know the Geometry it is made from\n"
)
return
if not self.femelement_edges_table:
self.femelement_edges_table = meshtools.get_femelement_edges_table(
self.femmesh
)
meshtools.get_femelement_direction1D_set(
self.femmesh,
self.femelement_edges_table,
self.member.geos_beamrotation,
self.theshape,
)
def get_element_fluid1D_elements(self):
# get element ids and write them into the objects
FreeCAD.Console.PrintMessage("Fluid sections\n")
if not self.femelement_edges_table:
self.femelement_edges_table = meshtools.get_femelement_edges_table(
self.femmesh
)
meshtools.get_femelement_sets(
self.femmesh, self.femelement_edges_table, self.member.geos_fluidsection
)
def get_material_elements(self):
# it only works if either Volumes or Shellthicknesses or Beamsections
# are in the material objects, it means it does not work
# for mixed meshes and multiple materials, this is checked in check_prerequisites
# the femelement_table is only calculated for
# the highest dimension in get_femelement_table
FreeCAD.Console.PrintMessage("Materials\n")
if self.femmesh.Volumes:
# we only could do this for volumes
# if a mesh contains volumes we're going to use them in the analysis
# but a mesh could contain
# the element faces of the volumes as faces
# and the edges of the faces as edges
# there we have to check of some geometric objects
# get element ids and write them into the femobj
self.get_solid_element_sets(self.member.mats_linear)
if self.member.geos_shellthickness:
if not self.femelement_faces_table:
self.femelement_faces_table = meshtools.get_femelement_faces_table(
self.femmesh
)
meshtools.get_femelement_sets(
self.femmesh, self.femelement_faces_table, self.member.mats_linear
)
if self.member.geos_beamsection or self.member.geos_fluidsection:
if not self.femelement_edges_table:
self.femelement_edges_table = meshtools.get_femelement_edges_table(
self.femmesh
)
meshtools.get_femelement_sets(
self.femmesh, self.femelement_edges_table, self.member.mats_linear
)
    def get_element_sets_material_and_femelement_geometry(self):
        """Build self.mat_geo_sets, the combined material / element geometry
        element sets used by the solver input writers.

        First retrieves the element ids for beam rotations, shell, beam and
        fluid geometries as well as for materials (only where more than one
        object makes a split necessary), then dispatches to the matching
        get_mat_geo_sets_*() method for every material/geometry combination.
        """
        if not self.member.mats_linear:
            return
        # in any case if we have beams, we're going to need the element ids for the rotation elsets
        if self.member.geos_beamsection:
            # we will need to split the beam even for one beamobj
            # because no beam in z-direction can be used in ccx without a special adjustment
            # thus they need an own matgeoset
            self.get_element_rotation1D_elements()
        # get the element ids for face and edge elements and write them into the objects
        # (only needed when more than one object forces a split)
        if len(self.member.geos_shellthickness) > 1:
            self.get_element_geometry2D_elements()
        if len(self.member.geos_beamsection) > 1:
            self.get_element_geometry1D_elements()
        if len(self.member.geos_fluidsection) > 1:
            self.get_element_fluid1D_elements()
        # get the element ids for material objects and write them into the material object
        if len(self.member.mats_linear) > 1:
            self.get_material_elements()
        # create the mat_geo_sets
        if len(self.member.mats_linear) == 1:
            if self.femmesh.Volumes:
                # we only could do this for volumes, if a mesh contains volumes
                # we're going to use them in the analysis
                # but a mesh could contain the element faces of the volumes as faces
                # and the edges of the faces as edges
                # there we have to check for some geometric objects
                self.get_mat_geo_sets_single_mat_solid()
            if len(self.member.geos_shellthickness) == 1:
                self.get_mat_geo_sets_single_mat_single_shell()
            elif len(self.member.geos_shellthickness) > 1:
                self.get_mat_geo_sets_single_mat_multiple_shell()
            if len(self.member.geos_beamsection) == 1:
                self.get_mat_geo_sets_single_mat_single_beam()
            elif len(self.member.geos_beamsection) > 1:
                self.get_mat_geo_sets_single_mat_multiple_beam()
            if len(self.member.geos_fluidsection) == 1:
                self.get_mat_geo_sets_single_mat_single_fluid()
            elif len(self.member.geos_fluidsection) > 1:
                self.get_mat_geo_sets_single_mat_multiple_fluid()
        elif len(self.member.mats_linear) > 1:
            if self.femmesh.Volumes:
                # we only could do this for volumes, if a mesh contains volumes
                # we're going to use them in the analysis
                # but a mesh could contain the element faces of the volumes as faces
                # and the edges of the faces as edges
                # there we have to check for some geometric objects
                # volume is a bit special
                # because retrieving ids from group mesh data is implemented
                self.get_mat_geo_sets_multiple_mat_solid()
            if len(self.member.geos_shellthickness) == 1:
                self.get_mat_geo_sets_multiple_mat_single_shell()
            elif len(self.member.geos_shellthickness) > 1:
                self.get_mat_geo_sets_multiple_mat_multiple_shell()
            if len(self.member.geos_beamsection) == 1:
                self.get_mat_geo_sets_multiple_mat_single_beam()
            elif len(self.member.geos_beamsection) > 1:
                self.get_mat_geo_sets_multiple_mat_multiple_beam()
            if len(self.member.geos_fluidsection) == 1:
                self.get_mat_geo_sets_multiple_mat_single_fluid()
            elif len(self.member.geos_fluidsection) > 1:
                self.get_mat_geo_sets_multiple_mat_multiple_fluid()
# self.mat_geo_sets = [ {
# "ccx_elset" : [e1, e2, e3, ... , en] or elements set name strings
# "ccx_elset_name" : "ccx_identifier_elset"
# "mat_obj_name" : "mat_obj.Name"
# "ccx_mat_name" : "mat_obj.Material["Name"]" !!! not unique !!!
# "beamsection_obj" : "beamsection_obj" if exists
# "fluidsection_obj" : "fluidsection_obj" if exists
# "shellthickness_obj" : shellthickness_obj" if exists
# "beam_axis_m" : main local beam axis for beams only
# },
# {}, ... , {} ]
# beam
# TODO support multiple beamrotations
# we do not need any more any data from the rotation document object,
# thus we do not need to save the rotation document object name in the else
def get_mat_geo_sets_single_mat_single_beam(self):
mat_obj = self.member.mats_linear[0]["Object"]
beamsec_obj = self.member.geos_beamsection[0]["Object"]
beamrot_data = self.member.geos_beamrotation[0]
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
# ID's for this direction
elset_data = beamdirection["ids"]
names = [
{"short": "M0"},
{"short": "B0"},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["beamsection_obj"] = beamsec_obj
# beam_axis_m for this direction
matgeoset["beam_axis_m"] = beamdirection["beam_axis_m"]
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_single_mat_multiple_beam(self):
mat_obj = self.member.mats_linear[0]["Object"]
beamrot_data = self.member.geos_beamrotation[0]
for beamsec_data in self.member.geos_beamsection:
beamsec_obj = beamsec_data["Object"]
beamsec_ids = set(beamsec_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
# empty intersection sets possible
elset_data = list(sorted(beamsec_ids.intersection(beamdir_ids)))
if elset_data:
names = [
{"short": "M0"},
{"short": beamsec_data["ShortName"]},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["beamsection_obj"] = beamsec_obj
# beam_axis_m for this direction
matgeoset["beam_axis_m"] = beamdirection["beam_axis_m"]
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_single_beam(self):
beamsec_obj = self.member.geos_beamsection[0]["Object"]
beamrot_data = self.member.geos_beamrotation[0]
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
mat_ids = set(mat_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
elset_data = list(sorted(mat_ids.intersection(beamdir_ids)))
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": "B0"},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["beamsection_obj"] = beamsec_obj
# beam_axis_m for this direction
matgeoset["beam_axis_m"] = beamdirection["beam_axis_m"]
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_multiple_beam(self):
beamrot_data = self.member.geos_beamrotation[0]
for beamsec_data in self.member.geos_beamsection:
beamsec_obj = beamsec_data["Object"]
beamsec_ids = set(beamsec_data["FEMElements"])
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
mat_ids = set(mat_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
# empty intersection sets possible
elset_data = list(
sorted(
beamsec_ids.intersection(mat_ids).intersection(beamdir_ids)
)
)
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": beamsec_data["ShortName"]},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["beamsection_obj"] = beamsec_obj
# beam_axis_m for this direction
matgeoset["beam_axis_m"] = beamdirection["beam_axis_m"]
self.mat_geo_sets.append(matgeoset)
# fluid
def get_mat_geo_sets_single_mat_single_fluid(self):
mat_obj = self.member.mats_linear[0]["Object"]
fluidsec_obj = self.member.geos_fluidsection[0]["Object"]
elset_data = self.ccx_eedges
names = [{"short": "M0"}, {"short": "F0"}]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["fluidsection_obj"] = fluidsec_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_single_mat_multiple_fluid(self):
mat_obj = self.member.mats_linear[0]["Object"]
for fluidsec_data in self.member.geos_fluidsection:
fluidsec_obj = fluidsec_data["Object"]
elset_data = fluidsec_data["FEMElements"]
names = [{"short": "M0"}, {"short": fluidsec_data["ShortName"]}]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["fluidsection_obj"] = fluidsec_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_single_fluid(self):
fluidsec_obj = self.member.geos_fluidsection[0]["Object"]
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [{"short": mat_data["ShortName"]}, {"short": "F0"}]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["fluidsection_obj"] = fluidsec_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_multiple_fluid(self):
for fluidsec_data in self.member.geos_fluidsection:
fluidsec_obj = fluidsec_data["Object"]
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
fluidsec_ids = set(fluidsec_data["FEMElements"])
mat_ids = set(mat_data["FEMElements"])
# empty intersection sets possible
elset_data = list(sorted(fluidsec_ids.intersection(mat_ids)))
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": fluidsec_data["ShortName"]},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_short(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["fluidsection_obj"] = fluidsec_obj
self.mat_geo_sets.append(matgeoset)
# shell
def get_mat_geo_sets_single_mat_single_shell(self):
mat_obj = self.member.mats_linear[0]["Object"]
shellth_obj = self.member.geos_shellthickness[0]["Object"]
elset_data = self.ccx_efaces
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": shellth_obj.Name, "short": "S0"},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["shellthickness_obj"] = shellth_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_single_mat_multiple_shell(self):
mat_obj = self.member.mats_linear[0]["Object"]
for shellth_data in self.member.geos_shellthickness:
shellth_obj = shellth_data["Object"]
elset_data = shellth_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": shellth_obj.Name, "short": shellth_data["ShortName"]},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["shellthickness_obj"] = shellth_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_single_shell(self):
shellth_obj = self.member.geos_shellthickness[0]["Object"]
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": shellth_obj.Name, "short": "S0"},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["shellthickness_obj"] = shellth_obj
self.mat_geo_sets.append(matgeoset)
def get_mat_geo_sets_multiple_mat_multiple_shell(self):
for shellth_data in self.member.geos_shellthickness:
shellth_obj = shellth_data["Object"]
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
shellth_ids = set(shellth_data["FEMElements"])
mat_ids = set(mat_data["FEMElements"])
# empty intersection sets possible
elset_data = list(sorted(shellth_ids.intersection(mat_ids)))
if elset_data:
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": shellth_obj.Name, "short": shellth_data["ShortName"]},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
matgeoset["shellthickness_obj"] = shellth_obj
self.mat_geo_sets.append(matgeoset)
# solid
def get_mat_geo_sets_single_mat_solid(self):
mat_obj = self.member.mats_linear[0]["Object"]
elset_data = self.ccx_evolumes
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": "Solid", "short": "Solid"},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
self.mat_geo_sets.append(matgeoset)
print(self.mat_geo_sets)
def get_mat_geo_sets_multiple_mat_solid(self):
for mat_data in self.member.mats_linear:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": "Solid", "short": "Solid"},
]
matgeoset = {}
matgeoset["ccx_elset"] = elset_data
matgeoset["ccx_elset_name"] = get_elset_name_standard(names)
matgeoset["mat_obj_name"] = mat_obj.Name
matgeoset["ccx_mat_name"] = mat_obj.Material["Name"]
self.mat_geo_sets.append(matgeoset)
# ************************************************************************************************
# Helpers
# ccx elset names:
# M .. Material
# B .. Beam
# R .. BeamRotation
# D .. Direction
# F .. Fluid
# S .. Shell,
# TODO write comment into input file to elset ids and elset attributes
def get_elset_name_standard(names):
    """Return a CalculiX elset name built from the "long" name parts.

    Falls back to the concatenated "short" parts if the long version would
    exceed 80 characters (the standard CalculiX limit). Raises Exception if
    even the short version is longer than 80 characters.
    """
    # standard max length = 80
    long_name = "".join(part["long"] for part in names)
    if len(long_name) <= 80:
        return long_name
    short_name = "".join(part["short"] for part in names)
    if len(short_name) <= 80:
        return short_name
    raise Exception(
        "FEM: Trouble in elset name, because an "
        "elset name is longer than 80 character! {}\n".format(short_name)
    )
def get_elset_name_short(names):
    """Return a CalculiX elset name built from the "short" name parts.

    Some elset names in CalculiX solver input (the ones referenced by beam
    section cards) are restricted to 20 characters, thus only the short
    parts are used. Raises Exception if the result exceeds 20 characters.
    """
    # restricted max length = 20 (elsets)
    # in CalculiX solver input this is needed for beam elsets
    elset_name = "".join(name["short"] for name in names)
    if len(elset_name) < 21:
        return elset_name
    # bugfix: the two adjacent literals used to concatenate to the garbled
    # message "because anshort elset name ..." (missing space, wrong article)
    error = (
        "FEM: Trouble in elset name, because a "
        "short elset name is longer than 20 characters! {}\n".format(elset_name)
    )
    raise Exception(error)
def print_obj_info(obj, log=False):
    """Print label, type and name of a document object to the FreeCAD console.

    With log=True the output goes to the log channel instead of the message
    channel.
    """
    if log is False:
        printer = FreeCAD.Console.PrintMessage
    else:
        printer = FreeCAD.Console.PrintLog
    printer("{}:\n".format(obj.Label))
    printer(" Type: {}, Name: {}\n".format(type_of_obj(obj), obj.Name))
## @}
|
util | winpipe | # Copyright 2014,2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import ctypes
import os
import threading
if os.name == "nt":
from . import winapi
from gi.repository import GLib
def write_pipe(pipe_name, data):
    """Write *data* (bytes) to the named pipe *pipe_name*.

    Raises EnvironmentError if the pipe cannot be opened or written.
    """
    assert isinstance(data, bytes)
    # XXX: otherwise many consecutive open fail, no idea..
    pipe_exists(pipe_name)
    filename = NamedPipeServer._get_filename(pipe_name)
    with open(filename, "wb") as h:
        h.write(data)
def pipe_exists(pipe_name):
    """Returns True if the named pipe named 'pipe_name' currently exists"""
    # wait at most 1 ms for a pipe instance to become available; a zero
    # return (converted to WinError) or a WindowsError means there is none
    timeout_ms = 1
    filename = NamedPipeServer._get_filename(pipe_name)
    try:
        if winapi.WaitNamedPipeW(filename, timeout_ms) == 0:
            raise ctypes.WinError()
    except WindowsError:
        return False
    return True
class NamedPipeServerError(Exception):
    """Raised when setting up the named pipe server fails (see
    NamedPipeServer.start())."""

    pass
class NamedPipeServer(threading.Thread):
    """A named pipe for Windows.

    * server:
        server = NamedPipeServer("foo", lambda data: ...)
        server.start()
        glib_loop()
        server.stop()
    * client:
        with open(NamedPipeServer.get_filename("foo"), "wb") as h:
            h.write("Hello World")
    """

    def __init__(self, name, callback):
        """name is the name of the pipe file (should be unique I guess)
        callback will be called with new data until close() is called.
        """
        super().__init__()
        # set by run() once pipe creation succeeded or failed
        self._event = threading.Event()
        self._filename = self._get_filename(name)
        self._callback = callback
        self._stopped = False

    @classmethod
    def _get_filename(cls, name):
        # Win32 named pipe namespace: \\.\pipe\<name>
        return "\\\\.\\pipe\\%s" % name

    def _process(self, data):
        # dispatch the received bytes to the callback on the GLib main loop
        # (we are on the pipe thread here)
        def idle_process(data):
            if not self._stopped:
                self._callback(data)
            return False

        GLib.idle_add(idle_process, data)

    def start(self):
        """Start the pipe thread; raises NamedPipeServerError if the pipe
        could not be created (e.g. another instance already owns it)."""
        super().start()
        # make sure we can use write_pipe() immediately after this returns
        self._event.wait()
        if self._stopped:
            # something went wrong (maybe another instance is running)
            raise NamedPipeServerError("Setting up named pipe failed")

    def run(self):
        # thread body: create the pipe, then serve connect/read cycles until
        # stop() unblocks us with a dummy write
        buffer_size = 4096
        try:
            handle = winapi.CreateNamedPipeW(
                self._filename,
                (winapi.PIPE_ACCESS_INBOUND | winapi.FILE_FLAG_FIRST_PIPE_INSTANCE),
                (
                    winapi.PIPE_TYPE_BYTE
                    | winapi.PIPE_READMODE_BYTE
                    | winapi.PIPE_WAIT
                    | winapi.PIPE_REJECT_REMOTE_CLIENTS
                ),
                winapi.PIPE_UNLIMITED_INSTANCES,
                buffer_size,
                buffer_size,
                winapi.NMPWAIT_USE_DEFAULT_WAIT,
                None,
            )
            if handle == winapi.INVALID_HANDLE_VALUE:
                raise ctypes.WinError()
        except WindowsError:
            # due to FILE_FLAG_FIRST_PIPE_INSTANCE and not the first instance
            self._stopped = True
            self._event.set()
            return
        self._event.set()
        while 1:
            data = bytearray()
            try:
                # block until a client connects, then read until it closes
                if winapi.ConnectNamedPipe(handle, None) == 0:
                    raise ctypes.WinError()
                while 1:
                    readbuf = ctypes.create_string_buffer(buffer_size)
                    bytesread = winapi.DWORD()
                    try:
                        if (
                            winapi.ReadFile(
                                handle,
                                readbuf,
                                buffer_size,
                                ctypes.byref(bytesread),
                                None,
                            )
                            == 0
                        ):
                            raise ctypes.WinError()
                    except WindowsError:
                        # client closed its end; end of this message
                        break
                    else:
                        message = readbuf[: bytesread.value]
                        data += message
                if winapi.DisconnectNamedPipe(handle) == 0:
                    raise ctypes.WinError()
            except WindowsError:
                # better not loop forever..
                break
            finally:
                if self._stopped:
                    break
                if data:
                    self._process(bytes(data))
        # ignore errors here..
        winapi.CloseHandle(handle)

    def stop(self):
        """After this returns the callback will no longer be called.
        Can be called multiple times.
        """
        self._event.wait()
        if self._stopped:
            return
        self._stopped = True
        try:
            # dummy write to unblock ConnectNamedPipe/ReadFile in run()
            with open(self._filename, "wb") as h:
                h.write(b"stop!")
        except EnvironmentError:
            pass
        self._callback = None
        self.join()
|
addons | CloudFlareDdos | # -*- coding: utf-8 -*-
import inspect
import re
import urllib.parse
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.utils.misc import eval_js
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.addon import BaseAddon
from ..helpers import parse_html_header
def plugin_id(plugin):
    """Return a short human-readable identifier for a plugin instance,
    e.g. "<DOWNLOADER Foo[42]>" (the file id is empty without a pyfile)."""
    pyfile_id = plugin.pyfile.id if plugin.pyfile else ""
    return "<{} {}[{}]>".format(
        plugin.__type__.upper(), plugin.__name__, pyfile_id
    )
def is_simple_plugin(obj):
    """Return True if obj's class derives from SimpleDownloader or
    SimpleDecrypter (checked by class name anywhere in the MRO)."""
    simple_bases = ("SimpleDownloader", "SimpleDecrypter")
    return any(base.__name__ in simple_bases for base in inspect.getmro(type(obj)))
def get_plugin_last_header(plugin):
    """Return the response header of the plugin's last request.

    NOTE: plugin.req can be a HTTPRequest or a Browser object (the latter
    exposes the request via its .http attribute).
    """
    req = plugin.req
    if hasattr(req, "http"):
        return req.http.response_header
    return req.response_header
class CloudFlare:
    """Static helpers that wrap plugin requests and transparently solve
    CloudFlare's DDoS (503) and security check (403) interstitial pages."""

    @staticmethod
    def handle_function(addon_plugin, owner_plugin, func_name, orig_func, args):
        """Call orig_func(*args[0], **args[1]) and, if it fails with a
        CloudFlare BadHeader, try to solve the challenge and return the
        resulting page; otherwise re-raise the original exception."""
        addon_plugin.log_debug(
            "Calling {}() of {}".format(func_name, plugin_id(owner_plugin))
        )
        try:
            data = orig_func(*args[0], **args[1])
            addon_plugin.log_debug(f"{func_name}() returned successfully")
            return data
        except BadHeader as exc:
            addon_plugin.log_debug(
                "{}(): got BadHeader exception {}".format(func_name, exc.code)
            )
            header = parse_html_header(exc.header)
            if "cloudflare" in header.get("server", ""):
                if exc.code == 403:
                    data = CloudFlare._solve_cf_security_check(
                        addon_plugin, owner_plugin, exc.content
                    )
                elif exc.code == 503:
                    for _i in range(3):
                        try:
                            data = CloudFlare._solve_cf_ddos_challenge(
                                addon_plugin, owner_plugin, exc.content
                            )
                            break
                        #: Possibly we got another ddos challenge
                        #: bugfix: the inner handler previously rebound `exc`;
                        #: `except ... as` deletes its target when the handler
                        #: exits, so the `raise exc` below crashed with a
                        #: NameError instead of re-raising the BadHeader
                        except BadHeader as retry_exc:
                            addon_plugin.log_debug(
                                f"{func_name}(): got BadHeader exception {retry_exc.code}"
                            )
                            header = parse_html_header(retry_exc.header)
                            if retry_exc.code == 503 and "cloudflare" in header.get(
                                "server", ""
                            ):
                                continue  #: Yes, it's a ddos challenge again..
                            else:
                                data = None  # Tell the exception handler to re-throw the exception
                                break
                    else:
                        addon_plugin.log_error(
                            addon_plugin._("{}(): Max solve retries reached").format(
                                func_name
                            )
                        )
                        data = (
                            None  # Tell the exception handler to re-throw the exception
                        )
                else:
                    addon_plugin.log_warning(
                        addon_plugin._("Unknown CloudFlare response code {}").format(
                            exc.code
                        )
                    )
                    raise
                if data is None:
                    raise exc
                else:
                    return data
            else:
                raise

    @staticmethod
    def _solve_cf_ddos_challenge(addon_plugin, owner_plugin, data):
        """Solve the JavaScript arithmetic challenge of the 503 DDoS page and
        submit the answer; returns the page data or None on parse failure."""
        try:
            addon_plugin.log_info(
                addon_plugin._("Detected CloudFlare's DDoS protection page")
            )
            # mandatory wait time taken from the page's setTimeout (ms),
            # rounded up to whole seconds
            wait_time = (
                int(re.search(r"submit\(\);\r?\n\s*},\s*([0-9]+)", data).group(1)) + 999
            ) // 1000
            owner_plugin.set_wait(wait_time)
            last_url = owner_plugin.req.last_effective_url
            urlp = urllib.parse.urlparse(last_url)
            domain = urlp.netloc
            submit_url = "{}://{}/cdn-cgi/l/chk_jschl".format(urlp.scheme, domain)
            get_params = {}
            try:
                get_params["jschl_vc"] = re.search(
                    r'name="jschl_vc" value="(\w+)"', data
                ).group(1)
                get_params["pass"] = re.search(
                    r'name="pass" value="(.+?)"', data
                ).group(1)
                get_params["s"] = re.search(r'name="s" value="(.+?)"', data).group(1)
                # Extract the arithmetic operation
                js = re.search(
                    r"setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n",
                    data,
                ).group(1)
                js = re.sub(r"a\.value = (.+\.toFixed\(10\);).+", r"\1", js)
                solution_name = re.search(
                    r"s,t,o,p,b,r,e,a,k,i,n,g,f,\s*(.+)\s*=", js
                ).group(1)
                g = re.search(
                    r"(.*};)\n\s*(t\s*=(.+))\n\s*(;%s.*)" % solution_name,
                    js,
                    re.M | re.I | re.S,
                ).groups()
                js = g[0] + g[-1]
                js = re.sub(r"[\n\\']", "", js)
            except Exception:
                # Something is wrong with the page.
                # This may indicate CloudFlare has changed their anti-bot
                # technique.
                owner_plugin.log_error(
                    addon_plugin._("Unable to parse CloudFlare's DDoS protection page")
                )
                return None  #: Tell the exception handler to re-throw the exception
            if "toFixed" not in js:
                owner_plugin.log_error(
                    owner_plugin._("Unable to parse CloudFlare's DDoS protection page")
                )
                return None  # Tell the exception handler to re-throw the exception
            # provide browser shims (atob, document) the challenge JS expects
            atob = 'var atob = function(str) {return Buffer.from(str, "base64").toString("binary");}'
            try:
                k = re.search(r"k\s*=\s*\'(.+?)\';", data).group(1)
                v = re.search(r'<div(?:.*)id="%s"(?:.*)>(.*)</div>' % k, data).group(1)
                doc = (
                    'var document= {getElementById: function(x) { return {innerHTML:"%s"};}}'
                    % v
                )
            except (AttributeError, IndexError):
                doc = ""
            js = '%s;%s;var t="%s";%s' % (doc, atob, domain, js)
            # Safely evaluate the Javascript expression
            res = eval_js(js)
            try:
                get_params["jschl_answer"] = str(float(res))
            except ValueError:
                owner_plugin.log_error(
                    owner_plugin._("Unable to parse CloudFlare's DDoS protection page")
                )
                return None  # Tell the exception handler to re-throw the exception
            owner_plugin.wait()  #: Do the actual wait
            return owner_plugin.load(submit_url, get=get_params, ref=last_url)
        except BadHeader as exc:
            raise exc  #: Huston, we have a BadHeader!
        except Exception as exc:
            addon_plugin.log_error(exc)
            return None  #: Tell the exception handler to re-throw the exception

    @staticmethod
    def _solve_cf_security_check(addon_plugin, owner_plugin, data):
        """Solve the ReCaptcha on the 403 security check page; returns the
        page data or None on failure."""
        try:
            last_url = owner_plugin.req.last_effective_url
            captcha = ReCaptcha(owner_plugin.pyfile)
            captcha_key = captcha.detect_key(data)
            if captcha_key:
                addon_plugin.log_info(
                    addon_plugin._("Detected CloudFlare's security check page")
                )
                response = captcha.challenge(captcha_key, data)
                return owner_plugin.load(
                    owner_plugin.fixurl("/cdn-cgi/l/chk_captcha"),
                    get={"g-recaptcha-response": response},
                    ref=last_url,
                )
            else:
                addon_plugin.log_warning(
                    addon_plugin._("Got unexpected CloudFlare html page")
                )
                return None  #: Tell the exception handler to re-throw the exception
        except Exception as exc:
            addon_plugin.log_error(exc)
            return None  #: Tell the exception handler to re-throw the exception
class PreloadStub:
    """Stand-in for a plugin's _preload() that routes the call through
    CloudFlare.handle_function() so challenge pages get solved."""

    def __init__(self, addon_plugin, owner_plugin):
        self.addon_plugin = addon_plugin
        self.owner_plugin = owner_plugin
        # keep the original bound method so it can be restored later
        self.old_preload = owner_plugin._preload

    def my_preload(self, *args, **kwargs):
        """Call the original _preload(); on success store the page content
        on the owner plugin."""
        data = CloudFlare.handle_function(
            self.addon_plugin,
            self.owner_plugin,
            "_preload",
            self.old_preload,
            (args, kwargs),
        )
        if data is not None:
            self.owner_plugin.data = data

    def __repr__(self):
        return "<PreloadStub object at {}>".format(hex(id(self)))
class CloudFlareDdos(BaseAddon):
    __name__ = "CloudFlareDdos"
    __type__ = "addon"
    __version__ = "0.17"
    __status__ = "testing"

    __config__ = [("enabled", "bool", "Activated", False)]

    __description__ = """CloudFlare DDoS protection support"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    def activate(self):
        """Install the get_url() override when the addon becomes active."""
        self.stubs = {}  # maps id(plugin) -> PreloadStub
        self._override_get_url()

    def deactivate(self):
        """Remove every remaining _preload() stub and restore get_url()."""
        while len(self.stubs):
            stub = next(iter(self.stubs.values()))
            self._unoverride_preload(stub.owner_plugin)
        self._unoverride_get_url()

    def _unoverride_preload(self, plugin):
        """Restore the plugin's original _preload() if we overrode it."""
        if id(plugin) in self.stubs:
            self.log_debug(f"Unoverriding _preload() for {plugin_id(plugin)}")
            stub = self.stubs.pop(id(plugin))
            stub.owner_plugin._preload = stub.old_preload
        else:
            # fixed message typo: stray '>' at the end
            self.log_warning(
                self._(
                    "No _preload() override found for {}, cannot un-override!"
                ).format(plugin_id(plugin))
            )

    def _override_preload(self, plugin):
        """Replace the plugin's _preload() with a CloudFlare-aware stub."""
        if id(plugin) not in self.stubs:
            stub = PreloadStub(self, plugin)
            self.stubs[id(plugin)] = stub
            self.log_debug(f"Overriding _preload() for {plugin_id(plugin)}")
            plugin._preload = stub.my_preload
        else:
            self.log_warning(
                self._("Already overrided _preload() for {}").format(plugin_id(plugin))
            )

    def _override_get_url(self):
        """Swap the global request factory's get_url() for our wrapper."""
        self.log_debug("Overriding get_url()")
        self.old_get_url = self.pyload.request_factory.get_url
        self.pyload.request_factory.get_url = self.my_get_url

    def _unoverride_get_url(self):
        """Restore the global request factory's original get_url()."""
        self.log_debug("Unoverriding get_url()")
        self.pyload.request_factory.get_url = self.old_get_url

    def _find_owner_plugin(self):
        """
        Walk the callstack until we find SimpleDownloader or SimpleDecrypter class.
        Dirty but works.
        """
        f = frame = inspect.currentframe()
        try:
            while True:
                if f is None:
                    return None
                elif "self" in f.f_locals and is_simple_plugin(f.f_locals["self"]):
                    return f.f_locals["self"]
                else:
                    f = f.f_back
        finally:
            # break the reference cycle created by holding a frame object
            del frame

    def download_preparing(self, pyfile):
        """Hook: install the _preload() stub for supported plugins."""
        #: Only SimpleDownloader and SimpleDecrypter based plugins are supported
        if not is_simple_plugin(pyfile.plugin):
            self.log_debug(f"Skipping plugin {plugin_id(pyfile.plugin)}")
            return

        attr = getattr(pyfile.plugin, "_preload", None)
        #: bugfix: was `not attr and not callable(attr)`, which silently
        #: accepted a truthy but non-callable _preload attribute
        if attr is None or not callable(attr):
            self.log_error(
                self._("{} is missing _preload() function, cannot override!").format(
                    plugin_id(pyfile.plugin)
                )
            )
            return

        self._override_preload(pyfile.plugin)

    def download_processed(self, pyfile):
        """Hook: remove the stub once the download has been processed."""
        if id(pyfile.plugin) in self.stubs:
            self._unoverride_preload(pyfile.plugin)

    def my_get_url(self, *args, **kwargs):
        """CloudFlare-aware replacement for request_factory.get_url()."""
        owner_plugin = self._find_owner_plugin()
        if owner_plugin is None:
            self.log_warning(self._("Owner plugin not found, cannot process"))
            return self.old_get_url(*args, **kwargs)
        else:
            # NOTE: Better use owner_plugin.load() instead of get_url() so cookies are saved and so captcha credits
            # NOTE: Also that way we can use 'owner_plugin.req.header' to get the
            # headers, otherwise we cannot get them
            res = CloudFlare.handle_function(
                self, owner_plugin, "get_url", owner_plugin.load, (args, kwargs)
            )
            if kwargs.get("just_header", False):
                # NOTE: SimpleDownloader/SimpleDecrypter returns a dict while get_url() returns raw headers string,
                # make sure we return a string for get_url('just_header'=True)
                res = get_plugin_last_header(owner_plugin)
            return res
|
SceneDetails | Pickability | # /***************************************************************************
# * Copyright (c) 2019 Victor Titov (DeepSOIC) <vv.titov@gmail.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ***************************************************************************/
from Show.SceneDetail import SceneDetail
class Pickability(SceneDetail):
    """Pickability(object, pickstyle = None):Plugin for TempoVis for altering pick style
    of objects (i.e., selectability).

    pickstyle may be:
    PS_REGULAR = 0 # selectable
    PS_BOUNDBOX = 1 # selectable, but faster hit testing using bounding box
    PS_UNPICKABLE = 2 # not selectable and not obstructing."""

    class_id = "SDPickability"
    # NOTE(review): propname appears unused by this detail (the key is the
    # object name below) — confirm against the SceneDetail base class
    propname = ""
    objname = ""

    def __init__(self, object, pickstyle=None):
        self.objname = object.Name
        self.doc = object.Document
        self.key = self.objname
        # with pickstyle=None, self.data presumably keeps the SceneDetail
        # default — TODO confirm in the base class
        if pickstyle is not None:
            self.data = pickstyle

    def scene_value(self):
        """Return the object's current pick style (value to be restored)."""
        return getPickStyle(self.doc.getObject(self.objname).ViewObject)

    def apply_data(self, val):
        """Apply pick style *val* to the object's view provider."""
        setPickStyle(self.doc.getObject(self.objname).ViewObject, val)
# Pick style values (see the Pickability docstring above)
PS_REGULAR = 0  # selectable
PS_BOUNDBOX = 1  # selectable, faster hit testing using bounding box
PS_UNPICKABLE = 2  # not selectable and not obstructing
def getPickStyleNode(viewprovider, make_if_missing=True):
    """Return the SoPickStyle node of the view provider's root node.

    Only a node sitting directly under the root (path length 1) counts. If
    none is found and make_if_missing is True, a new node set to SHAPE
    (regular pickability) is inserted as first child and returned; otherwise
    None is returned.
    """
    from pivy import coin

    sa = coin.SoSearchAction()
    sa.setType(coin.SoPickStyle.getClassTypeId())
    sa.traverse(viewprovider.RootNode)
    if sa.isFound() and sa.getPath().getLength() == 1:
        return sa.getPath().getTail()
    else:
        if not make_if_missing:
            return None
        # create a default style node and make it the first child so it
        # affects the whole subgraph
        pick_style = coin.SoPickStyle()
        pick_style.style.setValue(coin.SoPickStyle.SHAPE)
        viewprovider.RootNode.insertChild(pick_style, 0)
        return pick_style
def getPickStyle(viewprovider):
    """Return the current pick style of a view provider; PS_REGULAR when no
    SoPickStyle node is present."""
    node = getPickStyleNode(viewprovider, make_if_missing=False)
    if node is None:
        return PS_REGULAR
    return node.style.getValue()
def setPickStyle(viewprovider, pickstyle):
    """Set the pick style of a view provider.

    pickstyle is one of PS_REGULAR, PS_BOUNDBOX, PS_UNPICKABLE. The style
    node is created on demand only: for PS_REGULAR (0, the default) a
    missing node is left missing.
    """
    ps = getPickStyleNode(
        viewprovider, make_if_missing=pickstyle != 0
    )  # coin.SoPickStyle.SHAPE
    if ps is not None:
        return ps.style.setValue(pickstyle)
|
migrations | 0008_update_django_autoslug | import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Re-declare the AutoSlugField columns so their definitions match the
    options used by the installed django-autoslug version (verbose_name,
    always_update, unique_with); no database data changes."""

    dependencies = [("fpr", "0007_embedded_default_thumbnail")]
    operations = [
        migrations.AlterField(
            model_name="format",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                populate_from="description", verbose_name="slug", editable=False
            ),
        ),
        migrations.AlterField(
            model_name="formatgroup",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                populate_from="description", verbose_name="slug", editable=False
            ),
        ),
        migrations.AlterField(
            model_name="formatversion",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                always_update=True,
                populate_from="description",
                unique_with=("format",),
                editable=False,
            ),
        ),
        migrations.AlterField(
            model_name="fptool",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                populate_from="_slug", verbose_name="slug", editable=False
            ),
        ),
        migrations.AlterField(
            model_name="idtool",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                editable=False,
                populate_from="_slug",
                always_update=True,
                verbose_name="slug",
            ),
        ),
    ]
|
extractor | hotnewhiphop | from __future__ import unicode_literals
from ..compat import compat_b64decode
from ..utils import ExtractorError, HEADRequest, sanitized_Request, urlencode_postdata
from .common import InfoExtractor
class HotNewHipHopIE(InfoExtractor):
    """Extractor for song pages on hotnewhiphop.com."""

    _VALID_URL = r"https?://(?:www\.)?hotnewhiphop\.com/.*\.(?P<id>.*)\.html"
    _TEST = {
        "url": "http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html",
        "md5": "2c2cd2f76ef11a9b3b581e8b232f3d96",
        "info_dict": {
            "id": "1435540",
            "ext": "mp3",
            "title": "Freddie Gibbs - Lay It Down",
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # base64-encoded media URL embedded in the page markup
        video_url_base64 = self._search_regex(
            r'data-path="(.*?)"', webpage, "video URL", default=None
        )
        if video_url_base64 is None:
            # no direct media: hand off the embedded content URL
            # (presumably a YouTube embed) to the YouTube extractor
            video_url = self._search_regex(
                r'"contentUrl" content="(.*?)"', webpage, "content URL"
            )
            return self.url_result(video_url, ie="Youtube")
        # request a media key; its presence validates access to the media
        # (the key value itself is not used further below)
        reqdata = urlencode_postdata(
            [
                ("mediaType", "s"),
                ("mediaId", video_id),
            ]
        )
        r = sanitized_Request(
            "http://www.hotnewhiphop.com/ajax/media/getActions/", data=reqdata
        )
        r.add_header("Content-Type", "application/x-www-form-urlencoded")
        mkd = self._download_json(
            r,
            video_id,
            note="Requesting media key",
            errnote="Could not download media key",
        )
        if "mediaKey" not in mkd:
            raise ExtractorError("Did not get a media key")
        # the decoded data-path is a redirector; resolve it with a HEAD
        # request to obtain the final media URL
        redirect_url = compat_b64decode(video_url_base64).decode("utf-8")
        redirect_req = HEADRequest(redirect_url)
        req = self._request_webpage(
            redirect_req,
            video_id,
            note="Resolving final URL",
            errnote="Could not resolve final URL",
        )
        video_url = req.geturl()
        if video_url.endswith(".html"):
            # landing back on an HTML page means the redirect did not
            # resolve to a media file
            raise ExtractorError("Redirect failed")
        video_title = self._og_search_title(webpage).strip()
        return {
            "id": video_id,
            "url": video_url,
            "title": video_title,
            "thumbnail": self._og_search_thumbnail(webpage),
        }
|
templatetags | posthog_assets | import re
from typing import List
from django.conf import settings
from django.template import Library
from posthog.utils import absolute_uri as util_absolute_uri
register = Library()
@register.simple_tag
def absolute_uri(url: str = "") -> str:
    """Template tag wrapper around posthog.utils.absolute_uri: return *url*
    as an absolute URI rooted at the configured site URL."""
    return util_absolute_uri(url)
@register.simple_tag
def absolute_asset_url(path: str) -> str:
    """
    Returns a versioned absolute asset URL (located within PostHog's static files).
    Example:
      {% absolute_asset_url 'dist/posthog.css' %}
      => "http://posthog.example.com/_static/74d127b78dc7daf2c51f/dist/posthog.css"
    """
    # normalize slashes so STATIC_URL and path join with exactly one "/"
    return absolute_uri(f"{settings.STATIC_URL.rstrip('/')}/{path.lstrip('/')}")
@register.simple_tag
def human_social_providers(providers: List[str]) -> str:
    """
    Returns a human-friendly name for a social login provider.
    Example:
      {% human_social_providers ["google-oauth2", "github"] %}
      => "Google, GitHub"
    """
    friendly_names = {
        "google-oauth2": "Google",
        "github": "GitHub",
        "gitlab": "GitLab",
    }
    # unknown providers fall back to the generic SAML label
    return ", ".join(
        friendly_names.get(provider, "single sign-on (SAML)")
        for provider in providers
    )
@register.simple_tag
def strip_protocol(path: str) -> str:
    """
    Returns a URL removing the http/https protocol
    Example:
      {% strip_protocol 'https://app.posthog.com' %}
      => "app.posthog.com"
    """
    scheme_pattern = re.compile(r"https?:\/\/")
    return scheme_pattern.sub("", path)
|
digital | qa_ofdm_serializer_vcc | #!/usr/bin/env python
#
# Copyright 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import numpy
import pmt
from gnuradio import analog, blocks, digital, fft, gr, gr_unittest
class qa_ofdm_serializer_vcc(gr_unittest.TestCase):
    def setUp(self):
        """Create a fresh flow graph and tagged-stream-boundary key for each test."""
        self.tb = gr.top_block()
        self.tsb_key = "ts_last"
    def tearDown(self):
        """Drop the flow graph so each test starts from scratch."""
        self.tb = None
def test_001_simple(self):
"""Standard test"""
fft_len = 16
tx_symbols = (
0,
1,
1j,
2,
3,
0,
0,
0,
0,
0,
0,
4,
5,
2j,
6,
0,
0,
7,
8,
3j,
9,
0,
0,
0,
0,
0,
0,
10,
4j,
11,
12,
0,
0,
13,
1j,
14,
15,
0,
0,
0,
0,
0,
0,
0,
0,
2j,
0,
0,
)
expected_result = list(range(1, 16)) + [0, 0, 0]
occupied_carriers = (
(1, 3, 4, 11, 12, 14),
(1, 2, 4, 11, 13, 14),
)
n_syms = len(tx_symbols) // fft_len
src = blocks.vector_source_c(tx_symbols, False, fft_len)
serializer = digital.ofdm_serializer_vcc(
fft_len, occupied_carriers, self.tsb_key, "", 0, "", False
)
sink = blocks.tsb_vector_sink_c(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key
),
serializer,
sink,
)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
def test_001b_shifted(self):
"""Same as before, but shifted, because that's the normal mode in OFDM Rx"""
fft_len = 16
tx_symbols = (
0,
0,
0,
0,
0,
0,
1,
2,
0,
3,
4,
5,
0,
0,
0,
0,
0,
0,
0,
0,
6,
1j,
7,
8,
0,
9,
10,
1j,
11,
0,
0,
0,
0,
0,
0,
0,
0,
12,
13,
14,
0,
15,
16,
17,
0,
0,
0,
0,
)
expected_result = list(range(18))
occupied_carriers = (
(13, 14, 15, 1, 2, 3),
(-4, -2, -1, 1, 2, 4),
)
n_syms = len(tx_symbols) // fft_len
src = blocks.vector_source_c(tx_symbols, False, fft_len)
serializer = digital.ofdm_serializer_vcc(
fft_len, occupied_carriers, self.tsb_key
)
sink = blocks.tsb_vector_sink_c(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key
),
serializer,
sink,
)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
def test_002_with_offset(self):
"""Standard test, carrier offset"""
fft_len = 16
tx_symbols = list(range(1, 16))
tx_symbols = (
0,
0,
1,
1j,
2,
3,
0,
0,
0,
0,
0,
0,
4,
5,
2j,
6,
0,
0,
7,
8,
3j,
9,
0,
0,
0,
0,
0,
0,
10,
4j,
11,
12,
0,
0,
13,
1j,
14,
15,
0,
0,
0,
0,
0,
0,
0,
0,
2j,
0,
)
carr_offset = 1 # Compare this with tx_symbols from the previous test
expected_result = list(range(1, 16)) + [0, 0, 0]
occupied_carriers = (
(1, 3, 4, 11, 12, 14),
(1, 2, 4, 11, 13, 14),
)
n_syms = len(tx_symbols) // fft_len
offsettag = gr.tag_t()
offsettag.offset = 0
offsettag.key = pmt.string_to_symbol("ofdm_sync_carr_offset")
offsettag.value = pmt.from_long(carr_offset)
src = blocks.vector_source_c(tx_symbols, False, fft_len, (offsettag,))
sink = blocks.tsb_vector_sink_c(tsb_key=self.tsb_key)
serializer = digital.ofdm_serializer_vcc(
fft_len,
occupied_carriers,
self.tsb_key,
"",
0,
"ofdm_sync_carr_offset",
False,
)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key
),
serializer,
sink,
)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
self.assertEqual(len(sink.tags()), 1)
def test_003_connect(self):
"""Connect carrier_allocator to ofdm_serializer,
make sure output==input"""
fft_len = 8
n_syms = 1
occupied_carriers = ((1, 2, 6, 7),)
pilot_carriers = ((3,), (5,))
pilot_symbols = ((1j,), (-1j,))
# tx_data = tuple([numpy.random.randint(0, 10) for x in range(4 * n_syms)])
tx_data = [1, 2, 3, 4]
src = blocks.vector_source_c(tx_data, False, 1)
alloc = digital.ofdm_carrier_allocator_cvc(
fft_len,
occupied_carriers,
pilot_carriers,
pilot_symbols,
(), # No sync word
self.tsb_key,
True, # Output is shifted (default)
)
serializer = digital.ofdm_serializer_vcc(
alloc,
"", # Len tag key
0, # Symbols skipped
"", # Carrier offset key
True, # Input is shifted (default)
)
sink = blocks.tsb_vector_sink_c(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, 1, len(tx_data), self.tsb_key
),
alloc,
serializer,
sink,
)
self.tb.run()
self.assertEqual(sink.data()[0], tx_data)
def test_004_connect(self):
"""
Advanced test:
- Allocator -> IFFT -> Frequency offset -> FFT -> Serializer
- FFT does shift (moves DC to middle)
- Make sure input == output
- Frequency offset is -2 carriers
"""
fft_len = 8
n_syms = 1
carr_offset = -2
freq_offset = 1.0 / fft_len * carr_offset # Normalized frequency
occupied_carriers = ((-2, -1, 1, 2),)
pilot_carriers = ((-3,), (3,))
pilot_symbols = ((1j,), (-1j,))
tx_data = [1, 2, 3, 4]
offsettag = gr.tag_t()
offsettag.offset = 0
offsettag.key = pmt.string_to_symbol("ofdm_sync_carr_offset")
offsettag.value = pmt.from_long(carr_offset)
src = blocks.vector_source_c(tx_data, False, 1, (offsettag,))
alloc = digital.ofdm_carrier_allocator_cvc(
fft_len, occupied_carriers, pilot_carriers, pilot_symbols, (), self.tsb_key
)
tx_ifft = fft.fft_vcc(fft_len, False, (1.0 / fft_len,) * fft_len, True)
oscillator = analog.sig_source_c(1.0, analog.GR_COS_WAVE, freq_offset, 1.0)
mixer = blocks.multiply_cc()
rx_fft = fft.fft_vcc(fft_len, True, (), True)
sink2 = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key)
self.tb.connect(rx_fft, sink2)
serializer = digital.ofdm_serializer_vcc(
alloc, "", 0, "ofdm_sync_carr_offset", True
)
sink = blocks.tsb_vector_sink_c(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, 1, len(tx_data), self.tsb_key
),
alloc,
tx_ifft,
blocks.vector_to_stream(gr.sizeof_gr_complex, fft_len),
(mixer, 0),
blocks.stream_to_vector(gr.sizeof_gr_complex, fft_len),
rx_fft,
serializer,
sink,
)
self.tb.connect(oscillator, (mixer, 1))
self.tb.run()
self.assertComplexTuplesAlmostEqual(
sink.data()[0][-len(occupied_carriers[0]) :], tx_data, places=4
)
def test_005_packet_len_tag(self):
"""Standard test"""
fft_len = 16
tx_symbols = list(range(1, 16))
tx_symbols = (
0,
1,
1j,
2,
3,
0,
0,
0,
0,
0,
0,
4,
5,
2j,
6,
0,
0,
7,
8,
3j,
9,
0,
0,
0,
0,
0,
0,
10,
4j,
11,
12,
0,
0,
13,
1j,
14,
15,
0,
0,
0,
0,
0,
0,
0,
0,
2j,
0,
0,
)
expected_result = list(range(1, 16))
occupied_carriers = (
(1, 3, 4, 11, 12, 14),
(1, 2, 4, 11, 13, 14),
)
n_syms = len(tx_symbols) // fft_len
packet_len_tsb_key = "packet_len"
tag2 = gr.tag_t()
tag2.offset = 0
tag2.key = pmt.string_to_symbol("packet_len")
tag2.value = pmt.from_long(len(expected_result))
src = blocks.vector_source_c(tx_symbols, False, fft_len, (tag2,))
serializer = digital.ofdm_serializer_vcc(
fft_len, occupied_carriers, self.tsb_key, packet_len_tsb_key, 0, "", False
)
sink = blocks.tsb_vector_sink_c(tsb_key=packet_len_tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key
),
serializer,
sink,
)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
def test_099(self):
"""Make sure it fails if it should"""
fft_len = 16
occupied_carriers = ((1, 3, 4, 11, 12, 112),) # Something invalid
# self.assertRaises(TypeError, digital.ofdm_serializer_vcc, fft_len, occupied_carriers, self.tsb_key)
# pybind11 raises ValueError instead of TypeError
self.assertRaises(
ValueError,
digital.ofdm_serializer_vcc,
fft_len,
occupied_carriers,
self.tsb_key,
)
# Entry point: run this QA suite through GNU Radio's unittest runner.
if __name__ == "__main__":
    gr_unittest.run(qa_ofdm_serializer_vcc)
|
schema | groups | from typing import Dict, List
from posthog.hogql.database.argmax import argmax_select
from posthog.hogql.database.models import (
DateTimeDatabaseField,
FieldOrTable,
IntegerDatabaseField,
LazyTable,
StringDatabaseField,
StringJSONDatabaseField,
Table,
)
# HogQL field name -> ClickHouse column mapping shared by both group tables.
GROUPS_TABLE_FIELDS = {
    "index": IntegerDatabaseField(name="group_type_index"),
    "team_id": IntegerDatabaseField(name="team_id"),
    "key": StringDatabaseField(name="group_key"),
    "created_at": DateTimeDatabaseField(name="created_at"),
    # NOTE(review): "_timestamp" presumably tracks row (re)ingestion time,
    # which is why it serves as the version column for argmax — confirm.
    "updated_at": DateTimeDatabaseField(name="_timestamp"),
    "properties": StringJSONDatabaseField(name="group_properties"),
}
def select_from_groups_table(requested_fields: Dict[str, List[str]]):
    """Build the deduplicating argmax select over the raw groups table.

    For every (index, key) pair only the row with the greatest
    ``updated_at`` value is kept.
    """
    argmax_kwargs = {
        "table_name": "raw_groups",
        "select_fields": requested_fields,
        "group_fields": ["index", "key"],
        "argmax_field": "updated_at",
    }
    return argmax_select(**argmax_kwargs)
class RawGroupsTable(Table):
    """HogQL table that maps straight onto the ClickHouse ``groups`` table.

    Unlike ``GroupsTable``, rows are read directly, without the argmax
    latest-version selection.
    """

    fields: Dict[str, FieldOrTable] = GROUPS_TABLE_FIELDS

    def to_printed_clickhouse(self, context):
        # Physical table name used when printing ClickHouse SQL.
        return "groups"

    def to_printed_hogql(self):
        # Name used when printing HogQL.
        return "groups"
class GroupsTable(LazyTable):
    """HogQL ``groups`` table that resolves lazily to an argmax subquery.

    ``lazy_select`` delegates to ``select_from_groups_table``, which keeps
    only the newest version of each (index, key) row based on ``updated_at``.
    """

    fields: Dict[str, FieldOrTable] = GROUPS_TABLE_FIELDS

    def lazy_select(self, requested_fields: Dict[str, List[str]]):
        return select_from_groups_table(requested_fields)

    def to_printed_clickhouse(self, context):
        return "groups"

    def to_printed_hogql(self):
        return "groups"
|
Gui | Bit | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2019 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import os
import FreeCAD
import FreeCADGui
import Path
import Path.Base.Gui.IconViewProvider as PathIconViewProvider
import Path.Tool.Bit as PathToolBit
import Path.Tool.Gui.BitEdit as PathToolBitEdit
from PySide import QtCore, QtGui
from PySide.QtCore import QT_TRANSLATE_NOOP
__title__ = "Tool Bit UI"
__author__ = "sliptonic (Brad Collette)"
__url__ = "https://www.freecad.org"
__doc__ = "Task panel editor for a ToolBit"
# Module log level: flip the constant to True for verbose debug logging and
# call tracking of this module.
if False:
    Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
    Path.Log.trackModule(Path.Log.thisModule())
else:
    Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
# Shorthand for FreeCAD's Qt translation helper.
translate = FreeCAD.Qt.translate
class ViewProvider(object):
    """ViewProvider for a ToolBit.
    It's sole job is to provide an icon and invoke the TaskPanel on edit."""

    def __init__(self, vobj, name):
        # Bind this proxy to the view object and remember the icon name.
        Path.Log.track(name, vobj.Object)
        self.panel = None
        self.icon = name
        self.obj = vobj.Object
        self.vobj = vobj
        vobj.Proxy = self

    def attach(self, vobj):
        # Called by FreeCAD when the view object is (re)attached, e.g. on
        # document restore; refresh our object references.
        Path.Log.track(vobj.Object)
        self.vobj = vobj
        self.obj = vobj.Object

    def getIcon(self):
        # Prefer a rendered thumbnail of the bit; fall back to the generic icon.
        png = self.obj.Proxy.getBitThumbnail(self.obj)
        if png:
            pixmap = QtGui.QPixmap()
            pixmap.loadFromData(png, "PNG")
            return QtGui.QIcon(pixmap)
        return ":/icons/Path_ToolBit.svg"

    def dumps(self):
        # Nothing to serialize; state is rebuilt from the document object.
        return None

    def loads(self, state):
        return None

    def onDelete(self, vobj, arg2=None):
        # Forward deletion to the document object's proxy for cleanup.
        Path.Log.track(vobj.Object.Label)
        vobj.Object.Proxy.onDelete(vobj.Object)

    def getDisplayMode(self, mode):
        return "Default"

    def _openTaskPanel(self, vobj, deleteOnReject):
        # Replace any currently open task dialog with the ToolBit editor panel.
        Path.Log.track()
        self.panel = TaskPanel(vobj, deleteOnReject)
        FreeCADGui.Control.closeDialog()
        FreeCADGui.Control.showDialog(self.panel)
        self.panel.setupUi()

    def setCreate(self, vobj):
        # Edit a freshly created bit; rejecting the dialog deletes it again.
        Path.Log.track()
        self._openTaskPanel(vobj, True)

    def setEdit(self, vobj, mode=0):
        # Edit an existing bit; rejecting must not delete it.
        self._openTaskPanel(vobj, False)
        return True

    def unsetEdit(self, vobj, mode):
        FreeCADGui.Control.closeDialog()
        self.panel = None
        return

    def claimChildren(self):
        # Group the temporary bit body (if any) under the ToolBit in the tree.
        if self.obj.BitBody:
            return [self.obj.BitBody]
        return []

    def doubleClicked(self, vobj):
        # Only allow editing when the underlying shape file still exists.
        if os.path.exists(vobj.Object.BitShape):
            self.setEdit(vobj)
        else:
            msg = translate(
                "PathToolBit", "Toolbit cannot be edited: Shapefile not found"
            )
            diag = QtGui.QMessageBox(QtGui.QMessageBox.Warning, "Error", msg)
            diag.setWindowModality(QtCore.Qt.ApplicationModal)
            diag.exec_()
class TaskPanel:
    """TaskPanel for a ToolBit when it is being edited directly.

    Wraps a ToolBitEditor inside an 'Edit ToolBit' undo transaction:
    accept() commits it, reject() aborts it (and removes the object again
    when it was created just for this dialog).
    """

    def __init__(self, vobj, deleteOnReject):
        Path.Log.track(vobj.Object.Label)
        self.vobj = vobj
        self.obj = vobj.Object
        self.editor = PathToolBitEdit.ToolBitEditor(self.obj)
        self.form = self.editor.form
        # When True, rejecting the dialog also deletes the (new) object.
        self.deleteOnReject = deleteOnReject
        FreeCAD.ActiveDocument.openTransaction("Edit ToolBit")

    def reject(self):
        # Roll back the edit transaction and close the editor.
        FreeCAD.ActiveDocument.abortTransaction()
        self.editor.reject()
        FreeCADGui.Control.closeDialog()
        if self.deleteOnReject:
            # The bit only existed for this dialog: remove it again.
            FreeCAD.ActiveDocument.openTransaction("Uncreate ToolBit")
            # NOTE(review): editor.reject() is invoked a second time here —
            # looks redundant; confirm whether it is required before removal.
            self.editor.reject()
            FreeCAD.ActiveDocument.removeObject(self.obj.Name)
            FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()

    def accept(self):
        # Apply the edits, commit the transaction, and leave edit mode.
        self.editor.accept()
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCADGui.ActiveDocument.resetEdit()
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.recompute()

    def updateUI(self):
        Path.Log.track()
        self.editor.updateUI()

    def updateModel(self):
        # Push UI values back into the tool object and recompute.
        self.editor.updateTool()
        FreeCAD.ActiveDocument.recompute()

    def setupUi(self):
        self.editor.setupUI()
class ToolBitGuiFactory(PathToolBit.ToolBitFactory):
    """Factory that creates ToolBits with a GUI view provider attached."""

    def Create(self, name="ToolBit", shapeFile=None, path=None):
        """Create(name = 'ToolBit') ... creates a new tool bit.
        It is assumed the tool will be edited immediately so the internal bit body is still attached.
        """
        Path.Log.track(name, shapeFile, path)
        # Wrap creation in a transaction so it forms a single undo step.
        FreeCAD.ActiveDocument.openTransaction("Create ToolBit")
        tool = PathToolBit.ToolBitFactory.Create(self, name, shapeFile, path)
        PathIconViewProvider.Attach(tool.ViewObject, name)
        FreeCAD.ActiveDocument.commitTransaction()
        return tool
def isValidFileName(filename):
    """Return True if filename can be opened for writing.

    The check is non-destructive: the previous implementation opened the
    file with mode "w", which truncated an existing file even if the caller
    later aborted, and left a stray empty file behind for new paths. We now
    probe with mode "a" (no truncation) and remove any file the probe
    itself created. A leftover debug print() was also removed.
    """
    existed = os.path.exists(filename)
    try:
        with open(filename, "a"):
            pass
        if not existed:
            # The probe created the file; don't leave an empty artifact.
            os.remove(filename)
        return True
    except Exception:
        return False
def GetNewToolFile(parent=None):
    """Ask the user for a file name to save a new tool bit (*.fctb).

    Returns the chosen path, or None if the dialog was cancelled or the
    path is not writable. On success the directory is remembered as the
    last tool-bit location.
    """
    if parent is None:
        parent = QtGui.QApplication.activeWindow()
    foo = QtGui.QFileDialog.getSaveFileName(
        parent, "Tool", Path.Preferences.lastPathToolBit(), "*.fctb"
    )
    if foo and foo[0]:
        if not isValidFileName(foo[0]):
            # Path cannot be opened for writing: tell the user and fall
            # through to the final `return None`.
            msgBox = QtGui.QMessageBox()
            msg = translate("Path", "Invalid Filename")
            msgBox.setText(msg)
            msgBox.exec_()
        else:
            Path.Preferences.setLastPathToolBit(os.path.dirname(foo[0]))
            return foo[0]
    return None
def GetToolFile(parent=None):
    """Prompt the user for an existing tool file (*.fctb).

    Remembers the chosen directory for the next dialog. Returns the
    selected path, or None if the dialog was cancelled.
    """
    if parent is None:
        parent = QtGui.QApplication.activeWindow()
    selection = QtGui.QFileDialog.getOpenFileName(
        parent, "Tool", Path.Preferences.lastPathToolBit(), "*.fctb"
    )
    if not (selection and selection[0]):
        return None
    chosen = selection[0]
    Path.Preferences.setLastPathToolBit(os.path.dirname(chosen))
    return chosen
def GetToolFiles(parent=None):
    """Prompt the user for one or more existing tool files (*.fctb).

    Returns the list of selected paths ([] if the dialog was cancelled)
    and remembers the directory of the first selection for next time.
    """
    if parent is None:
        parent = QtGui.QApplication.activeWindow()
    selection = QtGui.QFileDialog.getOpenFileNames(
        parent, "Tool", Path.Preferences.lastPathToolBit(), "*.fctb"
    )
    if not (selection and selection[0]):
        return []
    filenames = selection[0]
    Path.Preferences.setLastPathToolBit(os.path.dirname(filenames[0]))
    return filenames
def GetToolShapeFile(parent=None):
    """Prompt the user for a tool shape file (*.fcstd) and return its path.

    Starts in the last-used shape directory (falling back to the default
    file path) and remembers the chosen directory for next time. Returns
    None when the dialog is cancelled.
    """
    if parent is None:
        parent = QtGui.QApplication.activeWindow()
    location = Path.Preferences.lastPathToolShape()
    if os.path.isfile(location):
        # The preference may hold a file path; start from its folder.
        location = os.path.split(location)[0]
    elif not os.path.isdir(location):
        location = Path.Preferences.filePath()
    fname = QtGui.QFileDialog.getOpenFileName(
        parent, "Select Tool Shape", location, "*.fcstd"
    )
    if fname and fname[0]:
        # Bug fix: the old code guarded this with `if fname != location:`,
        # which compared Qt's (path, filter) tuple to a string and was
        # therefore always true — the guard was meaningless and is removed.
        Path.Preferences.setLastPathToolShape(os.path.dirname(fname[0]))
        return fname[0]
    return None
def LoadTool(parent=None):
    """
    LoadTool(parent=None) ... Open a file dialog to load a tool from a file.
    """
    filename = GetToolFile(parent)
    if not filename:
        # Dialog cancelled: propagate the falsy value unchanged.
        return filename
    return PathToolBit.Factory.CreateFrom(filename)
def LoadTools(parent=None):
    """
    LoadTools(parent=None) ... Open a file dialog to load one or more tools,
    returning the list of created tools ([] if the dialog was cancelled).
    """
    return [PathToolBit.Factory.CreateFrom(foo) for foo in GetToolFiles(parent)]
# Install the GUI-aware factory so every ToolBit created from here on gets a
# view provider attached, and register the ViewProvider for 'ToolBit' objects.
PathToolBit.Factory = ToolBitGuiFactory()
PathIconViewProvider.RegisterViewProvider("ToolBit", ViewProvider)
|
Arch | ArchStructure | # ***************************************************************************
# * Copyright (c) 2011 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# Modified 2016-01-03 JAndersM
import ArchCommands
import ArchComponent
import ArchProfile
import Draft
import DraftVecUtils
import FreeCAD
from FreeCAD import Vector
# GUI-only imports; in headless (console) mode the translation helpers are
# replaced by identity functions that simply return the given text.
if FreeCAD.GuiUp:
    import ArchPrecast
    import draftguitools.gui_trackers as DraftTrackers
    import FreeCADGui
    from draftutils.translate import translate
    from PySide import QtCore, QtGui
    from PySide.QtCore import QT_TRANSLATE_NOOP

else:
    # \cond
    def translate(ctxt, txt):
        return txt

    def QT_TRANSLATE_NOOP(ctxt, txt):
        return txt

    # \endcond
## @package ArchStructure
# \ingroup ARCH
# \brief The Structure object and tools
#
# This module provides tools to build Structure objects.
# Structure elements are beams, columns, slabs, and other
# elements that have a structural function, that is, that
# support other parts of the building.
__title__ = "FreeCAD Structure"
__author__ = "Yorik van Havre"
__url__ = "https://www.freecad.org"
# Reads preset profiles and categorizes them.
# pre[1] holds the preset's category name; collect the unique categories.
Categories = []
Presets = ArchProfile.readPresets()
for pre in Presets:
    if pre[1] not in Categories:
        Categories.append(pre[1])
def makeStructure(baseobj=None, length=None, width=None, height=None, name=None):
    """makeStructure([baseobj],[length],[width],[height],[name]): creates a
    structure element based on the given profile object and the given
    extrusion height. If no base object is given, you can also specify
    length and width for a cubic object."""
    if not FreeCAD.ActiveDocument:
        FreeCAD.Console.PrintError("No active document. Aborting\n")
        return
    p = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
    obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "Structure")
    _Structure(obj)
    if FreeCAD.GuiUp:
        _ViewProviderStructure(obj.ViewObject)
    if baseobj:
        # The structure is an extrusion of the base profile; hide the profile.
        obj.Base = baseobj
        if FreeCAD.GuiUp:
            obj.Base.ViewObject.hide()
    if width:
        obj.Width = width
    else:
        obj.Width = p.GetFloat("StructureWidth", 100)
    if height:
        obj.Height = height
    else:
        if not length:
            # No explicit dimensions at all: default to a column-like height.
            obj.Height = p.GetFloat("StructureHeight", 1000)
    if length:
        obj.Length = length
    else:
        if not baseobj:
            # don't set the length if we have a base object, otherwise the length X height calc
            # gets wrong
            obj.Length = p.GetFloat("StructureLength", 100)
    if baseobj:
        # Try to derive cross-section dimensions from the base object itself.
        w = 0
        h = 0
        if hasattr(baseobj, "Width") and hasattr(baseobj, "Height"):
            w = baseobj.Width.Value
            h = baseobj.Height.Value
        elif hasattr(baseobj, "Length") and hasattr(baseobj, "Width"):
            w = baseobj.Length.Value
            h = baseobj.Width.Value
        elif hasattr(baseobj, "Length") and hasattr(baseobj, "Height"):
            w = baseobj.Length.Value
            h = baseobj.Height.Value
        if w and h:
            if length and not height:
                # Extruded along its length (beam): profile gives width/height.
                obj.Width = w
                obj.Height = h
            elif height and not length:
                # Extruded vertically (column): profile gives width/length.
                obj.Width = w
                obj.Length = h
    # Classify by proportions: longer than tall = beam, taller than long = column.
    if not height and not length:
        obj.IfcType = "Building Element Proxy"
        obj.Label = name if name else translate("Arch", "Structure")
    elif obj.Length > obj.Height:
        obj.IfcType = "Beam"
        obj.Label = name if name else translate("Arch", "Beam")
    elif obj.Height > obj.Length:
        obj.IfcType = "Column"
        obj.Label = name if name else translate("Arch", "Column")
    return obj
def makeStructuralSystem(objects=[], axes=[], name=None):
    """makeStructuralSystem([objects],[axes],[name]): makes a structural system
    based on the given objects and axes"""
    # NOTE(review): mutable default arguments are shared between calls; safe
    # here only because objects/axes are rebound, never mutated in place.
    if not FreeCAD.ActiveDocument:
        FreeCAD.Console.PrintError("No active document. Aborting\n")
        return
    result = []
    if not axes:
        print("At least one axis must be given")
        return
    if objects:
        if not isinstance(objects, list):
            objects = [objects]
    else:
        # No base objects given: create a single axis-only system.
        objects = [None]
    for o in objects:
        obj = FreeCAD.ActiveDocument.addObject(
            "Part::FeaturePython", "StructuralSystem"
        )
        obj.Label = name if name else translate("Arch", "StructuralSystem")
        _StructuralSystem(obj)
        if FreeCAD.GuiUp:
            _ViewProviderStructuralSystem(obj.ViewObject)
        if o:
            obj.Base = o
        obj.Axes = axes
        result.append(obj)
        if FreeCAD.GuiUp and o:
            # Hide the base object and copy its visual properties to the system.
            o.ViewObject.hide()
            Draft.formatObject(obj, o)
    FreeCAD.ActiveDocument.recompute()
    # Return a single object when only one was created, else the full list.
    if len(result) == 1:
        return result[0]
    else:
        return result
def placeAlongEdge(p1, p2, horizontal=False):
    """placeAlongEdge(p1,p2,[horizontal]): returns a Placement positioned at p1, with Z axis oriented towards p2.
    If horizontal is True, then the X axis is oriented towards p2, not the Z axis"""
    pl = FreeCAD.Placement()
    pl.Base = p1
    # Use the current working plane's normal as "up" when one is available.
    up = FreeCAD.Vector(0, 0, 1)
    if hasattr(FreeCAD, "DraftWorkingPlane"):
        up = FreeCAD.DraftWorkingPlane.axis
    zaxis = p2.sub(p1)
    yaxis = up.cross(zaxis)
    if yaxis.Length > 0:
        # zaxis is not parallel to "up": build a full orthogonal frame.
        # ("ZXY" is the priority order of the axes for FreeCAD.Rotation.)
        xaxis = zaxis.cross(yaxis)
        if horizontal:
            pl.Rotation = FreeCAD.Rotation(zaxis, yaxis, xaxis, "ZXY")
        else:
            pl.Rotation = FreeCAD.Rotation(xaxis, yaxis, zaxis, "ZXY")
            # Apply an extra 90° spin around the placement's own Z axis.
            pl.Rotation = FreeCAD.Rotation(
                pl.Rotation.multVec(FreeCAD.Vector(0, 0, 1)), 90
            ).multiply(pl.Rotation)
    return pl
class CommandStructuresFromSelection:
    """The Arch Structures from selection command definition.

    Creates one Structure per selected edge: the first selected object is
    the common base (profile), and every edge of the remaining selected
    objects is used as an extrusion path.
    """

    def __init__(self):
        pass

    def GetResources(self):
        # Icon, menu text and tooltip shown by the FreeCAD GUI.
        return {
            "Pixmap": "Arch_MultipleStructures",
            "MenuText": QT_TRANSLATE_NOOP(
                "Arch_StructuresFromSelection", "Multiple Structures"
            ),
            "ToolTip": QT_TRANSLATE_NOOP(
                "Arch_StructuresFromSelection",
                "Create multiple Arch Structures from a selected base, using each selected edge as an extrusion path",
            ),
        }

    def IsActive(self):
        # Idiom fix: was `not FreeCAD.ActiveDocument is None`.
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        selex = FreeCADGui.Selection.getSelectionEx()
        if len(selex) >= 2:
            FreeCAD.ActiveDocument.openTransaction(
                translate("Arch", "Create Structures From Selection")
            )
            FreeCADGui.addModule("Arch")
            FreeCADGui.addModule("Draft")
            # The first selected object is the base for the Structure objects.
            base = selex[0].Object
            # All edges of the remaining objects become extrusion paths.
            for selexi in selex[1:]:
                if len(selexi.SubElementNames) == 0:
                    # Whole object selected: use every one of its edges.
                    subelement_names = [
                        "Edge" + str(i)
                        for i in range(1, len(selexi.Object.Shape.Edges) + 1)
                    ]
                else:
                    # Keep only the explicitly selected edge sub-elements.
                    subelement_names = [
                        sub for sub in selexi.SubElementNames if sub.startswith("Edge")
                    ]
                for sub in subelement_names:
                    FreeCADGui.doCommand(
                        "structure = Arch.makeStructure(FreeCAD.ActiveDocument."
                        + base.Name
                        + ")"
                    )
                    FreeCADGui.doCommand(
                        "structure.Tool = (FreeCAD.ActiveDocument."
                        + selexi.Object.Name
                        + ", '"
                        + sub
                        + "')"
                    )
                    FreeCADGui.doCommand("structure.BasePerpendicularToTool = True")
                    FreeCADGui.doCommand("Draft.autogroup(structure)")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()
        else:
            FreeCAD.Console.PrintError(
                translate(
                    "Arch",
                    "Please select the base object first and then the edges to use as extrusion paths",
                )
                + "\n"
            )
class CommandStructuralSystem:
    """The Arch Structural System command definition.

    Builds a structural system from the selected Structure objects (if any)
    and the selected Axis objects; at least one axis is required.
    """

    def __init__(self):
        pass

    def GetResources(self):
        # Icon, menu text and tooltip shown by the FreeCAD GUI.
        return {
            "Pixmap": "Arch_StructuralSystem",
            "MenuText": QT_TRANSLATE_NOOP("Arch_StructuralSystem", "Structural System"),
            "ToolTip": QT_TRANSLATE_NOOP(
                "Arch_StructuralSystem",
                "Create a structural system from a selected structure and axis",
            ),
        }

    def IsActive(self):
        # Idiom fix: was `not FreeCAD.ActiveDocument is None`.
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        sel = FreeCADGui.Selection.getSelection()
        if sel:
            st = Draft.getObjectsOfType(sel, "Structure")
            ax = Draft.getObjectsOfType(sel, "Axis")
            if ax:
                FreeCAD.ActiveDocument.openTransaction(
                    translate("Arch", "Create Structural System")
                )
                FreeCADGui.addModule("Arch")
                if st:
                    FreeCADGui.doCommand(
                        "obj = Arch.makeStructuralSystem("
                        + ArchCommands.getStringList(st)
                        + ", "
                        + ArchCommands.getStringList(ax)
                        + ")"
                    )
                else:
                    # No structures selected: create an axis-only system.
                    FreeCADGui.doCommand(
                        "obj = Arch.makeStructuralSystem(axes = "
                        + ArchCommands.getStringList(ax)
                        + ")"
                    )
                FreeCADGui.addModule("Draft")
                FreeCADGui.doCommand("Draft.autogroup(obj)")
                FreeCAD.ActiveDocument.commitTransaction()
                FreeCAD.ActiveDocument.recompute()
            else:
                FreeCAD.Console.PrintError(
                    translate("Arch", "Please select at least an axis object") + "\n"
                )
class _CommandStructure:
"the Arch Structure command definition"
def __init__(self):
self.beammode = False
def GetResources(self):
return {
"Pixmap": "Arch_Structure",
"MenuText": QT_TRANSLATE_NOOP("Arch_Structure", "Structure"),
"Accel": "S, T",
"ToolTip": QT_TRANSLATE_NOOP(
"Arch_Structure",
"Creates a structure from scratch or from a selected object (sketch, wire, face or solid)",
),
}
def IsActive(self):
return not FreeCAD.ActiveDocument is None
def Activated(self):
p = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
self.Width = p.GetFloat("StructureWidth", 100)
if self.beammode:
self.Height = p.GetFloat("StructureLength", 100)
self.Length = p.GetFloat("StructureHeight", 1000)
else:
self.Length = p.GetFloat("StructureLength", 100)
self.Height = p.GetFloat("StructureHeight", 1000)
self.Profile = None
self.continueCmd = False
self.bpoint = None
self.bmode = False
self.precastvalues = None
sel = FreeCADGui.Selection.getSelection()
if sel:
st = Draft.getObjectsOfType(sel, "Structure")
ax = Draft.getObjectsOfType(sel, "Axis")
if ax:
FreeCADGui.runCommand("Arch_StructuralSystem")
return
elif not (ax) and not (st):
FreeCAD.ActiveDocument.openTransaction(
translate("Arch", "Create Structure")
)
FreeCADGui.addModule("Arch")
for obj in sel:
FreeCADGui.doCommand(
"obj = Arch.makeStructure(FreeCAD.ActiveDocument."
+ obj.Name
+ ")"
)
FreeCADGui.addModule("Draft")
FreeCADGui.doCommand("Draft.autogroup(obj)")
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
return
# interactive mode
if hasattr(FreeCAD, "DraftWorkingPlane"):
FreeCAD.DraftWorkingPlane.setup()
self.points = []
self.tracker = DraftTrackers.boxTracker()
self.tracker.width(self.Width)
self.tracker.height(self.Height)
self.tracker.length(self.Length)
self.tracker.setRotation(FreeCAD.DraftWorkingPlane.getRotation().Rotation)
self.tracker.on()
self.precast = ArchPrecast._PrecastTaskPanel()
self.dents = ArchPrecast._DentsTaskPanel()
self.precast.Dents = self.dents
if self.beammode:
title = translate("Arch", "First point of the beam") + ":"
else:
title = translate("Arch", "Base point of column") + ":"
FreeCADGui.Snapper.getPoint(
callback=self.getPoint,
movecallback=self.update,
extradlg=[self.taskbox(), self.precast.form, self.dents.form],
title=title,
)
def getPoint(self, point=None, obj=None):
"this function is called by the snapper when it has a 3D point"
self.bmode = self.modeb.isChecked()
if point is None:
self.tracker.finalize()
return
if self.bmode and (self.bpoint is None):
self.bpoint = point
FreeCADGui.Snapper.getPoint(
last=point,
callback=self.getPoint,
movecallback=self.update,
extradlg=[self.taskbox(), self.precast.form, self.dents.form],
title=translate("Arch", "Next point") + ":",
mode="line",
)
return
self.tracker.finalize()
horiz = True # determines the type of rotation to apply to the final object
FreeCAD.ActiveDocument.openTransaction(translate("Arch", "Create Structure"))
FreeCADGui.addModule("Arch")
if self.Profile is not None:
try: # try to update latest precast values - fails if dialog has been destroyed already
self.precastvalues = self.precast.getValues()
except Exception:
pass
if ("Precast" in self.Profile) and self.precastvalues:
# precast concrete
self.precastvalues["PrecastType"] = self.Profile.split("_")[1]
self.precastvalues["Length"] = self.Length
self.precastvalues["Width"] = self.Width
self.precastvalues["Height"] = self.Height
argstring = ""
# fix for precast placement, since their (0,0) point is the lower left corner
if self.bmode:
delta = FreeCAD.Vector(0, 0 - self.Width / 2, 0)
else:
delta = FreeCAD.Vector(-self.Length / 2, -self.Width / 2, 0)
if hasattr(FreeCAD, "DraftWorkingPlane"):
delta = FreeCAD.DraftWorkingPlane.getRotation().multVec(delta)
point = point.add(delta)
if self.bpoint:
self.bpoint = self.bpoint.add(delta)
# build the string definition
for pair in self.precastvalues.items():
argstring += pair[0].lower() + "="
if isinstance(pair[1], str):
argstring += '"' + pair[1] + '",'
else:
argstring += str(pair[1]) + ","
FreeCADGui.addModule("ArchPrecast")
FreeCADGui.doCommand("s = ArchPrecast.makePrecast(" + argstring + ")")
else:
# metal profile
FreeCADGui.doCommand("p = Arch.makeProfile(" + str(self.Profile) + ")")
if (
abs(self.Length - self.Profile[4]) >= 0.1
) or self.bmode: # forgive rounding errors
# horizontal
FreeCADGui.doCommand(
"s = Arch.makeStructure(p,length=" + str(self.Length) + ")"
)
horiz = False
else:
# vertical
FreeCADGui.doCommand(
"s = Arch.makeStructure(p,height=" + str(self.Height) + ")"
)
# if not self.bmode:
# FreeCADGui.doCommand('s.Placement.Rotation = FreeCAD.Rotation(-0.5,0.5,-0.5,0.5)')
FreeCADGui.doCommand('s.Profile = "' + self.Profile[2] + '"')
else:
FreeCADGui.doCommand(
"s = Arch.makeStructure(length="
+ str(self.Length)
+ ",width="
+ str(self.Width)
+ ",height="
+ str(self.Height)
+ ")"
)
# calculate rotation
if self.bmode and self.bpoint:
FreeCADGui.doCommand(
"s.Placement = Arch.placeAlongEdge("
+ DraftVecUtils.toString(self.bpoint)
+ ","
+ DraftVecUtils.toString(point)
+ ","
+ str(horiz)
+ ")"
)
else:
FreeCADGui.doCommand("s.Placement.Base = " + DraftVecUtils.toString(point))
FreeCADGui.doCommand(
"s.Placement.Rotation = s.Placement.Rotation.multiply(FreeCAD.DraftWorkingPlane.getRotation().Rotation)"
)
FreeCADGui.addModule("Draft")
FreeCADGui.doCommand("Draft.autogroup(s)")
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if self.continueCmd:
self.Activated()
def _createItemlist(self, baselist):
"create nice labels for presets in the task panel"
ilist = []
for p in baselist:
f = FreeCAD.Units.Quantity(p[4], FreeCAD.Units.Length).getUserPreferred()
d = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Units").GetInt(
"Decimals", 2
)
s1 = str(round(p[4] / f[1], d))
s2 = str(round(p[5] / f[1], d))
s3 = str(f[2])
ilist.append(p[2] + " (" + s1 + "x" + s2 + s3 + ")")
return ilist
def taskbox(self):
"sets up a taskbox widget"
w = QtGui.QWidget()
ui = FreeCADGui.UiLoader()
w.setWindowTitle(translate("Arch", "Structure options"))
grid = QtGui.QGridLayout(w)
# mode box
labelmode = QtGui.QLabel(translate("Arch", "Drawing mode") + ":")
self.modeb = QtGui.QRadioButton(translate("Arch", "Beam"))
self.modec = QtGui.QRadioButton(translate("Arch", "Column"))
if self.bpoint or self.beammode:
self.modeb.setChecked(True)
else:
self.modec.setChecked(True)
grid.addWidget(labelmode, 0, 0, 1, 2)
grid.addWidget(self.modeb, 1, 0, 1, 1)
grid.addWidget(self.modec, 1, 1, 1, 1)
# categories box
labelc = QtGui.QLabel(translate("Arch", "Category"))
self.valuec = QtGui.QComboBox()
self.valuec.addItems([" ", "Precast concrete"] + Categories)
grid.addWidget(labelc, 2, 0, 1, 1)
grid.addWidget(self.valuec, 2, 1, 1, 1)
# presets box
labelp = QtGui.QLabel(translate("Arch", "Preset"))
self.vPresets = QtGui.QComboBox()
self.pSelect = [None]
fpresets = [" "]
self.vPresets.addItems(fpresets)
grid.addWidget(labelp, 3, 0, 1, 1)
grid.addWidget(self.vPresets, 3, 1, 1, 1)
# length
label1 = QtGui.QLabel(translate("Arch", "Length"))
self.vLength = ui.createWidget("Gui::InputField")
if self.modeb.isChecked():
self.vLength.setText(
FreeCAD.Units.Quantity(self.Height, FreeCAD.Units.Length).UserString
)
else:
self.vLength.setText(
FreeCAD.Units.Quantity(self.Length, FreeCAD.Units.Length).UserString
)
grid.addWidget(label1, 4, 0, 1, 1)
grid.addWidget(self.vLength, 4, 1, 1, 1)
# width
label2 = QtGui.QLabel(translate("Arch", "Width"))
self.vWidth = ui.createWidget("Gui::InputField")
self.vWidth.setText(
FreeCAD.Units.Quantity(self.Width, FreeCAD.Units.Length).UserString
)
grid.addWidget(label2, 5, 0, 1, 1)
grid.addWidget(self.vWidth, 5, 1, 1, 1)
# height
label3 = QtGui.QLabel(translate("Arch", "Height"))
self.vHeight = ui.createWidget("Gui::InputField")
if self.modeb.isChecked():
self.vHeight.setText(
FreeCAD.Units.Quantity(self.Length, FreeCAD.Units.Length).UserString
)
else:
self.vHeight.setText(
FreeCAD.Units.Quantity(self.Height, FreeCAD.Units.Length).UserString
)
grid.addWidget(label3, 6, 0, 1, 1)
grid.addWidget(self.vHeight, 6, 1, 1, 1)
# horizontal button
value5 = QtGui.QPushButton(translate("Arch", "Switch L/H"))
grid.addWidget(value5, 7, 0, 1, 1)
value6 = QtGui.QPushButton(translate("Arch", "Switch L/W"))
grid.addWidget(value6, 7, 1, 1, 1)
# continue button
label4 = QtGui.QLabel(translate("Arch", "Con&tinue"))
value4 = QtGui.QCheckBox()
value4.setObjectName("ContinueCmd")
value4.setLayoutDirection(QtCore.Qt.RightToLeft)
label4.setBuddy(value4)
if hasattr(FreeCADGui, "draftToolBar"):
value4.setChecked(FreeCADGui.draftToolBar.continueMode)
self.continueCmd = FreeCADGui.draftToolBar.continueMode
grid.addWidget(label4, 8, 0, 1, 1)
grid.addWidget(value4, 8, 1, 1, 1)
# connect slots
QtCore.QObject.connect(
self.valuec, QtCore.SIGNAL("currentIndexChanged(int)"), self.setCategory
)
QtCore.QObject.connect(
self.vPresets, QtCore.SIGNAL("currentIndexChanged(int)"), self.setPreset
)
QtCore.QObject.connect(
self.vLength, QtCore.SIGNAL("valueChanged(double)"), self.setLength
)
QtCore.QObject.connect(
self.vWidth, QtCore.SIGNAL("valueChanged(double)"), self.setWidth
)
QtCore.QObject.connect(
self.vHeight, QtCore.SIGNAL("valueChanged(double)"), self.setHeight
)
QtCore.QObject.connect(
value4, QtCore.SIGNAL("stateChanged(int)"), self.setContinue
)
QtCore.QObject.connect(value5, QtCore.SIGNAL("pressed()"), self.rotateLH)
QtCore.QObject.connect(value6, QtCore.SIGNAL("pressed()"), self.rotateLW)
QtCore.QObject.connect(
self.modeb, QtCore.SIGNAL("toggled(bool)"), self.switchLH
)
# restore preset
stored = FreeCAD.ParamGet(
"User parameter:BaseApp/Preferences/Mod/Arch"
).GetString("StructurePreset", "")
if stored:
if stored.lower().startswith("precast_"):
self.valuec.setCurrentIndex(1)
tp = stored.split("_")[1]
if tp and (tp in self.precast.PrecastTypes):
self.vPresets.setCurrentIndex(self.precast.PrecastTypes.index(tp))
elif ";" in stored:
stored = stored.split(";")
if len(stored) >= 3:
if stored[1] in Categories:
self.valuec.setCurrentIndex(2 + Categories.index(stored[1]))
ps = [p[2] for p in self.pSelect]
if stored[2] in ps:
self.vPresets.setCurrentIndex(ps.index(stored[2]))
return w
def update(self, point, info):
    """Snapper callback: called on every mouse move to refresh the preview.

    Positions the ghost tracker at *point* (vertical/column mode) or
    stretches it between the stored base point and *point* (two-point/beam
    mode), mirroring the measured length into the task panel field.
    *info* is the snap information supplied by the Snapper (unused here).
    """
    if FreeCADGui.Control.activeDialog():
        try:  # try to update latest precast values - fails if dialog has been destroyed already
            self.precastvalues = self.precast.getValues()
        except Exception:
            pass
        # Center the ghost on the cursor: offset by half the dominant
        # dimension (height for columns, length for beams/slabs).
        if self.Height >= self.Length:
            delta = Vector(0, 0, self.Height / 2)
        else:
            delta = Vector(self.Length / 2, 0, 0)
        if hasattr(FreeCAD, "DraftWorkingPlane"):
            # express the offset in working-plane coordinates
            delta = FreeCAD.DraftWorkingPlane.getRotation().multVec(delta)
        if self.modec.isChecked():
            # vertical (single-click) placement mode
            self.tracker.pos(point.add(delta))
            self.tracker.on()
        else:
            # two-point mode: a base point must have been picked first
            if self.bpoint:
                delta = Vector(0, 0, -self.Height / 2)
                if hasattr(FreeCAD, "DraftWorkingPlane"):
                    delta = FreeCAD.DraftWorkingPlane.getRotation().multVec(delta)
                self.tracker.update([self.bpoint.add(delta), point.add(delta)])
                self.tracker.on()
                # reflect the current mouse distance in the Length field
                l = (point.sub(self.bpoint)).Length
                self.vLength.setText(
                    FreeCAD.Units.Quantity(l, FreeCAD.Units.Length).UserString
                )
            else:
                self.tracker.off()
def setWidth(self, d):
    """Store the new width *d*, resize the ghost tracker and persist it."""
    self.Width = d
    self.tracker.width(d)
    prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
    prefs.SetFloat("StructureWidth", d)
def setHeight(self, d):
    """Store the new height *d* coming from the task panel field.

    When the L/H switch (modeb) is active, the on-screen "height" plays
    the role of the length, so the value is persisted under the swapped
    preference key on purpose.
    """
    self.Height = d
    self.tracker.height(d)
    if self.modeb.isChecked():
        # L/H swapped: persist as length
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").SetFloat(
            "StructureLength", d
        )
    else:
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").SetFloat(
            "StructureHeight", d
        )
def setLength(self, d):
    """Store the new length *d* coming from the task panel field.

    Mirror of setHeight(): when the L/H switch (modeb) is active the
    on-screen "length" plays the role of the height, so the value is
    persisted under the swapped preference key on purpose.
    """
    self.Length = d
    self.tracker.length(d)
    if self.modeb.isChecked():
        # L/H swapped: persist as height
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").SetFloat(
            "StructureHeight", d
        )
    else:
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").SetFloat(
            "StructureLength", d
        )
def setContinue(self, i):
    """Toggle continue mode from the Qt checkbox state *i*."""
    checked = bool(i)
    self.continueCmd = checked
    if hasattr(FreeCADGui, "draftToolBar"):
        FreeCADGui.draftToolBar.continueMode = checked
def setCategory(self, i):
    """Category combo handler: repopulate the preset list for index *i*.

    Index 0 means "no preset", index 1 is the precast category (shows the
    precast sub-panel), and indices >= 2 map onto the global Categories
    list (offset by 2).
    """
    self.vPresets.clear()
    if i > 1:
        self.precast.form.hide()
        self.pSelect = [p for p in Presets if p[1] == Categories[i - 2]]
        fpresets = self._createItemlist(self.pSelect)
        self.vPresets.addItems(fpresets)
        self.setPreset(0)
    elif i == 1:
        self.precast.form.show()
        self.pSelect = self.precast.PrecastTypes
        fpresets = self.precast.PrecastTypes
        self.vPresets.addItems(fpresets)
        self.setPreset(0)
    else:
        self.precast.form.hide()
        self.pSelect = [None]
        fpresets = [" "]
        self.vPresets.addItems(fpresets)
        # no preset chosen: clear the stored preference
        FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").SetString(
            "StructurePreset", ""
        )
def setPreset(self, i):
    """Preset combo handler: apply the preset at index *i* of self.pSelect.

    Precast presets delegate to the precast sub-panel; standard profile
    presets fill the length/width fields from the Presets table. The
    chosen preset is stored in the user preferences for the next run.
    """
    self.Profile = None
    elt = self.pSelect[i]
    if elt:
        if elt in self.precast.PrecastTypes:
            self.precast.setPreset(elt)
            self.Profile = "Precast_" + elt
            # the dents sub-panel only applies to pillars and beams
            if elt in ["Pillar", "Beam"]:
                self.dents.form.show()
            else:
                self.dents.form.hide()
            FreeCAD.ParamGet(
                "User parameter:BaseApp/Preferences/Mod/Arch"
            ).SetString("StructurePreset", self.Profile)
        else:
            p = elt[0] - 1  # Presets indexes are 1-based
            self.vLength.setText(
                FreeCAD.Units.Quantity(
                    float(Presets[p][4]), FreeCAD.Units.Length
                ).UserString
            )
            self.vWidth.setText(
                FreeCAD.Units.Quantity(
                    float(Presets[p][5]), FreeCAD.Units.Length
                ).UserString
            )
            self.Profile = Presets[p]
            # store the whole preset row as a ";"-joined string
            FreeCAD.ParamGet(
                "User parameter:BaseApp/Preferences/Mod/Arch"
            ).SetString("StructurePreset", ";".join([str(i) for i in self.Profile]))
def switchLH(self, bmode):
    """Handle the beam/column mode toggle (*bmode* True = beam mode).

    Ensures the dominant dimension matches the chosen orientation by
    swapping Length and Height when needed; leaving beam mode also resets
    the ghost tracker's rotation.
    """
    if bmode:
        self.bmode = True
        if self.Height > self.Length:
            self.rotateLH()
    else:
        self.bmode = False
        if self.Length > self.Height:
            self.rotateLH()
        # back to column mode: clear any rotation applied to the ghost
        self.tracker.setRotation(FreeCAD.Rotation())
def rotateLH(self):
    """Swap the Length and Height values by rewriting both input fields."""
    old_height, old_length = self.Height, self.Length
    self.vLength.setText(
        FreeCAD.Units.Quantity(old_height, FreeCAD.Units.Length).UserString
    )
    self.vHeight.setText(
        FreeCAD.Units.Quantity(old_length, FreeCAD.Units.Length).UserString
    )
def rotateLW(self):
    """Swap the Length and Width values by rewriting both input fields."""
    old_width, old_length = self.Width, self.Length
    self.vLength.setText(
        FreeCAD.Units.Quantity(old_width, FreeCAD.Units.Length).UserString
    )
    self.vWidth.setText(
        FreeCAD.Units.Quantity(old_length, FreeCAD.Units.Length).UserString
    )
class _Structure(ArchComponent.Component):
    """The Structure object.

    An Arch component whose shape is produced by extruding a base profile
    along its normal, or by sweeping it along an optional "Tool" extrusion
    path. Also maintains a list of structural nodes usable by FEM tools.
    """

    def __init__(self, obj):
        ArchComponent.Component.__init__(self, obj)
        self.setProperties(obj)
        obj.IfcType = "Beam"

    def setProperties(self, obj):
        """Add the Structure-specific properties to *obj* if missing.

        Runs both at creation time and on document restore, hence every
        addition is guarded by a PropertiesList membership check.
        """
        pl = obj.PropertiesList
        if not "Tool" in pl:
            obj.addProperty(
                "App::PropertyLinkSubList",
                "Tool",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property", "An optional extrusion path for this element"
                ),
            )
        if not "ComputedLength" in pl:
            # trailing 1 = read-only attribute: the value is set in execute()
            obj.addProperty(
                "App::PropertyDistance",
                "ComputedLength",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property", "The computed length of the extrusion path"
                ),
                1,
            )
        if not "ToolOffsetFirst" in pl:
            obj.addProperty(
                "App::PropertyDistance",
                "ToolOffsetFirst",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Start offset distance along the extrusion path (positive: extend, negative: trim)",
                ),
            )
        if not "ToolOffsetLast" in pl:
            obj.addProperty(
                "App::PropertyDistance",
                "ToolOffsetLast",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "End offset distance along the extrusion path (positive: extend, negative: trim)",
                ),
            )
        if not "BasePerpendicularToTool" in pl:
            obj.addProperty(
                "App::PropertyBool",
                "BasePerpendicularToTool",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Automatically align the Base of the Structure perpendicular to the Tool axis",
                ),
            )
        if not "BaseOffsetX" in pl:
            obj.addProperty(
                "App::PropertyDistance",
                "BaseOffsetX",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "X offset between the Base origin and the Tool axis (only used if BasePerpendicularToTool is True)",
                ),
            )
        if not "BaseOffsetY" in pl:
            obj.addProperty(
                "App::PropertyDistance",
                "BaseOffsetY",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Y offset between the Base origin and the Tool axis (only used if BasePerpendicularToTool is True)",
                ),
            )
        if not "BaseMirror" in pl:
            obj.addProperty(
                "App::PropertyBool",
                "BaseMirror",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Mirror the Base along its Y axis (only used if BasePerpendicularToTool is True)",
                ),
            )
        if not "BaseRotation" in pl:
            obj.addProperty(
                "App::PropertyAngle",
                "BaseRotation",
                "ExtrusionPath",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Base rotation around the Tool axis (only used if BasePerpendicularToTool is True)",
                ),
            )
        if not "Length" in pl:
            obj.addProperty(
                "App::PropertyLength",
                "Length",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The length of this element, if not based on a profile",
                ),
            )
        if not "Width" in pl:
            obj.addProperty(
                "App::PropertyLength",
                "Width",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The width of this element, if not based on a profile",
                ),
            )
        if not "Height" in pl:
            obj.addProperty(
                "App::PropertyLength",
                "Height",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The height or extrusion depth of this element. Keep 0 for automatic",
                ),
            )
        if not "Normal" in pl:
            obj.addProperty(
                "App::PropertyVector",
                "Normal",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The normal extrusion direction of this object (keep (0,0,0) for automatic normal)",
                ),
            )
        if not "Nodes" in pl:
            obj.addProperty(
                "App::PropertyVectorList",
                "Nodes",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property", "The structural nodes of this element"
                ),
            )
        if not "Profile" in pl:
            obj.addProperty(
                "App::PropertyString",
                "Profile",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "A description of the standard profile this element is based upon",
                ),
            )
        if not "NodesOffset" in pl:
            obj.addProperty(
                "App::PropertyDistance",
                "NodesOffset",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Offset distance between the centerline and the nodes line",
                ),
            )
        if not "FaceMaker" in pl:
            obj.addProperty(
                "App::PropertyEnumeration",
                "FaceMaker",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "The facemaker type to use to build the profile of this object",
                ),
            )
            obj.FaceMaker = ["None", "Simple", "Cheese", "Bullseye"]
        if not "ArchSketchEdges" in pl:  # PropertyStringList
            obj.addProperty(
                "App::PropertyStringList",
                "ArchSketchEdges",
                "Structure",
                QT_TRANSLATE_NOOP(
                    "App::Property",
                    "Selected edges (or group of edges) of the base ArchSketch, to use in creating the shape of this Arch Structure (instead of using all the Base shape's edges by default). Input are index numbers of edges or groups.",
                ),
            )
        else:
            # test if the property was added but as IntegerList, then update;
            # migration: older files stored plain edge indices as integers
            type = obj.getTypeIdOfProperty("ArchSketchEdges")
            if type == "App::PropertyIntegerList":
                oldIntValue = obj.ArchSketchEdges
                newStrValue = [str(x) for x in oldIntValue]
                obj.removeProperty("ArchSketchEdges")
                obj.addProperty(
                    "App::PropertyStringList",
                    "ArchSketchEdges",
                    "Structure",
                    QT_TRANSLATE_NOOP(
                        "App::Property",
                        "Selected edges (or group of edges) of the base ArchSketch, to use in creating the shape of this Arch Structure (instead of using all the Base shape's edges by default). Input are index numbers of edges or groups.",
                    ),
                )
                obj.ArchSketchEdges = newStrValue
        self.Type = "Structure"

    def onDocumentRestored(self, obj):
        """Re-add any property introduced since the file was saved."""
        ArchComponent.Component.onDocumentRestored(self, obj)
        self.setProperties(obj)

    def execute(self, obj):
        "creates the structure shape"
        import DraftGeomUtils
        import Part

        if self.clone(obj):
            return
        base = None
        pl = obj.Placement
        extdata = self.getExtrusionData(obj)
        if extdata:
            # normalize to parallel lists of (profile, extrusion, placement)
            sh = extdata[0]
            if not isinstance(sh, list):
                sh = [sh]
            ev = extdata[1]
            if not isinstance(ev, list):
                ev = [ev]
            pla = extdata[2]
            if not isinstance(pla, list):
                pla = [pla]
            base = []
            extrusion_length = 0.0
            for i in range(len(sh)):
                shi = sh[i]
                # shorter lists: reuse the last extrusion/placement entry
                if i < len(ev):
                    evi = ev[i]
                else:
                    evi = ev[-1]
                # work on copies so the extrusion data is not mutated
                if isinstance(evi, FreeCAD.Vector):
                    evi = FreeCAD.Vector(evi)
                else:
                    evi = evi.copy()
                if i < len(pla):
                    pli = pla[i]
                else:
                    pli = pla[-1].copy()
                shi.Placement = pli.multiply(shi.Placement)
                if isinstance(evi, FreeCAD.Vector):
                    # straight extrusion along a vector
                    extv = pla[0].Rotation.multVec(evi)
                    shi = shi.extrude(extv)
                else:
                    # sweep the profile along a wire path
                    try:
                        shi = evi.makePipe(shi)
                    except Part.OCCError:
                        FreeCAD.Console.PrintError(
                            translate(
                                "Arch",
                                "Error: The base shape couldn't be extruded along this tool object",
                            )
                            + "\n"
                        )
                        return
                base.append(shi)
                extrusion_length += evi.Length
            if len(base) == 1:
                base = base[0]
            else:
                base = Part.makeCompound(base)
            obj.ComputedLength = FreeCAD.Units.Quantity(
                extrusion_length, FreeCAD.Units.Length
            )
        if obj.Base:
            if hasattr(obj.Base, "Shape"):
                if obj.Base.Shape.isNull():
                    return
                if not obj.Base.Shape.isValid():
                    if not obj.Base.Shape.Solids:
                        # let pass invalid objects if they have solids...
                        return
                elif obj.Base.Shape.Solids:
                    # a solid Base replaces any extrusion result
                    base = obj.Base.Shape.copy()
            elif obj.Base.isDerivedFrom("Mesh::Feature"):
                if obj.Base.Mesh.isSolid():
                    if obj.Base.Mesh.countComponents() == 1:
                        sh = ArchCommands.getShapeFromMesh(obj.Base.Mesh)
                        if (
                            sh.isClosed()
                            and sh.isValid()
                            and sh.Solids
                            and (not sh.isNull())
                        ):
                            base = sh
                        else:
                            FreeCAD.Console.PrintWarning(
                                translate("Arch", "This mesh is an invalid solid")
                                + "\n"
                            )
                            obj.Base.ViewObject.show()
        if (not base) and (not obj.Additions):
            # FreeCAD.Console.PrintError(translate("Arch","Error: Invalid base object")+"\n")
            return
        base = self.processSubShapes(obj, base, pl)
        self.applyShape(obj, base, pl)

    def getExtrusionData(self, obj):
        """returns (shape,extrusion vector or path,placement) or None

        The shape is the flat profile face, the second element is either a
        FreeCAD.Vector (straight extrusion) or a Part wire (sweep path),
        and the placement positions the profile. Falls back to a simple
        L x W rectangle when no Base is given.
        """
        if hasattr(obj, "IfcType"):
            IfcType = obj.IfcType
        else:
            IfcType = None
        import DraftGeomUtils
        import Part

        data = ArchComponent.Component.getExtrusionData(self, obj)
        if data:
            if not isinstance(data[0], list):
                # multifuses not considered here
                return data
        length = obj.Length.Value
        width = obj.Width.Value
        height = obj.Height.Value
        if not height:
            height = self.getParentHeight(obj)
        baseface = None
        extrusion = None
        normal = None
        if obj.Base:
            if hasattr(obj.Base, "Shape"):
                if obj.Base.Shape:
                    if obj.Base.Shape.Solids:
                        # solids are handled directly in execute()
                        return None
                    elif obj.Base.Shape.Faces:
                        if not DraftGeomUtils.isCoplanar(
                            obj.Base.Shape.Faces, tol=0.01
                        ):
                            return None
                        else:
                            baseface = obj.Base.Shape.copy()
                    elif obj.Base.Shape.Wires:
                        # ArchSketch feature :
                        # Get base shape wires, and faceMaker, for Structure (slab. etc.) from Base Objects if they store and provide by getStructureBaseShapeWires()
                        # (thickness, normal/extrusion, length, width, baseface maybe for later) of structure (slab etc.)
                        structureBaseShapeWires = None
                        baseShapeWires = (
                            None  # baseSlabWires / baseSlabOpeningWires = None
                        )
                        faceMaker = None
                        if hasattr(obj.Base, "Proxy"):
                            if hasattr(obj.Base.Proxy, "getStructureBaseShapeWires"):
                                structureBaseShapeWires = (
                                    obj.Base.Proxy.getStructureBaseShapeWires(
                                        obj.Base, archsketchEdges=obj.ArchSketchEdges
                                    )
                                )
                                # provide selected edges, or groups, in obj.ArchSketchEdges for processing in getStructureBaseShapeWires() (getSortedClusters) as override
                                # returned a {dict} ( or a [list] )
                        # get slab wires; use original wires if structureBaseShapeWires() provided none
                        if structureBaseShapeWires:  # would be false (none) if both base ArchSketch and obj do not have the edges stored / inputted by user
                            # if structureBaseShapeWires is {dict}
                            baseShapeWires = structureBaseShapeWires.get("slabWires")
                            faceMaker = structureBaseShapeWires.get("faceMaker")
                        if not baseShapeWires:
                            baseShapeWires = obj.Base.Shape.Wires
                        if faceMaker or (obj.FaceMaker != "None"):
                            if not faceMaker:
                                faceMaker = obj.FaceMaker
                            try:
                                baseface = Part.makeFace(
                                    baseShapeWires, "Part::FaceMaker" + str(faceMaker)
                                )
                            except Exception:
                                FreeCAD.Console.PrintError(
                                    translate("Arch", "Facemaker returned an error")
                                    + "\n"
                                )
                                # Not returning even Part.makeFace fails, fall back to 'non-Part.makeFace' method
                        if not baseface:
                            for w in baseShapeWires:
                                if not w.isClosed():
                                    # close an open wire with a straight segment
                                    p0 = w.OrderedVertexes[0].Point
                                    p1 = w.OrderedVertexes[-1].Point
                                    if p0 != p1:
                                        e = Part.LineSegment(p0, p1).toShape()
                                        w.add(e)
                                w.fix(0.1, 0, 1)  # fixes self-intersecting wires
                                f = Part.Face(w)
                                # check if it is 1st face (f) created from w in baseShapeWires; if not, fuse()
                                if baseface:
                                    baseface = baseface.fuse(f)
                                else:
                                    # TODO use Part.Shape() rather than shape.copy() ... ?
                                    baseface = f.copy()
        elif length and width and height:
            # no Base: build a rectangular profile from the L/W/H properties
            if (length > height) and (IfcType != "Slab"):
                # beam-like: profile is the H x W cross-section (YZ plane)
                h2 = height / 2 or 0.5
                w2 = width / 2 or 0.5
                v1 = Vector(0, -w2, -h2)
                v4 = Vector(0, -w2, h2)
                v3 = Vector(0, w2, h2)
                v2 = Vector(0, w2, -h2)
            else:
                # column/slab-like: profile is the L x W footprint (XY plane)
                l2 = length / 2 or 0.5
                w2 = width / 2 or 0.5
                v1 = Vector(-l2, -w2, 0)
                v2 = Vector(l2, -w2, 0)
                v3 = Vector(l2, w2, 0)
                v4 = Vector(-l2, w2, 0)
            import Part

            baseface = Part.Face(Part.makePolygon([v1, v2, v3, v4, v1]))
        if baseface:
            if hasattr(obj, "Tool") and obj.Tool:
                # a Tool gives a sweep path instead of a straight extrusion
                tool = obj.Tool
                edges = DraftGeomUtils.get_referenced_edges(tool)
                if len(edges) > 0:
                    extrusion = Part.Wire(Part.__sortEdges__(edges))
                    if hasattr(obj, "ToolOffsetFirst"):
                        offset_start = float(obj.ToolOffsetFirst.getValueAs("mm"))
                    else:
                        offset_start = 0.0
                    if hasattr(obj, "ToolOffsetLast"):
                        offset_end = float(obj.ToolOffsetLast.getValueAs("mm"))
                    else:
                        offset_end = 0.0
                    if offset_start != 0.0 or offset_end != 0.0:
                        extrusion = DraftGeomUtils.get_extended_wire(
                            extrusion, offset_start, offset_end
                        )
                    if (
                        hasattr(obj, "BasePerpendicularToTool")
                        and obj.BasePerpendicularToTool
                    ):
                        # orient the profile perpendicular to the path start,
                        # applying the optional rotation/offset/mirror tweaks
                        pl = FreeCAD.Placement()
                        if hasattr(obj, "BaseRotation"):
                            pl.rotate(
                                FreeCAD.Vector(0, 0, 0),
                                FreeCAD.Vector(0, 0, 1),
                                -obj.BaseRotation,
                            )
                        if hasattr(obj, "BaseOffsetX") and hasattr(obj, "BaseOffsetY"):
                            pl.translate(
                                FreeCAD.Vector(obj.BaseOffsetX, obj.BaseOffsetY, 0)
                            )
                        if hasattr(obj, "BaseMirror") and obj.BaseMirror:
                            pl.rotate(
                                FreeCAD.Vector(0, 0, 0), FreeCAD.Vector(0, 1, 0), 180
                            )
                        baseface.Placement = (
                            DraftGeomUtils.get_placement_perpendicular_to_wire(
                                extrusion
                            ).multiply(pl)
                        )
            else:
                # no Tool: extrude along the explicit Normal, or the face normal
                if obj.Normal.Length:
                    normal = Vector(obj.Normal).normalize()
                else:
                    normal = baseface.Faces[0].normalAt(
                        0, 0
                    )  ## TODO to use ArchSketch's 'normal' for consistency
            base = None
            placement = None
            inverse_placement = None
            if len(baseface.Faces) > 1:
                # multiple faces: rebase each one against a common hint normal
                base = []
                placement = []
                hint = baseface.Faces[0].normalAt(0, 0)  ## TODO anything to do ?
                for f in baseface.Faces:
                    bf, pf = self.rebase(f, hint)
                    base.append(bf)
                    placement.append(pf)
                inverse_placement = placement[0].inverse()
            else:
                base, placement = self.rebase(baseface)
                inverse_placement = placement.inverse()
            if extrusion:
                # a single straight tool edge degrades to a plain vector
                if (
                    len(extrusion.Edges) == 1
                    and DraftGeomUtils.geomType(extrusion.Edges[0]) == "Line"
                ):
                    extrusion = DraftGeomUtils.vec(extrusion.Edges[0], True)
            if isinstance(extrusion, FreeCAD.Vector):
                # express the extrusion vector in the rebased profile's frame
                extrusion = inverse_placement.Rotation.multVec(extrusion)
            elif normal:
                normal = inverse_placement.Rotation.multVec(normal)
                if not normal:
                    normal = Vector(0, 0, 1)
                if not normal.Length:
                    normal = Vector(0, 0, 1)
                extrusion = normal
                # scale the unit normal by the dominant dimension
                if (length > height) and (IfcType != "Slab"):
                    if length:
                        extrusion = normal.multiply(length)
                else:
                    if height:
                        extrusion = normal.multiply(height)
            if extrusion:
                return (base, extrusion, placement)
        return None

    def onChanged(self, obj, prop):
        """Property-change hook: recompute the structural nodes when needed."""
        if hasattr(obj, "IfcType"):
            IfcType = obj.IfcType
        else:
            IfcType = None
        self.hideSubobjects(obj, prop)
        if prop in ["Shape", "ResetNodes", "NodesOffset"]:
            # ResetNodes is not a property but it allows us to use this function to force reset the nodes
            nodes = None
            extdata = self.getExtrusionData(obj)
            if extdata and not isinstance(extdata[0], list):
                nodes = extdata[0]
                if IfcType not in ["Slab"]:
                    # linear elements: nodes come from the extrusion axis
                    if not isinstance(extdata[1], FreeCAD.Vector):
                        nodes = extdata[1]
                    elif extdata[1].Length > 0:
                        if hasattr(nodes, "CenterOfMass"):
                            import Part

                            nodes = Part.LineSegment(
                                nodes.CenterOfMass, nodes.CenterOfMass.add(extdata[1])
                            ).toShape()
                if isinstance(extdata[1], FreeCAD.Vector):
                    nodes.Placement = nodes.Placement.multiply(extdata[2])
            offset = FreeCAD.Vector()
            if hasattr(obj, "NodesOffset"):
                offset = FreeCAD.Vector(0, 0, obj.NodesOffset.Value)
            if obj.Nodes and (prop != "ResetNodes"):
                if hasattr(self, "nodes"):
                    if self.nodes:
                        if obj.Nodes != self.nodes:
                            # nodes are set manually: don't touch them
                            return
                else:
                    # nodes haven't been calculated yet, but are set (file load)
                    # we set the nodes now but don't change the property
                    if nodes:
                        self.nodes = [v.Point.add(offset) for v in nodes.Vertexes]
                    return
            # we set the nodes
            if nodes:
                self.nodes = [v.Point.add(offset) for v in nodes.Vertexes]
                obj.Nodes = self.nodes
        ArchComponent.Component.onChanged(self, obj, prop)

    def getNodeEdges(self, obj):
        "returns a list of edges from structural nodes"
        edges = []
        if obj.Nodes:
            import Part

            # consecutive node pairs, transformed into global coordinates
            for i in range(len(obj.Nodes) - 1):
                edges.append(
                    Part.LineSegment(
                        obj.Placement.multVec(obj.Nodes[i]),
                        obj.Placement.multVec(obj.Nodes[i + 1]),
                    ).toShape()
                )
            if hasattr(obj.ViewObject, "NodeType"):
                if (obj.ViewObject.NodeType == "Area") and (len(obj.Nodes) > 2):
                    # area nodes form a closed loop: add the closing edge
                    edges.append(
                        Part.LineSegment(
                            obj.Placement.multVec(obj.Nodes[-1]),
                            obj.Placement.multVec(obj.Nodes[0]),
                        ).toShape()
                    )
        return edges
class _ViewProviderStructure(ArchComponent.ViewProviderComponent):
    """A View Provider for the Structure object.

    Adds an optional coin3d annotation graph that displays the structural
    nodes as points, a polyline and (for area nodes) a transparent face.
    """

    def __init__(self, vobj):
        ArchComponent.ViewProviderComponent.__init__(self, vobj)
        # setProperties of ArchComponent will be overwritten
        # thus setProperties from ArchComponent will be explicit called to get the properties
        ArchComponent.ViewProviderComponent.setProperties(self, vobj)
        self.setProperties(vobj)
        vobj.ShapeColor = ArchCommands.getDefaultColor("Structure")

    def setProperties(self, vobj):
        """Add the node-display view properties to *vobj* if missing."""
        pl = vobj.PropertiesList
        if not "ShowNodes" in pl:
            vobj.addProperty(
                "App::PropertyBool",
                "ShowNodes",
                "Nodes",
                QT_TRANSLATE_NOOP("App::Property", "If the nodes are visible or not"),
            ).ShowNodes = False
        if not "NodeLine" in pl:
            vobj.addProperty(
                "App::PropertyFloat",
                "NodeLine",
                "Nodes",
                QT_TRANSLATE_NOOP("App::Property", "The width of the nodes line"),
            )
        if not "NodeSize" in pl:
            vobj.addProperty(
                "App::PropertyFloat",
                "NodeSize",
                "Nodes",
                QT_TRANSLATE_NOOP("App::Property", "The size of the node points"),
            )
            vobj.NodeSize = 6
        if not "NodeColor" in pl:
            vobj.addProperty(
                "App::PropertyColor",
                "NodeColor",
                "Nodes",
                QT_TRANSLATE_NOOP("App::Property", "The color of the nodes line"),
            )
            vobj.NodeColor = (1.0, 1.0, 1.0, 1.0)
        if not "NodeType" in pl:
            vobj.addProperty(
                "App::PropertyEnumeration",
                "NodeType",
                "Nodes",
                QT_TRANSLATE_NOOP("App::Property", "The type of structural node"),
            )
            vobj.NodeType = ["Linear", "Area"]

    def onDocumentRestored(self, vobj):
        # re-add any view property introduced since the file was saved
        self.setProperties(vobj)

    def getIcon(self):
        """Return the tree icon; clones get the clone variant."""
        import Arch_rc

        if hasattr(self, "Object"):
            if hasattr(self.Object, "CloneOf"):
                if self.Object.CloneOf:
                    return ":/icons/Arch_Structure_Clone.svg"
        return ":/icons/Arch_Structure_Tree.svg"

    def updateData(self, obj, prop):
        """Refresh the node display when Nodes change; sync NodeType to IfcType."""
        if prop == "Nodes":
            if obj.Nodes:
                if hasattr(self, "nodes"):
                    p = []
                    # clear the existing coin sets before refilling them
                    self.pointset.numPoints.setValue(0)
                    self.lineset.coordIndex.deleteValues(0)
                    self.faceset.coordIndex.deleteValues(0)
                    for n in obj.Nodes:
                        p.append([n.x, n.y, n.z])
                    self.coords.point.setValues(0, len(p), p)
                    self.pointset.numPoints.setValue(len(p))
                    self.lineset.coordIndex.setValues(
                        0, len(p) + 1, list(range(len(p))) + [-1]
                    )
                    if hasattr(obj.ViewObject, "NodeType"):
                        if (obj.ViewObject.NodeType == "Area") and (len(p) > 2):
                            # close the polygon and fill the face set
                            self.coords.point.set1Value(
                                len(p), p[0][0], p[0][1], p[0][2]
                            )
                            self.lineset.coordIndex.setValues(
                                0, len(p) + 2, list(range(len(p) + 1)) + [-1]
                            )
                            self.faceset.coordIndex.setValues(
                                0, len(p) + 1, list(range(len(p))) + [-1]
                            )
        elif prop in ["IfcType"]:
            if hasattr(obj.ViewObject, "NodeType"):
                if hasattr(obj, "IfcType"):
                    IfcType = obj.IfcType
                else:
                    IfcType = None
                # slabs use area nodes, everything else linear nodes
                if IfcType == "Slab":
                    obj.ViewObject.NodeType = "Area"
                else:
                    obj.ViewObject.NodeType = "Linear"
        else:
            ArchComponent.ViewProviderComponent.updateData(self, obj, prop)

    def onChanged(self, vobj, prop):
        """React to view property changes, (re)building the node scene graph."""
        if prop == "ShowNodes":
            # always tear down the previous graph, then rebuild if enabled
            if hasattr(self, "nodes"):
                vobj.Annotation.removeChild(self.nodes)
                del self.nodes
            if vobj.ShowNodes:
                from pivy import coin

                self.nodes = coin.SoAnnotation()
                self.coords = coin.SoCoordinate3()
                self.mat = coin.SoMaterial()
                self.pointstyle = coin.SoDrawStyle()
                self.pointstyle.style = coin.SoDrawStyle.POINTS
                self.pointset = coin.SoType.fromName("SoBrepPointSet").createInstance()
                self.linestyle = coin.SoDrawStyle()
                self.linestyle.style = coin.SoDrawStyle.LINES
                self.lineset = coin.SoType.fromName("SoBrepEdgeSet").createInstance()
                self.facestyle = coin.SoDrawStyle()
                self.facestyle.style = coin.SoDrawStyle.FILLED
                self.shapehints = coin.SoShapeHints()
                self.shapehints.faceType = coin.SoShapeHints.UNKNOWN_FACE_TYPE
                self.fmat = coin.SoMaterial()
                self.fmat.transparency.setValue(0.75)
                self.faceset = coin.SoIndexedFaceSet()
                self.nodes.addChild(self.coords)
                self.nodes.addChild(self.mat)
                self.nodes.addChild(self.pointstyle)
                self.nodes.addChild(self.pointset)
                self.nodes.addChild(self.linestyle)
                self.nodes.addChild(self.lineset)
                self.nodes.addChild(self.facestyle)
                self.nodes.addChild(self.shapehints)
                self.nodes.addChild(self.fmat)
                self.nodes.addChild(self.faceset)
                vobj.Annotation.addChild(self.nodes)
                # populate coordinates and apply the current style properties
                self.updateData(vobj.Object, "Nodes")
                self.onChanged(vobj, "NodeColor")
                self.onChanged(vobj, "NodeLine")
                self.onChanged(vobj, "NodeSize")
        elif prop == "NodeColor":
            if hasattr(self, "mat"):
                l = vobj.NodeColor
                self.mat.diffuseColor.setValue([l[0], l[1], l[2]])
                self.fmat.diffuseColor.setValue([l[0], l[1], l[2]])
        elif prop == "NodeLine":
            if hasattr(self, "linestyle"):
                self.linestyle.lineWidth = vobj.NodeLine
        elif prop == "NodeSize":
            if hasattr(self, "pointstyle"):
                self.pointstyle.pointSize = vobj.NodeSize
        elif prop == "NodeType":
            self.updateData(vobj.Object, "Nodes")
        else:
            ArchComponent.ViewProviderComponent.onChanged(self, vobj, prop)

    def setEdit(self, vobj, mode):
        """Open the Structure task panel (default edit mode 0 only)."""
        if mode != 0:
            return None
        taskd = StructureTaskPanel(vobj.Object)
        taskd.obj = self.Object
        taskd.update()
        FreeCADGui.Control.showDialog(taskd)
        return True
class StructureTaskPanel(ArchComponent.ComponentTaskPanel):
def __init__(self, obj):
ArchComponent.ComponentTaskPanel.__init__(self)
self.nodes_widget = QtGui.QWidget()
self.nodes_widget.setWindowTitle(
QtGui.QApplication.translate("Arch", "Node Tools", None)
)
lay = QtGui.QVBoxLayout(self.nodes_widget)
self.resetButton = QtGui.QPushButton(self.nodes_widget)
self.resetButton.setIcon(QtGui.QIcon(":/icons/edit-undo.svg"))
self.resetButton.setText(
QtGui.QApplication.translate("Arch", "Reset nodes", None)
)
lay.addWidget(self.resetButton)
QtCore.QObject.connect(
self.resetButton, QtCore.SIGNAL("clicked()"), self.resetNodes
)
self.editButton = QtGui.QPushButton(self.nodes_widget)
self.editButton.setIcon(QtGui.QIcon(":/icons/Draft_Edit.svg"))
self.editButton.setText(
QtGui.QApplication.translate("Arch", "Edit nodes", None)
)
lay.addWidget(self.editButton)
QtCore.QObject.connect(
self.editButton, QtCore.SIGNAL("clicked()"), self.editNodes
)
self.extendButton = QtGui.QPushButton(self.nodes_widget)
self.extendButton.setIcon(QtGui.QIcon(":/icons/Snap_Perpendicular.svg"))
self.extendButton.setText(
QtGui.QApplication.translate("Arch", "Extend nodes", None)
)
self.extendButton.setToolTip(
QtGui.QApplication.translate(
"Arch",
"Extends the nodes of this element to reach the nodes of another element",
None,
)
)
lay.addWidget(self.extendButton)
QtCore.QObject.connect(
self.extendButton, QtCore.SIGNAL("clicked()"), self.extendNodes
)
self.connectButton = QtGui.QPushButton(self.nodes_widget)
self.connectButton.setIcon(QtGui.QIcon(":/icons/Snap_Intersection.svg"))
self.connectButton.setText(
QtGui.QApplication.translate("Arch", "Connect nodes", None)
)
self.connectButton.setToolTip(
QtGui.QApplication.translate(
"Arch",
"Connects nodes of this element with the nodes of another element",
None,
)
)
lay.addWidget(self.connectButton)
QtCore.QObject.connect(
self.connectButton, QtCore.SIGNAL("clicked()"), self.connectNodes
)
self.toggleButton = QtGui.QPushButton(self.nodes_widget)
self.toggleButton.setIcon(QtGui.QIcon(":/icons/dagViewVisible.svg"))
self.toggleButton.setText(
QtGui.QApplication.translate("Arch", "Toggle all nodes", None)
)
self.toggleButton.setToolTip(
QtGui.QApplication.translate(
"Arch", "Toggles all structural nodes of the document on/off", None
)
)
lay.addWidget(self.toggleButton)
QtCore.QObject.connect(
self.toggleButton, QtCore.SIGNAL("clicked()"), self.toggleNodes
)
self.extrusion_widget = QtGui.QWidget()
self.extrusion_widget.setWindowTitle(
QtGui.QApplication.translate("Arch", "Extrusion Tools", None)
)
lay = QtGui.QVBoxLayout(self.extrusion_widget)
self.selectToolButton = QtGui.QPushButton(self.extrusion_widget)
self.selectToolButton.setIcon(QtGui.QIcon())
self.selectToolButton.setText(
QtGui.QApplication.translate("Arch", "Select tool...", None)
)
self.selectToolButton.setToolTip(
QtGui.QApplication.translate(
"Arch",
"Select object or edges to be used as a Tool (extrusion path)",
None,
)
)
lay.addWidget(self.selectToolButton)
QtCore.QObject.connect(
self.selectToolButton, QtCore.SIGNAL("clicked()"), self.setSelectionFromTool
)
self.form = [self.form, self.nodes_widget, self.extrusion_widget]
self.Object = obj
self.observer = None
self.nodevis = None
def editNodes(self):
FreeCADGui.Control.closeDialog()
FreeCADGui.runCommand("Draft_Edit")
def resetNodes(self):
self.Object.Proxy.onChanged(self.Object, "ResetNodes")
def extendNodes(self, other=None):
if not other:
self.observer = StructSelectionObserver(self.extendNodes)
FreeCADGui.Selection.addObserver(self.observer)
FreeCAD.Console.PrintMessage(
translate("Arch", "Choose another Structure object:")
)
else:
FreeCADGui.Selection.removeObserver(self.observer)
self.observer = None
if Draft.getType(other) != "Structure":
FreeCAD.Console.PrintError(
translate("Arch", "The chosen object is not a Structure") + "\n"
)
else:
if not other.Nodes:
FreeCAD.Console.PrintError(
translate("Arch", "The chosen object has no structural nodes")
+ "\n"
)
else:
if (len(self.Object.Nodes) != 2) or (len(other.Nodes) != 2):
FreeCAD.Console.PrintError(
translate(
"Arch", "One of these objects has more than 2 nodes"
)
+ "\n"
)
else:
import DraftGeomUtils
nodes1 = [
self.Object.Placement.multVec(v) for v in self.Object.Nodes
]
nodes2 = [other.Placement.multVec(v) for v in other.Nodes]
intersect = DraftGeomUtils.findIntersection(
nodes1[0], nodes1[1], nodes2[0], nodes2[1], True, True
)
if not intersect:
FreeCAD.Console.PrintError(
translate(
"Arch",
"Unable to find a suitable intersection point",
)
+ "\n"
)
else:
intersect = intersect[0]
FreeCAD.Console.PrintMessage(
translate("Arch", "Intersection found.\n")
)
if DraftGeomUtils.findClosest(intersect, nodes1) == 0:
self.Object.Nodes = [
self.Object.Placement.inverse().multVec(intersect),
self.Object.Nodes[1],
]
else:
self.Object.Nodes = [
self.Object.Nodes[0],
self.Object.Placement.inverse().multVec(intersect),
]
def connectNodes(self, other=None):
if not other:
self.observer = StructSelectionObserver(self.connectNodes)
FreeCADGui.Selection.addObserver(self.observer)
FreeCAD.Console.PrintMessage(
translate("Arch", "Choose another Structure object:")
)
else:
FreeCADGui.Selection.removeObserver(self.observer)
self.observer = None
if Draft.getType(other) != "Structure":
FreeCAD.Console.PrintError(
translate("Arch", "The chosen object is not a Structure") + "\n"
)
else:
if not other.Nodes:
FreeCAD.Console.PrintError(
translate("Arch", "The chosen object has no structural nodes")
+ "\n"
)
else:
if (len(self.Object.Nodes) != 2) or (len(other.Nodes) != 2):
FreeCAD.Console.PrintError(
translate(
"Arch", "One of these objects has more than 2 nodes"
)
+ "\n"
)
else:
import DraftGeomUtils
nodes1 = [
self.Object.Placement.multVec(v) for v in self.Object.Nodes
]
nodes2 = [other.Placement.multVec(v) for v in other.Nodes]
intersect = DraftGeomUtils.findIntersection(
nodes1[0], nodes1[1], nodes2[0], nodes2[1], True, True
)
if not intersect:
FreeCAD.Console.PrintError(
translate(
"Arch",
"Unable to find a suitable intersection point",
)
+ "\n"
)
else:
intersect = intersect[0]
FreeCAD.Console.PrintMessage(
translate("Arch", "Intersection found.") + "\n"
)
if DraftGeomUtils.findClosest(intersect, nodes1) == 0:
self.Object.Nodes = [
self.Object.Placement.inverse().multVec(intersect),
self.Object.Nodes[1],
]
else:
self.Object.Nodes = [
self.Object.Nodes[0],
self.Object.Placement.inverse().multVec(intersect),
]
if DraftGeomUtils.findClosest(intersect, nodes2) == 0:
other.Nodes = [
other.Placement.inverse().multVec(intersect),
other.Nodes[1],
]
else:
other.Nodes = [
other.Nodes[0],
other.Placement.inverse().multVec(intersect),
]
def toggleNodes(self):
if self.nodevis:
for obj in self.nodevis:
obj[0].ViewObject.ShowNodes = obj[1]
self.nodevis = None
else:
self.nodevis = []
for obj in FreeCAD.ActiveDocument.Objects:
if hasattr(obj.ViewObject, "ShowNodes"):
self.nodevis.append([obj, obj.ViewObject.ShowNodes])
obj.ViewObject.ShowNodes = True
def setSelectionFromTool(self):
FreeCADGui.Selection.clearSelection()
if hasattr(self.Object, "Tool"):
tool = self.Object.Tool
if hasattr(tool, "Shape") and tool.Shape:
FreeCADGui.Selection.addSelection(tool)
else:
if not isinstance(tool, list):
tool = [tool]
for o, subs in tool:
FreeCADGui.Selection.addSelection(o, subs)
QtCore.QObject.disconnect(
self.selectToolButton, QtCore.SIGNAL("clicked()"), self.setSelectionFromTool
)
QtCore.QObject.connect(
self.selectToolButton, QtCore.SIGNAL("clicked()"), self.setToolFromSelection
)
self.selectToolButton.setText(
QtGui.QApplication.translate("Arch", "Done", None)
)
def setToolFromSelection(self):
objectList = []
selEx = FreeCADGui.Selection.getSelectionEx()
for selExi in selEx:
if len(selExi.SubElementNames) == 0:
# Add entirely selected objects
objectList.append(selExi.Object)
else:
subElementsNames = [
subElementName
for subElementName in selExi.SubElementNames
if subElementName.startswith("Edge")
]
# Check that at least an edge is selected from the object's shape
if len(subElementsNames) > 0:
objectList.append((selExi.Object, subElementsNames))
if self.Object.getTypeIdOfProperty("Tool") != "App::PropertyLinkSubList":
# Upgrade property Tool from App::PropertyLink to App::PropertyLinkSubList (note: Undo/Redo fails)
self.Object.removeProperty("Tool")
self.Object.addProperty(
"App::PropertyLinkSubList",
"Tool",
"Structure",
QT_TRANSLATE_NOOP(
"App::Property", "An optional extrusion path for this element"
),
)
self.Object.Tool = objectList
QtCore.QObject.disconnect(
self.selectToolButton, QtCore.SIGNAL("clicked()"), self.setToolFromSelection
)
QtCore.QObject.connect(
self.selectToolButton, QtCore.SIGNAL("clicked()"), self.setSelectionFromTool
)
self.selectToolButton.setText(
QtGui.QApplication.translate("Arch", "Select tool...", None)
)
def accept(self):
    """Close the task panel: detach the selection observer, restore the
    ShowNodes state saved when the panel was opened, recompute and leave
    edit mode. Returns True to signal the dialog was accepted."""
    if self.observer:
        FreeCADGui.Selection.removeObserver(self.observer)
    if self.nodevis:
        # toggleNodes() restores the per-object ShowNodes values recorded earlier
        self.toggleNodes()
    FreeCAD.ActiveDocument.recompute()
    FreeCADGui.ActiveDocument.resetEdit()
    return True
class StructSelectionObserver:
    """Selection observer that forwards each newly selected object to a callback."""

    def __init__(self, callback):
        # Callable invoked with the selected document object.
        self.callback = callback

    def addSelection(self, docName, objName, sub, pos):
        """Gui selection hook: resolve the named object and hand it to the callback."""
        print("got ", objName)
        selected = FreeCAD.getDocument(docName).getObject(objName)
        self.callback(selected)
class _StructuralSystem(
    ArchComponent.Component
):  # OBSOLETE - All Arch objects can now be based on axes
    "The Structural System object"

    def __init__(self, obj):
        ArchComponent.Component.__init__(self, obj)
        obj.addProperty(
            "App::PropertyLinkList",
            "Axes",
            "Arch",
            QT_TRANSLATE_NOOP(
                "App::Property", "Axes systems this structure is built on"
            ),
        )
        obj.addProperty(
            "App::PropertyIntegerList",
            "Exclude",
            "Arch",
            QT_TRANSLATE_NOOP(
                "App::Property",
                "The element numbers to exclude when this structure is based on axes",
            ),
        )
        # addProperty returns the object, so the default value can be chained
        obj.addProperty(
            "App::PropertyBool",
            "Align",
            "Arch",
            QT_TRANSLATE_NOOP(
                "App::Property", "If true the element are aligned with axes"
            ),
        ).Align = False
        self.Type = "StructuralSystem"

    def execute(self, obj):
        "creates the structure shape"
        import DraftGeomUtils
        import Part

        # creating base shape
        pl = obj.Placement
        if obj.Base:
            if hasattr(obj.Base, "Shape"):
                if obj.Base.Shape.isNull():
                    return
                if not obj.Base.Shape.Solids:
                    return
                base = None
                # applying axes
                pts = self.getAxisPoints(obj)
                if hasattr(obj, "Align"):
                    # Align=False: copies get the axis system's rotation applied;
                    # Align=True: copies keep their own orientation (apl stays None)
                    if obj.Align == False:
                        apl = self.getAxisPlacement(obj)
                    if obj.Align:
                        apl = None
                else:
                    apl = self.getAxisPlacement(obj)
                if pts:
                    fsh = []
                    for i in range(len(pts)):
                        sh = obj.Base.Shape.copy()
                        if hasattr(obj, "Exclude"):
                            # Skip element numbers listed in Exclude
                            if i in obj.Exclude:
                                continue
                        if apl:
                            sh.Placement.Rotation = sh.Placement.Rotation.multiply(
                                apl.Rotation
                            )
                        sh.translate(pts[i])
                        fsh.append(sh)
                    if fsh:
                        base = Part.makeCompound(fsh)
                base = self.processSubShapes(obj, base, pl)
                if base:
                    if not base.isNull():
                        if base.isValid() and base.Solids:
                            if base.Volume < 0:
                                # Negative volume means inverted orientation; try to fix
                                base.reverse()
                            if base.Volume < 0:
                                FreeCAD.Console.PrintError(
                                    translate("Arch", "Couldn't compute a shape")
                                )
                                return
                            base = base.removeSplitter()
                            obj.Shape = base
                            if not pl.isNull():
                                obj.Placement = pl

    def getAxisPoints(self, obj):
        "returns the gridpoints of linked axes"
        import DraftGeomUtils

        pts = []
        if len(obj.Axes) == 1:
            if hasattr(obj, "Align"):
                if obj.Align:
                    # Use end vertices, expressed relative to the first edge's end
                    p0 = obj.Axes[0].Shape.Edges[0].Vertexes[1].Point
                    for e in obj.Axes[0].Shape.Edges:
                        p = e.Vertexes[1].Point
                        p = p.sub(p0)
                        pts.append(p)
                else:
                    for e in obj.Axes[0].Shape.Edges:
                        pts.append(e.Vertexes[0].Point)
            else:
                for e in obj.Axes[0].Shape.Edges:
                    pts.append(e.Vertexes[0].Point)
        elif len(obj.Axes) >= 2:
            # Two axis systems form a grid: use the edge intersections
            set1 = obj.Axes[0].Shape.Edges
            set2 = obj.Axes[1].Shape.Edges
            for e1 in set1:
                for e2 in set2:
                    pts.extend(DraftGeomUtils.findIntersection(e1, e2))
        return pts

    def getAxisPlacement(self, obj):
        "returns an axis placement"
        if obj.Axes:
            return obj.Axes[0].Placement
        return None
class _ViewProviderStructuralSystem(ArchComponent.ViewProviderComponent):
    "A View Provider for the Structural System object"

    def getIcon(self):
        """Return the resource path of the tree-view icon."""
        import Arch_rc  # registers the compiled Qt resources (":/icons/...")

        return ":/icons/Arch_StructuralSystem_Tree.svg"
if FreeCAD.GuiUp:
    # Register the structure commands only when the Gui is available
    FreeCADGui.addCommand("Arch_Structure", _CommandStructure())
    FreeCADGui.addCommand("Arch_StructuralSystem", CommandStructuralSystem())
    FreeCADGui.addCommand(
        "Arch_StructuresFromSelection", CommandStructuresFromSelection()
    )
class _ArchStructureGroupCommand:
def GetCommands(self):
return (
"Arch_Structure",
"Arch_StructuralSystem",
"Arch_StructuresFromSelection",
)
def GetResources(self):
return {
"MenuText": QT_TRANSLATE_NOOP("Arch_StructureTools", "Structure tools"),
"ToolTip": QT_TRANSLATE_NOOP("Arch_StructureTools", "Structure tools"),
}
def IsActive(self):
return not FreeCAD.ActiveDocument is None
# Register the grouped toolbar entry for the structure tools
FreeCADGui.addCommand("Arch_StructureTools", _ArchStructureGroupCommand())
|
import os
from dag_factory.components.deprecated.news_cleaner import NewsCleaner
from dag_factory.components.deprecated.update_na_news_impprt import UpdateNANewsImport
from dag_factory.components.deprecated.update_old_news_impprt import UpdateOldNewsImport
from dag_factory.components.mongo_import import MongoImport
from dag_factory.components.news_crawler import NewsCrawler
from dag_factory.components.old_news_import import OldNewsImport
from dag_factory.components.update_mongo_news import UpdateMongoNews
from tfx.orchestration import metadata, pipeline
from tfx.orchestration.airflow.airflow_dag_runner import (
AirflowDagRunner,
AirflowPipelineConfig,
)
def create_dag(
    name,
    url,
    airflow_config,
    backup_dir="pipelines_backup",
    mongo_ip=None,
    mongo_port=None,
    dag_type="default",
    output_dir="/output",
    updated_collections=None,
    update_collections=None,
):
    """Build a TFX news pipeline and run it as an Airflow DAG.

    :param name: pipeline (file) name; any ".py" substring is stripped
    :param url: RSS feed URL crawled by the "default" dag type
    :param airflow_config: Airflow DAG settings; mutated to force catchup=False
    :param backup_dir: directory (under /output) holding backed-up news,
        used by the "backup" type
    :param mongo_ip: MongoDB host for the import components
    :param mongo_port: MongoDB port for the import components
    :param dag_type: "default" (crawl + import), "backup" (re-import old news)
        or "update" (refresh existing collections)
    :param output_dir: root directory for pipeline artifacts and metadata
    :param updated_collections: collections already refreshed ("update" type)
    :param update_collections: collections still to refresh ("update" type)
    :return: the Airflow DAG produced by AirflowDagRunner
    """
    # Fix for the shared-mutable-default pitfall: never use `=[]` defaults.
    updated_collections = [] if updated_collections is None else updated_collections
    update_collections = [] if update_collections is None else update_collections

    pipeline_name = name.replace(".py", "")
    pipeline_root = os.path.join(output_dir, "pipelines", pipeline_name)
    metadata_path = os.path.join(output_dir, "metadata", pipeline_name, "metadata.db")
    components = []
    if dag_type == "default":
        crawler = NewsCrawler(url=url)
        mongo = MongoImport(
            ip=mongo_ip,
            port=mongo_port,
            rss_feed=crawler.outputs["rss_feed"],
            colname=pipeline_name,
        )
        components = components + [crawler, mongo]
    elif dag_type == "backup":
        # NOTE(review): joins against the hard-coded "/output", not output_dir —
        # presumably intentional, but confirm against the deployment layout.
        load_news = OldNewsImport(
            backup_dir=os.path.join("/output", backup_dir), ip=mongo_ip, port=mongo_port
        )
        components = components + [load_news]
    elif dag_type == "update":
        update_news = UpdateMongoNews(
            ip=mongo_ip,
            port=mongo_port,
            updated_collections=updated_collections,
            update_collections=update_collections,
        )
        components = components + [update_news]
    # Side effect: the caller's dict is modified so the DAG never backfills.
    airflow_config["catchup"] = False
    tfx_pipeline = pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=False,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            metadata_path
        ),
    )
    return AirflowDagRunner(AirflowPipelineConfig(airflow_config)).run(tfx_pipeline)
|
"""
Author(s): Arno Bakker
"""
import itertools
import logging
from hashlib import sha1
import aiohttp
from tribler.core.components.libtorrent.utils import torrent_utils
from tribler.core.components.libtorrent.utils.libtorrent_helper import libtorrent as lt
from tribler.core.utilities import maketorrent, path_util
from tribler.core.utilities.path_util import Path
from tribler.core.utilities.simpledefs import INFOHASH_LENGTH
from tribler.core.utilities.unicode import ensure_unicode
from tribler.core.utilities.utilities import (
bdecode_compat,
is_valid_url,
parse_magnetlink,
)
def escape_as_utf8(string, encoding="utf8"):
    """
    Make a string UTF-8 compliant, destroying characters if necessary.

    :param string: the string to convert (bytes, or an already-decoded str)
    :param encoding: the presumed encoding of ``string`` when it is bytes
    :return: the utf-8 string derivative
    :rtype: str
    """
    try:
        # Try seeing if the delivered encoding is correct and we
        # can convert to utf8 without any issues.
        return string.decode(encoding).encode("utf8").decode("utf8")
    except (AttributeError, LookupError, TypeError, ValueError):
        # AttributeError: `string` is already a str (str has no .decode in
        # Python 3) — fall through so it is sanitized like any other input.
        try:
            # The delivered encoding is incorrect, cast it to
            # latin1 and hope for the best (minor corruption).
            return string.decode("latin1").encode("utf8", "ignore").decode("utf8")
        except (AttributeError, TypeError, ValueError):
            # This is a very nasty string (e.g. u'\u266b'), remove the illegal entries.
            return string.encode("utf8", "ignore").decode("utf8")
class TorrentDef:
    """
    This object acts as a wrapper around some libtorrent metadata.
    It can be used to create new torrents, or analyze existing ones.
    """

    def __init__(self, metainfo=None, torrent_parameters=None, ignore_validation=False):
        """
        Create a new TorrentDef object, possibly based on existing data.
        :param metainfo: A dictionary with metainfo, i.e. from a .torrent file.
        :param torrent_parameters: User-defined parameters for the new TorrentDef.
        :param ignore_validation: Whether we ignore the libtorrent validation.
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self.torrent_parameters = {}
        self.metainfo = None
        self.files_list = []
        self.infohash = None

        if metainfo is not None:
            # First, make sure the passed metainfo is valid
            if not ignore_validation:
                try:
                    lt.torrent_info(metainfo)
                except RuntimeError as exc:
                    raise ValueError from exc
            self.metainfo = metainfo
            # The infohash is the SHA-1 digest of the bencoded info dict
            self.infohash = sha1(lt.bencode(self.metainfo[b"info"])).digest()
            self.copy_metainfo_to_torrent_parameters()
        elif torrent_parameters:
            self.torrent_parameters.update(torrent_parameters)

    def copy_metainfo_to_torrent_parameters(self):
        """
        Populate the torrent_parameters dictionary with information from the metainfo.
        """
        # Top-level metainfo keys carried over verbatim
        for key in [
            b"comment",
            b"created by",
            b"creation date",
            b"announce",
            b"announce-list",
            b"nodes",
            b"httpseeds",
            b"urllist",
        ]:
            if self.metainfo and key in self.metainfo:
                self.torrent_parameters[key] = self.metainfo[key]
        # Keys copied out of the info dict
        infokeys = [b"name", b"piece length"]
        for key in infokeys:
            if self.metainfo and key in self.metainfo[b"info"]:
                self.torrent_parameters[key] = self.metainfo[b"info"][key]

    @staticmethod
    def load(filepath):
        """
        Create a TorrentDef object from a .torrent file
        :param filepath: The path to the .torrent file
        """
        with open(filepath, "rb") as torrent_file:
            file_content = torrent_file.read()
        return TorrentDef.load_from_memory(file_content)

    @staticmethod
    def load_from_memory(bencoded_data):
        """
        Load some bencoded data into a TorrentDef.
        :param bencoded_data: The bencoded data to decode and use as metainfo
        """
        metainfo = bdecode_compat(bencoded_data)
        # Some versions of libtorrent will not raise an exception when providing invalid data.
        # This issue is present in 1.0.8 (included with Tribler 7.3.0), but has been fixed since at least 1.2.1.
        if metainfo is None:
            raise ValueError("Data is not a bencoded string")
        return TorrentDef.load_from_dict(metainfo)

    @staticmethod
    def load_from_dict(metainfo):
        """
        Load a metainfo dictionary into a TorrentDef object.
        :param metainfo: The metainfo dictionary
        """
        return TorrentDef(metainfo=metainfo)

    @staticmethod
    async def load_from_url(url):
        """
        Create a TorrentDef with information from a remote source.
        :param url: The HTTP/HTTPS url where to fetch the torrent info from.
        """
        async with aiohttp.ClientSession(raise_for_status=True) as session:
            response = await session.get(url)
            body = await response.read()
        return TorrentDef.load_from_memory(body)

    def _filter_characters(self, name: bytes) -> str:
        """
        Sanitize the names in path to unicode by replacing out all
        characters that may -even remotely- cause problems with the '?'
        character.
        :param name: the name to sanitize
        :type name: bytes
        :return: the sanitized string
        :rtype: str
        """

        def filter_character(char: int) -> str:
            # Keep printable/controllable ASCII (1-127); replace the rest
            if 0 < char < 128:
                return chr(char)
            self._logger.debug("Bad character 0x%X", char)
            return "?"

        return "".join(map(filter_character, name))

    def add_content(self, file_path):
        """
        Add some content to the torrent file.
        :param file_path: The (absolute) path of the file to add.
        """
        self.files_list.append(Path(file_path).absolute())

    def set_encoding(self, enc):
        """
        Set the character encoding for e.g. the 'name' field
        :param enc: The new encoding of the file.
        """
        self.torrent_parameters[b"encoding"] = enc

    def get_encoding(self):
        """
        Returns the used encoding of the TorrentDef.
        """
        return ensure_unicode(
            self.torrent_parameters.get(b"encoding", b"utf-8"), "utf-8"
        )

    def set_tracker(self, url):
        """
        Set the tracker of this torrent, according to a given URL.
        :param url: The tracker url.
        :raises ValueError: if the URL does not validate.
        """
        if not is_valid_url(url):
            raise ValueError("Invalid URL")

        if url.endswith("/"):  # Some tracker code can't deal with / at end
            url = url[:-1]
        self.torrent_parameters[b"announce"] = url

    def get_tracker(self):
        """
        Returns the torrent announce URL.
        """
        return self.torrent_parameters.get(b"announce", None)

    def get_tracker_hierarchy(self):
        """
        Returns the hierarchy of trackers.
        """
        return self.torrent_parameters.get(b"announce-list", [])

    def get_trackers(self) -> set:
        """
        Returns a flat set of all known trackers.
        :return: all known trackers
        :rtype: set
        """
        if self.get_tracker_hierarchy():
            # Flatten the announce-list tiers and drop empty entries
            trackers = itertools.chain.from_iterable(self.get_tracker_hierarchy())
            return set(filter(None, trackers))
        tracker = self.get_tracker()
        if tracker:
            return {tracker}
        return set()

    def set_piece_length(self, piece_length):
        """
        Set the size of the pieces in which the content is traded.
        The piece size must be a multiple of the chunk size, the unit in which
        it is transmitted, which is 16K by default. The default is automatic (value 0).
        :param piece_length: The piece length.
        :raises ValueError: if piece_length is not an int.
        """
        if not isinstance(piece_length, int):
            raise ValueError("Piece length not an int/long")

        self.torrent_parameters[b"piece length"] = piece_length

    def get_piece_length(self):
        """
        Returns the piece size.
        """
        return self.torrent_parameters.get(b"piece length", 0)

    def get_nr_pieces(self):
        """
        Returns the number of pieces.
        """
        if not self.metainfo:
            return 0
        # The pieces field is a concatenation of 20-byte SHA-1 hashes
        return len(self.metainfo[b"info"][b"pieces"]) // 20

    def get_pieces(self):
        """
        Returns the pieces.
        """
        if not self.metainfo:
            return []
        # Return a copy so callers cannot mutate the metainfo
        return self.metainfo[b"info"][b"pieces"][:]

    def get_infohash(self):
        """
        Returns the infohash of the torrent, if metainfo is provided. Might be None if no metainfo is provided.
        """
        return self.infohash

    def get_metainfo(self):
        """
        Returns the metainfo of the torrent. Might be None if no metainfo is provided.
        """
        return self.metainfo

    def get_name(self):
        """
        Returns the name as raw string of bytes.
        """
        return self.torrent_parameters[b"name"]

    def get_name_utf8(self):
        """
        Not all names are utf-8, attempt to construct it as utf-8 anyway.
        """
        return escape_as_utf8(self.get_name(), self.get_encoding())

    def set_name(self, name):
        """
        Set the name of this torrent.
        :param name: The new name of the torrent
        """
        self.torrent_parameters[b"name"] = name

    def get_name_as_unicode(self):
        """Returns the info['name'] field as Unicode string.
        @return Unicode string."""
        if self.metainfo and b"name.utf-8" in self.metainfo[b"info"]:
            # There is an utf-8 encoded name. We assume that it is
            # correctly encoded and use it normally
            try:
                return ensure_unicode(self.metainfo[b"info"][b"name.utf-8"], "UTF-8")
            except UnicodeError:
                pass

        if self.metainfo and b"name" in self.metainfo[b"info"]:
            # Try to use the 'encoding' field. If it exists, it
            # should contain something like 'utf-8'
            # NOTE(review): str key lookup in a bytes-keyed metainfo dict —
            # this branch is likely never taken (cf. b"encoding" elsewhere);
            # confirm before relying on it.
            if "encoding" in self.metainfo:
                try:
                    return ensure_unicode(
                        self.metainfo[b"info"][b"name"], self.metainfo[b"encoding"]
                    )
                except UnicodeError:
                    pass
                except LookupError:
                    # Some encodings are not supported by python. For
                    # instance, the MBCS codec which is used by
                    # Windows is not supported (Jan 2010)
                    pass

            # Try to convert the names in path to unicode, assuming
            # that it was encoded as utf-8
            try:
                return ensure_unicode(self.metainfo[b"info"][b"name"], "UTF-8")
            except UnicodeError:
                pass

            # Convert the names in path to unicode by replacing out
            # all characters that may -even remotely- cause problems
            # with the '?' character
            try:
                return self._filter_characters(self.metainfo[b"info"][b"name"])
            except UnicodeError:
                pass

        # We failed. Returning an empty string
        return ""

    def save(self, torrent_filepath=None):
        """
        Generate the metainfo and save the torrent file.
        :param torrent_filepath: An optional absolute path to where to save the generated .torrent file.
        """
        torrent_dict = torrent_utils.create_torrent_file(
            self.files_list, self.torrent_parameters, torrent_filepath=torrent_filepath
        )
        self.metainfo = bdecode_compat(torrent_dict["metainfo"])
        self.copy_metainfo_to_torrent_parameters()
        self.infohash = torrent_dict["infohash"]

    def _get_all_files_as_unicode_with_length(self):
        """Get a generator for files in the torrent def. No filtering
        is possible and all tricks are allowed to obtain a unicode
        list of filenames.
        @return A unicode filename generator.
        """
        if self.metainfo and b"files" in self.metainfo[b"info"]:
            # Multi-file torrent
            files = self.metainfo[b"info"][b"files"]

            for file_dict in files:
                if b"path.utf-8" in file_dict:
                    # This file has an utf-8 encoded list of elements.
                    # We assume that it is correctly encoded and use
                    # it normally
                    try:
                        yield (
                            Path(
                                *(
                                    ensure_unicode(element, "UTF-8")
                                    for element in file_dict[b"path.utf-8"]
                                )
                            ),
                            file_dict[b"length"],
                        )
                        continue
                    except UnicodeError:
                        pass

                if b"path" in file_dict:
                    # Try to use the 'encoding' field. If it exists,
                    # it should contain something like 'utf-8'
                    if b"encoding" in self.metainfo:
                        encoding = ensure_unicode(self.metainfo[b"encoding"], "utf8")
                        try:
                            yield (
                                Path(
                                    *(
                                        ensure_unicode(element, encoding)
                                        for element in file_dict[b"path"]
                                    )
                                ),
                                file_dict[b"length"],
                            )
                            continue
                        except UnicodeError:
                            pass
                        except LookupError:
                            # Some encodings are not supported by
                            # python. For instance, the MBCS codec
                            # which is used by Windows is not
                            # supported (Jan 2010)
                            pass

                    # Try to convert the names in path to unicode,
                    # assuming that it was encoded as utf-8
                    try:
                        yield (
                            Path(
                                *(
                                    ensure_unicode(element, "UTF-8")
                                    for element in file_dict[b"path"]
                                )
                            ),
                            file_dict[b"length"],
                        )
                        continue
                    except UnicodeError:
                        pass

                    # Convert the names in path to unicode by
                    # replacing out all characters that may -even
                    # remotely- cause problems with the '?' character
                    try:
                        yield (
                            Path(*map(self._filter_characters, file_dict[b"path"])),
                            file_dict[b"length"],
                        )
                        continue
                    except UnicodeError:
                        pass
        elif self.metainfo:
            # Single-file torrent
            yield self.get_name_as_unicode(), self.metainfo[b"info"][b"length"]

    def get_files_with_length(self, exts=None):
        """The list of files in the torrent def.
        @param exts (Optional) list of filename extensions (without leading .)
        to search for.
        @return A list of filenames.
        """
        videofiles = []
        for filename, length in self._get_all_files_as_unicode_with_length():
            ext = path_util.Path(filename).suffix
            # Strip the leading dot from the suffix before matching
            if ext != "" and ext[0] == ".":
                ext = ext[1:]
            if exts is None or ext.lower() in exts:
                videofiles.append((filename, length))
        return videofiles

    def get_files(self, exts=None):
        # Convenience wrapper: filenames only, lengths discarded
        return [filename for filename, _ in self.get_files_with_length(exts)]

    def get_length(self, selectedfiles=None):
        """Returns the total size of the content in the torrent. If the
        optional selectedfiles argument is specified, the method returns
        the total size of only those files.
        @return A length (long)
        """
        if self.metainfo:
            return maketorrent.get_length_from_metainfo(self.metainfo, selectedfiles)
        return 0

    def get_creation_date(self):
        """
        Returns the creation date of the torrent.
        """
        return self.metainfo.get(b"creation date", 0) if self.metainfo else 0

    def is_multifile_torrent(self):
        """
        Returns whether this TorrentDef is a multi-file torrent.
        """
        if self.metainfo:
            return b"files" in self.metainfo[b"info"]
        return False

    def is_private(self) -> bool:
        """
        Returns whether this TorrentDef is a private torrent (and is not announced in the DHT).
        """
        try:
            private = (
                int(self.metainfo[b"info"].get(b"private", 0)) if self.metainfo else 0
            )
        except (ValueError, KeyError) as e:
            # Malformed or missing private flag: fall back to "not private"
            self._logger.warning(f"{e.__class__.__name__}: {e}")
            private = 0
        return private == 1

    def get_index_of_file_in_files(self, file):
        """Return the index of `file` in the multi-file torrent's file list.

        :raises ValueError: if there is no metainfo, the torrent is
            single-file, or the file is not found.
        """
        if not self.metainfo:
            raise ValueError("TorrentDef does not have metainfo")
        info = self.metainfo[b"info"]

        if file is not None and b"files" in info:
            for i in range(len(info[b"files"])):
                file_dict = info[b"files"][i]

                # Prefer the utf-8 encoded path when present
                if b"path.utf-8" in file_dict:
                    intorrentpath = maketorrent.pathlist2filename(
                        file_dict[b"path.utf-8"]
                    )
                else:
                    intorrentpath = maketorrent.pathlist2filename(file_dict[b"path"])

                if intorrentpath == path_util.Path(ensure_unicode(file, "utf8")):
                    return i
            raise ValueError("File not found in torrent")
        else:
            raise ValueError("File not found in single-file torrent")
class TorrentDefNoMetainfo:
    """
    Instances of this class are used when working with a torrent def that contains no metainfo (yet), for instance,
    when starting a download with only an infohash. Other methods that are using this class do not distinguish between
    a TorrentDef with and without data and may still expect this class to have various methods in TorrentDef
    implemented.
    """

    def __init__(self, infohash, name, url=None):
        """
        :param infohash: the raw infohash bytes identifying the torrent
        :param name: the display name of the torrent (str or bytes)
        :param url: an optional source URL (e.g. a magnet link)
        """
        assert isinstance(
            infohash, bytes
        ), f"INFOHASH has invalid type: {type(infohash)}"
        assert len(infohash) == INFOHASH_LENGTH, (
            "INFOHASH has invalid length: %d" % len(infohash)
        )
        self.infohash = infohash
        self.name = name
        self.url = url

    def get_name(self):
        return self.name

    def get_infohash(self):
        return self.infohash

    def get_length(self, selectedfiles=None):  # pylint: disable=unused-argument
        # No metainfo, so the content size is unknown
        return 0

    def get_metainfo(self):
        return None

    def get_url(self):
        return self.url

    def is_multifile_torrent(self):
        return False

    def get_name_utf8(self):
        """
        Not all names are utf-8, attempt to construct it as utf-8 anyway.
        """
        # Fix: the codec name used to be "utf-8 " (trailing space), which only
        # worked because Python's codec-name normalization strips it.
        return escape_as_utf8(
            self.name.encode("utf-8") if isinstance(self.name, str) else self.name
        )

    def get_name_as_unicode(self):
        return ensure_unicode(self.name, "utf-8")

    def get_files(self, exts=None):
        return []

    def get_files_with_length(self, exts=None):
        return []

    def get_trackers(self) -> set:
        """
        Returns a flat set of all known trackers.
        :return: all known trackers
        :rtype: set
        """
        # Only magnet links can carry tracker information here
        if self.url and self.url.startswith("magnet:"):
            trackers = parse_magnetlink(self.url)[2]
            return set(trackers)
        return set()

    def is_private(self):
        return False

    def get_nr_pieces(self):
        return 0
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
# Ratio threshold used by the EUC-KR distribution analyser (see the
# sampling statistics in the comment block above).
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0

# Number of entries covered by the char-to-frequency-order table below.
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = (
13,
130,
120,
1396,
481,
1719,
1720,
328,
609,
212,
1721,
707,
400,
299,
1722,
87,
1397,
1723,
104,
536,
1117,
1203,
1724,
1267,
685,
1268,
508,
1725,
1726,
1727,
1728,
1398,
1399,
1729,
1730,
1731,
141,
621,
326,
1057,
368,
1732,
267,
488,
20,
1733,
1269,
1734,
945,
1400,
1735,
47,
904,
1270,
1736,
1737,
773,
248,
1738,
409,
313,
786,
429,
1739,
116,
987,
813,
1401,
683,
75,
1204,
145,
1740,
1741,
1742,
1743,
16,
847,
667,
622,
708,
1744,
1745,
1746,
966,
787,
304,
129,
1747,
60,
820,
123,
676,
1748,
1749,
1750,
1751,
617,
1752,
626,
1753,
1754,
1755,
1756,
653,
1757,
1758,
1759,
1760,
1761,
1762,
856,
344,
1763,
1764,
1765,
1766,
89,
401,
418,
806,
905,
848,
1767,
1768,
1769,
946,
1205,
709,
1770,
1118,
1771,
241,
1772,
1773,
1774,
1271,
1775,
569,
1776,
999,
1777,
1778,
1779,
1780,
337,
751,
1058,
28,
628,
254,
1781,
177,
906,
270,
349,
891,
1079,
1782,
19,
1783,
379,
1784,
315,
1785,
629,
754,
1402,
559,
1786,
636,
203,
1206,
1787,
710,
567,
1788,
935,
814,
1789,
1790,
1207,
766,
528,
1791,
1792,
1208,
1793,
1794,
1795,
1796,
1797,
1403,
1798,
1799,
533,
1059,
1404,
1405,
1156,
1406,
936,
884,
1080,
1800,
351,
1801,
1802,
1803,
1804,
1805,
801,
1806,
1807,
1808,
1119,
1809,
1157,
714,
474,
1407,
1810,
298,
899,
885,
1811,
1120,
802,
1158,
1812,
892,
1813,
1814,
1408,
659,
1815,
1816,
1121,
1817,
1818,
1819,
1820,
1821,
1822,
319,
1823,
594,
545,
1824,
815,
937,
1209,
1825,
1826,
573,
1409,
1022,
1827,
1210,
1828,
1829,
1830,
1831,
1832,
1833,
556,
722,
807,
1122,
1060,
1834,
697,
1835,
900,
557,
715,
1836,
1410,
540,
1411,
752,
1159,
294,
597,
1211,
976,
803,
770,
1412,
1837,
1838,
39,
794,
1413,
358,
1839,
371,
925,
1840,
453,
661,
788,
531,
723,
544,
1023,
1081,
869,
91,
1841,
392,
430,
790,
602,
1414,
677,
1082,
457,
1415,
1416,
1842,
1843,
475,
327,
1024,
1417,
795,
121,
1844,
733,
403,
1418,
1845,
1846,
1847,
300,
119,
711,
1212,
627,
1848,
1272,
207,
1849,
1850,
796,
1213,
382,
1851,
519,
1852,
1083,
893,
1853,
1854,
1855,
367,
809,
487,
671,
1856,
663,
1857,
1858,
956,
471,
306,
857,
1859,
1860,
1160,
1084,
1861,
1862,
1863,
1864,
1865,
1061,
1866,
1867,
1868,
1869,
1870,
1871,
282,
96,
574,
1872,
502,
1085,
1873,
1214,
1874,
907,
1875,
1876,
827,
977,
1419,
1420,
1421,
268,
1877,
1422,
1878,
1879,
1880,
308,
1881,
2,
537,
1882,
1883,
1215,
1884,
1885,
127,
791,
1886,
1273,
1423,
1887,
34,
336,
404,
643,
1888,
571,
654,
894,
840,
1889,
0,
886,
1274,
122,
575,
260,
908,
938,
1890,
1275,
410,
316,
1891,
1892,
100,
1893,
1894,
1123,
48,
1161,
1124,
1025,
1895,
633,
901,
1276,
1896,
1897,
115,
816,
1898,
317,
1899,
694,
1900,
909,
734,
1424,
572,
866,
1425,
691,
85,
524,
1010,
543,
394,
841,
1901,
1902,
1903,
1026,
1904,
1905,
1906,
1907,
1908,
1909,
30,
451,
651,
988,
310,
1910,
1911,
1426,
810,
1216,
93,
1912,
1913,
1277,
1217,
1914,
858,
759,
45,
58,
181,
610,
269,
1915,
1916,
131,
1062,
551,
443,
1000,
821,
1427,
957,
895,
1086,
1917,
1918,
375,
1919,
359,
1920,
687,
1921,
822,
1922,
293,
1923,
1924,
40,
662,
118,
692,
29,
939,
887,
640,
482,
174,
1925,
69,
1162,
728,
1428,
910,
1926,
1278,
1218,
1279,
386,
870,
217,
854,
1163,
823,
1927,
1928,
1929,
1930,
834,
1931,
78,
1932,
859,
1933,
1063,
1934,
1935,
1936,
1937,
438,
1164,
208,
595,
1938,
1939,
1940,
1941,
1219,
1125,
1942,
280,
888,
1429,
1430,
1220,
1431,
1943,
1944,
1945,
1946,
1947,
1280,
150,
510,
1432,
1948,
1949,
1950,
1951,
1952,
1953,
1954,
1011,
1087,
1955,
1433,
1043,
1956,
881,
1957,
614,
958,
1064,
1065,
1221,
1958,
638,
1001,
860,
967,
896,
1434,
989,
492,
553,
1281,
1165,
1959,
1282,
1002,
1283,
1222,
1960,
1961,
1962,
1963,
36,
383,
228,
753,
247,
454,
1964,
876,
678,
1965,
1966,
1284,
126,
464,
490,
835,
136,
672,
529,
940,
1088,
1435,
473,
1967,
1968,
467,
50,
390,
227,
587,
279,
378,
598,
792,
968,
240,
151,
160,
849,
882,
1126,
1285,
639,
1044,
133,
140,
288,
360,
811,
563,
1027,
561,
142,
523,
1969,
1970,
1971,
7,
103,
296,
439,
407,
506,
634,
990,
1972,
1973,
1974,
1975,
645,
1976,
1977,
1978,
1979,
1980,
1981,
236,
1982,
1436,
1983,
1984,
1089,
192,
828,
618,
518,
1166,
333,
1127,
1985,
818,
1223,
1986,
1987,
1988,
1989,
1990,
1991,
1992,
1993,
342,
1128,
1286,
746,
842,
1994,
1995,
560,
223,
1287,
98,
8,
189,
650,
978,
1288,
1996,
1437,
1997,
17,
345,
250,
423,
277,
234,
512,
226,
97,
289,
42,
167,
1998,
201,
1999,
2000,
843,
836,
824,
532,
338,
783,
1090,
182,
576,
436,
1438,
1439,
527,
500,
2001,
947,
889,
2002,
2003,
2004,
2005,
262,
600,
314,
447,
2006,
547,
2007,
693,
738,
1129,
2008,
71,
1440,
745,
619,
688,
2009,
829,
2010,
2011,
147,
2012,
33,
948,
2013,
2014,
74,
224,
2015,
61,
191,
918,
399,
637,
2016,
1028,
1130,
257,
902,
2017,
2018,
2019,
2020,
2021,
2022,
2023,
2024,
2025,
2026,
837,
2027,
2028,
2029,
2030,
179,
874,
591,
52,
724,
246,
2031,
2032,
2033,
2034,
1167,
969,
2035,
1289,
630,
605,
911,
1091,
1168,
2036,
2037,
2038,
1441,
912,
2039,
623,
2040,
2041,
253,
1169,
1290,
2042,
1442,
146,
620,
611,
577,
433,
2043,
1224,
719,
1170,
959,
440,
437,
534,
84,
388,
480,
1131,
159,
220,
198,
679,
2044,
1012,
819,
1066,
1443,
113,
1225,
194,
318,
1003,
1029,
2045,
2046,
2047,
2048,
1067,
2049,
2050,
2051,
2052,
2053,
59,
913,
112,
2054,
632,
2055,
455,
144,
739,
1291,
2056,
273,
681,
499,
2057,
448,
2058,
2059,
760,
2060,
2061,
970,
384,
169,
245,
1132,
2062,
2063,
414,
1444,
2064,
2065,
41,
235,
2066,
157,
252,
877,
568,
919,
789,
580,
2067,
725,
2068,
2069,
1292,
2070,
2071,
1445,
2072,
1446,
2073,
2074,
55,
588,
66,
1447,
271,
1092,
2075,
1226,
2076,
960,
1013,
372,
2077,
2078,
2079,
2080,
2081,
1293,
2082,
2083,
2084,
2085,
850,
2086,
2087,
2088,
2089,
2090,
186,
2091,
1068,
180,
2092,
2093,
2094,
109,
1227,
522,
606,
2095,
867,
1448,
1093,
991,
1171,
926,
353,
1133,
2096,
581,
2097,
2098,
2099,
1294,
1449,
1450,
2100,
596,
1172,
1014,
1228,
2101,
1451,
1295,
1173,
1229,
2102,
2103,
1296,
1134,
1452,
949,
1135,
2104,
2105,
1094,
1453,
1454,
1455,
2106,
1095,
2107,
2108,
2109,
2110,
2111,
2112,
2113,
2114,
2115,
2116,
2117,
804,
2118,
2119,
1230,
1231,
805,
1456,
405,
1136,
2120,
2121,
2122,
2123,
2124,
720,
701,
1297,
992,
1457,
927,
1004,
2125,
2126,
2127,
2128,
2129,
2130,
22,
417,
2131,
303,
2132,
385,
2133,
971,
520,
513,
2134,
1174,
73,
1096,
231,
274,
962,
1458,
673,
2135,
1459,
2136,
152,
1137,
2137,
2138,
2139,
2140,
1005,
1138,
1460,
1139,
2141,
2142,
2143,
2144,
11,
374,
844,
2145,
154,
1232,
46,
1461,
2146,
838,
830,
721,
1233,
106,
2147,
90,
428,
462,
578,
566,
1175,
352,
2148,
2149,
538,
1234,
124,
1298,
2150,
1462,
761,
565,
2151,
686,
2152,
649,
2153,
72,
173,
2154,
460,
415,
2155,
1463,
2156,
1235,
305,
2157,
2158,
2159,
2160,
2161,
2162,
579,
2163,
2164,
2165,
2166,
2167,
747,
2168,
2169,
2170,
2171,
1464,
669,
2172,
2173,
2174,
2175,
2176,
1465,
2177,
23,
530,
285,
2178,
335,
729,
2179,
397,
2180,
2181,
2182,
1030,
2183,
2184,
698,
2185,
2186,
325,
2187,
2188,
369,
2189,
799,
1097,
1015,
348,
2190,
1069,
680,
2191,
851,
1466,
2192,
2193,
10,
2194,
613,
424,
2195,
979,
108,
449,
589,
27,
172,
81,
1031,
80,
774,
281,
350,
1032,
525,
301,
582,
1176,
2196,
674,
1045,
2197,
2198,
1467,
730,
762,
2199,
2200,
2201,
2202,
1468,
2203,
993,
2204,
2205,
266,
1070,
963,
1140,
2206,
2207,
2208,
664,
1098,
972,
2209,
2210,
2211,
1177,
1469,
1470,
871,
2212,
2213,
2214,
2215,
2216,
1471,
2217,
2218,
2219,
2220,
2221,
2222,
2223,
2224,
2225,
2226,
2227,
1472,
1236,
2228,
2229,
2230,
2231,
2232,
2233,
2234,
2235,
1299,
2236,
2237,
200,
2238,
477,
373,
2239,
2240,
731,
825,
777,
2241,
2242,
2243,
521,
486,
548,
2244,
2245,
2246,
1473,
1300,
53,
549,
137,
875,
76,
158,
2247,
1301,
1474,
469,
396,
1016,
278,
712,
2248,
321,
442,
503,
767,
744,
941,
1237,
1178,
1475,
2249,
82,
178,
1141,
1179,
973,
2250,
1302,
2251,
297,
2252,
2253,
570,
2254,
2255,
2256,
18,
450,
206,
2257,
290,
292,
1142,
2258,
511,
162,
99,
346,
164,
735,
2259,
1476,
1477,
4,
554,
343,
798,
1099,
2260,
1100,
2261,
43,
171,
1303,
139,
215,
2262,
2263,
717,
775,
2264,
1033,
322,
216,
2265,
831,
2266,
149,
2267,
1304,
2268,
2269,
702,
1238,
135,
845,
347,
309,
2270,
484,
2271,
878,
655,
238,
1006,
1478,
2272,
67,
2273,
295,
2274,
2275,
461,
2276,
478,
942,
412,
2277,
1034,
2278,
2279,
2280,
265,
2281,
541,
2282,
2283,
2284,
2285,
2286,
70,
852,
1071,
2287,
2288,
2289,
2290,
21,
56,
509,
117,
432,
2291,
2292,
331,
980,
552,
1101,
148,
284,
105,
393,
1180,
1239,
755,
2293,
187,
2294,
1046,
1479,
2295,
340,
2296,
63,
1047,
230,
2297,
2298,
1305,
763,
1306,
101,
800,
808,
494,
2299,
2300,
2301,
903,
2302,
37,
1072,
14,
5,
2303,
79,
675,
2304,
312,
2305,
2306,
2307,
2308,
2309,
1480,
6,
1307,
2310,
2311,
2312,
1,
470,
35,
24,
229,
2313,
695,
210,
86,
778,
15,
784,
592,
779,
32,
77,
855,
964,
2314,
259,
2315,
501,
380,
2316,
2317,
83,
981,
153,
689,
1308,
1481,
1482,
1483,
2318,
2319,
716,
1484,
2320,
2321,
2322,
2323,
2324,
2325,
1485,
2326,
2327,
128,
57,
68,
261,
1048,
211,
170,
1240,
31,
2328,
51,
435,
742,
2329,
2330,
2331,
635,
2332,
264,
456,
2333,
2334,
2335,
425,
2336,
1486,
143,
507,
263,
943,
2337,
363,
920,
1487,
256,
1488,
1102,
243,
601,
1489,
2338,
2339,
2340,
2341,
2342,
2343,
2344,
861,
2345,
2346,
2347,
2348,
2349,
2350,
395,
2351,
1490,
1491,
62,
535,
166,
225,
2352,
2353,
668,
419,
1241,
138,
604,
928,
2354,
1181,
2355,
1492,
1493,
2356,
2357,
2358,
1143,
2359,
696,
2360,
387,
307,
1309,
682,
476,
2361,
2362,
332,
12,
222,
156,
2363,
232,
2364,
641,
276,
656,
517,
1494,
1495,
1035,
416,
736,
1496,
2365,
1017,
586,
2366,
2367,
2368,
1497,
2369,
242,
2370,
2371,
2372,
1498,
2373,
965,
713,
2374,
2375,
2376,
2377,
740,
982,
1499,
944,
1500,
1007,
2378,
2379,
1310,
1501,
2380,
2381,
2382,
785,
329,
2383,
2384,
1502,
2385,
2386,
2387,
932,
2388,
1503,
2389,
2390,
2391,
2392,
1242,
2393,
2394,
2395,
2396,
2397,
994,
950,
2398,
2399,
2400,
2401,
1504,
1311,
2402,
2403,
2404,
2405,
1049,
749,
2406,
2407,
853,
718,
1144,
1312,
2408,
1182,
1505,
2409,
2410,
255,
516,
479,
564,
550,
214,
1506,
1507,
1313,
413,
239,
444,
339,
1145,
1036,
1508,
1509,
1314,
1037,
1510,
1315,
2411,
1511,
2412,
2413,
2414,
176,
703,
497,
624,
593,
921,
302,
2415,
341,
165,
1103,
1512,
2416,
1513,
2417,
2418,
2419,
376,
2420,
700,
2421,
2422,
2423,
258,
768,
1316,
2424,
1183,
2425,
995,
608,
2426,
2427,
2428,
2429,
221,
2430,
2431,
2432,
2433,
2434,
2435,
2436,
2437,
195,
323,
726,
188,
897,
983,
1317,
377,
644,
1050,
879,
2438,
452,
2439,
2440,
2441,
2442,
2443,
2444,
914,
2445,
2446,
2447,
2448,
915,
489,
2449,
1514,
1184,
2450,
2451,
515,
64,
427,
495,
2452,
583,
2453,
483,
485,
1038,
562,
213,
1515,
748,
666,
2454,
2455,
2456,
2457,
334,
2458,
780,
996,
1008,
705,
1243,
2459,
2460,
2461,
2462,
2463,
114,
2464,
493,
1146,
366,
163,
1516,
961,
1104,
2465,
291,
2466,
1318,
1105,
2467,
1517,
365,
2468,
355,
951,
1244,
2469,
1319,
2470,
631,
2471,
2472,
218,
1320,
364,
320,
756,
1518,
1519,
1321,
1520,
1322,
2473,
2474,
2475,
2476,
997,
2477,
2478,
2479,
2480,
665,
1185,
2481,
916,
1521,
2482,
2483,
2484,
584,
684,
2485,
2486,
797,
2487,
1051,
1186,
2488,
2489,
2490,
1522,
2491,
2492,
370,
2493,
1039,
1187,
65,
2494,
434,
205,
463,
1188,
2495,
125,
812,
391,
402,
826,
699,
286,
398,
155,
781,
771,
585,
2496,
590,
505,
1073,
2497,
599,
244,
219,
917,
1018,
952,
646,
1523,
2498,
1323,
2499,
2500,
49,
984,
354,
741,
2501,
625,
2502,
1324,
2503,
1019,
190,
357,
757,
491,
95,
782,
868,
2504,
2505,
2506,
2507,
2508,
2509,
134,
1524,
1074,
422,
1525,
898,
2510,
161,
2511,
2512,
2513,
2514,
769,
2515,
1526,
2516,
2517,
411,
1325,
2518,
472,
1527,
2519,
2520,
2521,
2522,
2523,
2524,
985,
2525,
2526,
2527,
2528,
2529,
2530,
764,
2531,
1245,
2532,
2533,
25,
204,
311,
2534,
496,
2535,
1052,
2536,
2537,
2538,
2539,
2540,
2541,
2542,
199,
704,
504,
468,
758,
657,
1528,
196,
44,
839,
1246,
272,
750,
2543,
765,
862,
2544,
2545,
1326,
2546,
132,
615,
933,
2547,
732,
2548,
2549,
2550,
1189,
1529,
2551,
283,
1247,
1053,
607,
929,
2552,
2553,
2554,
930,
183,
872,
616,
1040,
1147,
2555,
1148,
1020,
441,
249,
1075,
2556,
2557,
2558,
466,
743,
2559,
2560,
2561,
92,
514,
426,
420,
526,
2562,
2563,
2564,
2565,
2566,
2567,
2568,
185,
2569,
2570,
2571,
2572,
776,
1530,
658,
2573,
362,
2574,
361,
922,
1076,
793,
2575,
2576,
2577,
2578,
2579,
2580,
1531,
251,
2581,
2582,
2583,
2584,
1532,
54,
612,
237,
1327,
2585,
2586,
275,
408,
647,
111,
2587,
1533,
1106,
465,
3,
458,
9,
38,
2588,
107,
110,
890,
209,
26,
737,
498,
2589,
1534,
2590,
431,
202,
88,
1535,
356,
287,
1107,
660,
1149,
2591,
381,
1536,
986,
1150,
445,
1248,
1151,
974,
2592,
2593,
846,
2594,
446,
953,
184,
1249,
1250,
727,
2595,
923,
193,
883,
2596,
2597,
2598,
102,
324,
539,
817,
2599,
421,
1041,
2600,
832,
2601,
94,
175,
197,
406,
2602,
459,
2603,
2604,
2605,
2606,
2607,
330,
555,
2608,
2609,
2610,
706,
1108,
389,
2611,
2612,
2613,
2614,
233,
2615,
833,
558,
931,
954,
1251,
2616,
2617,
1537,
546,
2618,
2619,
1009,
2620,
2621,
2622,
1538,
690,
1328,
2623,
955,
2624,
1539,
2625,
2626,
772,
2627,
2628,
2629,
2630,
2631,
924,
648,
863,
603,
2632,
2633,
934,
1540,
864,
865,
2634,
642,
1042,
670,
1190,
2635,
2636,
2637,
2638,
168,
2639,
652,
873,
542,
1054,
1541,
2640,
2641,
2642, # 512, 256
# Everything below is of no interest for detection purpose
2643,
2644,
2645,
2646,
2647,
2648,
2649,
2650,
2651,
2652,
2653,
2654,
2655,
2656,
2657,
2658,
2659,
2660,
2661,
2662,
2663,
2664,
2665,
2666,
2667,
2668,
2669,
2670,
2671,
2672,
2673,
2674,
2675,
2676,
2677,
2678,
2679,
2680,
2681,
2682,
2683,
2684,
2685,
2686,
2687,
2688,
2689,
2690,
2691,
2692,
2693,
2694,
2695,
2696,
2697,
2698,
2699,
1542,
880,
2700,
2701,
2702,
2703,
2704,
2705,
2706,
2707,
2708,
2709,
2710,
2711,
2712,
2713,
2714,
2715,
2716,
2717,
2718,
2719,
2720,
2721,
2722,
2723,
2724,
2725,
1543,
2726,
2727,
2728,
2729,
2730,
2731,
2732,
1544,
2733,
2734,
2735,
2736,
2737,
2738,
2739,
2740,
2741,
2742,
2743,
2744,
2745,
2746,
2747,
2748,
2749,
2750,
2751,
2752,
2753,
2754,
1545,
2755,
2756,
2757,
2758,
2759,
2760,
2761,
2762,
2763,
2764,
2765,
2766,
1546,
2767,
1547,
2768,
2769,
2770,
2771,
2772,
2773,
2774,
2775,
2776,
2777,
2778,
2779,
2780,
2781,
2782,
2783,
2784,
2785,
2786,
1548,
2787,
2788,
2789,
1109,
2790,
2791,
2792,
2793,
2794,
2795,
2796,
2797,
2798,
2799,
2800,
2801,
2802,
2803,
2804,
2805,
2806,
2807,
2808,
2809,
2810,
2811,
2812,
1329,
2813,
2814,
2815,
2816,
2817,
2818,
2819,
2820,
2821,
2822,
2823,
2824,
2825,
2826,
2827,
2828,
2829,
2830,
2831,
2832,
2833,
2834,
2835,
2836,
2837,
2838,
2839,
2840,
2841,
2842,
2843,
2844,
2845,
2846,
2847,
2848,
2849,
2850,
2851,
2852,
2853,
2854,
2855,
2856,
1549,
2857,
2858,
2859,
2860,
1550,
2861,
2862,
1551,
2863,
2864,
2865,
2866,
2867,
2868,
2869,
2870,
2871,
2872,
2873,
2874,
1110,
1330,
2875,
2876,
2877,
2878,
2879,
2880,
2881,
2882,
2883,
2884,
2885,
2886,
2887,
2888,
2889,
2890,
2891,
2892,
2893,
2894,
2895,
2896,
2897,
2898,
2899,
2900,
2901,
2902,
2903,
2904,
2905,
2906,
2907,
2908,
2909,
2910,
2911,
2912,
2913,
2914,
2915,
2916,
2917,
2918,
2919,
2920,
2921,
2922,
2923,
2924,
2925,
2926,
2927,
2928,
2929,
2930,
1331,
2931,
2932,
2933,
2934,
2935,
2936,
2937,
2938,
2939,
2940,
2941,
2942,
2943,
1552,
2944,
2945,
2946,
2947,
2948,
2949,
2950,
2951,
2952,
2953,
2954,
2955,
2956,
2957,
2958,
2959,
2960,
2961,
2962,
2963,
2964,
1252,
2965,
2966,
2967,
2968,
2969,
2970,
2971,
2972,
2973,
2974,
2975,
2976,
2977,
2978,
2979,
2980,
2981,
2982,
2983,
2984,
2985,
2986,
2987,
2988,
2989,
2990,
2991,
2992,
2993,
2994,
2995,
2996,
2997,
2998,
2999,
3000,
3001,
3002,
3003,
3004,
3005,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
1553,
3013,
3014,
3015,
3016,
3017,
1554,
3018,
1332,
3019,
3020,
3021,
3022,
3023,
3024,
3025,
3026,
3027,
3028,
3029,
3030,
3031,
3032,
3033,
3034,
3035,
3036,
3037,
3038,
3039,
3040,
3041,
3042,
3043,
3044,
3045,
3046,
3047,
3048,
3049,
3050,
1555,
3051,
3052,
3053,
1556,
1557,
3054,
3055,
3056,
3057,
3058,
3059,
3060,
3061,
3062,
3063,
3064,
3065,
3066,
3067,
1558,
3068,
3069,
3070,
3071,
3072,
3073,
3074,
3075,
3076,
1559,
3077,
3078,
3079,
3080,
3081,
3082,
3083,
1253,
3084,
3085,
3086,
3087,
3088,
3089,
3090,
3091,
3092,
3093,
3094,
3095,
3096,
3097,
3098,
3099,
3100,
3101,
3102,
3103,
3104,
3105,
3106,
3107,
3108,
1152,
3109,
3110,
3111,
3112,
3113,
1560,
3114,
3115,
3116,
3117,
1111,
3118,
3119,
3120,
3121,
3122,
3123,
3124,
3125,
3126,
3127,
3128,
3129,
3130,
3131,
3132,
3133,
3134,
3135,
3136,
3137,
3138,
3139,
3140,
3141,
3142,
3143,
3144,
3145,
3146,
3147,
3148,
3149,
3150,
3151,
3152,
3153,
3154,
3155,
3156,
3157,
3158,
3159,
3160,
3161,
3162,
3163,
3164,
3165,
3166,
3167,
3168,
3169,
3170,
3171,
3172,
3173,
3174,
3175,
3176,
1333,
3177,
3178,
3179,
3180,
3181,
3182,
3183,
3184,
3185,
3186,
3187,
3188,
3189,
1561,
3190,
3191,
1334,
3192,
3193,
3194,
3195,
3196,
3197,
3198,
3199,
3200,
3201,
3202,
3203,
3204,
3205,
3206,
3207,
3208,
3209,
3210,
3211,
3212,
3213,
3214,
3215,
3216,
3217,
3218,
3219,
3220,
3221,
3222,
3223,
3224,
3225,
3226,
3227,
3228,
3229,
3230,
3231,
3232,
3233,
3234,
1562,
3235,
3236,
3237,
3238,
3239,
3240,
3241,
3242,
3243,
3244,
3245,
3246,
3247,
3248,
3249,
3250,
3251,
3252,
3253,
3254,
3255,
3256,
3257,
3258,
3259,
3260,
3261,
3262,
3263,
3264,
3265,
3266,
3267,
3268,
3269,
3270,
3271,
3272,
3273,
3274,
3275,
3276,
3277,
1563,
3278,
3279,
3280,
3281,
3282,
3283,
3284,
3285,
3286,
3287,
3288,
3289,
3290,
3291,
3292,
3293,
3294,
3295,
3296,
3297,
3298,
3299,
3300,
3301,
3302,
3303,
3304,
3305,
3306,
3307,
3308,
3309,
3310,
3311,
3312,
3313,
3314,
3315,
3316,
3317,
3318,
3319,
3320,
3321,
3322,
3323,
3324,
3325,
3326,
3327,
3328,
3329,
3330,
3331,
3332,
3333,
3334,
3335,
3336,
3337,
3338,
3339,
3340,
3341,
3342,
3343,
3344,
3345,
3346,
3347,
3348,
3349,
3350,
3351,
3352,
3353,
3354,
3355,
3356,
3357,
3358,
3359,
3360,
3361,
3362,
3363,
3364,
1335,
3365,
3366,
3367,
3368,
3369,
3370,
3371,
3372,
3373,
3374,
3375,
3376,
3377,
3378,
3379,
3380,
3381,
3382,
3383,
3384,
3385,
3386,
3387,
1336,
3388,
3389,
3390,
3391,
3392,
3393,
3394,
3395,
3396,
3397,
3398,
3399,
3400,
3401,
3402,
3403,
3404,
3405,
3406,
3407,
3408,
3409,
3410,
3411,
3412,
3413,
3414,
1337,
3415,
3416,
3417,
3418,
3419,
1338,
3420,
3421,
3422,
1564,
1565,
3423,
3424,
3425,
3426,
3427,
3428,
3429,
3430,
3431,
1254,
3432,
3433,
3434,
1339,
3435,
3436,
3437,
3438,
3439,
1566,
3440,
3441,
3442,
3443,
3444,
3445,
3446,
3447,
3448,
3449,
3450,
3451,
3452,
3453,
3454,
1255,
3455,
3456,
3457,
3458,
3459,
1567,
1191,
3460,
1568,
1569,
3461,
3462,
3463,
1570,
3464,
3465,
3466,
3467,
3468,
1571,
3469,
3470,
3471,
3472,
3473,
1572,
3474,
3475,
3476,
3477,
3478,
3479,
3480,
3481,
3482,
3483,
3484,
3485,
3486,
1340,
3487,
3488,
3489,
3490,
3491,
3492,
1021,
3493,
3494,
3495,
3496,
3497,
3498,
1573,
3499,
1341,
3500,
3501,
3502,
3503,
3504,
3505,
3506,
3507,
3508,
3509,
3510,
3511,
1342,
3512,
3513,
3514,
3515,
3516,
1574,
1343,
3517,
3518,
3519,
1575,
3520,
1576,
3521,
3522,
3523,
3524,
3525,
3526,
3527,
3528,
3529,
3530,
3531,
3532,
3533,
3534,
3535,
3536,
3537,
3538,
3539,
3540,
3541,
3542,
3543,
3544,
3545,
3546,
3547,
3548,
3549,
3550,
3551,
3552,
3553,
3554,
3555,
3556,
3557,
3558,
3559,
3560,
3561,
3562,
3563,
3564,
3565,
3566,
3567,
3568,
3569,
3570,
3571,
3572,
3573,
3574,
3575,
3576,
3577,
3578,
3579,
3580,
1577,
3581,
3582,
1578,
3583,
3584,
3585,
3586,
3587,
3588,
3589,
3590,
3591,
3592,
3593,
3594,
3595,
3596,
3597,
3598,
3599,
3600,
3601,
3602,
3603,
3604,
1579,
3605,
3606,
3607,
3608,
3609,
3610,
3611,
3612,
3613,
3614,
3615,
3616,
3617,
3618,
3619,
3620,
3621,
3622,
3623,
3624,
3625,
3626,
3627,
3628,
3629,
1580,
3630,
3631,
1581,
3632,
3633,
3634,
3635,
3636,
3637,
3638,
3639,
3640,
3641,
3642,
3643,
3644,
3645,
3646,
3647,
3648,
3649,
3650,
3651,
3652,
3653,
3654,
3655,
3656,
1582,
3657,
3658,
3659,
3660,
3661,
3662,
3663,
3664,
3665,
3666,
3667,
3668,
3669,
3670,
3671,
3672,
3673,
3674,
3675,
3676,
3677,
3678,
3679,
3680,
3681,
3682,
3683,
3684,
3685,
3686,
3687,
3688,
3689,
3690,
3691,
3692,
3693,
3694,
3695,
3696,
3697,
3698,
3699,
3700,
1192,
3701,
3702,
3703,
3704,
1256,
3705,
3706,
3707,
3708,
1583,
1257,
3709,
3710,
3711,
3712,
3713,
3714,
3715,
3716,
1584,
3717,
3718,
3719,
3720,
3721,
3722,
3723,
3724,
3725,
3726,
3727,
3728,
3729,
3730,
3731,
3732,
3733,
3734,
3735,
3736,
3737,
3738,
3739,
3740,
3741,
3742,
3743,
3744,
3745,
1344,
3746,
3747,
3748,
3749,
3750,
3751,
3752,
3753,
3754,
3755,
3756,
1585,
3757,
3758,
3759,
3760,
3761,
3762,
3763,
3764,
3765,
3766,
1586,
3767,
3768,
3769,
3770,
3771,
3772,
3773,
3774,
3775,
3776,
3777,
3778,
1345,
3779,
3780,
3781,
3782,
3783,
3784,
3785,
3786,
3787,
3788,
3789,
3790,
3791,
3792,
3793,
3794,
3795,
1346,
1587,
3796,
3797,
1588,
3798,
3799,
3800,
3801,
3802,
3803,
3804,
3805,
3806,
1347,
3807,
3808,
3809,
3810,
3811,
1589,
3812,
3813,
3814,
3815,
3816,
3817,
3818,
3819,
3820,
3821,
1590,
3822,
3823,
1591,
1348,
3824,
3825,
3826,
3827,
3828,
3829,
3830,
1592,
3831,
3832,
1593,
3833,
3834,
3835,
3836,
3837,
3838,
3839,
3840,
3841,
3842,
3843,
3844,
1349,
3845,
3846,
3847,
3848,
3849,
3850,
3851,
3852,
3853,
3854,
3855,
3856,
3857,
3858,
1594,
3859,
3860,
3861,
3862,
3863,
3864,
3865,
3866,
3867,
3868,
3869,
1595,
3870,
3871,
3872,
3873,
1596,
3874,
3875,
3876,
3877,
3878,
3879,
3880,
3881,
3882,
3883,
3884,
3885,
3886,
1597,
3887,
3888,
3889,
3890,
3891,
3892,
3893,
3894,
3895,
1598,
3896,
3897,
3898,
1599,
1600,
3899,
1350,
3900,
1351,
3901,
3902,
1352,
3903,
3904,
3905,
3906,
3907,
3908,
3909,
3910,
3911,
3912,
3913,
3914,
3915,
3916,
3917,
3918,
3919,
3920,
3921,
3922,
3923,
3924,
1258,
3925,
3926,
3927,
3928,
3929,
3930,
3931,
1193,
3932,
1601,
3933,
3934,
3935,
3936,
3937,
3938,
3939,
3940,
3941,
3942,
3943,
1602,
3944,
3945,
3946,
3947,
3948,
1603,
3949,
3950,
3951,
3952,
3953,
3954,
3955,
3956,
3957,
3958,
3959,
3960,
3961,
3962,
3963,
3964,
3965,
1604,
3966,
3967,
3968,
3969,
3970,
3971,
3972,
3973,
3974,
3975,
3976,
3977,
1353,
3978,
3979,
3980,
3981,
3982,
3983,
3984,
3985,
3986,
3987,
3988,
3989,
3990,
3991,
1354,
3992,
3993,
3994,
3995,
3996,
3997,
3998,
3999,
4000,
4001,
4002,
4003,
4004,
4005,
4006,
4007,
4008,
4009,
4010,
4011,
4012,
4013,
4014,
4015,
4016,
4017,
4018,
4019,
4020,
4021,
4022,
4023,
1355,
4024,
4025,
4026,
4027,
4028,
4029,
4030,
4031,
4032,
4033,
4034,
4035,
4036,
4037,
4038,
4039,
4040,
1605,
4041,
4042,
4043,
4044,
4045,
4046,
4047,
4048,
4049,
4050,
4051,
4052,
4053,
4054,
4055,
4056,
4057,
4058,
4059,
4060,
1606,
4061,
4062,
4063,
4064,
1607,
4065,
4066,
4067,
4068,
4069,
4070,
4071,
4072,
4073,
4074,
4075,
4076,
1194,
4077,
4078,
1608,
4079,
4080,
4081,
4082,
4083,
4084,
4085,
4086,
4087,
1609,
4088,
4089,
4090,
4091,
4092,
4093,
4094,
4095,
4096,
4097,
4098,
4099,
4100,
4101,
4102,
4103,
4104,
4105,
4106,
4107,
4108,
1259,
4109,
4110,
4111,
4112,
4113,
4114,
4115,
4116,
4117,
4118,
4119,
4120,
4121,
4122,
4123,
4124,
1195,
4125,
4126,
4127,
1610,
4128,
4129,
4130,
4131,
4132,
4133,
4134,
4135,
4136,
4137,
1356,
4138,
4139,
4140,
4141,
4142,
4143,
4144,
1611,
4145,
4146,
4147,
4148,
4149,
4150,
4151,
4152,
4153,
4154,
4155,
4156,
4157,
4158,
4159,
4160,
4161,
4162,
4163,
4164,
4165,
4166,
4167,
4168,
4169,
4170,
4171,
4172,
4173,
4174,
4175,
4176,
4177,
4178,
4179,
4180,
4181,
4182,
4183,
4184,
4185,
4186,
4187,
4188,
4189,
4190,
4191,
4192,
4193,
4194,
4195,
4196,
4197,
4198,
4199,
4200,
4201,
4202,
4203,
4204,
4205,
4206,
4207,
4208,
4209,
4210,
4211,
4212,
4213,
4214,
4215,
4216,
4217,
4218,
4219,
1612,
4220,
4221,
4222,
4223,
4224,
4225,
4226,
4227,
1357,
4228,
1613,
4229,
4230,
4231,
4232,
4233,
4234,
4235,
4236,
4237,
4238,
4239,
4240,
4241,
4242,
4243,
1614,
4244,
4245,
4246,
4247,
4248,
4249,
4250,
4251,
4252,
4253,
4254,
4255,
4256,
4257,
4258,
4259,
4260,
4261,
4262,
4263,
4264,
4265,
4266,
4267,
4268,
4269,
4270,
1196,
1358,
4271,
4272,
4273,
4274,
4275,
4276,
4277,
4278,
4279,
4280,
4281,
4282,
4283,
4284,
4285,
4286,
4287,
1615,
4288,
4289,
4290,
4291,
4292,
4293,
4294,
4295,
4296,
4297,
4298,
4299,
4300,
4301,
4302,
4303,
4304,
4305,
4306,
4307,
4308,
4309,
4310,
4311,
4312,
4313,
4314,
4315,
4316,
4317,
4318,
4319,
4320,
4321,
4322,
4323,
4324,
4325,
4326,
4327,
4328,
4329,
4330,
4331,
4332,
4333,
4334,
1616,
4335,
4336,
4337,
4338,
4339,
4340,
4341,
4342,
4343,
4344,
4345,
4346,
4347,
4348,
4349,
4350,
4351,
4352,
4353,
4354,
4355,
4356,
4357,
4358,
4359,
4360,
1617,
4361,
4362,
4363,
4364,
4365,
1618,
4366,
4367,
4368,
4369,
4370,
4371,
4372,
4373,
4374,
4375,
4376,
4377,
4378,
4379,
4380,
4381,
4382,
4383,
4384,
4385,
4386,
4387,
4388,
4389,
4390,
4391,
4392,
4393,
4394,
4395,
4396,
4397,
4398,
4399,
4400,
4401,
4402,
4403,
4404,
4405,
4406,
4407,
4408,
4409,
4410,
4411,
4412,
4413,
4414,
4415,
4416,
1619,
4417,
4418,
4419,
4420,
4421,
4422,
4423,
4424,
4425,
1112,
4426,
4427,
4428,
4429,
4430,
1620,
4431,
4432,
4433,
4434,
4435,
4436,
4437,
4438,
4439,
4440,
4441,
4442,
1260,
1261,
4443,
4444,
4445,
4446,
4447,
4448,
4449,
4450,
4451,
4452,
4453,
4454,
4455,
1359,
4456,
4457,
4458,
4459,
4460,
4461,
4462,
4463,
4464,
4465,
1621,
4466,
4467,
4468,
4469,
4470,
4471,
4472,
4473,
4474,
4475,
4476,
4477,
4478,
4479,
4480,
4481,
4482,
4483,
4484,
4485,
4486,
4487,
4488,
4489,
1055,
4490,
4491,
4492,
4493,
4494,
4495,
4496,
4497,
4498,
4499,
4500,
4501,
4502,
4503,
4504,
4505,
4506,
4507,
4508,
4509,
4510,
4511,
4512,
4513,
4514,
4515,
4516,
4517,
4518,
1622,
4519,
4520,
4521,
1623,
4522,
4523,
4524,
4525,
4526,
4527,
4528,
4529,
4530,
4531,
4532,
4533,
4534,
4535,
1360,
4536,
4537,
4538,
4539,
4540,
4541,
4542,
4543,
975,
4544,
4545,
4546,
4547,
4548,
4549,
4550,
4551,
4552,
4553,
4554,
4555,
4556,
4557,
4558,
4559,
4560,
4561,
4562,
4563,
4564,
4565,
4566,
4567,
4568,
4569,
4570,
4571,
1624,
4572,
4573,
4574,
4575,
4576,
1625,
4577,
4578,
4579,
4580,
4581,
4582,
4583,
4584,
1626,
4585,
4586,
4587,
4588,
4589,
4590,
4591,
4592,
4593,
4594,
4595,
1627,
4596,
4597,
4598,
4599,
4600,
4601,
4602,
4603,
4604,
4605,
4606,
4607,
4608,
4609,
4610,
4611,
4612,
4613,
4614,
4615,
1628,
4616,
4617,
4618,
4619,
4620,
4621,
4622,
4623,
4624,
4625,
4626,
4627,
4628,
4629,
4630,
4631,
4632,
4633,
4634,
4635,
4636,
4637,
4638,
4639,
4640,
4641,
4642,
4643,
4644,
4645,
4646,
4647,
4648,
4649,
1361,
4650,
4651,
4652,
4653,
4654,
4655,
4656,
4657,
4658,
4659,
4660,
4661,
1362,
4662,
4663,
4664,
4665,
4666,
4667,
4668,
4669,
4670,
4671,
4672,
4673,
4674,
4675,
4676,
4677,
4678,
4679,
4680,
4681,
4682,
1629,
4683,
4684,
4685,
4686,
4687,
1630,
4688,
4689,
4690,
4691,
1153,
4692,
4693,
4694,
1113,
4695,
4696,
4697,
4698,
4699,
4700,
4701,
4702,
4703,
4704,
4705,
4706,
4707,
4708,
4709,
4710,
4711,
1197,
4712,
4713,
4714,
4715,
4716,
4717,
4718,
4719,
4720,
4721,
4722,
4723,
4724,
4725,
4726,
4727,
4728,
4729,
4730,
4731,
4732,
4733,
4734,
4735,
1631,
4736,
1632,
4737,
4738,
4739,
4740,
4741,
4742,
4743,
4744,
1633,
4745,
4746,
4747,
4748,
4749,
1262,
4750,
4751,
4752,
4753,
4754,
1363,
4755,
4756,
4757,
4758,
4759,
4760,
4761,
4762,
4763,
4764,
4765,
4766,
4767,
4768,
1634,
4769,
4770,
4771,
4772,
4773,
4774,
4775,
4776,
4777,
4778,
1635,
4779,
4780,
4781,
4782,
4783,
4784,
4785,
4786,
4787,
4788,
4789,
1636,
4790,
4791,
4792,
4793,
4794,
4795,
4796,
4797,
4798,
4799,
4800,
4801,
4802,
4803,
4804,
4805,
4806,
1637,
4807,
4808,
4809,
1638,
4810,
4811,
4812,
4813,
4814,
4815,
4816,
4817,
4818,
1639,
4819,
4820,
4821,
4822,
4823,
4824,
4825,
4826,
4827,
4828,
4829,
4830,
4831,
4832,
4833,
1077,
4834,
4835,
4836,
4837,
4838,
4839,
4840,
4841,
4842,
4843,
4844,
4845,
4846,
4847,
4848,
4849,
4850,
4851,
4852,
4853,
4854,
4855,
4856,
4857,
4858,
4859,
4860,
4861,
4862,
4863,
4864,
4865,
4866,
4867,
4868,
4869,
4870,
4871,
4872,
4873,
4874,
4875,
4876,
4877,
4878,
4879,
4880,
4881,
4882,
4883,
1640,
4884,
4885,
1641,
4886,
4887,
4888,
4889,
4890,
4891,
4892,
4893,
4894,
4895,
4896,
4897,
4898,
4899,
4900,
4901,
4902,
4903,
4904,
4905,
4906,
4907,
4908,
4909,
4910,
4911,
1642,
4912,
4913,
4914,
1364,
4915,
4916,
4917,
4918,
4919,
4920,
4921,
4922,
4923,
4924,
4925,
4926,
4927,
4928,
4929,
4930,
4931,
1643,
4932,
4933,
4934,
4935,
4936,
4937,
4938,
4939,
4940,
4941,
4942,
4943,
4944,
4945,
4946,
4947,
4948,
4949,
4950,
4951,
4952,
4953,
4954,
4955,
4956,
4957,
4958,
4959,
4960,
4961,
4962,
4963,
4964,
4965,
4966,
4967,
4968,
4969,
4970,
4971,
4972,
4973,
4974,
4975,
4976,
4977,
4978,
4979,
4980,
1644,
4981,
4982,
4983,
4984,
1645,
4985,
4986,
1646,
4987,
4988,
4989,
4990,
4991,
4992,
4993,
4994,
4995,
4996,
4997,
4998,
4999,
5000,
5001,
5002,
5003,
5004,
5005,
1647,
5006,
1648,
5007,
5008,
5009,
5010,
5011,
5012,
1078,
5013,
5014,
5015,
5016,
5017,
5018,
5019,
5020,
5021,
5022,
5023,
5024,
5025,
5026,
5027,
5028,
1365,
5029,
5030,
5031,
5032,
5033,
5034,
5035,
5036,
5037,
5038,
5039,
1649,
5040,
5041,
5042,
5043,
5044,
5045,
1366,
5046,
5047,
5048,
5049,
5050,
5051,
5052,
5053,
5054,
5055,
1650,
5056,
5057,
5058,
5059,
5060,
5061,
5062,
5063,
5064,
5065,
5066,
5067,
5068,
5069,
5070,
5071,
5072,
5073,
5074,
5075,
5076,
5077,
1651,
5078,
5079,
5080,
5081,
5082,
5083,
5084,
5085,
5086,
5087,
5088,
5089,
5090,
5091,
5092,
5093,
5094,
5095,
5096,
5097,
5098,
5099,
5100,
5101,
5102,
5103,
5104,
5105,
5106,
5107,
5108,
5109,
5110,
1652,
5111,
5112,
5113,
5114,
5115,
5116,
5117,
5118,
1367,
5119,
5120,
5121,
5122,
5123,
5124,
5125,
5126,
5127,
5128,
5129,
1653,
5130,
5131,
5132,
5133,
5134,
5135,
5136,
5137,
5138,
5139,
5140,
5141,
5142,
5143,
5144,
5145,
5146,
5147,
5148,
5149,
1368,
5150,
1654,
5151,
1369,
5152,
5153,
5154,
5155,
5156,
5157,
5158,
5159,
5160,
5161,
5162,
5163,
5164,
5165,
5166,
5167,
5168,
5169,
5170,
5171,
5172,
5173,
5174,
5175,
5176,
5177,
5178,
1370,
5179,
5180,
5181,
5182,
5183,
5184,
5185,
5186,
5187,
5188,
5189,
5190,
5191,
5192,
5193,
5194,
5195,
5196,
5197,
5198,
1655,
5199,
5200,
5201,
5202,
1656,
5203,
5204,
5205,
5206,
1371,
5207,
1372,
5208,
5209,
5210,
5211,
1373,
5212,
5213,
1374,
5214,
5215,
5216,
5217,
5218,
5219,
5220,
5221,
5222,
5223,
5224,
5225,
5226,
5227,
5228,
5229,
5230,
5231,
5232,
5233,
5234,
5235,
5236,
5237,
5238,
5239,
5240,
5241,
5242,
5243,
5244,
5245,
5246,
5247,
1657,
5248,
5249,
5250,
5251,
1658,
1263,
5252,
5253,
5254,
5255,
5256,
1375,
5257,
5258,
5259,
5260,
5261,
5262,
5263,
5264,
5265,
5266,
5267,
5268,
5269,
5270,
5271,
5272,
5273,
5274,
5275,
5276,
5277,
5278,
5279,
5280,
5281,
5282,
5283,
1659,
5284,
5285,
5286,
5287,
5288,
5289,
5290,
5291,
5292,
5293,
5294,
5295,
5296,
5297,
5298,
5299,
5300,
1660,
5301,
5302,
5303,
5304,
5305,
5306,
5307,
5308,
5309,
5310,
5311,
5312,
5313,
5314,
5315,
5316,
5317,
5318,
5319,
5320,
5321,
1376,
5322,
5323,
5324,
5325,
5326,
5327,
5328,
5329,
5330,
5331,
5332,
5333,
1198,
5334,
5335,
5336,
5337,
5338,
5339,
5340,
5341,
5342,
5343,
1661,
5344,
5345,
5346,
5347,
5348,
5349,
5350,
5351,
5352,
5353,
5354,
5355,
5356,
5357,
5358,
5359,
5360,
5361,
5362,
5363,
5364,
5365,
5366,
5367,
5368,
5369,
5370,
5371,
5372,
5373,
5374,
5375,
5376,
5377,
5378,
5379,
5380,
5381,
5382,
5383,
5384,
5385,
5386,
5387,
5388,
5389,
5390,
5391,
5392,
5393,
5394,
5395,
5396,
5397,
5398,
1264,
5399,
5400,
5401,
5402,
5403,
5404,
5405,
5406,
5407,
5408,
5409,
5410,
5411,
5412,
1662,
5413,
5414,
5415,
5416,
1663,
5417,
5418,
5419,
5420,
5421,
5422,
5423,
5424,
5425,
5426,
5427,
5428,
5429,
5430,
5431,
5432,
5433,
5434,
5435,
5436,
5437,
5438,
1664,
5439,
5440,
5441,
5442,
5443,
5444,
5445,
5446,
5447,
5448,
5449,
5450,
5451,
5452,
5453,
5454,
5455,
5456,
5457,
5458,
5459,
5460,
5461,
5462,
5463,
5464,
5465,
5466,
5467,
5468,
5469,
5470,
5471,
5472,
5473,
5474,
5475,
5476,
5477,
5478,
1154,
5479,
5480,
5481,
5482,
5483,
5484,
5485,
1665,
5486,
5487,
5488,
5489,
5490,
5491,
5492,
5493,
5494,
5495,
5496,
5497,
5498,
5499,
5500,
5501,
5502,
5503,
5504,
5505,
5506,
5507,
5508,
5509,
5510,
5511,
5512,
5513,
5514,
5515,
5516,
5517,
5518,
5519,
5520,
5521,
5522,
5523,
5524,
5525,
5526,
5527,
5528,
5529,
5530,
5531,
5532,
5533,
5534,
5535,
5536,
5537,
5538,
5539,
5540,
5541,
5542,
5543,
5544,
5545,
5546,
5547,
5548,
1377,
5549,
5550,
5551,
5552,
5553,
5554,
5555,
5556,
5557,
5558,
5559,
5560,
5561,
5562,
5563,
5564,
5565,
5566,
5567,
5568,
5569,
5570,
1114,
5571,
5572,
5573,
5574,
5575,
5576,
5577,
5578,
5579,
5580,
5581,
5582,
5583,
5584,
5585,
5586,
5587,
5588,
5589,
5590,
5591,
5592,
1378,
5593,
5594,
5595,
5596,
5597,
5598,
5599,
5600,
5601,
5602,
5603,
5604,
5605,
5606,
5607,
5608,
5609,
5610,
5611,
5612,
5613,
5614,
1379,
5615,
5616,
5617,
5618,
5619,
5620,
5621,
5622,
5623,
5624,
5625,
5626,
5627,
5628,
5629,
5630,
5631,
5632,
5633,
5634,
1380,
5635,
5636,
5637,
5638,
5639,
5640,
5641,
5642,
5643,
5644,
5645,
5646,
5647,
5648,
5649,
1381,
1056,
5650,
5651,
5652,
5653,
5654,
5655,
5656,
5657,
5658,
5659,
5660,
1666,
5661,
5662,
5663,
5664,
5665,
5666,
5667,
5668,
1667,
5669,
1668,
5670,
5671,
5672,
5673,
5674,
5675,
5676,
5677,
5678,
1155,
5679,
5680,
5681,
5682,
5683,
5684,
5685,
5686,
5687,
5688,
5689,
5690,
5691,
5692,
5693,
5694,
5695,
5696,
5697,
5698,
1669,
5699,
5700,
5701,
5702,
5703,
5704,
5705,
1670,
5706,
5707,
5708,
5709,
5710,
1671,
5711,
5712,
5713,
5714,
1382,
5715,
5716,
5717,
5718,
5719,
5720,
5721,
5722,
5723,
5724,
5725,
1672,
5726,
5727,
1673,
1674,
5728,
5729,
5730,
5731,
5732,
5733,
5734,
5735,
5736,
1675,
5737,
5738,
5739,
5740,
5741,
5742,
5743,
5744,
1676,
5745,
5746,
5747,
5748,
5749,
5750,
5751,
1383,
5752,
5753,
5754,
5755,
5756,
5757,
5758,
5759,
5760,
5761,
5762,
5763,
5764,
5765,
5766,
5767,
5768,
1677,
5769,
5770,
5771,
5772,
5773,
1678,
5774,
5775,
5776,
998,
5777,
5778,
5779,
5780,
5781,
5782,
5783,
5784,
5785,
1384,
5786,
5787,
5788,
5789,
5790,
5791,
5792,
5793,
5794,
5795,
5796,
5797,
5798,
5799,
5800,
1679,
5801,
5802,
5803,
1115,
1116,
5804,
5805,
5806,
5807,
5808,
5809,
5810,
5811,
5812,
5813,
5814,
5815,
5816,
5817,
5818,
5819,
5820,
5821,
5822,
5823,
5824,
5825,
5826,
5827,
5828,
5829,
5830,
5831,
5832,
5833,
5834,
5835,
5836,
5837,
5838,
5839,
5840,
5841,
5842,
5843,
5844,
5845,
5846,
5847,
5848,
5849,
5850,
5851,
5852,
5853,
5854,
5855,
1680,
5856,
5857,
5858,
5859,
5860,
5861,
5862,
5863,
5864,
1681,
5865,
5866,
5867,
1682,
5868,
5869,
5870,
5871,
5872,
5873,
5874,
5875,
5876,
5877,
5878,
5879,
1683,
5880,
1684,
5881,
5882,
5883,
5884,
1685,
5885,
5886,
5887,
5888,
5889,
5890,
5891,
5892,
5893,
5894,
5895,
5896,
5897,
5898,
5899,
5900,
5901,
5902,
5903,
5904,
5905,
5906,
5907,
1686,
5908,
5909,
5910,
5911,
5912,
5913,
5914,
5915,
5916,
5917,
5918,
5919,
5920,
5921,
5922,
5923,
5924,
5925,
5926,
5927,
5928,
5929,
5930,
5931,
5932,
5933,
5934,
5935,
1687,
5936,
5937,
5938,
5939,
5940,
5941,
5942,
5943,
5944,
5945,
5946,
5947,
5948,
5949,
5950,
5951,
5952,
1688,
1689,
5953,
1199,
5954,
5955,
5956,
5957,
5958,
5959,
5960,
5961,
1690,
5962,
5963,
5964,
5965,
5966,
5967,
5968,
5969,
5970,
5971,
5972,
5973,
5974,
5975,
5976,
5977,
5978,
5979,
5980,
5981,
1385,
5982,
1386,
5983,
5984,
5985,
5986,
5987,
5988,
5989,
5990,
5991,
5992,
5993,
5994,
5995,
5996,
5997,
5998,
5999,
6000,
6001,
6002,
6003,
6004,
6005,
6006,
6007,
6008,
6009,
6010,
6011,
6012,
6013,
6014,
6015,
6016,
6017,
6018,
6019,
6020,
6021,
6022,
6023,
6024,
6025,
6026,
6027,
1265,
6028,
6029,
1691,
6030,
6031,
6032,
6033,
6034,
6035,
6036,
6037,
6038,
6039,
6040,
6041,
6042,
6043,
6044,
6045,
6046,
6047,
6048,
6049,
6050,
6051,
6052,
6053,
6054,
6055,
6056,
6057,
6058,
6059,
6060,
6061,
6062,
6063,
6064,
6065,
6066,
6067,
6068,
6069,
6070,
6071,
6072,
6073,
6074,
6075,
6076,
6077,
6078,
6079,
6080,
6081,
6082,
6083,
6084,
1692,
6085,
6086,
6087,
6088,
6089,
6090,
6091,
6092,
6093,
6094,
6095,
6096,
6097,
6098,
6099,
6100,
6101,
6102,
6103,
6104,
6105,
6106,
6107,
6108,
6109,
6110,
6111,
6112,
6113,
6114,
6115,
6116,
6117,
6118,
6119,
6120,
6121,
6122,
6123,
6124,
6125,
6126,
6127,
6128,
6129,
6130,
6131,
1693,
6132,
6133,
6134,
6135,
6136,
1694,
6137,
6138,
6139,
6140,
6141,
1695,
6142,
6143,
6144,
6145,
6146,
6147,
6148,
6149,
6150,
6151,
6152,
6153,
6154,
6155,
6156,
6157,
6158,
6159,
6160,
6161,
6162,
6163,
6164,
6165,
6166,
6167,
6168,
6169,
6170,
6171,
6172,
6173,
6174,
6175,
6176,
6177,
6178,
6179,
6180,
6181,
6182,
6183,
6184,
6185,
1696,
6186,
6187,
6188,
6189,
6190,
6191,
6192,
6193,
6194,
6195,
6196,
6197,
6198,
6199,
6200,
6201,
6202,
6203,
6204,
6205,
6206,
6207,
6208,
6209,
6210,
6211,
6212,
6213,
6214,
6215,
6216,
6217,
6218,
6219,
1697,
6220,
6221,
6222,
6223,
6224,
6225,
6226,
6227,
6228,
6229,
6230,
6231,
6232,
6233,
6234,
6235,
6236,
6237,
6238,
6239,
6240,
6241,
6242,
6243,
6244,
6245,
6246,
6247,
6248,
6249,
6250,
6251,
6252,
6253,
1698,
6254,
6255,
6256,
6257,
6258,
6259,
6260,
6261,
6262,
6263,
1200,
6264,
6265,
6266,
6267,
6268,
6269,
6270,
6271, # 1024
6272,
6273,
6274,
6275,
6276,
6277,
6278,
6279,
6280,
6281,
6282,
6283,
6284,
6285,
6286,
6287,
6288,
6289,
6290,
6291,
6292,
6293,
6294,
6295,
6296,
6297,
6298,
6299,
6300,
6301,
6302,
1699,
6303,
6304,
1700,
6305,
6306,
6307,
6308,
6309,
6310,
6311,
6312,
6313,
6314,
6315,
6316,
6317,
6318,
6319,
6320,
6321,
6322,
6323,
6324,
6325,
6326,
6327,
6328,
6329,
6330,
6331,
6332,
6333,
6334,
6335,
6336,
6337,
6338,
6339,
1701,
6340,
6341,
6342,
6343,
6344,
1387,
6345,
6346,
6347,
6348,
6349,
6350,
6351,
6352,
6353,
6354,
6355,
6356,
6357,
6358,
6359,
6360,
6361,
6362,
6363,
6364,
6365,
6366,
6367,
6368,
6369,
6370,
6371,
6372,
6373,
6374,
6375,
6376,
6377,
6378,
6379,
6380,
6381,
6382,
6383,
6384,
6385,
6386,
6387,
6388,
6389,
6390,
6391,
6392,
6393,
6394,
6395,
6396,
6397,
6398,
6399,
6400,
6401,
6402,
6403,
6404,
6405,
6406,
6407,
6408,
6409,
6410,
6411,
6412,
6413,
1702,
6414,
6415,
6416,
6417,
6418,
6419,
6420,
6421,
6422,
1703,
6423,
6424,
6425,
6426,
6427,
6428,
6429,
6430,
6431,
6432,
6433,
6434,
6435,
6436,
6437,
6438,
1704,
6439,
6440,
6441,
6442,
6443,
6444,
6445,
6446,
6447,
6448,
6449,
6450,
6451,
6452,
6453,
6454,
6455,
6456,
6457,
6458,
6459,
6460,
6461,
6462,
6463,
6464,
6465,
6466,
6467,
6468,
6469,
6470,
6471,
6472,
6473,
6474,
6475,
6476,
6477,
6478,
6479,
6480,
6481,
6482,
6483,
6484,
6485,
6486,
6487,
6488,
6489,
6490,
6491,
6492,
6493,
6494,
6495,
6496,
6497,
6498,
6499,
6500,
6501,
6502,
6503,
1266,
6504,
6505,
6506,
6507,
6508,
6509,
6510,
6511,
6512,
6513,
6514,
6515,
6516,
6517,
6518,
6519,
6520,
6521,
6522,
6523,
6524,
6525,
6526,
6527,
6528,
6529,
6530,
6531,
6532,
6533,
6534,
6535,
6536,
6537,
6538,
6539,
6540,
6541,
6542,
6543,
6544,
6545,
6546,
6547,
6548,
6549,
6550,
6551,
1705,
1706,
6552,
6553,
6554,
6555,
6556,
6557,
6558,
6559,
6560,
6561,
6562,
6563,
6564,
6565,
6566,
6567,
6568,
6569,
6570,
6571,
6572,
6573,
6574,
6575,
6576,
6577,
6578,
6579,
6580,
6581,
6582,
6583,
6584,
6585,
6586,
6587,
6588,
6589,
6590,
6591,
6592,
6593,
6594,
6595,
6596,
6597,
6598,
6599,
6600,
6601,
6602,
6603,
6604,
6605,
6606,
6607,
6608,
6609,
6610,
6611,
6612,
6613,
6614,
6615,
6616,
6617,
6618,
6619,
6620,
6621,
6622,
6623,
6624,
6625,
6626,
6627,
6628,
6629,
6630,
6631,
6632,
6633,
6634,
6635,
6636,
6637,
1388,
6638,
6639,
6640,
6641,
6642,
6643,
6644,
1707,
6645,
6646,
6647,
6648,
6649,
6650,
6651,
6652,
6653,
6654,
6655,
6656,
6657,
6658,
6659,
6660,
6661,
6662,
6663,
1708,
6664,
6665,
6666,
6667,
6668,
6669,
6670,
6671,
6672,
6673,
6674,
1201,
6675,
6676,
6677,
6678,
6679,
6680,
6681,
6682,
6683,
6684,
6685,
6686,
6687,
6688,
6689,
6690,
6691,
6692,
6693,
6694,
6695,
6696,
6697,
6698,
6699,
6700,
6701,
6702,
6703,
6704,
6705,
6706,
6707,
6708,
6709,
6710,
6711,
6712,
6713,
6714,
6715,
6716,
6717,
6718,
6719,
6720,
6721,
6722,
6723,
6724,
6725,
1389,
6726,
6727,
6728,
6729,
6730,
6731,
6732,
6733,
6734,
6735,
6736,
1390,
1709,
6737,
6738,
6739,
6740,
6741,
6742,
1710,
6743,
6744,
6745,
6746,
1391,
6747,
6748,
6749,
6750,
6751,
6752,
6753,
6754,
6755,
6756,
6757,
1392,
6758,
6759,
6760,
6761,
6762,
6763,
6764,
6765,
6766,
6767,
6768,
6769,
6770,
6771,
6772,
6773,
6774,
6775,
6776,
6777,
6778,
6779,
6780,
1202,
6781,
6782,
6783,
6784,
6785,
6786,
6787,
6788,
6789,
6790,
6791,
6792,
6793,
6794,
6795,
6796,
6797,
6798,
6799,
6800,
6801,
6802,
6803,
6804,
6805,
6806,
6807,
6808,
6809,
1711,
6810,
6811,
6812,
6813,
6814,
6815,
6816,
6817,
6818,
6819,
6820,
6821,
6822,
6823,
6824,
6825,
6826,
6827,
6828,
6829,
6830,
6831,
6832,
6833,
6834,
6835,
6836,
1393,
6837,
6838,
6839,
6840,
6841,
6842,
6843,
6844,
6845,
6846,
6847,
6848,
6849,
6850,
6851,
6852,
6853,
6854,
6855,
6856,
6857,
6858,
6859,
6860,
6861,
6862,
6863,
6864,
6865,
6866,
6867,
6868,
6869,
6870,
6871,
6872,
6873,
6874,
6875,
6876,
6877,
6878,
6879,
6880,
6881,
6882,
6883,
6884,
6885,
6886,
6887,
6888,
6889,
6890,
6891,
6892,
6893,
6894,
6895,
6896,
6897,
6898,
6899,
6900,
6901,
6902,
1712,
6903,
6904,
6905,
6906,
6907,
6908,
6909,
6910,
1713,
6911,
6912,
6913,
6914,
6915,
6916,
6917,
6918,
6919,
6920,
6921,
6922,
6923,
6924,
6925,
6926,
6927,
6928,
6929,
6930,
6931,
6932,
6933,
6934,
6935,
6936,
6937,
6938,
6939,
6940,
6941,
6942,
6943,
6944,
6945,
6946,
6947,
6948,
6949,
6950,
6951,
6952,
6953,
6954,
6955,
6956,
6957,
6958,
6959,
6960,
6961,
6962,
6963,
6964,
6965,
6966,
6967,
6968,
6969,
6970,
6971,
6972,
6973,
6974,
1714,
6975,
6976,
6977,
6978,
6979,
6980,
6981,
6982,
6983,
6984,
6985,
6986,
6987,
6988,
1394,
6989,
6990,
6991,
6992,
6993,
6994,
6995,
6996,
6997,
6998,
6999,
7000,
1715,
7001,
7002,
7003,
7004,
7005,
7006,
7007,
7008,
7009,
7010,
7011,
7012,
7013,
7014,
7015,
7016,
7017,
7018,
7019,
7020,
7021,
7022,
7023,
7024,
7025,
7026,
7027,
7028,
1716,
7029,
7030,
7031,
7032,
7033,
7034,
7035,
7036,
7037,
7038,
7039,
7040,
7041,
7042,
7043,
7044,
7045,
7046,
7047,
7048,
7049,
7050,
7051,
7052,
7053,
7054,
7055,
7056,
7057,
7058,
7059,
7060,
7061,
7062,
7063,
7064,
7065,
7066,
7067,
7068,
7069,
7070,
7071,
7072,
7073,
7074,
7075,
7076,
7077,
7078,
7079,
7080,
7081,
7082,
7083,
7084,
7085,
7086,
7087,
7088,
7089,
7090,
7091,
7092,
7093,
7094,
7095,
7096,
7097,
7098,
7099,
7100,
7101,
7102,
7103,
7104,
7105,
7106,
7107,
7108,
7109,
7110,
7111,
7112,
7113,
7114,
7115,
7116,
7117,
7118,
7119,
7120,
7121,
7122,
7123,
7124,
7125,
7126,
7127,
7128,
7129,
7130,
7131,
7132,
7133,
7134,
7135,
7136,
7137,
7138,
7139,
7140,
7141,
7142,
7143,
7144,
7145,
7146,
7147,
7148,
7149,
7150,
7151,
7152,
7153,
7154,
7155,
7156,
7157,
7158,
7159,
7160,
7161,
7162,
7163,
7164,
7165,
7166,
7167,
7168,
7169,
7170,
7171,
7172,
7173,
7174,
7175,
7176,
7177,
7178,
7179,
7180,
7181,
7182,
7183,
7184,
7185,
7186,
7187,
7188,
7189,
7190,
7191,
7192,
7193,
7194,
7195,
7196,
7197,
7198,
7199,
7200,
7201,
7202,
7203,
7204,
7205,
7206,
7207,
1395,
7208,
7209,
7210,
7211,
7212,
7213,
1717,
7214,
7215,
7216,
7217,
7218,
7219,
7220,
7221,
7222,
7223,
7224,
7225,
7226,
7227,
7228,
7229,
7230,
7231,
7232,
7233,
7234,
7235,
7236,
7237,
7238,
7239,
7240,
7241,
7242,
7243,
7244,
7245,
7246,
7247,
7248,
7249,
7250,
7251,
7252,
7253,
7254,
7255,
7256,
7257,
7258,
7259,
7260,
7261,
7262,
7263,
7264,
7265,
7266,
7267,
7268,
7269,
7270,
7271,
7272,
7273,
7274,
7275,
7276,
7277,
7278,
7279,
7280,
7281,
7282,
7283,
7284,
7285,
7286,
7287,
7288,
7289,
7290,
7291,
7292,
7293,
7294,
7295,
7296,
7297,
7298,
7299,
7300,
7301,
7302,
7303,
7304,
7305,
7306,
7307,
7308,
7309,
7310,
7311,
7312,
7313,
1718,
7314,
7315,
7316,
7317,
7318,
7319,
7320,
7321,
7322,
7323,
7324,
7325,
7326,
7327,
7328,
7329,
7330,
7331,
7332,
7333,
7334,
7335,
7336,
7337,
7338,
7339,
7340,
7341,
7342,
7343,
7344,
7345,
7346,
7347,
7348,
7349,
7350,
7351,
7352,
7353,
7354,
7355,
7356,
7357,
7358,
7359,
7360,
7361,
7362,
7363,
7364,
7365,
7366,
7367,
7368,
7369,
7370,
7371,
7372,
7373,
7374,
7375,
7376,
7377,
7378,
7379,
7380,
7381,
7382,
7383,
7384,
7385,
7386,
7387,
7388,
7389,
7390,
7391,
7392,
7393,
7394,
7395,
7396,
7397,
7398,
7399,
7400,
7401,
7402,
7403,
7404,
7405,
7406,
7407,
7408,
7409,
7410,
7411,
7412,
7413,
7414,
7415,
7416,
7417,
7418,
7419,
7420,
7421,
7422,
7423,
7424,
7425,
7426,
7427,
7428,
7429,
7430,
7431,
7432,
7433,
7434,
7435,
7436,
7437,
7438,
7439,
7440,
7441,
7442,
7443,
7444,
7445,
7446,
7447,
7448,
7449,
7450,
7451,
7452,
7453,
7454,
7455,
7456,
7457,
7458,
7459,
7460,
7461,
7462,
7463,
7464,
7465,
7466,
7467,
7468,
7469,
7470,
7471,
7472,
7473,
7474,
7475,
7476,
7477,
7478,
7479,
7480,
7481,
7482,
7483,
7484,
7485,
7486,
7487,
7488,
7489,
7490,
7491,
7492,
7493,
7494,
7495,
7496,
7497,
7498,
7499,
7500,
7501,
7502,
7503,
7504,
7505,
7506,
7507,
7508,
7509,
7510,
7511,
7512,
7513,
7514,
7515,
7516,
7517,
7518,
7519,
7520,
7521,
7522,
7523,
7524,
7525,
7526,
7527,
7528,
7529,
7530,
7531,
7532,
7533,
7534,
7535,
7536,
7537,
7538,
7539,
7540,
7541,
7542,
7543,
7544,
7545,
7546,
7547,
7548,
7549,
7550,
7551,
7552,
7553,
7554,
7555,
7556,
7557,
7558,
7559,
7560,
7561,
7562,
7563,
7564,
7565,
7566,
7567,
7568,
7569,
7570,
7571,
7572,
7573,
7574,
7575,
7576,
7577,
7578,
7579,
7580,
7581,
7582,
7583,
7584,
7585,
7586,
7587,
7588,
7589,
7590,
7591,
7592,
7593,
7594,
7595,
7596,
7597,
7598,
7599,
7600,
7601,
7602,
7603,
7604,
7605,
7606,
7607,
7608,
7609,
7610,
7611,
7612,
7613,
7614,
7615,
7616,
7617,
7618,
7619,
7620,
7621,
7622,
7623,
7624,
7625,
7626,
7627,
7628,
7629,
7630,
7631,
7632,
7633,
7634,
7635,
7636,
7637,
7638,
7639,
7640,
7641,
7642,
7643,
7644,
7645,
7646,
7647,
7648,
7649,
7650,
7651,
7652,
7653,
7654,
7655,
7656,
7657,
7658,
7659,
7660,
7661,
7662,
7663,
7664,
7665,
7666,
7667,
7668,
7669,
7670,
7671,
7672,
7673,
7674,
7675,
7676,
7677,
7678,
7679,
7680,
7681,
7682,
7683,
7684,
7685,
7686,
7687,
7688,
7689,
7690,
7691,
7692,
7693,
7694,
7695,
7696,
7697,
7698,
7699,
7700,
7701,
7702,
7703,
7704,
7705,
7706,
7707,
7708,
7709,
7710,
7711,
7712,
7713,
7714,
7715,
7716,
7717,
7718,
7719,
7720,
7721,
7722,
7723,
7724,
7725,
7726,
7727,
7728,
7729,
7730,
7731,
7732,
7733,
7734,
7735,
7736,
7737,
7738,
7739,
7740,
7741,
7742,
7743,
7744,
7745,
7746,
7747,
7748,
7749,
7750,
7751,
7752,
7753,
7754,
7755,
7756,
7757,
7758,
7759,
7760,
7761,
7762,
7763,
7764,
7765,
7766,
7767,
7768,
7769,
7770,
7771,
7772,
7773,
7774,
7775,
7776,
7777,
7778,
7779,
7780,
7781,
7782,
7783,
7784,
7785,
7786,
7787,
7788,
7789,
7790,
7791,
7792,
7793,
7794,
7795,
7796,
7797,
7798,
7799,
7800,
7801,
7802,
7803,
7804,
7805,
7806,
7807,
7808,
7809,
7810,
7811,
7812,
7813,
7814,
7815,
7816,
7817,
7818,
7819,
7820,
7821,
7822,
7823,
7824,
7825,
7826,
7827,
7828,
7829,
7830,
7831,
7832,
7833,
7834,
7835,
7836,
7837,
7838,
7839,
7840,
7841,
7842,
7843,
7844,
7845,
7846,
7847,
7848,
7849,
7850,
7851,
7852,
7853,
7854,
7855,
7856,
7857,
7858,
7859,
7860,
7861,
7862,
7863,
7864,
7865,
7866,
7867,
7868,
7869,
7870,
7871,
7872,
7873,
7874,
7875,
7876,
7877,
7878,
7879,
7880,
7881,
7882,
7883,
7884,
7885,
7886,
7887,
7888,
7889,
7890,
7891,
7892,
7893,
7894,
7895,
7896,
7897,
7898,
7899,
7900,
7901,
7902,
7903,
7904,
7905,
7906,
7907,
7908,
7909,
7910,
7911,
7912,
7913,
7914,
7915,
7916,
7917,
7918,
7919,
7920,
7921,
7922,
7923,
7924,
7925,
7926,
7927,
7928,
7929,
7930,
7931,
7932,
7933,
7934,
7935,
7936,
7937,
7938,
7939,
7940,
7941,
7942,
7943,
7944,
7945,
7946,
7947,
7948,
7949,
7950,
7951,
7952,
7953,
7954,
7955,
7956,
7957,
7958,
7959,
7960,
7961,
7962,
7963,
7964,
7965,
7966,
7967,
7968,
7969,
7970,
7971,
7972,
7973,
7974,
7975,
7976,
7977,
7978,
7979,
7980,
7981,
7982,
7983,
7984,
7985,
7986,
7987,
7988,
7989,
7990,
7991,
7992,
7993,
7994,
7995,
7996,
7997,
7998,
7999,
8000,
8001,
8002,
8003,
8004,
8005,
8006,
8007,
8008,
8009,
8010,
8011,
8012,
8013,
8014,
8015,
8016,
8017,
8018,
8019,
8020,
8021,
8022,
8023,
8024,
8025,
8026,
8027,
8028,
8029,
8030,
8031,
8032,
8033,
8034,
8035,
8036,
8037,
8038,
8039,
8040,
8041,
8042,
8043,
8044,
8045,
8046,
8047,
8048,
8049,
8050,
8051,
8052,
8053,
8054,
8055,
8056,
8057,
8058,
8059,
8060,
8061,
8062,
8063,
8064,
8065,
8066,
8067,
8068,
8069,
8070,
8071,
8072,
8073,
8074,
8075,
8076,
8077,
8078,
8079,
8080,
8081,
8082,
8083,
8084,
8085,
8086,
8087,
8088,
8089,
8090,
8091,
8092,
8093,
8094,
8095,
8096,
8097,
8098,
8099,
8100,
8101,
8102,
8103,
8104,
8105,
8106,
8107,
8108,
8109,
8110,
8111,
8112,
8113,
8114,
8115,
8116,
8117,
8118,
8119,
8120,
8121,
8122,
8123,
8124,
8125,
8126,
8127,
8128,
8129,
8130,
8131,
8132,
8133,
8134,
8135,
8136,
8137,
8138,
8139,
8140,
8141,
8142,
8143,
8144,
8145,
8146,
8147,
8148,
8149,
8150,
8151,
8152,
8153,
8154,
8155,
8156,
8157,
8158,
8159,
8160,
8161,
8162,
8163,
8164,
8165,
8166,
8167,
8168,
8169,
8170,
8171,
8172,
8173,
8174,
8175,
8176,
8177,
8178,
8179,
8180,
8181,
8182,
8183,
8184,
8185,
8186,
8187,
8188,
8189,
8190,
8191,
8192,
8193,
8194,
8195,
8196,
8197,
8198,
8199,
8200,
8201,
8202,
8203,
8204,
8205,
8206,
8207,
8208,
8209,
8210,
8211,
8212,
8213,
8214,
8215,
8216,
8217,
8218,
8219,
8220,
8221,
8222,
8223,
8224,
8225,
8226,
8227,
8228,
8229,
8230,
8231,
8232,
8233,
8234,
8235,
8236,
8237,
8238,
8239,
8240,
8241,
8242,
8243,
8244,
8245,
8246,
8247,
8248,
8249,
8250,
8251,
8252,
8253,
8254,
8255,
8256,
8257,
8258,
8259,
8260,
8261,
8262,
8263,
8264,
8265,
8266,
8267,
8268,
8269,
8270,
8271,
8272,
8273,
8274,
8275,
8276,
8277,
8278,
8279,
8280,
8281,
8282,
8283,
8284,
8285,
8286,
8287,
8288,
8289,
8290,
8291,
8292,
8293,
8294,
8295,
8296,
8297,
8298,
8299,
8300,
8301,
8302,
8303,
8304,
8305,
8306,
8307,
8308,
8309,
8310,
8311,
8312,
8313,
8314,
8315,
8316,
8317,
8318,
8319,
8320,
8321,
8322,
8323,
8324,
8325,
8326,
8327,
8328,
8329,
8330,
8331,
8332,
8333,
8334,
8335,
8336,
8337,
8338,
8339,
8340,
8341,
8342,
8343,
8344,
8345,
8346,
8347,
8348,
8349,
8350,
8351,
8352,
8353,
8354,
8355,
8356,
8357,
8358,
8359,
8360,
8361,
8362,
8363,
8364,
8365,
8366,
8367,
8368,
8369,
8370,
8371,
8372,
8373,
8374,
8375,
8376,
8377,
8378,
8379,
8380,
8381,
8382,
8383,
8384,
8385,
8386,
8387,
8388,
8389,
8390,
8391,
8392,
8393,
8394,
8395,
8396,
8397,
8398,
8399,
8400,
8401,
8402,
8403,
8404,
8405,
8406,
8407,
8408,
8409,
8410,
8411,
8412,
8413,
8414,
8415,
8416,
8417,
8418,
8419,
8420,
8421,
8422,
8423,
8424,
8425,
8426,
8427,
8428,
8429,
8430,
8431,
8432,
8433,
8434,
8435,
8436,
8437,
8438,
8439,
8440,
8441,
8442,
8443,
8444,
8445,
8446,
8447,
8448,
8449,
8450,
8451,
8452,
8453,
8454,
8455,
8456,
8457,
8458,
8459,
8460,
8461,
8462,
8463,
8464,
8465,
8466,
8467,
8468,
8469,
8470,
8471,
8472,
8473,
8474,
8475,
8476,
8477,
8478,
8479,
8480,
8481,
8482,
8483,
8484,
8485,
8486,
8487,
8488,
8489,
8490,
8491,
8492,
8493,
8494,
8495,
8496,
8497,
8498,
8499,
8500,
8501,
8502,
8503,
8504,
8505,
8506,
8507,
8508,
8509,
8510,
8511,
8512,
8513,
8514,
8515,
8516,
8517,
8518,
8519,
8520,
8521,
8522,
8523,
8524,
8525,
8526,
8527,
8528,
8529,
8530,
8531,
8532,
8533,
8534,
8535,
8536,
8537,
8538,
8539,
8540,
8541,
8542,
8543,
8544,
8545,
8546,
8547,
8548,
8549,
8550,
8551,
8552,
8553,
8554,
8555,
8556,
8557,
8558,
8559,
8560,
8561,
8562,
8563,
8564,
8565,
8566,
8567,
8568,
8569,
8570,
8571,
8572,
8573,
8574,
8575,
8576,
8577,
8578,
8579,
8580,
8581,
8582,
8583,
8584,
8585,
8586,
8587,
8588,
8589,
8590,
8591,
8592,
8593,
8594,
8595,
8596,
8597,
8598,
8599,
8600,
8601,
8602,
8603,
8604,
8605,
8606,
8607,
8608,
8609,
8610,
8611,
8612,
8613,
8614,
8615,
8616,
8617,
8618,
8619,
8620,
8621,
8622,
8623,
8624,
8625,
8626,
8627,
8628,
8629,
8630,
8631,
8632,
8633,
8634,
8635,
8636,
8637,
8638,
8639,
8640,
8641,
8642,
8643,
8644,
8645,
8646,
8647,
8648,
8649,
8650,
8651,
8652,
8653,
8654,
8655,
8656,
8657,
8658,
8659,
8660,
8661,
8662,
8663,
8664,
8665,
8666,
8667,
8668,
8669,
8670,
8671,
8672,
8673,
8674,
8675,
8676,
8677,
8678,
8679,
8680,
8681,
8682,
8683,
8684,
8685,
8686,
8687,
8688,
8689,
8690,
8691,
8692,
8693,
8694,
8695,
8696,
8697,
8698,
8699,
8700,
8701,
8702,
8703,
8704,
8705,
8706,
8707,
8708,
8709,
8710,
8711,
8712,
8713,
8714,
8715,
8716,
8717,
8718,
8719,
8720,
8721,
8722,
8723,
8724,
8725,
8726,
8727,
8728,
8729,
8730,
8731,
8732,
8733,
8734,
8735,
8736,
8737,
8738,
8739,
8740,
8741,
)
# flake8: noqa
|
hal | tests | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os
from unittest import expectedFailure, skip
import responses
from backend.oai import OaiPaperSource
from backend.translators import BASEDCTranslator
from deposit.hal.metadata import AOFRFormatter
from deposit.hal.protocol import HALProtocol
from deposit.models import DepositRecord, Repository
from deposit.tests.test_protocol import ProtocolTest
from django.test import TestCase
from papers.models import OaiRecord, Paper
from upload.models import UploadedPDF
class AOFRTest(TestCase):
    """Tests for the AOFR (HAL SWORD) metadata formatter."""

    def setUp(self):
        super(AOFRTest, self).setUp()
        # This currently fails and is unused
        # xsd_fname = path.join(path.dirname(__file__), 'aofr-sword.xsd')
        # with open(xsd_fname, 'r') as f:
        #     elem = etree.parse(f)
        # cls.xsd = etree.XMLSchema(elem)

    def test_generate_metadata_doi(self):
        # Only construction and paper creation are exercised; the
        # rendering/XSD-validation steps below are still disabled.
        # f =
        AOFRFormatter()
        for doi in ("10.1175/jas-d-15-0240.1",):
            Paper.create_by_doi(doi)
        # form = TODO
        # rendered = f.render(p, 'article.pdf', form)
        # with open('/tmp/xml_validation.xml', 'w') as f:
        #     f.write(etree.tostring(rendered, pretty_print=True))
        # XSD validation currently fails
        # self.xsd.assertValid(rendered)
class TopicPredictionTest(TestCase):
    """Tests for HAL topic (domain) prediction through the annif service."""

    def setUp(self):
        self.protocol = HALProtocol(repository=Repository())

    @responses.activate
    def test_predict_topic(self):
        text = "A complete model for faceted dataflow programs"
        # Canned annif answer; the highest-scoring domain comes first.
        annif_payload = {
            "results": [
                {
                    "label": "Computer science",
                    "score": 0.8055065870285034,
                    "uri": "https://aurehal.archives-ouvertes.fr/domain/INFO",
                },
                {
                    "label": "Cognitive science",
                    "score": 0.09652446210384369,
                    "uri": "https://aurehal.archives-ouvertes.fr/domain/SCCO",
                },
                {
                    "label": "Statistics",
                    "score": 0.02962828427553177,
                    "uri": "https://aurehal.archives-ouvertes.fr/domain/STAT",
                },
            ]
        }
        responses.add(
            responses.POST,
            "https://annif.dissem.in/v1/projects/hal-fasttext/suggest",
            json=annif_payload,
        )
        # Expected topic code matches the INFO domain URI above.
        self.assertEqual(self.protocol.predict_topic(text), "INFO")

    @responses.activate
    def test_predict_empty_text(self):
        # Empty input must yield no prediction.
        self.assertEqual(self.protocol.predict_topic(""), None)
@skip(
    """
    HAL tests are currently disabled because of deletion issues in
    hal-preprod. Feel free to reactivate once this has been solved.
    (2017-10-23)
    """
)
class HALProtocolTest(ProtocolTest):
    # NOTE(review): integration tests against the hal-preprod sandbox.
    # They depend on external state (which identifiers exist there), so
    # several tests carry comments about when their fixtures must change.

    def setUp(self):
        super(HALProtocolTest, self).setUp()
        self.setUpForProtocol(
            HALProtocol,
            Repository(
                username="test_ws",
                password="test",
                endpoint="https://api-preprod.archives-ouvertes.fr/sword/",
            ),
        )

    def test_encode(self):
        # "Basic " + base64("test_ws:test")
        self.assertEqual(self.proto.encodeUserData(), b"Basic dGVzdF93czp0ZXN0")

    @expectedFailure
    def test_lncs_many_authors(self):
        """
        Submit a paper from LNCS (type: book-chapter).
        This fails with the default test account because
        it does not have the right to deposit with only one
        affiliation.
        """
        # the DOI below should *not* already exist in HAL
        # so it may need to be changed if the test fails
        p = Paper.create_by_doi("10.1007/978-3-319-63342-8_1")
        r = self.dry_deposit(
            p,
            abstract="this is an abstract",
            topic="INFO",
            depositing_author=0,
            affiliation=59704,
        )  # ENS
        self.assertEqualOrLog(r.status, "faked")

    def test_lncs(self):
        """
        Same as test_lncs but with only one author
        """
        p = Paper.create_by_doi("10.1007/978-3-319-63342-8_1")
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="this is an abstract",
            topic="INFO",
            depositing_author=0,
            affiliation=59704,
        )  # ENS
        self.assertEqualOrLog(r.status, "faked")

    def test_lics(self):
        """
        Submit a paper from LICS (type: conference-proceedings)
        """
        p = Paper.create_by_doi("10.1109/lics.2015.37")
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="here is my great result",
            topic="NLIN",
            depositing_author=0,
            affiliation=128940,
        )
        self.assertEqualOrLog(r.status, "faked")

    def test_journal_article(self):
        """
        Submit a journal article
        """
        p = Paper.create_by_doi("10.1016/j.agee.2004.10.001")
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="here is my great result",
            topic="SDV",
            depositing_author=0,
            affiliation=128940,
        )
        self.assertEqualOrLog(r.status, "faked")

    def test_topic_set_to_other(self):
        """
        Submit a journal article with "OTHER" as topic,
        which is forbidden by HAL
        """
        p = Paper.create_by_doi("10.1016/j.agee.2004.10.001")
        self.proto.init_deposit(p, self.user)
        # the user is presented with initial data
        args = self.proto.get_form_initial_data()
        # they fill the form with an invalid topic
        form_fields = {
            "abstract": "here is my great result",
            "topic": "OTHER",
            "depositing_author": 0,
            "affiliation": 128940,
        }
        args.update(form_fields)
        # the form should reject the "OTHER" topic
        form = self.proto.get_bound_form(args)
        self.assertFalse(form.is_valid())

    def test_keywords(self):
        """
        Keywords are mandatory
        """
        p = Paper.create_by_doi("10.1007/s00268-016-3429-x")
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="bla ble bli blo blu",
            topic="SDV",
            depositing_author=0,
            affiliation=128940,
        )
        self.assertEqualOrLog(r.status, "faked")

    def test_preprint(self):
        """
        Submit a preprint
        """
        oai = OaiPaperSource(self.oaisource)
        oai.add_translator(BASEDCTranslator(self.oaisource))
        p = oai.create_paper_by_identifier(
            "ftarxivpreprints:oai:arXiv.org:1207.2079", "base_dc"
        )
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="here is my great result",
            topic="SDV",
            depositing_author=0,
            affiliation=128940,
        )
        self.assertEqualOrLog(r.status, "faked")

    def test_bad_journal_article(self):
        """
        Submit something that pretends to be a journal article,
        but for which we fail to find publication metadata.
        The interface should fall back on something lighter.
        """
        oai = OaiPaperSource(self.oaisource)
        oai.add_translator(BASEDCTranslator(self.oaisource))
        p = oai.create_paper_by_identifier(
            "ftalborguniv:oai:pure.atira.dk:openaire/30feea10-9c2f-11db-8ed6-000ea68e967b",
            "base_dc",
        )
        p.authors_list = [p.authors_list[0]]
        p.doctype = "journal-article"
        p.save()
        r = self.dry_deposit(
            p,
            abstract="hey you, yes you",
            topic="SDV",
            depositing_author=0,
            affiliation=128940,
        )
        self.assertEqualOrLog(r.status, "faked")

    def test_paper_already_in_hal(self):
        # A paper that is already in HAL must not offer the deposit UI.
        p = Paper.create_by_hal_id("hal-01062241v1")
        enabled = self.proto.init_deposit(p, self.user)
        self.assertFalse(enabled)

    def test_refresh_deposit_status(self):
        # This is the identifier of a paper which should
        # currently be published on HAL preprod
        hal_id = "hal-01211282"
        # First, fake the deposition of a paper
        p = Paper.create_by_doi("10.1109/lics.2015.37")
        r = OaiRecord.new(
            source=self.repo.oaisource,
            identifier="deposition:1:" + hal_id,
            splash_url="https://hal-preprod.archives-ouvertes.fr/" + hal_id,
            pdf_url=None,
            about=p,
        )
        f = UploadedPDF.objects.create(
            user=self.user,
            orig_name="File.pdf",
            file=os.path.join(self.testdir, "testdata/blank.pdf"),
            thumbnail="my_thumbnail.png",
        )
        d = DepositRecord.objects.create(
            paper=p,
            oairecord=r,
            repository=self.repo,
            user=self.user,
            status="pending",
            identifier=hal_id,
            upload_type="postprint",
            file=f,
        )
        self.proto.refresh_deposit_status(d)
        self.assertEqual(d.status, "published")
        self.assertTrue(r.pdf_url)

    def test_get_new_status(self):
        # Map of hal-preprod identifiers to their expected statuses.
        cases = {
            "tel-01584471": "published",
            "hal-01038374": "deleted",
            # the document below should have "pending" status on hal-preprod
            # and may need to be updated if the preprod database is reset
            "hal-01587501": "pending",
        }
        for identifier in cases:
            self.assertEqual(self.proto.get_new_status(identifier), cases[identifier])

    def test_paper_already_in_hal_but_not_in_dissemin(self):
        """
        In this case, Dissemin missed the paper on HAL
        (for some reason) and so the deposit interface was
        enabled. But HAL refuses the deposit! We have to
        give a good error message to the user.
        """
        # this paper is currently in HAL-preprod
        p = Paper.create_by_doi("10.1051/jphys:01975003607-8060700")
        # this is just to make sure that we are depositing with
        # a single author (otherwise, the deposit would fail because
        # we are not providing enough affiliations).
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="this is an abstract",
            topic="INFO",
            depositing_author=0,
            affiliation=59704,
        )  # ENS
        # Deposit fails: a duplicate is found
        self.assertEqualOrLog(r.status, "failed")
        # The error message should be specific
        self.assertTrue("already in HAL" in r.message)

    def test_on_behalf_of(self):
        # Set on-behalf-of to some user
        # Currently we are using "test_ws" as deposit account
        preferences = self.proto.get_preferences(self.user)
        preferences.on_behalf_of = "dissemin_test"
        preferences.save()
        # the DOI here should *not* already exist in HAL
        # so it may need to be changed if the test fails
        p = Paper.create_by_doi("10.1007/978-3-319-63342-8_1")
        p.authors_list = [p.authors_list[0]]
        r = self.dry_deposit(
            p,
            abstract="this is an abstract",
            topic="INFO",
            depositing_author=0,
            affiliation=59704,
        )  # ENS
        self.assertEqualOrLog(r.status, "faked")
|
BOPTools | Utils | # /***************************************************************************
# * Copyright (c) 2016 Victor Titov (DeepSOIC) <vv.titov@gmail.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ***************************************************************************/
__title__ = "BOPTools.Utils module"
__author__ = "DeepSOIC"
__url__ = "http://www.freecad.org"
__doc__ = "Utility code, used by various modules of BOPTools."
class HashableShape(object):
    """Wrapper that makes a Part.Shape usable as a dict key or set member.

    Hashing relies on Shape.hashCode(); equality delegates to Shape.isSame().
    """

    def __init__(self, shape):
        # Cache the hash code once up front; it must stay stable for
        # the lifetime of the wrapper.
        self.hash = shape.hashCode()
        self.Shape = shape

    def __eq__(self, other):
        # Two wrappers compare equal iff OCC considers the shapes the same.
        return self.Shape.isSame(other.Shape)

    def __hash__(self):
        return self.hash
class HashableShape_Deep(object):
    """Like HashableShape, except that the shapes are compared by the
    elements they are composed of.

    Example:
    >>> wire2 = Part.Wire(wire1.childShapes())
    >>> wire2.isSame(wire1)
    False # <--- the wire2 is a new wire, although made of edges of wire1
    >>> HashableShape_Deep(wire2) == HashableShape_Deep(wire1)
    True # <--- made of same set of elements
    """

    def __init__(self, shape):
        self.Shape = shape
        digest = 0
        # Fold the children's hash codes together; XOR makes the result
        # independent of child ordering.
        for child in shape.childShapes():
            digest ^= child.hashCode()
        self.hash = digest

    def __eq__(self, other):
        # Cheap probabilistic equality: matching hash, child count and
        # shape type keep false positives unlikely without a full
        # element-by-element comparison.
        return (
            self.hash == other.hash
            and len(self.Shape.childShapes()) == len(other.Shape.childShapes())
            and self.Shape.ShapeType == other.Shape.ShapeType
        )

    def __hash__(self):
        return self.hash
def compoundLeaves(shape_or_compound):
    """compoundLeaves(shape_or_compound): extracts all non-compound shapes from a nested compound.

    Note: shape_or_compound may be a non-compound; then, it is the only thing in the
    returned list."""

    # A non-compound is itself a leaf.
    if shape_or_compound.ShapeType != "Compound":
        return [shape_or_compound]
    result = []
    for sub in shape_or_compound.childShapes():
        result += compoundLeaves(sub)
    return result
def upgradeToAggregateIfNeeded(list_of_shapes, types=None):
    """upgradeToAggregateIfNeeded(list_of_shapes, types = None): upgrades non-aggregate type
    shapes to aggregate-type shapes if the list has a mix of aggregate and non-aggregate
    type shapes. Returns the new list. Recursively traverses into compounds.

    aggregate shape types are Wire, Shell, CompSolid
    non-aggregate shape types are Vertex, Edge, Face, Solid
    Compounds are something special: they are recursively traversed to upgrade the
    contained shapes.

    Examples:
    list_of_shapes contains only faces -> nothing happens
    list_of_shapes contains faces and shells -> faces are converted to shells

    'types' argument is needed for recursive traversal. Do not supply."""

    import Part

    # On the outer call, collect every shape type present in the list,
    # including the leaves of nested compounds. Recursive calls receive
    # this same set, so the whole tree is upgraded consistently.
    if types is None:
        types = set()
        for shape in list_of_shapes:
            types.add(shape.ShapeType)
            subshapes = compoundLeaves(shape)
            for subshape in subshapes:
                types.add(subshape.ShapeType)
    # Each upgrade only applies when the corresponding aggregate type
    # appears somewhere in the mix; other shapes pass through unchanged.
    if "Wire" in types:
        list_of_shapes = [
            (Part.Wire([shape]) if shape.ShapeType == "Edge" else shape)
            for shape in list_of_shapes
        ]
    if "Shell" in types:
        list_of_shapes = [
            (Part.Shell([shape]) if shape.ShapeType == "Face" else shape)
            for shape in list_of_shapes
        ]
    if "CompSolid" in types:
        list_of_shapes = [
            (Part.CompSolid([shape]) if shape.ShapeType == "Solid" else shape)
            for shape in list_of_shapes
        ]
    # Compounds are rebuilt with their contents upgraded recursively,
    # reusing the type set collected above.
    if "Compound" in types:
        list_of_shapes = [
            (
                Part.Compound(upgradeToAggregateIfNeeded(shape.childShapes(), types))
                if shape.ShapeType == "Compound"
                else shape
            )
            for shape in list_of_shapes
        ]
    return list_of_shapes
# adapted from http://stackoverflow.com/a/3603824/6285007
class FrozenClass(object):
    """FrozenClass: prevents adding new attributes to class outside of __init__"""

    __isfrozen = False

    def __setattr__(self, name, val):
        # Once frozen, only attributes that already exist may be rebound.
        if self.__isfrozen and not hasattr(self, name):
            raise TypeError(
                "{cls} has no attribute {attr}".format(
                    cls=type(self).__name__, attr=name
                )
            )
        object.__setattr__(self, name, val)

    def _freeze(self):
        self.__isfrozen = True

    def _unfreeze(self):
        self.__isfrozen = False
|
docs | make_numbered_listing | #!/usr/bin/env python
import os
import os.path
import sys
from optparse import OptionParser
def quote_line(line):
    """Escape XML-special characters in *line* with character entities."""
    # '&' must be replaced first so the entities introduced below are
    # not themselves re-escaped.
    for char, entity in (
        ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        ("'", "&#39;"),
        ('"', "&quot;"),
    ):
        line = line.replace(char, entity)
    return line
def generate_listing(input_filename, title=None):
    """Convert *input_filename* into a numbered DocBook <programlisting>.

    The output is written to ``<basename of input>.xml`` in the current
    directory. ``title`` is currently unused; it is kept for interface
    compatibility with the commented-out <example>/<title> wrapper.
    """
    output_filename = os.path.basename(input_filename) + ".xml"
    # Context managers guarantee both handles are closed even on error
    # (previously neither file object was ever closed).
    with open(input_filename, "r") as inf, open(output_filename, "w") as outf:
        outf.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        # outf.write ('<example id="%s">\n' % (input_filename,))
        # if not title:
        #     title = input_filename
        # outf.write ('<title>%s</title>\n' % title)
        outf.write("<programlisting>\n")
        for lineno, line in enumerate(inf, start=1):
            # Expand tabs before escaping so column alignment is preserved.
            line = quote_line(line.expandtabs(8))
            outf.write("%3d %s" % (lineno, line))
        outf.write("</programlisting>\n")
        # outf.write ('</example>\n')
def main():
    """Generate a numbered XML listing for every file on the command line."""
    # Renamed the loop variable from 'file', which shadowed the builtin.
    for filename in sys.argv[1:]:
        generate_listing(filename)


if __name__ == "__main__":
    main()
|
neubot | http_clnt | # neubot/http_clnt.py
#
# Copyright (c) 2011-2012 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
""" HTTP client """
# Adapted from neubot/http/stream.py
# Python3-ready: yes
import collections
import getopt
import logging
import sys
if __name__ == "__main__":
sys.path.insert(0, ".")
from neubot import six, utils_version
from neubot.brigade import Brigade
from neubot.handler import Handler
from neubot.poller import POLLER
from neubot.stream import Stream
# Parser limits (bytes): maximum protocol-line length, body piece size,
# per-read file chunk, and per-call socket receive size.
MAXLINE = 512
MAXPIECE = 524288
MAXREAD = 8000
MAXRECEIVE = 262144
# Pre-built byte-string constants; six.b() keeps the module compatible
# with both Python 2 and Python 3 string models.
CHUNKED = six.b("chunked")
CLOSE = six.b("close")
CODE204 = six.b("204")
CODE304 = six.b("304")
COLON = six.b(":")
COMMASPACE = six.b(", ")
CONNECTION = six.b("connection")
CONTENT_LENGTH = six.b("content-length")
CRLF = six.b("\r\n")
EMPTY_STRING = six.b("")
HEAD = six.b("HEAD")
HTTP_PREFIX = six.b("HTTP/")
HTTP10 = six.b("HTTP/1.0")
HTTP11 = six.b("HTTP/1.1")
LAST_CHUNK = six.b("0\r\n")
ONE = six.b("1")
SPACE = six.b(" ")
TAB = six.b("\t")
TRANSFER_ENCODING = six.b("transfer-encoding")
class ClientContext(Brigade):
    """HTTP client context: per-connection parser and output state."""

    def __init__(self, extra, connection_made, connection_lost):
        Brigade.__init__(self)
        self.outq = []                  # queued output pieces (bytes), joined on send
        self.outfp = None               # optional file object streamed after the queue
        self.method = EMPTY_STRING      # request method of the outgoing request
        self.protocol = EMPTY_STRING    # response protocol token (e.g. HTTP/1.1)
        self.code = EMPTY_STRING        # response status code (bytes)
        self.reason = EMPTY_STRING      # response reason phrase (bytes)
        self.headers = {}               # parsed response headers
        self.last_hdr = EMPTY_STRING    # name of the most recently parsed header
        self.body = None                # file-like sink for the response body (None = discard)
        self.handle_piece = None        # callback invoked with each body piece
        self.handle_line = None         # callback invoked with the next protocol line
        self.left = 0                   # body bytes still expected (0 = line mode)
        self.extra = extra              # opaque user data
        self.connection_made = connection_made    # user callback on stream ready
        self.connection_lost = connection_lost    # user callback on stream closed
class HttpClient(Handler):
"""HTTP client"""
#
# Setup. The user should implement handle_connect() and invoke the
# create_stream() function to setup a new stream. Stream creation is
# wrapped by a function because the HTTP code must kick off the HTTP
# receiver once the connection is ready. Indeed, the receiver is "ON"
# during the whole connection lifetime, so that EOF and RST can be
# detected immediately (the underlying socket is in select() read set
# as long as the HTTP code keeps receiving).
#
    def create_stream(
        self, sock, connection_made, connection_lost, sslconfig, sslcert, extra
    ):
        """Creates an HTTP stream.

        The user callbacks are stored in a per-connection ClientContext;
        the Stream itself is wired to our internal handlers, which kick
        off the HTTP receiver before invoking the user's callback.
        """
        logging.debug("http_clnt: stream setup... in progress")
        context = ClientContext(extra, connection_made, connection_lost)
        Stream(
            sock,
            self._handle_connection_made,
            self._handle_connection_lost,
            sslconfig,
            sslcert,
            context,
        )
    def _handle_connection_made(self, stream):
        """Internally handles the CONNECTION_MADE event"""
        context = stream.opaque
        # Arm the receiver immediately: it stays on for the connection's
        # whole lifetime so EOF/RST are detected right away.
        stream.recv(MAXRECEIVE, self._handle_data)  # Kick receiver off
        # The first line we expect from the server is the status line.
        context.handle_line = self._handle_firstline
        logging.debug("http_clnt: stream setup... complete")
        context.connection_made(stream)
@staticmethod
def _handle_connection_lost(stream):
"""Internally handles the CONNECTION_LOST event"""
context = stream.opaque
if context.connection_lost:
context.connection_lost(stream)
#
# Send path. This section provides methods to append stuff to the internal
# output buffer, including an open file handle. The user is expected to
# append everything needed to make an HTTP message, and, once that is done,
# he/she is expected to invoke the send_message() to start sending the whole
# message to the other end. Once done, the handle_send_complete() function
# is invoked (this is an empty method that the user may want to override.)
#
@staticmethod
def append_request(stream, method, uri, protocol):
"""Append request to output buffer"""
context = stream.opaque
logging.debug("> %s %s %s", method, uri, protocol)
context.method = six.b(method)
context.outq.append(six.b(method))
context.outq.append(SPACE)
context.outq.append(six.b(uri))
context.outq.append(SPACE)
context.outq.append(six.b(protocol))
context.outq.append(CRLF)
@staticmethod
def append_header(stream, name, value):
"""Append header to output buffer"""
context = stream.opaque
logging.debug("> %s: %s", name, value)
context.outq.append(six.b(name))
context.outq.append(COLON)
context.outq.append(SPACE)
context.outq.append(six.b(value))
context.outq.append(CRLF)
@staticmethod
def append_end_of_headers(stream):
"""Append end-of-headers (an empty line) to output buffer"""
context = stream.opaque
logging.debug(">")
context.outq.append(CRLF)
@staticmethod
def append_string(stream, string):
"""Append string to output buffer"""
context = stream.opaque
context.outq.append(six.b(string))
@staticmethod
def append_bytes(stream, bytez):
"""Append bytes to output buffer"""
context = stream.opaque
context.outq.append(bytez)
@staticmethod
def append_chunk(stream, bytez):
"""Append chunk to output buffer"""
context = stream.opaque
logging.debug("> {chunk len=%d}", len(bytez))
context.outq.append(six.b("%x\r\n" % len(bytez)))
context.outq.append(bytez)
context.outq.append(CRLF)
@staticmethod
def append_last_chunk(stream):
"""Append last-chunk to output buffer"""
context = stream.opaque
logging.debug("> {last-chunk}")
context.outq.append(LAST_CHUNK)
@staticmethod
def append_file(stream, filep):
"""Append file to output buffer"""
context = stream.opaque
logging.debug("> {file}")
context.outfp = filep
def send_message(self, stream):
"""Send output buffer content to the other end"""
context = stream.opaque
string = EMPTY_STRING.join(context.outq)
stream.send(string, self._handle_send_complete)
context.outq = []
def _handle_send_complete(self, stream):
"""Internally handles the SEND_COMPLETE event"""
context = stream.opaque
if context.outfp:
bytez = context.outfp.read(MAXREAD)
if bytez:
# Note: honor the queue, don't send directly
context.outq.append(bytez)
self.send_message(stream)
return
context.outfp = None
self.handle_send_complete(stream)
def handle_send_complete(self, stream):
    """Hook fired once the whole output buffer was sent; no-op by
    default, subclasses override it."""
#
# Receive path. The receiver is always active and the user is expected to
# handle the end of headers and end of body events, by overriding the
# corresponding functions. One caveat: the decision whether a response
# is expected or not is left to the user (typically you expect a response
# after a full request was sent, but there are exceptions, e.g. the "100
# Continue" case). Also, it should be noted that, by default, response
# body is discarded: the user is expected to override context.body and
# point it to a file-like object, if he/she wants to save it.
#
def _handle_data(self, stream, bytez):
    """Handles the DATA event"""
    # Feed freshly received bytes into the receive buffer, then consume
    # them either as body pieces (context.left > 0) or as protocol lines
    # (context.left == 0) until no complete unit remains buffered.
    context = stream.opaque
    context.bufferise(bytez)
    while True:
        if context.left > 0:
            # Body mode: pull at most `left` buffered bytes (capped).
            tmp = context.pullup(min(context.left, MAXRECEIVE))
            if not tmp:
                break  # need more data from the network
            context.left -= len(tmp)  # MUST be before got_piece()
            context.handle_piece(stream, tmp)
        elif context.left == 0:
            # Line mode: pull one complete line, bounded by MAXLINE.
            tmp = context.getline(MAXLINE)
            if not tmp:
                break  # no complete line buffered yet
            context.handle_line(stream, tmp)
        else:
            raise RuntimeError("negative context.left")
    if not stream.isclosed:
        # Keep the receiver armed for the next batch of data.
        stream.recv(MAXRECEIVE, self._handle_data)
def _handle_firstline(self, stream, line):
    """Parse the HTTP response status line (e.g. 'HTTP/1.1 200 OK')
    and switch the line handler to header parsing.

    Raises RuntimeError when the line is malformed or the protocol
    is not HTTP/1.0 or HTTP/1.1.
    """
    context = stream.opaque
    line = line.rstrip()
    logging.debug("< %s", six.bytes_to_string_safe(line, "utf-8"))
    # Split in at most three parts: the reason phrase may contain spaces.
    vector = line.split(None, 2)
    if len(vector) != 3:
        raise RuntimeError("http_clnt: invalid first line")
    context.protocol = vector[0]
    if not context.protocol.startswith(HTTP_PREFIX):
        raise RuntimeError("http_clnt: invalid protocol")
    if context.protocol not in (HTTP11, HTTP10):
        # Fixed typo in the error message ("unsuppored")
        raise RuntimeError("http_clnt: unsupported protocol")
    context.code = vector[1]
    context.reason = vector[2]
    # Reset header-parsing state and start collecting headers.
    context.last_hdr = EMPTY_STRING
    context.headers = {}
    context.handle_line = self._handle_header
def _handle_header(self, stream, line):
    """Dispatch one response-header line; the empty line triggers
    handle_end_of_headers()."""
    self._handle_header_ex(stream, line, self.handle_end_of_headers)
@staticmethod
def _handle_header_ex(stream, line, handle_done):
    """Handles the HEADER_EX event"""
    # Generic header-line parser shared by the header and the trailer
    # readers: *handle_done* is invoked on the empty line that ends the
    # header (or trailer) section.
    context = stream.opaque
    line = line.rstrip()
    if not line:
        logging.debug("<")
        handle_done(stream)
        return
    logging.debug("< %s", six.bytes_to_string_safe(line, "utf-8"))
    # Note: must precede header parsing to permit colons in folded line(s)
    if context.last_hdr and line[0:1] in (SPACE, TAB):
        # Folded continuation line (obs-fold): merge into previous header.
        value = context.headers[context.last_hdr]
        value += SPACE
        value += line.strip()
        # Note: make sure there are no leading or trailing spaces
        context.headers[context.last_hdr] = value.strip()
        return
    index = line.find(COLON)
    if index >= 0:
        name, value = line.split(COLON, 1)
        name = name.strip().lower()  # header names are case-insensitive
        value = value.strip()
        if name not in context.headers:
            context.headers[name] = value
        else:
            #
            # "Multiple message-header fields with the same field-name MAY
            # be present in a message if and only if the entire field-value
            # for that header field is defined as a comma-separated list
            # [i.e., #(values)]. It MUST be possible to combine the multiple
            # header fields into one "field-name: field-value" pair, without
            # changing the semantics of the message, by appending each
            # subsequent field-value to the first, each separated by
            # a comma." (RFC2616, sect. 4.2)
            #
            context.headers[name] += COMMASPACE
            context.headers[name] += value
        context.last_hdr = name
        return
    #
    # N.B. I have observed that this error is often triggered when one
    # overrides handle_end_of_body() and forgets to invoke the parent class
    # method, which resets the line reader.
    #
    raise RuntimeError("http_clnt: internal error #2")
def handle_end_of_headers(self, stream):
    """Handle END_OF_HEADERS event"""
    #
    # "[...] All responses to the HEAD request method MUST NOT include a
    # message-body, even though the presence of entity-header fields might
    # lead one to believe they do. All 1xx (informational), 204 (no content)
    # and 304 (not modified) responses MUST NOT include a message-body. All
    # other responses do include a message-body, although it MAY be of zero
    # length." (RFC2616, sect. 4.3)
    #
    context = stream.opaque
    # Bodyless responses: HEAD requests, 1xx, 204 and 304 status codes.
    if (
        context.method == HEAD
        or context.code[0:1] == ONE
        or context.code == CODE204
        or context.code == CODE304
    ):
        logging.debug("http_clnt: expecting no message body")
        self.handle_end_of_body(stream)
        return
    # Chunked transfer coding takes precedence over Content-Length.
    if context.headers.get(TRANSFER_ENCODING) == CHUNKED:
        logging.debug("http_clnt: expecting chunked message body")
        context.handle_line = self._handle_chunklen
        return
    tmp = context.headers.get(CONTENT_LENGTH)
    if tmp:
        length = int(tmp)
        if length > 0:
            logging.debug("http_clnt: expecting bounded message body")
            context.handle_piece = self._handle_piece_bounded
            context.left = length
            return
        if length == 0:
            logging.debug("http_clnt: expecting no message body")
            self.handle_end_of_body(stream)
            return
        raise RuntimeError("http_clnt: invalid content length")
    # Neither chunked nor Content-Length: read until the peer closes.
    logging.debug("http_clnt: expecting unbounded message body")
    context.handle_piece = self._handle_piece_unbounded
    context.left = MAXPIECE
def _handle_chunklen(self, stream, line):
    """Parse the chunk-size line of a chunked response body.

    A zero size is the last-chunk: switch to trailer parsing.
    Otherwise arrange to read that many bytes of chunk data.

    Raises RuntimeError on an empty or negative chunk-size line.
    """
    context = stream.opaque
    vector = line.split()
    if not vector:
        raise RuntimeError("http_clnt: bad chunk-length line")
    # RFC 7230 allows chunk extensions after the size, separated by ';'
    # (possibly with no surrounding whitespace): ignore them, otherwise
    # int(..., 16) would choke on e.g. "5;name=value".
    tmp = int(vector[0].split(six.b(";"))[0], 16)
    if tmp < 0:
        raise RuntimeError("http_clnt: negative chunk-length")
    elif tmp == 0:
        context.handle_line = self._handle_trailer
        logging.debug("< {last-chunk/}")
    else:
        context.left = tmp
        context.handle_piece = self._handle_piece_chunked
        logging.debug("< {chunk len=%d}", tmp)
def _handle_chunkend(self, stream, line):
    """Consume the bare CRLF that terminates a chunk's data and go
    back to reading the next chunk-size line."""
    context = stream.opaque
    if line.strip():
        raise RuntimeError("http_clnt: bad chunk-end line")
    logging.debug("< {/chunk}")
    context.handle_line = self._handle_chunklen
def _handle_trailer(self, stream, line):
    """Parse trailer lines after the last-chunk; the empty line fires
    handle_end_of_body()."""
    self._handle_header_ex(stream, line, self.handle_end_of_body)
def _handle_piece_unbounded(self, stream, piece):
    """Deliver a piece of an unbounded (read-until-close) body."""
    self.handle_piece(stream, piece)
    # Rearm so we keep reading until the connection is closed
    stream.opaque.left = MAXPIECE
def _handle_piece_bounded(self, stream, piece):
    """Deliver a piece of a Content-Length-bounded body and fire
    end-of-body once the expected byte count has been consumed."""
    context = stream.opaque
    self.handle_piece(stream, piece)
    if not context.left:
        self.handle_end_of_body(stream)
def _handle_piece_chunked(self, stream, piece):
    """Deliver a piece of chunk data; once the chunk is exhausted,
    switch to reading its terminating CRLF."""
    context = stream.opaque
    self.handle_piece(stream, piece)
    if not context.left:
        context.handle_line = self._handle_chunkend
@staticmethod
def handle_piece(stream, piece):
    """Deliver one piece of the response body: written to context.body
    when the user set one, silently discarded otherwise."""
    sink = stream.opaque.body
    if sink:
        sink.write(piece)
def handle_end_of_body(self, stream):
    """Hook fired when the response body is complete.

    Resets the line handler so a new response can be parsed. It is the
    user's responsibility to close the stream here when appropriate
    (i.e. HTTP/1.0, or the "Connection" header is "close").
    """
    stream.opaque.handle_line = self._handle_firstline  # restart parser
class HttpClientSmpl(HttpClient):
    """Simple HTTP client"""
    # Downloads each path in `extra` over one reused connection and
    # prints the bodies on standard output.

    def handle_connect(self, connector, sock, rtt, sslconfig, extra):
        # Socket connected: wrap it in a stream; the stream calls
        # connection_made() once it is ready.
        self.create_stream(sock, self.connection_made, None, sslconfig, None, extra)

    def connection_made(self, stream):
        """Invoked when the connection is established"""
        # extra is (address, port, deque-of-paths, [outstanding-count])
        context = stream.opaque
        address, port, paths, cntvec = context.extra
        if not paths:
            # Nothing (left) to request: we are done with this stream
            stream.close()
            return
        # Emit one GET request for the next queued path
        self.append_request(stream, "GET", paths.popleft(), "HTTP/1.1")
        self.append_header(stream, "Host", "%s:%s" % (address, port))
        self.append_header(stream, "User-Agent", utils_version.HTTP_HEADER)
        self.append_header(stream, "Cache-Control", "no-cache")
        self.append_header(stream, "Pragma", "no-cache")
        self.append_end_of_headers(stream)
        self.send_message(stream)
        context.body = self  # Want to print the body
        cntvec[0] += 1  # one more response is now expected

    def handle_end_of_body(self, stream):
        # Response complete: account for it, then either close the
        # stream or issue the next request on the same connection.
        HttpClient.handle_end_of_body(self, stream)
        context = stream.opaque
        cntvec = context.extra[3]
        if cntvec[0] <= 0:  # No unexpected responses
            raise RuntimeError("http_dload: unexpected response")
        cntvec[0] -= 1
        sys.stdout.flush()
        # XXX ignoring the "Connection" header for HTTP/1.0
        if context.protocol == HTTP10 or context.headers.get(CONNECTION) == CLOSE:
            stream.close()
            return
        self.connection_made(stream)

    def write(self, data):
        """Write data on standard output"""
        # Remember that with Python 3 we need to decode data
        sys.stdout.write(six.bytes_to_string(data, "utf-8"))
USAGE = "usage: neubot http_clnt [-6Sv] [-A address] [-p port] path..."
def main(args):
    """Command-line entry point: parse options, fetch the given paths.

    Exits with the usage string on bad options or missing paths.
    """
    try:
        options, arguments = getopt.getopt(args[1:], "6A:p:Sv")
    except getopt.error:
        sys.exit(USAGE)
    if not arguments:
        sys.exit(USAGE)

    # Defaults: plain HTTP to localhost:80, IPv4 preferred, INFO logs
    prefer_ipv6 = 0
    address = "127.0.0.1"
    sslconfig = 0
    port = 80
    level = logging.INFO

    for flag, optarg in options:
        if flag == "-6":
            prefer_ipv6 = 1
        elif flag == "-A":
            address = optarg
        elif flag == "-p":
            port = int(optarg)
        elif flag == "-S":
            sslconfig = 1
        elif flag == "-v":
            level = logging.DEBUG

    logging.getLogger().setLevel(level)

    client = HttpClientSmpl()
    client.connect(
        (address, port),
        prefer_ipv6,
        sslconfig,
        # extra = (address, port, paths to fetch, outstanding counter)
        (address, port, collections.deque(arguments), [0]),
    )
    POLLER.loop()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main(sys.argv)
|
addons | XFileSharing | # -*- coding: utf-8 -*-
import inspect
import re
from ..base.addon import BaseAddon
class XFileSharing(BaseAddon):
    # Addon that dynamically rewrites the URL patterns of the generic
    # XFileSharing downloader/decrypter plugins, so they match either a
    # configured/built-in domain list or any not-yet-known XFS-like URL.
    __name__ = "XFileSharing"
    __type__ = "addon"
    __version__ = "0.56"
    __status__ = "testing"

    __config__ = [
        ("enabled", "bool", "Activated", False),
        ("use_downloader_list", "bool", "Load listed hosters only", False),
        ("use_decrypter_list", "bool", "Load listed crypters only", False),
        ("use_builtin_list", "bool", "Load built-in plugin list", True),
        ("hoster_list", "str", "Downloader list (comma separated)", ""),
        ("crypter_list", "str", "Decrypter list (comma separated)", ""),
    ]

    __description__ = (
        """Load XFileSharing hosters and crypters which don't need a own plugin"""
    )
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    # Pattern templates per plugin type. Index 0 is the "blacklist" form:
    # match ANY domain EXCEPT the given (already covered) ones. Index 1 is
    # the "whitelist" form: match ONLY the given domains.
    _regexmap = {
        "downloader": (
            r"(?:https?://(?:www\.)?)(?!(?:www\.)?(?:{}))(?P<DOMAIN>(?:[\d.]+|[\w\-^_]{{3,63}}(?:\.[a-zA-Z]{{2,}})+)(?:\:\d+)?)/(?:embed-)?\w{{12}}(?:\W|$)",
            r"https?://(?:[^/]+\.)?(?P<DOMAIN>{})/(?:embed-)?\w+",
        ),
        "decrypter": (
            r"(?:https?://(?:www\.)?)(?!(?:www\.)?(?:{}))(?P<DOMAIN>(?:[\d.]+|[\w\-^_]{{3,63}}(?:\.[a-zA-Z]{{2,}})+)(?:\:\d+)?)/(?:user|folder)s?/\w+",
            r"https?://(?:[^/]+\.)?(?P<DOMAIN>{})/(?:user|folder)s?/\w+",
        ),
    }

    # Domains handled by the generic downloader when 'use_builtin_list'
    # is enabled (status per the original maintainers' notes below).
    BUILTIN_HOSTERS = [  #: WORKING HOSTERS:
        "ani-stream.com",
        "backin.net",
        "cloudshares.net",
        "cloudsix.me",
        "eyesfile.ca",
        "file4safe.com",
        "fileband.com",
        "filedwon.com",
        "fileparadox.in",
        "filevice.com",
        "hostingbulk.com",
        "junkyvideo.com",
        "ravishare.com",
        "salefiles.com",
        "sendmyway.com",
        "sharebeast.com",
        "sharesix.com",
        "thefile.me",
        "verzend.be",
        "worldbytez.com",
        "xvidstage.com",
        # NOT TESTED:
        "101shared.com",
        "4upfiles.com",
        "filemaze.ws",
        "filenuke.com",
        "linkzhost.com",
        "mightyupload.com",
        "rockdizfile.com",
        "sharerepo.com",
        "shareswift.com",
        "uploadbaz.com",
        "uploadc.com",
        "vidbull.com",
        "zalaa.com",
        "zomgupload.com",
        # NOT WORKING:
        "amonshare.com",
        "banicrazy.info",
        "boosterking.com",
        "host4desi.com",
        "laoupload.com",
        "rd-fs.com",
    ]

    # Folder/user-link domains handled by the generic decrypter.
    BUILTIN_CRYPTERS = ["junocloud.me", "rapidfileshare.net"]

    def activate(self):
        # Install the dynamic patterns on the two generic XFS plugins.
        for type, plugin in (
            ("downloader", "XFileSharing"),
            ("decrypter", "XFileSharingFolder"),
        ):
            self._load(type, plugin)

    def deactivate(self):
        # Restore unmatchable patterns so the generic plugins go dormant.
        for type, plugin in (
            ("downloader", "XFileSharing"),
            ("decrypter", "XFileSharingFolder"),
        ):
            self._unload(type, plugin)

    def get_pattern(self, type, plugin):
        """Build the URL regex for *type* ('downloader' or 'decrypter').

        Returns the pattern string, or None when the configured list is
        empty and there is nothing to handle. With 'use_<type>_list'
        enabled only the listed (plus optionally built-in) domains are
        matched; otherwise any domain NOT already covered by an existing
        XFS-based plugin is matched.
        """
        if self.config.get("use_{}_list".format(type)):
            # Normalize the user list: strip spaces/backslashes and accept
            # '|' or ';' as alternative separators, then lowercase+split.
            plugin_list = self.config.get("{}_list".format(type))
            plugin_list = plugin_list.replace(" ", "").replace("\\", "")
            plugin_list = plugin_list.replace("|", ",").replace(";", ",")
            plugin_list = plugin_list.lower().split(",")
            plugin_set = set(plugin_list)

            if self.config.get("use_builtin_list"):
                builtin_list = getattr(self, "BUILTIN_{}S".format(type.upper()))
                plugin_set.update(builtin_list)

            plugin_set.difference_update(("", ""))  # drop empty entries

            if not plugin_set:
                self.log_info(self._("No {} to handle").format(type))
                return

            match_list = "|".join(sorted(plugin_set)).replace(".", r"\.")
            # Whitelist template: match only the listed domains.
            pattern = self._regexmap[type][1].format(match_list)

            self.log_info(
                self._("Handle {} {}{}: {}").format(
                    len(plugin_set),
                    type,
                    "" if len(plugin_set) == 1 else "s",
                    match_list.replace(r"\.", ".").replace("|", ", "),
                )
            )
        else:
            plugin_list = []
            # True when *klass* derives (directly or not) from an XFS base.
            is_xfs = lambda klass: any(
                k.__name__.startswith("XFS") for k in inspect.getmro(klass)
            )
            # Collect the domains already covered by dedicated XFS plugins,
            # so the blacklist pattern can exclude them.
            for p in self.pyload.plugin_manager.plugins[type].values():
                try:
                    klass = self.pyload.plugin_manager.load_class(type, p["name"])
                except AttributeError as exc:
                    self.log_debug(
                        exc,
                        exc_info=self.pyload.debug > 1,
                        stack_info=self.pyload.debug > 2,
                    )
                    continue
                if (
                    hasattr(klass, "PLUGIN_DOMAIN")
                    and klass.PLUGIN_DOMAIN
                    and is_xfs(klass)
                ):
                    plugin_list.append(klass.PLUGIN_DOMAIN)

            unmatch_list = "|".join(sorted(plugin_list)).replace(".", r"\.")
            # Blacklist template: match any domain except the known ones.
            pattern = self._regexmap[type][0].format(unmatch_list)

            self.log_info(self._("Auto-discover new {}s").format(type))

        return pattern

    def _load(self, type, plugin):
        # Overwrite the generic plugin's registered pattern (and its
        # compiled form) with the computed one; keep the old pattern
        # when there is nothing to handle.
        dict = self.pyload.plugin_manager.plugins[type][plugin]
        pattern = self.get_pattern(type, plugin)
        if not pattern:
            return
        dict["pattern"] = pattern
        dict["re"] = re.compile(pattern)
        self.log_debug(f"Pattern for {type}: {pattern}")

    def _unload(self, type, plugin):
        # Reset the generic plugin to a pattern that can never match.
        dict = self.pyload.plugin_manager.plugins[type][plugin]
        dict["pattern"] = r"^unmatchable$"
        dict["re"] = re.compile(dict["pattern"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.